Wednesday, May 29, 2019

Artificial Intelligence: Python Source Code, Part 8

import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import cifar10

# Load CIFAR-10: 50,000 training and 10,000 test images (32x32 pixels, RGB)
(xTrain, yTrain), (xTest, yTest) = cifar10.load_data()
noOfClasses = 10
im = []
# Show the first sample of each of the ten classes
fig = plt.figure()
for i in range(noOfClasses):
    ax = fig.add_subplot(2, 5, i+1, xticks=[], yticks=[])
    first = np.flatnonzero(yTrain == i)[0]  # index of the first sample of class i
    im.append(xTrain[first,:,:,:])
    ax.set_title(i)
    ax.imshow(im[i])
plt.show()
from keras.utils import np_utils
# One-hot encode the labels and scale the pixel values to [0,1]
YTrain = np_utils.to_categorical(yTrain, noOfClasses)
YTest  = np_utils.to_categorical(yTest, noOfClasses)
XTrain = xTrain/255.0
XTest  = xTest/255.0
from keras.models import Sequential
from keras import layers
from keras.regularizers import l2
l2Reg = 0.001  # strength of the L2 weight penalty
# CNN: four Conv+MaxPool blocks, followed by two dense layers and a softmax output
CNN = Sequential()
CNN.add(layers.Conv2D(32,(3,3),padding='same',activation='relu',kernel_regularizer=l2(l2Reg),input_shape=(32,32,3)))
CNN.add(layers.MaxPool2D(pool_size=(2, 2),padding='same'))
CNN.add(layers.Conv2D(32,(3,3),padding='same',activation='relu',kernel_regularizer=l2(l2Reg)))
CNN.add(layers.MaxPool2D(pool_size=(2, 2),padding='same'))
CNN.add(layers.Conv2D(64,(3,3),padding='same',activation='relu',kernel_regularizer=l2(l2Reg)))
CNN.add(layers.MaxPool2D(pool_size=(2, 2),padding='same'))
CNN.add(layers.Conv2D(64,(3,3),padding='same',activation='relu',kernel_regularizer=l2(l2Reg)))
CNN.add(layers.MaxPool2D(pool_size=(2, 2),padding='same'))
CNN.add(layers.Flatten())
CNN.add(layers.Dense(512,activation='relu',kernel_regularizer=l2(l2Reg)))
CNN.add(layers.Dense(256,activation='relu',kernel_regularizer=l2(l2Reg)))
CNN.add(layers.Dense(10,activation='softmax'))
CNN.summary()
CNN.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
CNN.fit(XTrain,YTrain,epochs=20,batch_size=64)
scores = CNN.evaluate(XTest,YTest,batch_size=64)
print("Accuracy: %.2f%%" % (scores[1]*100))
yPred = CNN.predict(XTest)
choice = np.argmax(yPred, axis=1)  # predicted class for every test sample
# Confusion matrix: rows are the true classes, columns the predicted classes
konfusionMatrix = np.zeros((noOfClasses,noOfClasses))
for i in range(noOfClasses):
    index = np.flatnonzero(yTest == i)
    for j in range(noOfClasses):
        index2 = np.flatnonzero(choice[index] == j)
        konfusionMatrix[i,j] = len(index2)
print(konfusionMatrix)
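The diagonal of konfusionMatrix holds the correctly classified samples of each class. As a minimal sketch (pure NumPy, using only the matrix built above), the per-class accuracy can be read off directly:

# Per-class accuracy: diagonal entry divided by the row sum (count of true samples)
perClassAcc = np.diag(konfusionMatrix) / konfusionMatrix.sum(axis=1)
for i in range(noOfClasses):
    print("Class %d: %.2f%%" % (i, 100*perClassAcc[i]))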
from keras import backend as K
def outputMosaik(imSingle, outLayer):
    # Add a batch dimension so the single image fits the network input
    pic = imSingle[np.newaxis,...]
    # Function that maps the network input to the output of the chosen layer
    outputSingleLayer = K.function([CNN.layers[0].input],[CNN.layers[outLayer].output])
    picFilter = outputSingleLayer([pic])[0]
    # Arrange all feature maps of the layer in a grid (4x8 for 32 filters, 8x8 for 64)
    gridy = 8
    gridx = 4 if outLayer < 4 else 8
    size = picFilter[0,:,:,0].shape[0]
    mosaik = np.zeros( (gridx*size,gridy*size) )
    for l in range(0,picFilter.shape[3]):
        x = int(np.floor(l / gridy))
        y = l%gridy
        mosaik[x*size:(x+1)*size,y*size:(y+1)*size] = picFilter[0,:,:,l]
    plt.figure()
    plt.imshow(mosaik,cmap='binary')
    plt.show()
outputMosaik(im[6]/255.0, 2)  # scale the raw image like the training data
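In this model the convolutional layers sit at indices 0, 2, 4 and 6, each followed by a pooling layer, so all of them can be inspected with a small loop; a minimal sketch (my own addition):

# Visualize the feature maps of every convolutional layer for the same sample
for convLayer in [0, 2, 4, 6]:
    outputMosaik(im[6]/255.0, convLayer)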
:::::::::::::::::::::::::::::::::::::::::::::::::
import numpy as np
def softmax(z):
    # Map raw scores to a probability distribution
    return np.exp(z)/np.sum(np.exp(z))
def crossEntropy(p,q):
    # Cross-entropy between the target distribution p and the prediction q
    return -np.sum(p*np.log(q))
mseZ = 0.0  # mean squared error on the raw scores z
mseP = 0.0  # mean squared error on the softmax outputs
croE = 0.0  # cross-entropy on the raw scores
# One-hot targets: sample 0 -> class 0, sample 1 -> class 1, sample 2 -> class 0
r = np.zeros((3,3))
r[0,0] = 1; r[1,1] = 1; r[2,0] = 1
# First set of raw network outputs
z = np.zeros((3,3))
z[0] = np.array([0.9, 0.2, 0.1])
z[1] = np.array([0.1, 0.9, 0.2])
z[2] = np.array([0.7, 0.3, 0.5])
# Accumulate the three error measures over the three samples
for i in range(3):
    mseZ += np.sum( (r[i]-z[i])**2 )
    p = softmax(z[i])
    mseP += np.sum( (r[i]-p)**2 )
    croE += crossEntropy(r[i],z[i])
    print(p.round(2), np.sum(p))
# Average over the nine entries
mseZ = mseZ/9
mseP = mseP/9
croE = croE/9
print(mseZ.round(2), mseP.round(2), croE.round(2))

# Second set of raw outputs; the accumulators must be reset first,
# otherwise the already averaged values from above are carried over
mseZ = 0.0
mseP = 0.0
croE = 0.0
z[0] = np.array([0.6, 0.1, 0.1])
z[1] = np.array([0.2, 0.8, 0.1])
z[2] = np.array([0.5, 0.4, 0.4])
for i in range(3):
    mseZ += np.sum( (r[i]-z[i])**2 )
    p = softmax(z[i])
    mseP += np.sum( (r[i]-p)**2 )
    croE += crossEntropy(r[i],z[i])
    print(p.round(2), np.sum(p))
mseZ = mseZ/9
mseP = mseP/9
croE = croE/9
print(mseZ.round(2), mseP.round(2), croE.round(2))
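For larger raw scores np.exp overflows quickly. A common remedy is to subtract the maximum score before exponentiating, which does not change the result of the softmax; a minimal sketch (this shifted variant is my addition, not part of the original listing):

def softmaxStable(z):
    # softmax is invariant under subtracting a constant, so shift by max(z)
    e = np.exp(z - np.max(z))
    return e / np.sum(e)

print(softmaxStable(np.array([1000.0, 1001.0, 1002.0])))  # plain softmax would overflow here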

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Load a small grayscale test image
dog = mpimg.imread('hundKlein.png')
# 3x3 filter kernel
convolutionMatrix = np.array([ [1, 0, -1], [0, 0, 0], [-1, 0, 1] ])
# Zero-padding: a one-pixel border of zeros around the image
dogPadding = np.zeros( (dog.shape[0]+2, dog.shape[1]+2) )
dogPadding[1:dog.shape[0]+1,1:dog.shape[1]+1] = dog
dogConv = np.zeros_like(dogPadding)
# Slide the 3x3 kernel over every pixel and sum the elementwise products
for i in range(0,dog.shape[0]):
    for j in range(0,dog.shape[1]):
        dogConv[i+1,j+1] = np.sum(convolutionMatrix*dogPadding[i:i+3,j:j+3])

plt.figure()
plt.imshow(dogPadding, cmap='gray')
plt.figure()
plt.imshow(dogConv, cmap='gray')
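The double loop above computes a cross-correlation of the zero-padded image with the kernel. As a cross-check one can compare the result against SciPy; a minimal sketch, assuming SciPy is installed (for this particular kernel, which is symmetric under a 180-degree rotation, convolution and correlation coincide):

from scipy.signal import correlate2d
# mode='same' with a zero boundary matches the manual zero-padding above
check = correlate2d(dog, convolutionMatrix, mode='same', boundary='fill', fillvalue=0)
print(np.allclose(check, dogConv[1:-1,1:-1]))  # should print True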
# Small worked example from the lecture notes: a 7x7 array with a zero border
skriptBeispiel = np.zeros((7,7))
skriptBeispiel[1,1:6] = np.array([1, 0, 1, 2, 0])
skriptBeispiel[2,1:6] = np.array([1, 0, 1, 2, 3])
skriptBeispiel[3,1:6] = np.array([0, 0, 1, 2, 3])
skriptBeispiel[4,1:6] = np.array([0, 0, 0, 1, 2])
skriptBeispiel[5,1:6] = np.array([0, 0, 1, 2, 2])
skriptCon = np.zeros_like(skriptBeispiel)
convolutionMatrix  = np.array([ [ 1,  0 , -1], [0, 0, 0], [-1, 0, 1]] )
for i in range(0,5):
    for j in range(0,5):
        skriptCon[i+1,j+1] = np.sum(convolutionMatrix*skriptBeispiel[i:i+3,j:j+3])
print(skriptCon)

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
np.random.seed(42)
# 1200 random points in the unit square
X = np.random.rand(1200,2)
# The true class boundary is a sine curve: points below it belong to group A
groupeA = 0.4*np.sin(2*np.pi*X[:,0] + X[:,0]**2) + 0.55 > X[:,1]
groupeB = 0.4*np.sin(2*np.pi*X[:,0] + X[:,0]**2) + 0.55 <= X[:,1]
Y = np.zeros(X.shape[0])
Y[groupeA] = 1
Y[groupeB] = -1
# Label noise: flip about a quarter of the labels of points close to the boundary
groupeE = np.abs(0.4*np.sin(2*np.pi*X[:,0] + X[:,0]**2) + 0.55 - X[:,1]) < 0.2
index = np.flatnonzero(groupeE)
flip = np.random.rand(index.shape[0]) < 1/4
Y[index[flip]] = (-1)*Y[index[flip]]
groupeAdata = Y > 0
groupeBdata = Y < 0
XTrain = X
# Two-column one-hot coding of the two classes
YTrain = np.zeros( (X.shape[0],2) )
YTrain[groupeAdata,0] = 1
YTrain[groupeBdata,1] = 1
# Sample the true boundary curve for plotting
t = np.linspace(0,1,200)
b = 0.4*np.sin(2*np.pi*t + t**2) + 0.55
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(X[groupeAdata,0],X[groupeAdata,1],marker='+',c='k')
ax.scatter(X[groupeBdata,0],X[groupeBdata,1],marker='*',c='gray')
ax.plot(t,b,'r--',lw=2)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_title('Training data')
# Reference network without any regularization
myANN = Sequential()
myANN.add(Dense(256,input_dim=2,kernel_initializer='normal',activation='relu'))
myANN.add(Dense(256,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dense(2,kernel_initializer='normal',activation='sigmoid'))
myANN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
myANN.fit(XTrain,YTrain, epochs=500,batch_size=20)
XTest = np.random.rand(20000,2)
yp = myANN.predict(XTest)
groupeAp = yp[:,0] > yp[:,1]
groupeBp = yp[:,1] > yp[:,0] 
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(XTest[groupeAp,0],XTest[groupeAp,1],marker='+',c='k')
ax.scatter(XTest[groupeBp,0],XTest[groupeBp,1],marker='*',c='gray')
ax.plot(t,b,'r--',lw=2)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_title('No regularization')
# Same architecture with an L2 penalty on all hidden-layer weights
myANN = Sequential()
myANN.add(Dense(256,input_dim=2,kernel_initializer='normal',activation='relu',kernel_regularizer=regularizers.l2(0.0001)))
myANN.add(Dense(256,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l2(0.0001)))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l2(0.0001)))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l2(0.0001)))
myANN.add(Dense(2,kernel_initializer='normal',activation='sigmoid'))
myANN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
myANN.fit(XTrain,YTrain, epochs=500,batch_size=20)
XTest = np.random.rand(20000,2)
yp = myANN.predict(XTest)
groupeAp = yp[:,0] > yp[:,1]
groupeBp = yp[:,1] > yp[:,0] 
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(XTest[groupeAp,0],XTest[groupeAp,1],marker='+',c='k')
ax.scatter(XTest[groupeBp,0],XTest[groupeBp,1],marker='*',c='gray')
ax.plot(t,b,'r--',lw=2)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_title('$L_2$ regularization')
from keras.layers import Dropout
# Same architecture with dropout after the first three hidden layers
myANN = Sequential()
myANN.add(Dense(256,input_dim=2,kernel_initializer='normal',activation='relu'))
myANN.add(Dropout(0.5))
myANN.add(Dense(256,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dropout(0.5))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dropout(0.5))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu'))
myANN.add(Dense(2,kernel_initializer='normal',activation='sigmoid'))
myANN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
myANN.fit(XTrain,YTrain, epochs=500,batch_size=20)
XTest = np.random.rand(20000,2)
yp = myANN.predict(XTest)
groupeAp = yp[:,0] > yp[:,1]
groupeBp = yp[:,1] > yp[:,0] 
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(XTest[groupeAp,0],XTest[groupeAp,1],marker='+',c='k')
ax.scatter(XTest[groupeBp,0],XTest[groupeBp,1],marker='*',c='gray')
ax.plot(t,b,'r--',lw=2)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_title('Dropout regularization')

# Same architecture with an L1 penalty on all hidden-layer weights
myANN = Sequential()
myANN.add(Dense(256,input_dim=2,kernel_initializer='normal',activation='relu',kernel_regularizer=regularizers.l1(0.00001)))
myANN.add(Dense(256,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l1(0.00001)))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l1(0.00001)))
myANN.add(Dense(128,kernel_initializer='random_uniform',activation='relu',kernel_regularizer=regularizers.l1(0.00001)))
myANN.add(Dense(2,kernel_initializer='normal',activation='sigmoid'))
myANN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
myANN.fit(XTrain,YTrain, epochs=500,batch_size=20)
XTest = np.random.rand(20000,2)
yp = myANN.predict(XTest)
groupeAp = yp[:,0] > yp[:,1]
groupeBp = yp[:,1] > yp[:,0] 
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(XTest[groupeAp,0],XTest[groupeAp,1],marker='+',c='k')
ax.scatter(XTest[groupeBp,0],XTest[groupeBp,1],marker='*',c='gray')
ax.plot(t,b,'r--',lw=2)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_title('$L_1$ regularization')
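The four figures only allow a visual comparison of the decision boundaries. Since the curve that generated the data is known, the quality can also be quantified; a minimal sketch (my own addition, reusing the boundary formula from the data generation above and the predictions of the last model):

# Ground-truth labels of the test points from the known boundary curve
bTest = 0.4*np.sin(2*np.pi*XTest[:,0] + XTest[:,0]**2) + 0.55
trueA = bTest > XTest[:,1]
# Fraction of test points that land on the correct side of the learned boundary
print("Accuracy: %.3f" % np.mean(groupeAp == trueA))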

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
np.random.seed(42)
# Load the Boston housing features and target values from CSV files
fFloat  = open("BostonFeature.csv","r")
X = np.loadtxt(fFloat, delimiter=","); fFloat.close()
fFloat  = open("BostonTarget.csv","r")
Y = np.loadtxt(fFloat, delimiter=","); fFloat.close()
# Scale the target values to [0,1]
yMin = Y.min(axis=0); yMax = Y.max(axis=0)
Y = (Y - yMin) / (yMax - yMin)
# Random 80/20 train/test split
TrainSet     = np.random.choice(X.shape[0],int(X.shape[0]*0.80), replace=False)
XTrain       = X[TrainSet,:]
YTrain       = Y[TrainSet]
TestSet      = np.delete(np.arange(0, len(Y) ), TrainSet)
XTest        = X[TestSet,:]
YTest        = Y[TestSet]
myANN = Sequential()
myANN.add(Dense(10,input_dim=13,kernel_initializer='normal',activation='relu',use_bias=False))
myANN.add(Dense(10,kernel_initializer='random_uniform',activation='relu',use_bias=False))
myANN.add(Dense(1,kernel_initializer='normal',activation='linear',use_bias=False))
myANN.compile(loss='mean_squared_error', optimizer='adam')
history = myANN.fit(XTrain,YTrain, epochs=1000, verbose=False)
yp = myANN.predict(XTest)
yp = yp.reshape(yp.shape[0])
errorT = (yMax - yMin)*(yp - YTest)  # error rescaled back to the original units
print(np.mean(np.abs(errorT)))
myANN = Sequential()
myANN.add(Dense(10,input_dim=13,kernel_initializer='normal',activation='relu',use_bias=False))
myANN.add(Dense(10,kernel_initializer='random_uniform',activation='relu',use_bias=False))
myANN.add(Dense(1,kernel_initializer='normal',activation='linear',use_bias=False))
myANN.compile(loss='mean_squared_error', optimizer='adam')
def divValTrainSet(X,Y):
    # Split off 25% of the samples as a validation set
    ValSet    = np.random.choice(X.shape[0],int(X.shape[0]*0.25),replace=False)
    TrainSet  = np.delete(np.arange(0, Y.shape[0] ), ValSet)
    XVal     = X[ValSet,:]
    YVal     = Y[ValSet]
    X        = X[TrainSet,:]
    Y        = Y[TrainSet]
    return (XVal, YVal, X, Y)
(XVal, YVal, XTr, YTr) = divValTrainSet(XTrain,YTrain)
import keras
# Stop training once the monitored loss has not improved for 20 epochs
earlystop  = keras.callbacks.EarlyStopping(monitor='loss', patience=20, verbose=False)
# Save the weights of the best epoch seen so far to a file
checkpoint = keras.callbacks.ModelCheckpoint('bestW.h5', monitor='loss', verbose=False, save_weights_only=True, save_best_only=True)
callbacksList = [earlystop, checkpoint]
history = myANN.fit(XTr,YTr, epochs=1000, validation_data=(XVal, YVal), callbacks=callbacksList, verbose=False)
import matplotlib.pyplot as plt
lossMonitor = np.array(history.history['loss'])
valLossMonitor = np.array(history.history['val_loss'])
counts = np.arange(lossMonitor.shape[0])
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(counts,lossMonitor,'k', label='Training data')
ax.plot(counts,valLossMonitor,'r:', label='Validation data')
ax.set_xlabel('Epoch')
ax.set_ylabel('Error')
ax.legend()
# Reload the best weights before evaluating on the test set
myANN.load_weights('bestW.h5')
yp = myANN.predict(XTest)
yp = yp.reshape(yp.shape[0])
errorT = (yMax - yMin)*(yp - YTest)
print(np.mean(np.abs(errorT)))
yp = myANN.predict(XTrain)
yp = yp.reshape(yp.shape[0])
error = (yMax - yMin)*(yp - YTrain)
print(np.mean(np.abs(error)))
fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.set_title('Distribution of the errors on the training set')
ax.hist(error,color='gray')
ax.set_xlabel('Error in thousands')
ax.set_ylabel('Count')
ax = fig.add_subplot(1,2,2)
ax.set_title('Distribution of the errors on the test set')
ax.hist(errorT,color='gray')
ax.set_xlabel('Error in thousands')
ax.set_ylabel('Count')
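Newer Keras versions (2.2.3 and later) can restore the best weights directly inside the EarlyStopping callback, which makes the separate ModelCheckpoint file unnecessary; a minimal sketch of that alternative, here monitoring the validation loss, which is usually the quantity of interest when a validation set is available:

# Alternative: early stopping on the validation loss with automatic weight restore
earlystop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20,
                                          restore_best_weights=True, verbose=False)
history = myANN.fit(XTr, YTr, epochs=1000, validation_data=(XVal, YVal),
                    callbacks=[earlystop], verbose=False)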


::::::::::::::::::::::::::::::::::::::::::::::::::::
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from keras.datasets import mnist
# Load MNIST: 60,000 training and 10,000 test images (28x28, grayscale)
(XTrain, yTrain), (XTest, yTest) = mnist.load_data()
# Show the first nine digits with their labels
fig = plt.figure()
for i in range(9):
    ax = fig.add_subplot(3,3,i+1)
    ax.imshow(XTrain[i], cmap='gray', interpolation='none')
    ax.set_title(yTrain[i])
plt.tight_layout()

# Flatten the images to 784-dimensional vectors and scale to [0,1]
XTrain = XTrain.reshape(60000, 784)
XTest = XTest.reshape(10000, 784)
XTrain = XTrain/255
XTest  = XTest/255
# One-hot encode the labels
YTrain = np_utils.to_categorical(yTrain, 10)
YTest = np_utils.to_categorical(yTest, 10)
myANN = Sequential()
myANN.add(Dense(80,input_dim=784,activation='relu'))
myANN.add(Dense(40,activation='relu'))
myANN.add(Dense(10,activation='sigmoid'))
myANN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
myANN.fit(XTrain, YTrain, batch_size=24, epochs=10, verbose=True)
score = myANN.evaluate(XTest, YTest, verbose=False)
print('Test score:', score[0])
print('Test accuracy:', score[1])
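The network above uses sigmoid outputs trained with a mean squared error loss. For a ten-class problem, a softmax output layer with categorical cross-entropy is the more common choice and typically trains faster; a minimal sketch of that variant (my own modification, not part of the original listing):

myANN2 = Sequential()
myANN2.add(Dense(80, input_dim=784, activation='relu'))
myANN2.add(Dense(40, activation='relu'))
myANN2.add(Dense(10, activation='softmax'))  # probability distribution over the ten digits
myANN2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
myANN2.fit(XTrain, YTrain, batch_size=24, epochs=10, verbose=True)
print(myANN2.evaluate(XTest, YTest, verbose=False))  # [loss, accuracy]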

:::::::::::::::::::::::::::::::::::::::::::::
