Wednesday, May 29, 2019

Künstliche Intelligenz: Python source code, part 7

::::::::::::::::::::::::::::::::::::::
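The first listing is the complete MLPNet class: a multilayer perceptron with two sigmoid hidden layers and a linear output layer, trained pattern by pattern with stochastic gradient descent and with early stopping on a validation split. As a reading aid (my notation, matching the variables O1, O2, W[0]..W[2] and yfactor), the forward pass is

$$O_1 = \sigma(W_0 x),\qquad O_2 = \sigma(W_1 O_1),\qquad y_p = W_2 O_2,$$

and for output neuron $m$ with error $e = y_m - y_{p,m}$ the training loop applies the backpropagation updates

$$\Delta W_2[m,:] = \eta\, e\, O_2^T,\qquad \Delta W_1 = \eta\, e\, \delta_2 O_1^T,\qquad \Delta W_0 = \eta\, e\, \delta_1 x^T,$$

with $\delta_2 = W_2[m,:]^T \circ O_2(1-O_2)$ and $\delta_1 = O_1(1-O_1) \circ (W_1^T \delta_2)$.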
import numpy as np
import scipy.special
import copy
class MLPNet:           
    def __init__(self, hiddenlayer=(10,10),classification=False):
        self.hl = hiddenlayer; self.classification = classification
        self.xMin = 0.0; self.xMax = 1.0
        self.W = []
        self._sigmoid = lambda x: scipy.special.expit(x)
    def _initWeights(self):
        self.W.append((np.random.rand(self.hl[0],self.il) - 0.5 ))
        self.W.append((np.random.rand(self.hl[1],self.hl[0]) - 0.5))
        self.W.append((np.random.rand(self.ol,self.hl[1]) - 0.5))
    def _calOut(self,X):
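        # forward pass: two sigmoid hidden layers, linear output layer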
        O1 = self._sigmoid(self.W[0]@X.T)
        O2 = self._sigmoid(self.W[1]@O1)
        y = (self.W[len(self.W)-1]@O2).T
        return(y)
    def predict(self,X):
        X = (X - self.xMin) / (self.xMax - self.xMin)
        X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
        y = self._calOut(X)
        if self.classification: y = np.round(y)
        return(y)
   
    def fit(self,X,Y,eta=0.75,maxIter=200,vareps=10**-3,scale=True,XT=None,YT=None):
        self.xMin = X.min(axis=0) if scale else 0
        self.xMax = X.max(axis=0) if scale else 1
        X = (X - self.xMin) / (self.xMax - self.xMin)
        X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
        if len(Y.shape) == 1:
            Y = Y[:,None]
        self.il = X.shape[1]
        self.ol = Y.shape[1]
        self._initWeights()
        (XVal, YVal, X, Y) = self._divValTrainSet(X,Y)
        self.train(X,Y,XVal,YVal,eta,maxIter,vareps,XT,YT)
    
    def train(self,X,Y,XVal=None,YVal=None,eta=0.75,maxIter=200,vareps=10**-3,XT=None,YT=None):   
        if XVal is None: (XVal, YVal, X, Y) = self._divValTrainSet(X,Y)
        if len(Y.shape) == 1: Y = Y[:,None]
        if len(YVal.shape) == 1: YVal = YVal[:,None]
        if self.il != X.shape[1]: X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
        if self.il != XVal.shape[1]: XVal = np.hstack( (XVal,np.ones(XVal.shape[0])[:,None]) )
        dW = []
        for i in range(len(self.W)):
            dW.append(np.zeros_like(self.W[i]))
        yp = self._calOut(XVal)
        if self.classification: yp = np.round(yp)
        meanE = (np.sum((YVal-yp)**2)/XVal.shape[0])/YVal.shape[1]
        minError = meanE
        minW = copy.deepcopy(self.W)
        self.errorVal=[]; self.errorTrain=[]; self.errorTest=[]
        mixSet = np.random.choice(X.shape[0],X.shape[0],replace=False)
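        # mixSet: random visiting order of the training samples (drawn once, reused each epoch)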
        counter = 0           
        while meanE > vareps and counter < maxIter:
            counter += 1
            for m in range(self.ol):  # one stochastic pass per output neuron
                for i in mixSet:
                    x = X[i,:]
                    O1 = self._sigmoid(self.W[0]@x.T)
                    O2 = self._sigmoid(self.W[1]@O1)
                    temp = self.W[2][m,:]*O2*(1-O2)[None,:]
                    dW[2] = O2
                    dW[1] = temp.T@O1[:,None].T  
                    dW[0] = (O1*(1-O1)*(temp@self.W[1])).T@x[:,None].T
                    yp = self._calOut(x)[m]
                    yfactor = np.sum(Y[i,m]-yp)
                    for j in range(len(self.W)):    
                        self.W[j] += eta * yfactor* dW[j]
            yp = self._calOut(XVal)
            if self.classification: yp = np.round(yp)
            meanE = (np.sum((YVal-yp)**2)/XVal.shape[0])/YVal.shape[1]
            self.errorVal.append(meanE)
            if meanE < minError:
                minError = meanE
                minW = copy.deepcopy(self.W)     
                self.valChoise = counter
               
            if XT is not None:
                yp = self.predict(XT)
                if len(YT.shape) == 1: YT = YT[:,None]
                meanETest = (np.sum((YT-yp)**2)/XT.shape[0])/YT.shape[1]
                self.errorTest.append(meanETest)
               
                yp = self._calOut(X)
                if self.classification:
                    yp = np.round(yp)
                meanETrain = (np.sum((Y-yp)**2)/X.shape[0])/Y.shape[1]
                self.errorTrain.append(meanETrain)
        self.W = copy.deepcopy(minW)
   
    def _divValTrainSet(self, X,Y):
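        # split off a random 25% of the samples as validation set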
        self.ValSet    = np.random.choice(X.shape[0],int(X.shape[0]*0.25),replace=False)
        self.TrainSet  = np.delete(np.arange(0, Y.shape[0] ), self.ValSet)
        XVal     = X[self.ValSet,:]
        YVal     = Y[self.ValSet]
        X        = X[self.TrainSet,:]
        Y        = Y[self.TrainSet]
        return (XVal, YVal, X, Y)
   
    def exportNet(self, filePrefix):
        np.savetxt(filePrefix+"MinMax.csv", np.array([self.xMin, self.xMax]), delimiter=",")
        np.savetxt(filePrefix+"W0.csv", self.W[0], delimiter=",")
        np.savetxt(filePrefix+"W1.csv", self.W[1], delimiter=",")
        np.savetxt(filePrefix+"W2.csv", self.W[2], delimiter=",")
   
    def importNet(self,filePrefix, classification=False):
        MinMax = np.loadtxt(filePrefix+'MinMax.csv',delimiter=",")
        W2 = np.loadtxt(filePrefix+'W2.csv',delimiter=",")
        W1 = np.loadtxt(filePrefix+'W1.csv',delimiter=",")   
        W0 = np.loadtxt(filePrefix+'W0.csv',delimiter=",")
        self.W = [W0,W1,W2]
        self.hl = (W0.shape[0], W2.shape[1])
        self.il = W0.shape[1]
        self.ol = W2.shape[0]
        self.xMin = MinMax[0]
        self.xMax = MinMax[1]
        self.classification = classification
if __name__ == '__main__':
    np.random.seed(42)
    X = np.random.rand(1250,2)
    Y = np.zeros( (1250,2) )
    index1 = (X[:,0] - 0.25)**2 + (X[:,1] - 0.25)**2 < 0.2**2
    Y[index1,0] = 1
    index2 = (X[:,0] - 0.75)**2 + (X[:,1] - 0.75)**2 < 0.2**2
    Y[index2,1] = 1
   
    TrainSet     = np.random.choice(X.shape[0],int(X.shape[0]*0.70), replace=False)
    XTrain       = X[TrainSet,:]
    YTrain       = Y[TrainSet]
    TestSet      = np.delete(np.arange(0, len(Y) ), TrainSet)
    XTest        = X[TestSet,:]
    YTest        = Y[TestSet]
   
    myPredict = MLPNet(hiddenlayer=(24,24),classification=True)
    myPredict.fit(XTrain,YTrain,maxIter=1200, XT=XTest , YT=YTest)
    yp = myPredict.predict(XTest)
   
    fp = np.sum(np.abs(yp - YTest))/len(TestSet)*100
    print('correctly classified %0.1f%%' % (100-fp))
    print('incorrectly classified %0.1f%%' % (fp))
    myPredict.exportNet('foobar')
    justTest = MLPNet()
    justTest.importNet('foobar',classification=True)
    yp = justTest.predict(XTest)
    fp = np.sum(np.abs(yp - YTest))/len(TestSet)*100
    print('correctly classified %0.1f%%' % (100-fp))
    print('incorrectly classified %0.1f%%' % (fp))
   
   
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    plt.close('all')
    fig1 = plt.figure(1)
    ax = fig1.add_subplot(1,1,1)
    circle1 = plt.Circle((0.25, 0.25), 0.2, color='k', alpha=0.3)
    circle2 = plt.Circle((0.75, 0.75), 0.2, color='k', alpha=0.3)
    ax.add_artist(circle1)
    ax.add_artist(circle2)
   
    index1 = np.logical_and( (XTest[:,0] - 0.25)**2 + (XTest[:,1] - 0.25)**2 < 0.2**2 , yp[: ,0]==0 )
    ax.scatter(XTest[index1,0],XTest[index1,1], marker='v',c='r')
    index2 = np.logical_and(  (XTest[:,0] - 0.75)**2 + (XTest[:,1] - 0.75)**2 < 0.2**2, yp[: ,1]==0 )
    ax.scatter(XTest[index2,0],XTest[index2,1], marker='^',c='r')
   
    ax.scatter(XTest[yp[:,0]==1,0],XTest[yp[:,0]==1,1], marker='+',c='k')
    ax.scatter(XTest[yp[:,1]==1,0],XTest[yp[:,1]==1,1], marker='o',c='k')
    ax.set_xlabel('$x_0$')
    ax.set_ylabel('$x_1$')
    ax.set_xlim(0,1)
    ax.set_ylim(0,1)
    ax.axis('equal')
   
    fig3 = plt.figure(3)
    ax = fig3.add_subplot(1,1,1)
    epochen = np.arange(len(myPredict.errorVal))
    ax.plot(epochen, np.array(myPredict.errorVal), 'r-.' , label='Validation')
    ax.plot(epochen, np.array(myPredict.errorTest), 'k--', label='Test')
    ax.plot(epochen, np.array(myPredict.errorTrain), 'k:', label='Training' )
    ax.legend()
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Mean error')
:::::::::::::::::::::::::::::::::::::::::::::::::::::::
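The next listing trains a single perceptron on Autoklassifizierung.csv (two numeric features per car, price and engine power judging by the test vectors, scaled to [0,1], plus a constant bias input). The inner loop implements the classic perceptron learning rule: for a randomly drawn example $(x_B, y_B)$,

$$\Delta w_j = \eta\,\bigl(y_B - \theta(w^T x_B)\bigr)\, x_{B,j},$$

where $\theta$ is the Heaviside step function (myHeaviside). Training stops once all examples are classified correctly or tmax iterations are reached.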
import numpy as np
np.random.seed(42)
fFloat  = open("Autoklassifizierung.csv","r")
dataset = np.loadtxt(fFloat, delimiter=",")
fFloat.close()
y = dataset[:,0]
x = np.ones( (len(y),3) )
x[:,0:2] = dataset[:,1:3]
xMin = x[:,0:2].min(axis=0); xMax = x[:,0:2].max(axis=0)
x[:,0:2] = (x[:,0:2] - xMin) / (xMax - xMin)
t = 0; tmax=100000
eta = 0.25
Dw = np.zeros(3)
w = np.random.rand(3) - 0.5
convergenz = 1
def myHeaviside(x):
    y = np.ones_like(x, dtype=float)  # np.float was removed in newer NumPy; use the builtin
    y[x <= 0] = 0
    return(y)
   
while (convergenz > 0) and (t < tmax):
    t = t + 1
    WaehleBeispiel = np.random.randint(len(y))
    xB = x[WaehleBeispiel,:].T
    yB = y[WaehleBeispiel]
    error = yB - myHeaviside(w@xB)
    for j in range(len(xB)):
        Dw[j]= eta*error*xB[j]
        w[j] = w[j] + Dw[j]
    convergenz =  np.linalg.norm(y-myHeaviside(w@x.T))

def predict(x,w,xMin,xMax):
    xC = np.ones( (x.shape[0],3) )
    xC[:,0:2] = x
    xC[:,0:2] = (xC[:,0:2] - xMin) / (xMax - xMin); print(xC)
    y = w@xC.T
    y[y>0] = 1
    y[y<= 0] = 0
    return(y)
# SEAT Ibiza, Skoda Octavia, Toyota Avensis and Yaris GRMN
xTest = np.array([[12490, 48], [31590, 169],[24740, 97], [30800, 156]])
yPredict = predict(xTest,w,xMin,xMax)
print(yPredict)
import matplotlib.pyplot as plt
from matplotlib import cm
a= np.linspace(-1, 1, 50)
b=-w[0]/w[1]*a-w[2]/w[1]
fig = plt.figure(1)
ax = fig.add_subplot(1,1,1)
ax.plot(a,b,'k', linewidth=1.5, linestyle='dashed')
indexA = np.flatnonzero(y>0.5)
indexB = np.flatnonzero(y<0.5)
ax.scatter(x[indexA,0],x[indexA,1],color='red', marker='o')
ax.scatter(x[indexB,0],x[indexB,1],color='black', marker='+')
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_ylim([-0.25,1.25])
ax.set_xlim([0,1])
ax.set_title("Berechnet mit Random Seed 42")
xBool = np.array([[1, 0],[0, 1],[1, 1],[0, 0]])
w = np.array([1, 1, -0.5])   # weights realizing a logical OR
print(predict(xBool,w,0,1))
w = np.array([1, 1, -1.5])   # weights realizing a logical AND
print(predict(xBool,w,0,1))

::::::::::::::::::::::::::::::::::::::::::::::::::::::::
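In the following experiment the training labels are deliberately corrupted: 15% of the training examples are set to the positive class before simpleMLP is fitted, and the test error shows how the network copes with the noise. Since fancy indexing copies, the clean labels survive in Y; a minimal sanity check one could append after the listing (variable names as in the code below):

# fraction of training labels actually flipped by the injected noise
# (some of the drawn indices may already have carried a 1)
noiseRate = np.mean(YTrain != Y[TrainSet])
print('label noise rate: %0.1f%%' % (100*noiseRate))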
import numpy as np
from simpleMLP import simpleMLP
np.random.seed(42)
X = np.random.rand(1250,2)
Y = np.zeros(1250)
index = (X[:,0] - 0.5)**2 + (X[:,1] - 0.5)**2 < 0.2**2
Y[index] = 1
TrainSet     = np.random.choice(X.shape[0],int(X.shape[0]*0.80), replace=False)
XTrain       = X[TrainSet,:]
YTrain       = Y[TrainSet]
falsePositive = np.random.choice(len(TrainSet),int(len(TrainSet)*0.15), replace=False)
YTrain[falsePositive] = 1
TestSet      = np.delete(np.arange(0, len(Y) ), TrainSet)
XTest        = X[TestSet,:]
YTest        = Y[TestSet]
myPredict = simpleMLP(hiddenlayer=(120,120))
myPredict.fit(XTrain,YTrain,maxIter=600)
yp = myPredict.predict(XTest)
diff = np.abs(np.round(yp).T - YTest)
fp = np.sum(diff)/len(TestSet)*100
print('correctly classified %0.1f%%' % (100-fp))
print('incorrectly classified %0.1f%%' % (fp))
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
fig1 = plt.figure(1)
ax = fig1.add_subplot(1,1,1)
ax.scatter(XTrain[:,0],XTrain[:,1],c =YTrain, marker='o', edgecolors='k', cmap=cm.Greys)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
fig2 = plt.figure(2)
ax = fig2.add_subplot(1,1,1)
epochen = np.arange(len(myPredict.error))
ax.plot(epochen, np.array(myPredict.error), 'k' ) 
ax.set_xlabel('Epoch')
ax.set_ylabel('Mean error')
fig1 = plt.figure(3)
ax = fig1.add_subplot(1,1,1)
ax.scatter(XTest[:,0],XTest[:,1],c =np.round(yp).reshape(yp.shape[0]), marker='o', edgecolors='k', cmap=cm.Greys)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')

..............................................
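The regression example below works on the Boston housing data (BostonFeature.csv, BostonTarget.csv). The helper regressionPlot judges the network by fitting a straight line through the scatter of prediction versus target: with $A = [\,y_p\;\;\mathbf{1}\,]$, np.linalg.lstsq solves

$$\min_{m,c}\ \Bigl\lVert A \begin{pmatrix} m\\ c\end{pmatrix} - y \Bigr\rVert_2^2,$$

so an ideal predictor would yield $m = 1$ and $c = 0$, i.e. the dashed red fit would coincide with the solid black diagonal.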
import numpy as np
import matplotlib.pyplot as plt
from fullMLP import MLPNet
def regressionPlot(x,y,network,setName=''):
    yp = network.predict(x)
    A = np.ones( (x.shape[0],2) )                 # design matrix [yp, 1] for the line fit
    A[:,0] = yp.reshape(x.shape[0])
    m, c = np.linalg.lstsq(A, y, rcond=None)[0]   # least-squares slope and intercept
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    myTitle = '%s: %f * x + %f ' %(setName, m,c)
    ax.set_title(myTitle)
    ax.scatter(yp,y, marker='+', color=(0.5,0.5,0.5))
    ax.set_xlabel('ANN Output')
    ax.set_ylabel('Data Set')
    alpha = min(yp.min(),y.min())
    omega = max(yp.max(),y.max())
    xPlot = np.linspace(alpha,omega,10)
    ax.plot(xPlot,xPlot,'k')
    ax.plot(xPlot,xPlot*m+c,'r--')
    ax.set_xlim([alpha,omega])
    ax.set_ylim([alpha,omega])
np.random.seed(42)
fFloat  = open("BostonFeature.csv","r")
X = np.loadtxt(fFloat, delimiter=","); fFloat.close()
fFloat  = open("BostonTarget.csv","r")
Y = np.loadtxt(fFloat, delimiter=","); fFloat.close()
yMin = Y.min(axis=0); yMax = Y.max(axis=0)
Y = (Y - yMin) / (yMax - yMin)
TrainSet     = np.random.choice(X.shape[0],int(X.shape[0]*0.80), replace=False)
XTrain       = X[TrainSet,:]; YTrain       = Y[TrainSet]
TestSet      = np.delete(np.arange(0, len(Y) ), TrainSet)
XTest        = X[TestSet,:]; YTest        = Y[TestSet]
myPredict = MLPNet(hiddenlayer=(10,10))
myPredict.fit(XTrain,YTrain, eta=0.5, maxIter=1000, XT=XTest , YT=YTest)
regressionPlot(XTest,YTest,myPredict, setName='Testset')
regressionPlot(X,Y,myPredict, setName='All Data')
regressionPlot(XTrain[myPredict.TrainSet,:],YTrain[myPredict.TrainSet],myPredict, setName='Trainingset')
regressionPlot(XTrain[myPredict.ValSet,:],YTrain[myPredict.ValSet],myPredict, setName='Validationset')
ypTest = myPredict.predict(XTest)   
print('Mean error %0.2f' % (np.mean(np.abs(ypTest - YTest[:,None]))))
yp = myPredict.predict(XTrain)
yp = yp.reshape(yp.shape[0])
error = (yMax - yMin)*(yp - YTrain)
print(np.mean(np.abs(error)))
import matplotlib.pyplot as plt
fig1 = plt.figure()
ax = fig1.add_subplot(1,1,1)
epochen = np.arange(len(myPredict.errorVal))
ax.plot(epochen, np.array(myPredict.errorVal), 'r-.' , label='Validation')
ax.plot(epochen, np.array(myPredict.errorTest), 'k--', label='Test')
ax.plot(epochen, np.array(myPredict.errorTrain), 'k:', label='Training' )
ax.legend()
ax.set_xlabel('Epoch')
ax.set_ylabel('Mean error')
yp = myPredict.predict(XTest)
yp = yp.reshape(yp.shape[0])
errorT = (yMax - yMin)*(yp - YTest)
print(np.mean(np.abs(errorT)))
fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.set_title('Distribution of the deviations on the training set')
ax.hist(error,color='gray')
ax.set_xlabel('Deviation in thousands')
ax.set_ylabel('Count')
ax = fig.add_subplot(1,2,2)
ax.set_title('Distribution of the deviations on the test set')
ax.hist(errorT,color='gray')
ax.set_xlabel('Deviation in thousands')
ax.set_ylabel('Count')

:::::::::::::::::::::::::::::::::::::::
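What follows is simpleMLP, the reduced variant of MLPNet above: a single output neuron, no validation split, no export/import, but the same training loop that keeps a copy of the best weights seen so far. The factors O1*(1-O1) and O2*(1-O2) in the gradient code come from the derivative of the logistic function (scipy.special.expit):

$$\sigma(z) = \frac{1}{1+e^{-z}},\qquad \sigma'(z) = \sigma(z)\bigl(1-\sigma(z)\bigr).$$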
import numpy as np
import scipy.special
import copy
class simpleMLP:          
    def __init__(self, hiddenlayer=(10,10)):
        self.hl = hiddenlayer
        self.xMin = 0.0; self.xMax = 1.0
        self.W = []
        self._sigmoid = lambda x: scipy.special.expit(x)
    def _initWeights(self):
        self.W.append((np.random.rand(self.hl[0],self.il) - 0.5 ))
        self.W.append((np.random.rand(self.hl[1],self.hl[0]) - 0.5))
        self.W.append((np.random.rand(self.ol,self.hl[1]) - 0.5))
    def _calOut(self,X):
        O1 = self._sigmoid(self.W[0]@X.T)
        O2 = self._sigmoid(self.W[1]@O1)
        y = (self.W[len(self.W)-1]@O2).T
        return(y)
   
    def predict(self,X):
        X = (X - self.xMin) / (self.xMax - self.xMin)
        X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
        y = self._calOut(X)
       
        return(y)
    def fit(self,X,Y,eta=0.75,maxIter=200,vareps=10**-3,scale=True):
        self.xMin = X.min(axis=0) if scale else 0
        self.xMax = X.max(axis=0) if scale else 1
        X = (X - self.xMin) / (self.xMax - self.xMin)
        X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
        if len(Y.shape) == 1:
            Y = Y[:,None]
        self.il = X.shape[1]
        self.ol = 1
        self._initWeights()
       
        self.train(X,Y,eta,maxIter,vareps)
    
    def train(self,X,Y,eta,maxIter=200,vareps=10**-3):
        if len(Y.shape) == 1: Y = Y[:,None]
       
        if self.il != X.shape[1]: X = np.hstack( (X,np.ones(X.shape[0])[:,None]) )
       
        dW = []
        for i in range(len(self.W)):
            dW.append(np.zeros_like(self.W[i]))
        yp = self._calOut(X)

        meanE = np.sum((Y-yp)**2)/X.shape[0]
        minError = meanE
        minW = copy.deepcopy(self.W)          # keep a copy of the best weights seen so far
        self.error=[]
        mixSet = np.random.choice(X.shape[0],X.shape[0],replace=False)  # random sample order
        counter = 0
        while meanE > vareps and counter < maxIter:
            counter += 1

            for i in mixSet:
                x = X[i,:]
                O1 = self._sigmoid(self.W[0]@x.T)
                O2 = self._sigmoid(self.W[1]@O1)
                # gradients of the squared error w.r.t. the three weight matrices
                temp = self.W[2]*O2*(1-O2)[None,:]
                dW[2] = O2
                dW[1] = temp.T@O1[:,None].T
                dW[0] = (O1*(1-O1)*(temp@self.W[1])).T@x[:,None].T
                yp = self._calOut(x)
                yfactor = np.sum(Y[i]-yp)
                for j in range(len(self.W)):
                    self.W[j] += eta * yfactor* dW[j]

            yp = self._calOut(X)

            meanE = (np.sum((Y-yp)**2)/X.shape[0])
            self.error.append(meanE)
            if meanE < minError:
                minError = meanE
                minW = copy.deepcopy(self.W)
        self.W = copy.deepcopy(minW)          # restore the best weights found during training
       
if __name__ == '__main__':
    np.random.seed(42)
    XTrain = np.random.rand(2500,2)
    YTrain = np.sin(2*np.pi*(XTrain[:,0] + 0.5*XTrain[:,1])) + 0.5*XTrain[:,1]
    Noise = np.random.rand(YTrain.shape[0]) - 0.5
    YTrain = (1+ 0.05*Noise)*YTrain
   
    XTest = np.random.rand(500,2)
    YTest = np.sin(2*np.pi*(XTest[:,0] + 0.5*XTest[:,1])) + 0.5*XTest[:,1]
   
    myPredict = simpleMLP(hiddenlayer=(8,8))
    myPredict.fit(XTrain,YTrain)
    yp = np.squeeze(myPredict.predict(XTest))
   
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    fig1 = plt.figure(1)
    ax = fig1.add_subplot(1,1,1, projection='3d')
    ax.scatter(XTest[:,0],XTest[:,1],yp,alpha=0.6,c =yp, cmap=cm.Greys)
    ax.set_xlabel('x[0]')
    ax.set_ylabel('x[1]')
    ax.set_zlabel('$y_p$')
   
    fig2 = plt.figure(2)
    ax = fig2.add_subplot(1,1,1, projection='3d')
    ax.scatter(XTest[:,0],XTest[:,1],yp.T-YTest,alpha=0.6,c =yp.T-YTest, cmap=cm.Greys)
    ax.set_xlabel('x[0]')
    ax.set_ylabel('x[1]')
    ax.set_zlabel('$y_p - y$')
   
    fig3 = plt.figure(3)
    ax = fig3.add_subplot(1,1,1)
    epochen = np.arange(len(myPredict.error))
    ax.plot(epochen, np.array(myPredict.error), 'k' ) 
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Mean error')

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
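The final listing presses simpleMLP into service for a three-class problem by coding the classes as the single output values 0, 1 and 2 and rounding the prediction. That imposes an artificial ordering on the classes; a common alternative (not used here; sketch with the listing's variable names) would be one-hot targets together with a multi-output network such as MLPNet from the first listing:

Yhot = np.zeros((len(Y), 3))                # one column per class
Yhot[np.arange(len(Y)), Y.astype(int)] = 1  # set the class column to 1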
import numpy as np
from simpleMLP import simpleMLP
np.random.seed(42)
X = np.random.rand(1250,2)
Y = np.zeros(1250)
index = (X[:,0] - 0.25)**2 + (X[:,1] - 0.25)**2 < 0.2**2
Y[index] = 1
index = (X[:,0] - 0.75)**2 + (X[:,1] - 0.75)**2 < 0.2**2
Y[index] = 2
TrainSet     = np.random.choice(X.shape[0],int(X.shape[0]*0.70), replace=False)
XTrain       = X[TrainSet,:]
YTrain       = Y[TrainSet]
TestSet      = np.delete(np.arange(0, len(Y) ), TrainSet)
XTest        = X[TestSet,:]
YTest        = Y[TestSet]
myPredict = simpleMLP(hiddenlayer=(32,32))
myPredict.fit(XTrain,YTrain,maxIter=2000)
yp = myPredict.predict(XTest)
diff = np.abs(np.round(yp.T) - YTest).astype(bool)
fp = np.sum(diff)/len(TestSet)*100
print('correctly classified %0.1f%%' % (100-fp))
print('incorrectly classified %0.1f%%' % (fp))
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
plt.close('all')
fig1 = plt.figure(1)
ax = fig1.add_subplot(1,1,1)
ax.scatter(XTrain[:,0],XTrain[:,1],c =YTrain, marker='o', edgecolors='k', cmap=cm.Greys)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
fig2 = plt.figure(2)
ax = fig2.add_subplot(1,1,1)
epochen = np.arange(len(myPredict.error))
ax.plot(epochen, np.array(myPredict.error), 'k' ) 
ax.set_xlabel('Epoch')
ax.set_ylabel('Mean error')
yp = yp.reshape(XTest.shape[0])
fig3 = plt.figure(3)
ax = fig3.add_subplot(1,1,1, projection='3d')
ax.scatter(XTest[:,0],XTest[:,1],yp,alpha=0.6,c =yp, cmap=cm.Greys)
ax.set_xlabel('x[0]')
ax.set_ylabel('x[1]')
ax.set_zlabel('$y_p$')
::::::::::::::::::::::::::::::::::::::::::::::::::::::
