# Plot training vs. validation loss curves from a Keras training run.
# NOTE(review): `model` is defined elsewhere in the project; `fit()` is
# called here without data arguments — presumably a placeholder for the
# real training call. Confirm against the actual caller.
history = model.fit()
print(history.history.keys())  # shows which metric keys were recorded

import matplotlib.pyplot as plt  # pyplot, not the deprecated pylab interface

plt.figure(figsize=(8, 6))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Train `model1` and plot training vs. validation accuracy per epoch.
import keras
from matplotlib import pyplot as plt

# `model1`, `train_x`, `train_y` are defined elsewhere; 10% of the training
# data is held out so that val_* metrics are recorded in `history`.
history = model1.fit(train_x, train_y, validation_split=0.1, epochs=50, batch_size=4)

# NOTE(review): 'acc'/'val_acc' are the old Keras key names; newer Keras
# records 'accuracy'/'val_accuracy' — verify against the installed version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# Visualize training history for a small binary classifier trained on the
# Pima Indians diabetes dataset (8 numeric feature columns, 0/1 label).
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy

# Load the dataset: each CSV row is 8 feature columns followed by the label.
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, 0:8]  # input features
Y = dataset[:, 8]    # binary target

# Small fully-connected network: 8 -> 12 -> 8 -> 1 (sigmoid output for
# binary classification).
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Hold out 33% of the data so val_accuracy / val_loss are recorded.
# verbose=0 keeps the 150-epoch run silent.
history = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
print(history.history.keys())  # shows which metric keys were recorded

# Accuracy curves: training vs. held-out validation split.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# Loss curves: training vs. held-out validation split.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()