您可以通过设置 model.stop_training
属性(注意:它是属性,不是参数)来停止训练。
例如,如果我们想在第二个周期(epoch == 1)的第 3 批(batch == 3,均从 0 开始计数)结束时停止训练,那么您可以执行如下操作。
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np
import pandas as pd
class My_Callback(keras.callbacks.Callback):
    """Stop training once a specific (epoch, batch) pair has finished.

    Remembers the current epoch in ``on_epoch_begin`` and, when the target
    batch of the target epoch ends, sets ``self.model.stop_training = True``
    so that ``fit`` exits early.
    """

    def on_epoch_begin(self, epoch, logs=None):
        # ``logs=None`` (not ``logs={}``): a mutable default dict is shared
        # across calls — a classic Python pitfall. Keras itself uses None.
        self.epoch = epoch

    def on_batch_end(self, batch, logs=None):
        # epoch == 1 is the second epoch, batch == 3 the fourth batch
        # (both are 0-based indices).
        if self.epoch == 1 and batch == 3:
            print(f"\nStopping at Epoch {self.epoch}, Batch {batch}")
            self.model.stop_training = True
# Toy dataset: 100 samples with 3 features; the label is the index of the
# largest feature, one-hot encoded via pandas get_dummies.
X_train = np.random.random((100, 3))
y_train = pd.get_dummies(np.argmax(X_train[:, :3], axis=1)).values

# Small feed-forward softmax classifier, trained with plain SGD.
clf = Sequential([
    Dense(9, activation='relu', input_dim=3),
    Dense(3, activation='softmax'),
])
clf.compile(loss='categorical_crossentropy', optimizer=SGD())

# The callback halts training partway through the second epoch.
clf.fit(X_train, y_train, epochs=10, batch_size=16, callbacks=[My_Callback()])
Output:
Epoch 1/10
100/100 [==============================] - 0s 337us/step - loss: 1.0860
Epoch 2/10
16/100 [===>..........................] - ETA: 0s - loss: 1.0830
Stopping at Epoch 1, Batch 3
<keras.callbacks.callbacks.History at 0x7ff2e3eeee10>