Keras Cheat Sheet: Python For Data Science

> Model Architecture

Sequential Model
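
The snippets on this sheet build three networks, model, model2, and model3. They assume the Sequential class and the layer classes have already been imported and the models instantiated; a minimal sketch of that setup (the exact import list is an assumption):

>>> from tensorflow.keras.models import Sequential
>>> from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Embedding, LSTM
>>> model = Sequential()
>>> model2 = Sequential()
>>> model3 = Sequential()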

Multilayer Perceptron (MLP)
>>> model.add(Dense(12, input_dim=8, activation='relu'))
>>> model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
>>> model.add(Dense(512, activation='relu', input_shape=(784,)))
>>> model.add(Dense(32, activation='relu', input_dim=100))
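
These layers stop short of an output layer; a minimal sketch of one possible classification head (the unit count and sigmoid activation are assumptions, not part of the extracted text):

>>> model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))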

Convolutional Neural Network (CNN)
>>> model2.add(Conv2D(32, (3,3), padding='same', input_shape=x_train.shape[1:]))
>>> model2.add(Activation('relu'))
>>> model2.add(Conv2D(32, (3,3)))
>>> model2.add(Activation('relu'))
>>> model2.add(Conv2D(64, (3,3)))
>>> model2.add(Activation('relu'))
>>> model2.add(MaxPooling2D(pool_size=(2,2)))
>>> model2.add(Dropout(0.25))
>>> model2.add(Flatten())
>>> model2.add(Dense(512))
>>> model2.add(Activation('relu'))
>>> model2.add(Dense(num_classes))
>>> model2.add(Activation('softmax'))
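
num_classes is not defined in the extracted snippets; it is assumed to be the number of target classes, e.g. 10 for the CIFAR-10 data loaded below:

>>> num_classes = 10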

Recurrent Neural Network (RNN)
>>> model3.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
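
In a typical text-classification setup this LSTM sits between an Embedding layer and a sigmoid output; a minimal sketch of how the full model3 stack might look (the vocabulary size, embedding dimension, and output layer are assumptions):

>>> model3.add(Embedding(20000, 128))
>>> model3.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
>>> model3.add(Dense(1, activation='sigmoid'))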
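
> Inspect Model

A minimal sketch of typical calls for inspecting a built model (which calls the original sheet listed here is an assumption):

>>> model.output_shape
>>> model.summary()
>>> model.get_config()
>>> model.get_weights()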

> Data

Your data needs to be stored as NumPy arrays or as a list of NumPy arrays. Ideally, you split the data into training and test sets, for which you can also use the train_test_split() function from sklearn.model_selection (see the Train and Test Sets snippet below).

>>> from tensorflow.keras.datasets import boston_housing, mnist, cifar10, imdb
>>> (x_train, y_train), (x_test, y_test) = mnist.load_data()
>>> (x_train2, y_train2), (x_test2, y_test2) = boston_housing.load_data()
>>> (x_train3, y_train3), (x_test3, y_test3) = cifar10.load_data()
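
The training and padding snippets below also reference x_train4 and y_train4; a sketch of loading them from the imdb dataset imported above (the num_words cap is an assumption):

>>> (x_train4, y_train4), (x_test4, y_test4) = imdb.load_data(num_words=20000)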

> Preprocessing

Sequence Padding
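
A minimal sketch of padding the IMDB sequences to a fixed length with pad_sequences (the maxlen value is an assumption):

>>> from tensorflow.keras.preprocessing.sequence import pad_sequences
>>> x_train4 = pad_sequences(x_train4, maxlen=80)
>>> x_test4 = pad_sequences(x_test4, maxlen=80)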

One-Hot Encoding
>>> from tensorflow.keras.utils import to_categorical
>>> Y_test3 = to_categorical(y_test3, num_classes)

Train and Test Sets
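
A minimal sketch of a train_test_split() call; the random_state=42 value comes from the sheet, while the array names and test_size are assumptions:

>>> from sklearn.model_selection import train_test_split
>>> X_train5, X_test5, y_train5, y_test5 = train_test_split(X, y, test_size=0.33, random_state=42)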

Standardization/Normalization
>>> standardized_X_test = scaler.transform(x_test2)
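
scaler is assumed to be a StandardScaler fitted on the Boston Housing training data; a minimal sketch of that step:

>>> from sklearn.preprocessing import StandardScaler
>>> scaler = StandardScaler().fit(x_train2)
>>> standardized_X = scaler.transform(x_train2)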

> Compile Model

MLP: multi-class classification
>>> model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

MLP: regression
>>> model.compile(optimizer='rmsprop', loss='mse')

RNN: binary classification
>>> model3.compile(loss='binary_crossentropy', metrics=['accuracy'])

CNN: with a custom optimizer instance
>>> model2.compile(loss='categorical_crossentropy', optimizer=opt)
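
opt above is assumed to be a Keras optimizer instance; a minimal sketch using RMSprop (the learning rate is an assumption):

>>> from tensorflow.keras.optimizers import RMSprop
>>> opt = RMSprop(learning_rate=0.0001)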

> Model Training

>>> model.fit(data, labels, epochs=10, batch_size=32)
>>> model3.fit(x_train4, y_train4, batch_size=32, epochs=15, verbose=1)

> Evaluate Your Model's Performance

>>> score = model3.evaluate(x_test, y_test, batch_size=32)

> Save/Reload Models
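
A minimal sketch of saving a trained model to the HDF5 format and reloading it (the file name is an assumption):

>>> from tensorflow.keras.models import load_model
>>> model3.save('model_file.h5')
>>> my_model = load_model('model_file.h5')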

> Model Fine-Tuning

Early Stopping
>>> from tensorflow.keras.callbacks import EarlyStopping
>>> model3.fit(x_train4, y_train4, batch_size=32, epochs=15, callbacks=[early_stopping_monitor])
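
early_stopping_monitor is assumed to be an EarlyStopping callback instance; a minimal sketch (the patience value is an assumption):

>>> early_stopping_monitor = EarlyStopping(patience=2)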

Learn Data Skills Online at www.DataCamp.com