- from datasets import Datasets
- import tensorflow as tf
- from sklearn.model_selection import train_test_split
- import matplotlib.pyplot as plt
def main():
    """Train a bidirectional-LSTM sentiment classifier on the movie-review
    dataset and plot training/validation accuracy.

    Loads data via the project-local ``Datasets`` helper, tokenizes and pads
    the reviews, trains for 10 epochs, then shows the accuracy curves.
    """
    max_len = 200  # pad/truncate every review to this many tokens

    x, y = Datasets.load_movie_review()
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

    # Fit the tokenizer on the TRAINING texts only: fitting on the full
    # corpus (as the original code did) leaks test-set vocabulary statistics
    # into training.  An explicit OOV token keeps words seen only in the
    # test split from being silently dropped at inference time.
    tokenizer = tf.keras.preprocessing.text.Tokenizer(oov_token="<OOV>")
    tokenizer.fit_on_texts(x_train)
    x_train = tokenizer.texts_to_sequences(x_train)
    x_test = tokenizer.texts_to_sequences(x_test)
    num_words = len(tokenizer.word_index)

    x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
    x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)
    # Two-class one-hot targets to match the softmax(2) output layer.
    y_train = tf.keras.utils.to_categorical(y_train, num_classes=2)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=2)

    model = tf.keras.Sequential([
        # Word indices start at 1, so the embedding needs num_words + 1 rows.
        tf.keras.layers.Embedding(
            input_dim=num_words + 1,
            output_dim=300,
            input_length=max_len,
            trainable=True,
        ),
        # NOTE(review): the original passed `implementation=2`, which is
        # deprecated/ignored in TF 2.x; dropped with no behavioral effect.
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128)),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(2, activation="softmax"),
    ])

    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["acc"],
    )

    history = model.fit(
        x_train,
        y_train,
        batch_size=32,
        epochs=10,
        validation_data=(x_test, y_test),
    )

    # Labeled curves + legend so the two lines are distinguishable.
    plt.plot(history.epoch, history.history.get("acc"), label="train")
    plt.plot(history.epoch, history.history.get("val_acc"), label="validation")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.show()
# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()