In [1]:
import tensorflow as tf
from tensorflow import keras
In [2]:
imdb = keras.datasets.imdb

(trainx, trainy), (testx, testy) = imdb.load_data(num_words=10000)
print(trainx.shape)
print(testx.shape)
(25000,)
(25000,)
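num_words=10000 keeps only the 10,000 most frequent words; anything rarer is replaced by the out-of-vocabulary index (2). A quick sanity-check sketch, using only the data loaded above:

# every remaining index should stay below the num_words cap
max_index = max(max(seq) for seq in trainx)
print("largest word index in the training set:", max_index)   # expected to be at most 9999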
In [3]:
# in this dataset, each number is the index of a word in the review
print(trainx[0])
print("length = ",len(trainx[0]))
print("============================")
print(trainx[1])
print("length = ",len(trainx[1]))
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]
length =  218
============================
[1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634, 14, 394, 20, 13, 119, 954, 189, 102, 5, 207, 110, 3103, 21, 14, 69, 188, 8, 30, 23, 7, 4, 249, 126, 93, 4, 114, 9, 2300, 1523, 5, 647, 4, 116, 9, 35, 8163, 4, 229, 9, 340, 1322, 4, 118, 9, 4, 130, 4901, 19, 4, 1002, 5, 89, 29, 952, 46, 37, 4, 455, 9, 45, 43, 38, 1543, 1905, 398, 4, 1649, 26, 6853, 5, 163, 11, 3215, 2, 4, 1153, 9, 194, 775, 7, 8255, 2, 349, 2637, 148, 605, 2, 8003, 15, 123, 125, 68, 2, 6853, 15, 349, 165, 4362, 98, 5, 4, 228, 9, 43, 2, 1157, 15, 299, 120, 5, 120, 174, 11, 220, 175, 136, 50, 9, 4373, 228, 8255, 5, 2, 656, 245, 2350, 5, 4, 9837, 131, 152, 491, 18, 2, 32, 7464, 1212, 14, 9, 6, 371, 78, 22, 625, 64, 1382, 9, 8, 168, 145, 23, 4, 1690, 15, 16, 4, 1355, 5, 28, 6, 52, 154, 462, 33, 89, 78, 285, 16, 145, 95]
length =  189
In [4]:
word_index = imdb.get_word_index()

# shift every index by 3 to reserve the first 4 indices for special tokens
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3

print(word_index["a"])

# create reverse mapping
inv_map = {v: k for k, v in word_index.items()}
print(inv_map[6])
6
a
In [5]:
def decode_sentence(sentence):
    return ' '.join( [ inv_map.get(ind, '?') for ind in sentence ] )

print(decode_sentence(trainx[0]))
<START> this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK> father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for <UNK> and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also <UNK> to the two little boy's that played the <UNK> of norman and paul they were just brilliant children are often left out of the <UNK> list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all
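The mapping also works in reverse: looking the decoded words up in word_index should reproduce the indices at the start of trainx[0]. A small sketch (the word list is typed by hand from the decoded review above):

words = "this film was just brilliant".split()
print([word_index.get(w, word_index["<UNK>"]) for w in words])   # should match trainx[0][1:6]
print(trainx[0][1:6])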
In [6]:
# the reviews all have different lengths, so one way to handle this
# is to pad/truncate them to a common length (256 here);
# we pad with <PAD>, whose index is 0, so the padding doesn't collide with real words
train_data = keras.preprocessing.sequence.pad_sequences(trainx, maxlen=256, padding='post', value= word_index["<PAD>"])
print(train_data[0])

test_data = keras.preprocessing.sequence.pad_sequences(testx, maxlen=256, padding='post', value= word_index["<PAD>"])
print(test_data[0])

print("len = ", train_data.shape, "  ", test_data.shape)
[ 1 14 22 16 43 530 973 1622 1385 65 458 4468 66 3941 4 173 36 256 5 25 100 43 838 112 50 670 2 9 35 480 284 5 150 4 172 112 167 2 336 385 39 4 172 4536 1111 17 546 38 13 447 4 192 50 16 6 147 2025 19 14 22 4 1920 4613 469 4 22 71 87 12 16 43 530 38 76 15 13 1247 4 22 17 515 17 12 16 626 18 2 5 62 386 12 8 316 8 106 5 4 2223 5244 16 480 66 3785 33 4 130 12 16 38 619 5 25 124 51 36 135 48 25 1415 33 6 22 12 215 28 77 52 5 14 407 16 82 2 8 4 107 117 5952 15 256 4 2 7 3766 5 723 36 71 43 530 476 26 400 317 46 7 4 2 1029 13 104 88 4 381 15 297 98 32 2071 56 26 141 6 194 7486 18 4 226 22 21 134 476 26 480 5 144 30 5535 18 51 36 28 224 92 25 104 4 226 65 16 38 1334 88 12 16 283 5 16 4472 113 103 32 15 16 5345 19 178 32 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] [ 1 591 202 14 31 6 717 10 10 2 2 5 4 360 7 4 177 5760 394 354 4 123 9 1035 1035 1035 10 10 13 92 124 89 488 7944 100 28 1668 14 31 23 27 7479 29 220 468 8 124 14 286 170 8 157 46 5 27 239 16 179 2 38 32 25 7944 451 202 14 6 717 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] len = (25000, 256) (25000, 256)
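To see the padding and truncation rules in isolation, here is a tiny sketch on made-up sequences (not from the dataset): with maxlen=5 and padding='post', short sequences get 0s appended, while sequences longer than maxlen are cut from the front (the default truncating='pre'):

toy = [[1, 2, 3], [1, 2, 3, 4, 5, 6, 7]]
print(keras.preprocessing.sequence.pad_sequences(toy, maxlen=5, padding='post', value=0))
# [[1 2 3 0 0]
#  [3 4 5 6 7]]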
In [7]:
vocab_size = 10000

model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 16),
    # GlobalAveragePooling1D averages the embedding vectors over the sequence (time) axis
    # (other pooling layers take e.g. the max instead);
    # the pooling layer turns the variable-length sequence of embeddings into one fixed-size vector per review
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

model.summary()
WARNING:tensorflow:From F:\Anaconda\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
embedding (Embedding)        (None, None, 16)          160000
_________________________________________________________________
global_average_pooling1d (Gl (None, 16)                0
_________________________________________________________________
dense (Dense)                (None, 16)                272
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 17
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________
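To make the pooling step concrete: GlobalAveragePooling1D averages over the time axis, which is why the (None, None, 16) output of the Embedding layer becomes (None, 16). A small numpy sketch of the same computation (the random batch is made up; masking is ignored, as in this model):

import numpy as np

# a fake batch: 2 "sentences", 4 tokens each, embedded in 3 dimensions
fake_embeddings = np.random.rand(2, 4, 3)

# global average pooling over the time axis: (batch, time, features) -> (batch, features)
pooled = fake_embeddings.mean(axis=1)
print(pooled.shape)   # (2, 3)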
In [8]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
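binary_crossentropy matches the single sigmoid output: for a predicted probability p and a 0/1 label y, the loss is -(y*log(p) + (1-y)*log(1-p)), averaged over the batch. A hand-rolled numpy sketch with made-up predictions (not the library implementation, which also guards against log(0)):

import numpy as np

def binary_crossentropy(y_true, y_pred):
    # mean of -(y*log(p) + (1-y)*log(1-p)) over the batch
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean(-(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)))

print(binary_crossentropy([1, 0], [0.9, 0.2]))   # confident and right -> small loss (~0.16)
print(binary_crossentropy([1, 0], [0.2, 0.9]))   # confident and wrong -> large loss (~1.96)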

In [9]:
# split the training data into train & validation sets so we don't tune on the test data
x_val = train_data[:10000]
partial_x_train = train_data[10000:]

y_val = trainy[:10000]
partial_y_train = trainy[10000:]

print(x_val.shape, " ", y_val.shape)
print(partial_x_train.shape, " ", partial_y_train.shape)
(10000, 256)   (10000,)
(15000, 256)   (15000,)
In [10]:
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)
Train on 15000 samples, validate on 10000 samples
WARNING:tensorflow:From F:\Anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/40
15000/15000 [==============================] - 1s 71us/sample - loss: 0.6922 - acc: 0.5926 - val_loss: 0.6908 - val_acc: 0.5952
Epoch 2/40
15000/15000 [==============================] - 1s 37us/sample - loss: 0.6880 - acc: 0.6653 - val_loss: 0.6846 - val_acc: 0.7231
Epoch 3/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.6780 - acc: 0.7314 - val_loss: 0.6714 - val_acc: 0.7255
Epoch 4/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.6591 - acc: 0.7498 - val_loss: 0.6487 - val_acc: 0.7204
Epoch 5/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.6286 - acc: 0.7773 - val_loss: 0.6151 - val_acc: 0.7705
Epoch 6/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.5874 - acc: 0.8024 - val_loss: 0.5746 - val_acc: 0.8016
Epoch 7/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.5402 - acc: 0.8258 - val_loss: 0.5298 - val_acc: 0.8173
Epoch 8/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.4919 - acc: 0.8445 - val_loss: 0.4876 - val_acc: 0.8307
Epoch 9/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.4465 - acc: 0.8608 - val_loss: 0.4493 - val_acc: 0.8428
Epoch 10/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.4061 - acc: 0.8716 - val_loss: 0.4174 - val_acc: 0.8507
Epoch 11/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.3718 - acc: 0.8812 - val_loss: 0.3909 - val_acc: 0.8577
Epoch 12/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.3428 - acc: 0.8887 - val_loss: 0.3702 - val_acc: 0.8622
Epoch 13/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.3191 - acc: 0.8940 - val_loss: 0.3523 - val_acc: 0.8692
Epoch 14/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2980 - acc: 0.9003 - val_loss: 0.3391 - val_acc: 0.8722
Epoch 15/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2804 - acc: 0.9038 - val_loss: 0.3284 - val_acc: 0.8732
Epoch 16/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.2646 - acc: 0.9098 - val_loss: 0.3193 - val_acc: 0.8755
Epoch 17/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2501 - acc: 0.9139 - val_loss: 0.3117 - val_acc: 0.8771
Epoch 18/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.2374 - acc: 0.9192 - val_loss: 0.3053 - val_acc: 0.8793
Epoch 19/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2259 - acc: 0.9218 - val_loss: 0.2998 - val_acc: 0.8811
Epoch 20/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.2156 - acc: 0.9253 - val_loss: 0.2960 - val_acc: 0.8819
Epoch 21/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2052 - acc: 0.9312 - val_loss: 0.2926 - val_acc: 0.8821
Epoch 22/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1964 - acc: 0.9333 - val_loss: 0.2902 - val_acc: 0.8831
Epoch 23/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1876 - acc: 0.9379 - val_loss: 0.2887 - val_acc: 0.8833
Epoch 24/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1800 - acc: 0.9409 - val_loss: 0.2865 - val_acc: 0.8846
Epoch 25/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1722 - acc: 0.9450 - val_loss: 0.2852 - val_acc: 0.8842
Epoch 26/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1653 - acc: 0.9480 - val_loss: 0.2854 - val_acc: 0.8836
Epoch 27/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1587 - acc: 0.9502 - val_loss: 0.2848 - val_acc: 0.8847
Epoch 28/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1525 - acc: 0.9532 - val_loss: 0.2846 - val_acc: 0.8848
Epoch 29/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1470 - acc: 0.9559 - val_loss: 0.2867 - val_acc: 0.8825
Epoch 30/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1413 - acc: 0.9575 - val_loss: 0.2856 - val_acc: 0.8856
Epoch 31/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1356 - acc: 0.9599 - val_loss: 0.2866 - val_acc: 0.8860
Epoch 32/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1304 - acc: 0.9626 - val_loss: 0.2880 - val_acc: 0.8866
Epoch 33/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1254 - acc: 0.9638 - val_loss: 0.2901 - val_acc: 0.8848
Epoch 34/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1210 - acc: 0.9658 - val_loss: 0.2916 - val_acc: 0.8857
Epoch 35/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1168 - acc: 0.9665 - val_loss: 0.2947 - val_acc: 0.8847
Epoch 36/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1124 - acc: 0.9694 - val_loss: 0.2961 - val_acc: 0.8856
Epoch 37/40
15000/15000 [==============================] - 1s 38us/sample - loss: 0.1082 - acc: 0.9708 - val_loss: 0.2983 - val_acc: 0.8847
Epoch 38/40
15000/15000 [==============================] - 1s 37us/sample - loss: 0.1043 - acc: 0.9723 - val_loss: 0.3015 - val_acc: 0.8835
Epoch 39/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1010 - acc: 0.9731 - val_loss: 0.3045 - val_acc: 0.8829
Epoch 40/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.0971 - acc: 0.9750 - val_loss: 0.3066 - val_acc: 0.8831
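In the log above, val_loss stops improving around epoch 25-28 while the training loss keeps falling, which is the usual sign of overfitting. One option (not used in this notebook) is Keras's EarlyStopping callback; a sketch of how it could be passed to fit:

# sketch: stop once val_loss has not improved for 3 epochs, and keep the best weights
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                           restore_best_weights=True)

# history = model.fit(partial_x_train, partial_y_train,
#                     epochs=40, batch_size=512,
#                     validation_data=(x_val, y_val),
#                     callbacks=[early_stop], verbose=1)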
In [11]:
result = model.evaluate(test_data, testy)
print(result)
25000/25000 [==============================] - 0s 19us/sample - loss: 0.3272 - acc: 0.8728
[0.3272118095302582, 0.87284]
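As a usage sketch, the trained model can also score a brand-new review: encode it with word_index, pad it the same way as the training data, and call model.predict. encode_review and the sample text below are made up for illustration; words outside the top 10,000 are mapped to <UNK> so they stay inside the Embedding layer's vocabulary.

def encode_review(text, max_index=vocab_size):
    # hypothetical helper: words -> indices, unknown or too-rare words -> <UNK>
    ids = [word_index.get(w, word_index["<UNK>"]) for w in text.lower().split()]
    return [word_index["<START>"]] + [i if i < max_index else word_index["<UNK>"] for i in ids]

sample = encode_review("this film was just brilliant and i loved every minute of it")
sample = keras.preprocessing.sequence.pad_sequences([sample], maxlen=256,
                                                    padding='post', value=word_index["<PAD>"])
print(model.predict(sample))   # close to 1 -> positive, close to 0 -> negative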
In [17]:
# visualize the training history
import matplotlib.pyplot as plt

print(history.history.keys())

plt.plot(history.history['loss'])
plt.plot(history.history['acc'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['val_acc'])

plt.ylabel('accuracy/loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'train acc', 'val loss', 'val acc'], loc='upper left')
plt.show()
dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])
[Plot: training/validation loss and accuracy per epoch]