In [1]:
!pip install tensorflow-gpu


Collecting tensorflow-gpu
  Downloading https://files.pythonhosted.org/packages/0a/93/c7bca39b23aae45cd2e85ad3871c81eccc63b9c5276e926511e2e5b0879d/tensorflow_gpu-2.1.0-cp36-cp36m-manylinux2010_x86_64.whl (421.8MB)
     |████████████████████████████████| 421.8MB 38kB/s 
Requirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.0.8)
Requirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (0.33.6)
Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (3.10.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.12.0)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (0.9.0)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.11.2)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.15.0)
Collecting tensorboard<2.2.0,>=2.1.0
  Downloading https://files.pythonhosted.org/packages/40/23/53ffe290341cd0855d595b0a2e7485932f473798af173bbe3a584b99bb06/tensorboard-2.1.0-py3-none-any.whl (3.8MB)
     |████████████████████████████████| 3.8MB 54.6MB/s 
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.1.0)
Requirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.1.0)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (3.1.0)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (0.1.8)
Collecting tensorflow-estimator<2.2.0,>=2.1.0rc0
  Downloading https://files.pythonhosted.org/packages/18/90/b77c328a1304437ab1310b463e533fa7689f4bfc41549593056d812fab8e/tensorflow_estimator-2.1.0-py2.py3-none-any.whl (448kB)
     |████████████████████████████████| 450kB 55.7MB/s 
Requirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.17.5)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (0.8.1)
Requirement already satisfied: scipy==1.4.1; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (1.4.1)
Requirement already satisfied: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu) (0.2.2)
Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow-gpu) (2.8.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.8.0->tensorflow-gpu) (42.0.2)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (2.21.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (0.4.1)
Collecting google-auth<2,>=1.6.3
  Downloading https://files.pythonhosted.org/packages/8d/5f/a1a02695b96d0e09c38abf7d1576b137979cea3d060d60891622cf61276d/google_auth-1.10.1-py2.py3-none-any.whl (76kB)
     |████████████████████████████████| 81kB 12.8MB/s 
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (3.1.1)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (0.16.0)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (3.0.4)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (2019.11.28)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (1.24.3)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (1.3.0)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (4.0.0)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (0.2.7)
Requirement already satisfied: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (4.0)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (3.1.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow-gpu) (0.4.8)
ERROR: tensorflow 1.15.0 has requirement tensorboard<1.16.0,>=1.15.0, but you'll have tensorboard 2.1.0 which is incompatible.
ERROR: tensorflow 1.15.0 has requirement tensorflow-estimator==1.15.1, but you'll have tensorflow-estimator 2.1.0 which is incompatible.
ERROR: tensorboard 2.1.0 has requirement grpcio>=1.24.3, but you'll have grpcio 1.15.0 which is incompatible.
ERROR: google-colab 1.0.0 has requirement google-auth~=1.4.0, but you'll have google-auth 1.10.1 which is incompatible.
Installing collected packages: google-auth, tensorboard, tensorflow-estimator, tensorflow-gpu
  Found existing installation: google-auth 1.4.2
    Uninstalling google-auth-1.4.2:
      Successfully uninstalled google-auth-1.4.2
  Found existing installation: tensorboard 1.15.0
    Uninstalling tensorboard-1.15.0:
      Successfully uninstalled tensorboard-1.15.0
  Found existing installation: tensorflow-estimator 1.15.1
    Uninstalling tensorflow-estimator-1.15.1:
      Successfully uninstalled tensorflow-estimator-1.15.1
Successfully installed google-auth-1.10.1 tensorboard-2.1.0 tensorflow-estimator-2.1.0 tensorflow-gpu-2.1.0

In [1]:
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np

import tensorflow as tf

import tensorflow_hub as hub
import tensorflow_datasets as tfds

print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")


Version:  2.1.0
Eager mode:  True
Hub version:  0.7.0
GPU is available

In [2]:
# Split the training set 60/40, so we end up with 15,000 training
# examples, 10,000 validation examples, and 25,000 test examples
train_validation_split = tfds.Split.TRAIN.subsplit([6, 4])

(train_data, validation_data), test_data = tfds.load(
    name="imdb_reviews", 
    split=(train_validation_split, tfds.Split.TEST),
    as_supervised=True)


Downloading and preparing dataset imdb_reviews (80.23 MiB) to /root/tensorflow_datasets/imdb_reviews/plain_text/0.1.0...



WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_datasets/core/file_format_adapter.py:209: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.
Instructions for updating:
Use eager execution and: 
`tf.data.TFRecordDataset(path)`




Dataset imdb_reviews downloaded and prepared to /root/tensorflow_datasets/imdb_reviews/plain_text/0.1.0. Subsequent calls will reuse this data.
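
Note: tfds.Split.TRAIN.subsplit was later removed from tensorflow-datasets; with a newer TFDS release the same 60/40/test split can be expressed with the slicing API instead. A minimal sketch (assuming TFDS >= 3.x, not the version used in this run):

(train_data, validation_data, test_data) = tfds.load(
    name="imdb_reviews",
    split=('train[:60%]', 'train[60%:]', 'test'),
    as_supervised=True)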

In [3]:
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch


Out[3]:
<tf.Tensor: shape=(10,), dtype=string, numpy=
array([b"As a lifelong fan of Dickens, I have invariably been disappointed by adaptations of his novels.<br /><br />Although his works presented an extremely accurate re-telling of human life at every level in Victorian Britain, throughout them all was a pervasive thread of humour that could be both playful or sarcastic as the narrative dictated. In a way, he was a literary caricaturist and cartoonist. He could be serious and hilarious in the same sentence. He pricked pride, lampooned arrogance, celebrated modesty, and empathised with loneliness and poverty. It may be a clich\xc3\xa9, but he was a people's writer.<br /><br />And it is the comedy that is so often missing from his interpretations. At the time of writing, Oliver Twist is being dramatised in serial form on BBC television. All of the misery and cruelty is their, but non of the humour, irony, and savage lampoonery. The result is just a dark, dismal experience: the story penned by a journalist rather than a novelist. It's not really Dickens at all.<br /><br />'Oliver!', on the other hand, is much closer to the mark. The mockery of officialdom is perfectly interpreted, from the blustering beadle to the drunken magistrate. The classic stand-off between the beadle and Mr Brownlow, in which the law is described as 'a ass, a idiot' couldn't have been better done. Harry Secombe is an ideal choice.<br /><br />But the blinding cruelty is also there, the callous indifference of the state, the cold, hunger, poverty and loneliness are all presented just as surely as The Master would have wished.<br /><br />And then there is crime. Ron Moody is a treasure as the sleazy Jewish fence, whilst Oliver Reid has Bill Sykes to perfection.<br /><br />Perhaps not surprisingly, Lionel Bart - himself a Jew from London's east-end - takes a liberty with Fagin by re-interpreting him as a much more benign fellow than was Dicken's original. In the novel, he was utterly ruthless, sending some of his own boys to the gallows in order to protect himself (though he was also caught and hanged). Whereas in the movie, he is presented as something of a wayward father-figure, a sort of charitable thief rather than a corrupter of children, the latter being a long-standing anti-semitic sentiment. Otherwise, very few liberties are taken with Dickens's original. All of the most memorable elements are included. Just enough menace and violence is retained to ensure narrative fidelity whilst at the same time allowing for children' sensibilities. Nancy is still beaten to death, Bullseye narrowly escapes drowning, and Bill Sykes gets a faithfully graphic come-uppance.<br /><br />Every song is excellent, though they do incline towards schmaltz. Mark Lester mimes his wonderfully. Both his and my favourite scene is the one in which the world comes alive to 'who will buy'. It's schmaltzy, but it's Dickens through and through.<br /><br />I could go on. I could commend the wonderful set-pieces, the contrast of the rich and poor. There is top-quality acting from more British regulars than you could shake a stick at.<br /><br />I ought to give it 10 points, but I'm feeling more like Scrooge today. Soak it up with your Christmas dinner. No original has been better realised.",
       b"Oh yeah! Jenna Jameson did it again! Yeah Baby! This movie rocks. It was one of the 1st movies i saw of her. And i have to say i feel in love with her, she was great in this move.<br /><br />Her performance was outstanding and what i liked the most was the scenery and the wardrobe it was amazing you can tell that they put a lot into the movie the girls cloth were amazing.<br /><br />I hope this comment helps and u can buy the movie, the storyline is awesome is very unique and i'm sure u are going to like it. Jenna amazed us once more and no wonder the movie won so many awards. Her make-up and wardrobe is very very sexy and the girls on girls scene is amazing. specially the one where she looks like an angel. It's a must see and i hope u share my interests",
       b"I saw this film on True Movies (which automatically made me sceptical) but actually - it was good. Why? Not because of the amazing plot twists or breathtaking dialogue (of which there is little) but because actually, despite what people say I thought the film was accurate in it's depiction of teenagers dealing with pregnancy.<br /><br />It's NOT Dawson's Creek, they're not graceful, cool witty characters who breeze through sexuality with effortless knowledge. They're kids and they act like kids would. <br /><br />They're blunt, awkward and annoyingly confused about everything. Yes, this could be by accident and they could just be bad actors but I don't think so. Dermot Mulroney gives (when not trying to be cool) a very believable performance and I loved him for it. Patricia Arquette IS whiny and annoying, but she was pregnant and a teenagers? The combination of the two isn't exactly lavender on your pillow. The plot was VERY predictable and but so what? I believed them, his stress and inability to cope - her brave, yet slightly misguided attempts to bring them closer together. I think the characters, acted by anyone else, WOULD indeed have been annoying and unbelievable but they weren't. It reflects the surreality of the situation they're in, that he's sitting in class and she walks on campus with the baby. I felt angry at her for that, I felt angry at him for being such a child and for blaming her. I felt it all.<br /><br />In the end, I loved it and would recommend it.<br /><br />Watch out for the scene where Dermot Mulroney runs from the disastrous counselling session - career performance.",
       b'This was a wonderfully clever and entertaining movie that I shall never tire of watching many, many times. The casting was magnificent in matching up the young with the older characters. There are those of us out here who really do appreciate good actors and an intelligent story format. As for Judi Dench, she is beautiful and a gift to any kind of production in which she stars. I always make a point to see Judi Dench in all her performances. She is a superb actress and a pleasure to watch as each transformation of her character comes to life. I can only be grateful when I see such an outstanding picture for most of the motion pictures made more recently lack good characters, good scripts and good acting. The movie public needs heroes, not deviant manikins, who lack ingenuity and talent. How wonderful to see old favorites like Leslie Caron, Olympia Dukakis and Cleo Laine. I would like to see this movie win the awards it deserves. Thank you again for a tremendous night of entertainment. I congratulate the writer, director, producer, and all those who did such a fine job.',
       b'I have no idea what the other reviewer is talking about- this was a wonderful movie, and created a sense of the era that feels like time travel. The characters are truly young, Mary is a strong match for Byron, Claire is juvenile and a tad annoying, Polidori is a convincing beaten-down sycophant... all are beautiful, curious, and decadent... not the frightening wrecks they are in Gothic.<br /><br />Gothic works as an independent piece of shock film, and I loved it for different reasons, but this works like a Merchant and Ivory film, and was from my readings the best capture of what the summer must have felt like. Romantic, yes, but completely rekindles my interest in the lives of Shelley and Byron every time I think about the film. One of my all-time favorites.',
       b"This was soul-provoking! I am an Iranian, and living in th 21st century, I didn't know that such big tribes have been living in such conditions at the time of my grandfather!<br /><br />You see that today, or even in 1925, on one side of the world a lady or a baby could have everything served for him or her clean and on-demand, but here 80 years ago, people ventured their life to go to somewhere with more grass. It's really interesting that these Persians bear those difficulties to find pasture for their sheep, but they lose many the sheep on their way.<br /><br />I praise the Americans who accompanied this tribe, they were as tough as Bakhtiari people.",
       b'Just because someone is under the age of 10 does not mean they are stupid. If your child likes this film you\'d better have him/her tested. I am continually amazed at how so many people can be involved in something that turns out so bad. This "film" is a showcase for digital wizardry AND NOTHING ELSE. The writing is horrid. I can\'t remember when I\'ve heard such bad dialogue. The songs are beyond wretched. The acting is sub-par but then the actors were not given much. Who decided to employ Joey Fatone? He cannot sing and he is ugly as sin.<br /><br />The worst thing is the obviousness of it all. It is as if the writers went out of their way to make it all as stupid as possible. Great children\'s movies are wicked, smart and full of wit - films like Shrek and Toy Story in recent years, Willie Wonka and The Witches to mention two of the past. But in the continual dumbing-down of American more are flocking to dreck like Finding Nemo (yes, that\'s right), the recent Charlie & The Chocolate Factory and eye-crossing trash like Red Riding Hood.',
       b"I absolutely LOVED this movie when I was a kid. I cried every time I watched it. It wasn't weird to me. I totally identified with the characters. I would love to see it again (and hope I wont be disappointed!). Pufnstuf rocks!!!! I was really drawn in to the fantasy world. And to me the movie was loooong. I wonder if I ever saw the series and have confused them? The acting I thought was strong. I loved Jack Wilde. He was so dreamy to an 10 year old (when I first saw the movie, not in 1970. I can still remember the characters vividly. The flute was totally believable and I can still 'feel' the evil woods. Witchy poo was scary - I wouldn't want to cross her path.",
       b'A very close and sharp discription of the bubbling and dynamic emotional world of specialy one 18year old guy, that makes his first experiences in his gay love to an other boy, during an vacation with a part of his family.<br /><br />I liked this film because of his extremly clear and surrogated storytelling , with all this "Sound-close-ups" and quiet moments wich had been full of intensive moods.<br /><br />',
       b"This is the most depressing film I have ever seen. I first saw it as a child and even thinking about it now really upsets me. I know it was set in a time when life was hard and I know these people were poor and the crops were vital. Yes, I get all that. What I find hard to take is I can't remember one single light moment in the entire film. Maybe it was true to life, I don't know. I'm quite sure the acting was top notch and the direction and quality of filming etc etc was wonderful and I know that every film can't have a happy ending but as a family film it is dire in my opinion.<br /><br />I wouldn't recommend it to anyone who wants to be entertained by a film. I can't stress enough how this film affected me as a child. I was talking about it recently and all the sad memories came flooding back. I think it would have all but the heartless reaching for the Prozac."],
      dtype=object)>

In [4]:
train_labels_batch


Out[4]:
<tf.Tensor: shape=(10,), dtype=int64, numpy=array([1, 1, 1, 1, 1, 1, 0, 1, 1, 0])>

In [5]:
embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[], 
                           dtype=tf.string, trainable=True)
hub_layer(train_examples_batch[:3])


Out[5]:
<tf.Tensor: shape=(3, 20), dtype=float32, numpy=
array([[ 3.9819887 , -4.4838037 ,  5.177359  , -2.3643482 , -3.2938678 ,
        -3.5364532 , -2.4786978 ,  2.5525482 ,  6.688532  , -2.3076782 ,
        -1.9807833 ,  1.1315885 , -3.0339816 , -0.7604128 , -5.743445  ,
         3.4242578 ,  4.790099  , -4.03061   , -5.992149  , -1.7297493 ],
       [ 3.4232912 , -4.230874  ,  4.1488533 , -0.29553518, -6.802391  ,
        -2.5163853 , -4.4002395 ,  1.905792  ,  4.7512794 , -0.40538004,
        -4.3401685 ,  1.0361497 ,  0.9744097 ,  0.71507156, -6.2657013 ,
         0.16533905,  4.560262  , -1.3106939 , -3.1121316 , -2.1338716 ],
       [ 3.8508697 , -5.003031  ,  4.8700504 , -0.04324996, -5.893603  ,
        -5.2983093 , -4.004676  ,  4.1236343 ,  6.267754  ,  0.11632943,
        -3.5934832 ,  0.8023905 ,  0.56146765,  0.9192484 , -7.3066816 ,
         2.8202746 ,  6.2000837 , -3.5709393 , -4.564525  , -2.305622  ]],
      dtype=float32)>

In [6]:
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()


Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
keras_layer (KerasLayer)     (None, 20)                400020    
_________________________________________________________________
dense (Dense)                (None, 16)                336       
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 17        
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________

In [0]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

In [9]:
history = model.fit(train_data.shuffle(10000).batch(512),
                    epochs=20,
                    validation_data=validation_data.batch(512),
                    verbose=1)


Epoch 1/20
30/30 [==============================] - 6s 191ms/step - loss: 0.8107 - accuracy: 0.5497 - val_loss: 0.6806 - val_accuracy: 0.6072
Epoch 2/20
30/30 [==============================] - 5s 161ms/step - loss: 0.6392 - accuracy: 0.6407 - val_loss: 0.6124 - val_accuracy: 0.6666
Epoch 3/20
30/30 [==============================] - 5s 162ms/step - loss: 0.5870 - accuracy: 0.6936 - val_loss: 0.5715 - val_accuracy: 0.7088
Epoch 4/20
30/30 [==============================] - 5s 173ms/step - loss: 0.5491 - accuracy: 0.7277 - val_loss: 0.5387 - val_accuracy: 0.7353
Epoch 5/20
30/30 [==============================] - 5s 174ms/step - loss: 0.5136 - accuracy: 0.7593 - val_loss: 0.5091 - val_accuracy: 0.7570
Epoch 6/20
30/30 [==============================] - 5s 178ms/step - loss: 0.4826 - accuracy: 0.7833 - val_loss: 0.4850 - val_accuracy: 0.7708
Epoch 7/20
30/30 [==============================] - 6s 187ms/step - loss: 0.4512 - accuracy: 0.8011 - val_loss: 0.4559 - val_accuracy: 0.7951
Epoch 8/20
30/30 [==============================] - 5s 179ms/step - loss: 0.4210 - accuracy: 0.8216 - val_loss: 0.4325 - val_accuracy: 0.8074
Epoch 9/20
30/30 [==============================] - 5s 179ms/step - loss: 0.3927 - accuracy: 0.8402 - val_loss: 0.4107 - val_accuracy: 0.8186
Epoch 10/20
30/30 [==============================] - 5s 169ms/step - loss: 0.3664 - accuracy: 0.8515 - val_loss: 0.3897 - val_accuracy: 0.8295
Epoch 11/20
30/30 [==============================] - 5s 183ms/step - loss: 0.3414 - accuracy: 0.8642 - val_loss: 0.3742 - val_accuracy: 0.8371
Epoch 12/20
30/30 [==============================] - 5s 173ms/step - loss: 0.3174 - accuracy: 0.8756 - val_loss: 0.3567 - val_accuracy: 0.8469
Epoch 13/20
30/30 [==============================] - 5s 168ms/step - loss: 0.2988 - accuracy: 0.8835 - val_loss: 0.3442 - val_accuracy: 0.8529
Epoch 14/20
30/30 [==============================] - 5s 175ms/step - loss: 0.2793 - accuracy: 0.8943 - val_loss: 0.3331 - val_accuracy: 0.8569
Epoch 15/20
30/30 [==============================] - 5s 166ms/step - loss: 0.2609 - accuracy: 0.9030 - val_loss: 0.3241 - val_accuracy: 0.8627
Epoch 16/20
30/30 [==============================] - 5s 179ms/step - loss: 0.2429 - accuracy: 0.9091 - val_loss: 0.3164 - val_accuracy: 0.8653
Epoch 17/20
30/30 [==============================] - 5s 175ms/step - loss: 0.2295 - accuracy: 0.9153 - val_loss: 0.3109 - val_accuracy: 0.8690
Epoch 18/20
30/30 [==============================] - 5s 176ms/step - loss: 0.2171 - accuracy: 0.9232 - val_loss: 0.3062 - val_accuracy: 0.8706
Epoch 19/20
30/30 [==============================] - 5s 176ms/step - loss: 0.2023 - accuracy: 0.9291 - val_loss: 0.3014 - val_accuracy: 0.8725
Epoch 20/20
30/30 [==============================] - 5s 174ms/step - loss: 0.1912 - accuracy: 0.9345 - val_loss: 0.2983 - val_accuracy: 0.8750

In [10]:
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
  print("%s: %.3f" % (name, value))


loss: 0.317
accuracy: 0.865
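
Because the hub layer consumes raw strings, the trained model can also score new reviews directly. A quick sketch (the example sentences below are made up, not taken from the dataset):

sample_reviews = np.array(["This movie was absolutely wonderful.",
                           "A dull, predictable waste of two hours."])
predictions = model.predict(sample_reviews)  # sigmoid outputs in [0, 1]; > 0.5 reads as positive
print(predictions)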

In [11]:
# A dictionary mapping words to integer indices
word_index = imdb.get_word_index()

# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3

reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-11-3a5d955dd936> in <module>()
----> 1 word_index = imdb.get_word_index()
      2 
      3 # The first indices are reserved
      4 word_index = {k:(v+3) for k,v in word_index.items()}
      5 word_index["<PAD>"] = 0

NameError: name 'imdb' is not defined

In [12]:
imdb = keras.datasets.imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-12-35f9740dbe66> in <module>()
----> 1 imdb = keras.datasets.imdb
      2 
      3 (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

NameError: name 'keras' is not defined

In [13]:
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow import keras

import numpy as np

print(tf.__version__)


2.1.0

In [14]:
imdb = keras.datasets.imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)


Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17465344/17464789 [==============================] - 1s 0us/step

In [15]:
# A dictionary mapping words to integer indices
word_index = imdb.get_word_index()

# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3

reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])


Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json
1646592/1641221 [==============================] - 0s 0us/step
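
For the reverse direction, a hypothetical encode_review helper (not part of the tutorial) could map a raw review string back to indices, sending unknown or out-of-vocabulary words to <UNK>:

def encode_review(text, num_words=10000):
    # hypothetical helper: <START> followed by each word's index,
    # with unknown or out-of-vocabulary words mapped to <UNK> (2)
    ids = [word_index.get(word, word_index["<UNK>"]) for word in text.lower().split()]
    ids = [i if i < num_words else word_index["<UNK>"] for i in ids]
    return [word_index["<START>"]] + ids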

In [16]:
decode_review(train_data[0])


Out[16]:
"<START> this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK> father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for <UNK> and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also <UNK> to the two little boy's that played the <UNK> of norman and paul they were just brilliant children are often left out of the <UNK> list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all"

In [0]:
# Since the movie reviews must all be the same length, we use the
# pad_sequences function to standardize their lengths:
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)

test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)

In [19]:
len(train_data[0]), len(train_data[1])


Out[19]:
(256, 256)

In [20]:
print(train_data[0])


[   1   14   22   16   43  530  973 1622 1385   65  458 4468   66 3941
    4  173   36  256    5   25  100   43  838  112   50  670    2    9
   35  480  284    5  150    4  172  112  167    2  336  385   39    4
  172 4536 1111   17  546   38   13  447    4  192   50   16    6  147
 2025   19   14   22    4 1920 4613  469    4   22   71   87   12   16
   43  530   38   76   15   13 1247    4   22   17  515   17   12   16
  626   18    2    5   62  386   12    8  316    8  106    5    4 2223
 5244   16  480   66 3785   33    4  130   12   16   38  619    5   25
  124   51   36  135   48   25 1415   33    6   22   12  215   28   77
   52    5   14  407   16   82    2    8    4  107  117 5952   15  256
    4    2    7 3766    5  723   36   71   43  530  476   26  400  317
   46    7    4    2 1029   13  104   88    4  381   15  297   98   32
 2071   56   26  141    6  194 7486   18    4  226   22   21  134  476
   26  480    5  144   30 5535   18   51   36   28  224   92   25  104
    4  226   65   16   38 1334   88   12   16  283    5   16 4472  113
  103   32   15   16 5345   19  178   32    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0]

In [21]:
# The input shape is the vocabulary size used for the movie reviews (10,000 words)
vocab_size = 10000

model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))

model.summary()


Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding (Embedding)        (None, None, 16)          160000    
_________________________________________________________________
global_average_pooling1d (Gl (None, 16)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 16)                272       
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 17        
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________

In [0]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

In [0]:
x_val = train_data[:10000]
partial_x_train = train_data[10000:]

y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]

In [24]:
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)


Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 1s 65us/sample - loss: 0.6915 - accuracy: 0.5946 - val_loss: 0.6893 - val_accuracy: 0.5978
Epoch 2/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.6844 - accuracy: 0.6839 - val_loss: 0.6791 - val_accuracy: 0.7392
Epoch 3/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.6693 - accuracy: 0.7595 - val_loss: 0.6606 - val_accuracy: 0.7523
Epoch 4/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.6437 - accuracy: 0.7713 - val_loss: 0.6319 - val_accuracy: 0.7697
Epoch 5/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.6066 - accuracy: 0.7939 - val_loss: 0.5934 - val_accuracy: 0.7883
Epoch 6/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.5613 - accuracy: 0.8130 - val_loss: 0.5497 - val_accuracy: 0.8033
Epoch 7/40
15000/15000 [==============================] - 1s 33us/sample - loss: 0.5123 - accuracy: 0.8317 - val_loss: 0.5056 - val_accuracy: 0.8215
Epoch 8/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.4645 - accuracy: 0.8505 - val_loss: 0.4640 - val_accuracy: 0.8386
Epoch 9/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.4211 - accuracy: 0.8649 - val_loss: 0.4288 - val_accuracy: 0.8475
Epoch 10/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.3838 - accuracy: 0.8771 - val_loss: 0.3997 - val_accuracy: 0.8541
Epoch 11/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.3525 - accuracy: 0.8837 - val_loss: 0.3772 - val_accuracy: 0.8593
Epoch 12/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.3260 - accuracy: 0.8908 - val_loss: 0.3577 - val_accuracy: 0.8655
Epoch 13/40
15000/15000 [==============================] - 0s 33us/sample - loss: 0.3038 - accuracy: 0.8981 - val_loss: 0.3439 - val_accuracy: 0.8706
Epoch 14/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.2846 - accuracy: 0.9032 - val_loss: 0.3311 - val_accuracy: 0.8715
Epoch 15/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.2675 - accuracy: 0.9087 - val_loss: 0.3220 - val_accuracy: 0.8744
Epoch 16/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.2530 - accuracy: 0.9128 - val_loss: 0.3135 - val_accuracy: 0.8781
Epoch 17/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.2395 - accuracy: 0.9179 - val_loss: 0.3064 - val_accuracy: 0.8787
Epoch 18/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.2271 - accuracy: 0.9207 - val_loss: 0.3019 - val_accuracy: 0.8796
Epoch 19/40
15000/15000 [==============================] - 0s 33us/sample - loss: 0.2163 - accuracy: 0.9261 - val_loss: 0.2978 - val_accuracy: 0.8830
Epoch 20/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.2061 - accuracy: 0.9287 - val_loss: 0.2933 - val_accuracy: 0.8828
Epoch 21/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1964 - accuracy: 0.9340 - val_loss: 0.2903 - val_accuracy: 0.8839
Epoch 22/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1877 - accuracy: 0.9377 - val_loss: 0.2884 - val_accuracy: 0.8847
Epoch 23/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1797 - accuracy: 0.9417 - val_loss: 0.2882 - val_accuracy: 0.8840
Epoch 24/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1715 - accuracy: 0.9449 - val_loss: 0.2860 - val_accuracy: 0.8854
Epoch 25/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1645 - accuracy: 0.9487 - val_loss: 0.2857 - val_accuracy: 0.8842
Epoch 26/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1572 - accuracy: 0.9515 - val_loss: 0.2852 - val_accuracy: 0.8855
Epoch 27/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1509 - accuracy: 0.9534 - val_loss: 0.2857 - val_accuracy: 0.8856
Epoch 28/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1446 - accuracy: 0.9560 - val_loss: 0.2869 - val_accuracy: 0.8860
Epoch 29/40
15000/15000 [==============================] - 1s 34us/sample - loss: 0.1389 - accuracy: 0.9592 - val_loss: 0.2871 - val_accuracy: 0.8850
Epoch 30/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1332 - accuracy: 0.9610 - val_loss: 0.2889 - val_accuracy: 0.8862
Epoch 31/40
15000/15000 [==============================] - 0s 33us/sample - loss: 0.1282 - accuracy: 0.9631 - val_loss: 0.2904 - val_accuracy: 0.8868
Epoch 32/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1239 - accuracy: 0.9636 - val_loss: 0.2922 - val_accuracy: 0.8856
Epoch 33/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1183 - accuracy: 0.9671 - val_loss: 0.2944 - val_accuracy: 0.8840
Epoch 34/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1138 - accuracy: 0.9681 - val_loss: 0.2964 - val_accuracy: 0.8851
Epoch 35/40
15000/15000 [==============================] - 0s 33us/sample - loss: 0.1101 - accuracy: 0.9689 - val_loss: 0.3009 - val_accuracy: 0.8813
Epoch 36/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.1051 - accuracy: 0.9711 - val_loss: 0.3017 - val_accuracy: 0.8844
Epoch 37/40
15000/15000 [==============================] - 1s 36us/sample - loss: 0.1017 - accuracy: 0.9727 - val_loss: 0.3036 - val_accuracy: 0.8847
Epoch 38/40
15000/15000 [==============================] - 1s 35us/sample - loss: 0.0976 - accuracy: 0.9739 - val_loss: 0.3066 - val_accuracy: 0.8842
Epoch 39/40
15000/15000 [==============================] - 1s 33us/sample - loss: 0.0935 - accuracy: 0.9765 - val_loss: 0.3107 - val_accuracy: 0.8836
Epoch 40/40
15000/15000 [==============================] - 0s 33us/sample - loss: 0.0896 - accuracy: 0.9771 - val_loss: 0.3131 - val_accuracy: 0.8832

In [25]:
results = model.evaluate(test_data,  test_labels, verbose=2)

print(results)


25000/25000 - 2s - loss: 0.3339 - accuracy: 0.8724
[0.33387627046585083, 0.87244]

In [26]:
history_dict = history.history
history_dict.keys()


Out[26]:
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])

In [27]:
import matplotlib.pyplot as plt

acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# "bo" stands for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# "b" stands for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()



In [28]:
plt.clf()   # clear the figure

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()



In [0]:
# Use seaborn to draw the pairplot matrix
!pip install -q seaborn

In [30]:
from __future__ import absolute_import, division, print_function, unicode_literals

import pathlib

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

import tensorflow as tf

from tensorflow import keras
from tensorflow.keras import layers

print(tf.__version__)


2.1.0

In [31]:
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path


Downloading data from http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
32768/30286 [================================] - 0s 5us/step
Out[31]:
'/root/.keras/datasets/auto-mpg.data'

In [32]:
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
                'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                      na_values = "?", comment='\t',
                      sep=" ", skipinitialspace=True)

dataset = raw_dataset.copy()
dataset.tail()


Out[32]:
      MPG  Cylinders  Displacement  Horsepower  Weight  Acceleration  Model Year  Origin
393  27.0          4         140.0        86.0  2790.0          15.6          82       1
394  44.0          4          97.0        52.0  2130.0          24.6          82       2
395  32.0          4         135.0        84.0  2295.0          11.6          82       1
396  28.0          4         120.0        79.0  2625.0          18.6          82       1
397  31.0          4         119.0        82.0  2720.0          19.4          82       1

In [33]:
dataset.isna().sum()


Out[33]:
MPG             0
Cylinders       0
Displacement    0
Horsepower      6
Weight          0
Acceleration    0
Model Year      0
Origin          0
dtype: int64

In [0]:
dataset = dataset.dropna()

In [0]:
origin = dataset.pop('Origin')

In [36]:
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()


Out[36]:
      MPG  Cylinders  Displacement  Horsepower  Weight  Acceleration  Model Year  USA  Europe  Japan
393  27.0          4         140.0        86.0  2790.0          15.6          82  1.0     0.0    0.0
394  44.0          4          97.0        52.0  2130.0          24.6          82  0.0     1.0    0.0
395  32.0          4         135.0        84.0  2295.0          11.6          82  1.0     0.0    0.0
396  28.0          4         120.0        79.0  2625.0          18.6          82  1.0     0.0    0.0
397  31.0          4         119.0        82.0  2720.0          19.4          82  1.0     0.0    0.0

In [0]:
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)

In [39]:
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")


Out[39]:
<seaborn.axisgrid.PairGrid at 0x7f67b78f1780>

In [40]:
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats


Out[40]:
              count         mean         std     min      25%     50%      75%     max
Cylinders     314.0     5.477707    1.699788     3.0     4.00     4.0     8.00     8.0
Displacement  314.0   195.318471  104.331589    68.0   105.50   151.0   265.75   455.0
Horsepower    314.0   104.869427   38.096214    46.0    76.25    94.5   128.00   225.0
Weight        314.0  2990.251592  843.898596  1649.0  2256.50  2822.5  3608.00  5140.0
Acceleration  314.0    15.559236    2.789230     8.0    13.80    15.5    17.20    24.8
Model Year    314.0    75.898089    3.675642    70.0    73.00    76.0    79.00    82.0
USA           314.0     0.624204    0.485101     0.0     0.00     1.0     1.00     1.0
Europe        314.0     0.178344    0.383413     0.0     0.00     0.0     0.00     1.0
Japan         314.0     0.197452    0.398712     0.0     0.00     0.0     0.00     1.0

In [0]:
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')

In [0]:
def norm(x):
  return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

In [0]:
def build_model():
  model = keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
    layers.Dense(64, activation='relu'),
    layers.Dense(1)
  ])

  optimizer = tf.keras.optimizers.RMSprop(0.001)

  model.compile(loss='mse',
                optimizer=optimizer,
                metrics=['mae', 'mse'])
  return model

In [0]:
model = build_model()

In [45]:
model.summary()


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_4 (Dense)              (None, 64)                640       
_________________________________________________________________
dense_5 (Dense)              (None, 64)                4160      
_________________________________________________________________
dense_6 (Dense)              (None, 1)                 65        
=================================================================
Total params: 4,865
Trainable params: 4,865
Non-trainable params: 0
_________________________________________________________________

In [46]:
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result


Out[46]:
array([[-0.63184714],
       [-0.35544115],
       [-0.52243835],
       [-0.43572852],
       [-0.10833971],
       [-0.15133286],
       [-0.1377702 ],
       [-0.08098418],
       [-0.20510164],
       [-0.45814574]], dtype=float32)

In [47]:
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
  def on_epoch_end(self, epoch, logs):
    if epoch % 100 == 0: print('')
    print('.', end='')

EPOCHS = 1000

history = model.fit(
  normed_train_data, train_labels,
  epochs=EPOCHS, validation_split = 0.2, verbose=0,
  callbacks=[PrintDot()])


....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................

In [48]:
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()


Out[48]:
         loss       mae       mse  val_loss   val_mae   val_mse  epoch
995  2.657506  1.005216  2.657506  9.196441  2.211369  9.196440    995
996  2.886383  1.070914  2.886383  9.030826  2.207536  9.030826    996
997  2.756728  1.085937  2.756727  9.361803  2.230031  9.361803    997
998  2.731188  1.053474  2.731188  9.842029  2.296555  9.842030    998
999  2.798108  1.062170  2.798108  9.378514  2.214941  9.378514    999

In [49]:
def plot_history(history):
  hist = pd.DataFrame(history.history)
  hist['epoch'] = history.epoch

  plt.figure()
  plt.xlabel('Epoch')
  plt.ylabel('Mean Abs Error [MPG]')
  plt.plot(hist['epoch'], hist['mae'],
           label='Train Error')
  plt.plot(hist['epoch'], hist['val_mae'],
           label = 'Val Error')
  plt.ylim([0,5])
  plt.legend()

  plt.figure()
  plt.xlabel('Epoch')
  plt.ylabel('Mean Square Error [$MPG^2$]')
  plt.plot(hist['epoch'], hist['mse'],
           label='Train Error')
  plt.plot(hist['epoch'], hist['val_mse'],
           label = 'Val Error')
  plt.ylim([0,20])
  plt.legend()
  plt.show()


plot_history(history)



In [50]:
model = build_model()

# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)

history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
                    validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])

plot_history(history)


...................................................
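
The usual next step, as with the classification model above, is to score the early-stopped model on the held-out test set. A sketch of that call (output not shown in this run):

loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))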