Commit 6b5d147c authored by Mark Daoust

Created using Colaboratory

parent fb90cf6e
@@ -16,6 +16,16 @@
     }
   },
   "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "view-in-github",
+        "colab_type": "text"
+      },
+      "source": [
+        "[View in Colaboratory](https://colab.research.google.com/github/MarkDaoust/models/blob/get_started/samples/core/get_started/eager.ipynb)"
+      ]
+    },
     {
       "metadata": {
         "id": "rwxGnsA92emp",
@@ -355,7 +365,7 @@
         "train_dataset = tf.contrib.data.make_csv_dataset(\n",
         "    train_dataset_fp, batch_size, \n",
         "    column_names=column_names,\n",
-        "    label_name='species',\n",
+        "    label_name=label_name,\n",
         "    num_epochs=1)"
       ],
       "execution_count": 0,
@@ -381,11 +391,11 @@
       },
       "cell_type": "code",
       "source": [
-        "features, labels = next(iter(train_dataset))\n",
-        " \n",
-        "plt.scatter(features['petal_length'], features['petal_width'])\n",
-        "plt.xlabel(\"Petal Length\")\n",
-        "plt.ylabel(\"Petal Width\")\n"
+        "for features, labels in train_dataset.take(1):\n",
+        "  plt.scatter(features['petal_length'], features['sepal_length'], \n",
+        "              c=labels, cmap='viridis')\n",
+        "  plt.xlabel(\"Petal Length\")\n",
+        "  plt.ylabel(\"Sepal Length\")\n"
       ],
       "execution_count": 0,
       "outputs": []
@@ -400,7 +410,6 @@
         "To simplify the model building, let's repackage the features dictionary into an array with shape `(batch_size, num_features)`.\n",
         "\n",
-        "To do this we'll write a simple function using the [tf.stack](https://www.tensorflow.org/api_docs/python/tf/stack) method to pack the features into a single array. Then we'll use the [tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map) method to apply this function to each `(features, label)` pair in the dataset:\n",
-        "\n"
+        "To do this we'll write a simple function using the [tf.stack](https://www.tensorflow.org/api_docs/python/tf/stack) method to pack the features into a single array. Then we'll use the [tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map) method to apply this function to each `(features, label)` pair in the dataset:\n"
       ]
     },
     {
@@ -412,8 +421,8 @@
       "cell_type": "code",
       "source": [
         "def pack_features_vector(features, labels):\n",
-        "  features = tf.stack([features[name] for name in feature_names],\n",
-        "                      axis=1)\n",
+        "  values = [value for value in features.values()]\n",
+        "  features = tf.stack(values, axis=1)\n",
         "  return features, labels\n",
         "\n",
         "train_dataset = train_dataset.map(pack_features_vector)"
@@ -439,9 +448,8 @@
       },
       "cell_type": "code",
       "source": [
-        "features, labels = next(iter(train_dataset))\n",
-        " \n",
-        "features[:5]"
+        "for features, labels in train_dataset.take(1):\n",
+        "  print(features[:5])"
       ],
       "execution_count": 0,
       "outputs": []
@@ -539,8 +547,8 @@
       },
       "cell_type": "code",
       "source": [
-        "prediction = model(features)\n",
-        "prediction[:5]"
+        "predictions = model(features)\n",
+        "predictions[:5]"
       ],
       "execution_count": 0,
       "outputs": []
@@ -554,7 +562,32 @@
       "source": [
         "For each example it returns a *[logit](https://developers.google.com/machine-learning/crash-course/glossary#logits)* score for each class. \n",
         "\n",
-        "You can convert logits to probabilities for each class using the [tf.nn.softmax](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) function.\n",
+        "You can convert logits to probabilities for each class using the [tf.nn.softmax](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) function."
+      ]
+    },
+    {
+      "metadata": {
+        "id": "2fas18iHoiGB",
+        "colab_type": "code",
+        "colab": {}
+      },
+      "cell_type": "code",
+      "source": [
+        "prob = tf.nn.softmax(predictions[:5])\n",
+        "\n",
+        "prob"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },
+    {
+      "metadata": {
+        "id": "uRZmchElo481",
+        "colab_type": "text"
+      },
+      "cell_type": "markdown",
+      "source": [
+        "Taking the `tf.argmax` across the `classes` axis would give us the predicted class index.\n",
         "\n",
         "The model hasn't been trained yet, so these aren't very good predictions."
@@ -567,7 +600,20 @@
       },
       "cell_type": "code",
       "source": [
-        "tf.nn.softmax(prediction[:5])"
+        "tf.argmax(predictions, axis=1)"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },
+    {
+      "metadata": {
+        "id": "8w3eDAp9o0G9",
+        "colab_type": "code",
+        "colab": {}
+      },
+      "cell_type": "code",
+      "source": [
+        "labels"
       ],
       "execution_count": 0,
       "outputs": []
@@ -660,7 +706,7 @@
         "def grad(model, inputs, targets):\n",
         "  with tf.GradientTape() as tape:\n",
         "    loss_value = loss(model, inputs, targets)\n",
-        "  return tape.gradient(loss_value, model.trainable_variables)"
+        "  return tape.gradient(loss_value, model.trainable_variables), loss_value"
       ],
       "execution_count": 0,
       "outputs": []
@@ -732,10 +778,11 @@
       },
       "cell_type": "code",
       "source": [
+        "grads, loss_value = grad(model, features, labels)\n",
+        "\n",
         "print(\"Step: \", global_step.numpy())\n",
-        "print(\"Initial loss:\", loss(model, features, labels).numpy())\n",
+        "print(\"Initial loss:\", loss_value.numpy())\n",
         "\n",
-        "grads = grad(model, features, labels)\n",
         "optimizer.apply_gradients(zip(grads, model.variables), global_step)\n",
         "\n",
         "print()\n",
@@ -789,12 +836,12 @@
         "  # Training loop - using batches of 32\n",
         "  for x, y in train_dataset:\n",
         "    # Optimize the model\n",
-        "    grads = grad(model, x, y)\n",
+        "    grads, loss_value = grad(model, x, y)\n",
         "    optimizer.apply_gradients(zip(grads, model.variables),\n",
         "                              global_step)\n",
         "\n",
         "    # Track progress\n",
-        "    epoch_loss_avg(loss(model, x, y))  # add current batch loss\n",
+        "    epoch_loss_avg(loss_value)  # add current batch loss\n",
         "    # compare predicted label to actual label\n",
         "    epoch_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32), y)\n",
@@ -1034,4 +1081,4 @@
       ]
     }
   ]
-}
+}
\ No newline at end of file