Commit 6b5d147c authored by Mark Daoust

Created using Colaboratory

parent fb90cf6e
@@ -16,6 +16,16 @@
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"[View in Colaboratory](https://colab.research.google.com/github/MarkDaoust/models/blob/get_started/samples/core/get_started/eager.ipynb)"
]
},
{
"metadata": {
"id": "rwxGnsA92emp",
@@ -355,7 +365,7 @@
"train_dataset = tf.contrib.data.make_csv_dataset(\n",
" train_dataset_fp, batch_size, \n",
" column_names=column_names,\n",
" label_name='species',\n",
" label_name=label_name,\n",
" num_epochs=1)"
],
"execution_count": 0,
@@ -381,11 +391,11 @@
},
"cell_type": "code",
"source": [
"features, labels = next(iter(train_dataset))\n",
" \n",
"plt.scatter(features['petal_length'], features['petal_width'])\n",
"plt.xlabel(\"Petal Length\")\n",
"plt.ylabel(\"Petal Width\")\n"
"for features, labels in train_dataset.take(1):\n",
" plt.scatter(features['petal_length'], features['sepal_length'], \n",
" c=labels, cmap='viridis')\n",
" plt.xlabel(\"Petal Length\")\n",
" plt.ylabel(\"Sepal Length\")\n"
],
"execution_count": 0,
"outputs": []
@@ -400,7 +410,6 @@
"To simplify the model building, let's repackage the features dictionary into an array with shape ``(batch_size,num_features)`.\n",
"\n",
"To do this we'll write a simple function using the [tf.stack](https://www.tensorflow.org/api_docs/python/tf/data/dataset/map) method to pack the features into a single array. Then we'll use the [tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/dataset/map) method to apply this function to each `(features,label)` pair in the dataset. :\n"
]
},
{
@@ -412,8 +421,8 @@
"cell_type": "code",
"source": [
"def pack_features_vector(features,labels):\n",
" features = tf.stack([features[name] for name in feature_names],\n",
" axis=1)\n",
" values = [value for value in features.values()]\n",
" features = tf.stack(values, axis=1)\n",
" return features, labels\n",
" \n",
"train_dataset = train_dataset.map(pack_features_vector)"
@@ -439,9 +448,8 @@
},
"cell_type": "code",
"source": [
"features,labels = next(iter(train_dataset))\n",
" \n",
"features[:5]"
"for features,labels in train_dataset.take(1):\n",
" print(features[:5])"
],
"execution_count": 0,
"outputs": []
@@ -539,8 +547,8 @@
},
"cell_type": "code",
"source": [
"prediction = model(features)\n",
"prediction[:5]"
"predictions = model(features)\n",
"predictions[:5]"
],
"execution_count": 0,
"outputs": []
@@ -554,7 +562,32 @@
"source": [
"For each example it returns a *[logit](https://developers.google.com/machine-learning/crash-course/glossary#logits)* score for each class. \n",
"\n",
"You can convert logits to probabilities for each class using the [tf.nn.softmax](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) function.\n",
"You can convert logits to probabilities for each class using the [tf.nn.softmax](https://www.tensorflow.org/api_docs/python/tf/nn/softmax) function."
]
},
{
"metadata": {
"id": "2fas18iHoiGB",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"prob = tf.nn.softmax(predictions[:5])\n",
"\n",
"prob"
],
"execution_count": 0,
"outputs": []
},
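Because softmax normalizes each row of logits into a probability distribution, a quick illustrative check (assuming the cell above has run) is that every row of `prob` sums to 1:

```python
# Each row of `prob` is a distribution over the three classes.
tf.reduce_sum(prob, axis=1)  # ~[1., 1., 1., 1., 1.]
```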
{
"metadata": {
"id": "uRZmchElo481",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"Taking the `tf.argmax` across the `classes` axis would give us the predicted class index.\n",
"\n",
"The model hasn't been trained yet, so these aren't very good predictions."
]
@@ -567,7 +600,20 @@
},
"cell_type": "code",
"source": [
"tf.nn.softmax(prediction[:5])"
"tf.argmax(predictions, axis=1)"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "8w3eDAp9o0G9",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"labels"
],
"execution_count": 0,
"outputs": []
@@ -660,7 +706,7 @@
"def grad(model, inputs, targets):\n",
" with tf.GradientTape() as tape:\n",
" loss_value = loss(model, inputs, targets)\n",
" return tape.gradient(loss_value, model.trainable_variables)"
" return tape.gradient(loss_value, model.trainable_variables), loss_value"
],
"execution_count": 0,
"outputs": []
@@ -732,10 +778,11 @@
},
"cell_type": "code",
"source": [
"grads, loss_value = grad(model, features, labels)\n",
"\n",
"print(\"Step: \", global_step.numpy())\n",
"print(\"Initial loss:\", loss(model, features, labels).numpy())\n",
"print(\"Initial loss:\", loss_value.numpy())\n",
"\n",
"grads = grad(model, features, labels)\n",
"optimizer.apply_gradients(zip(grads, model.variables), global_step)\n",
"\n",
"print()\n",
@@ -789,12 +836,12 @@
" # Training loop - using batches of 32\n",
" for x, y in train_dataset:\n",
" # Optimize the model\n",
" grads = grad(model, x, y)\n",
" grads, loss_value = grad(model, x, y)\n",
" optimizer.apply_gradients(zip(grads, model.variables),\n",
" global_step)\n",
"\n",
" # Track progress\n",
" epoch_loss_avg(loss(model, x, y)) # add current batch loss\n",
" epoch_loss_avg(loss_value) # add current batch loss\n",
" # compare predicted label to actual label\n",
" epoch_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32), y)\n",
"\n",
......
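The `epoch_loss_avg` and `epoch_accuracy` objects used by this loop are created outside the hunk shown here; a sketch of the assumed setup with the `tf.contrib.eager` metrics API:

```python
import tensorflow.contrib.eager as tfe

# Created once per epoch; calling the object updates its running state,
# and .result() yields the aggregate afterwards.
epoch_loss_avg = tfe.metrics.Mean()
epoch_accuracy = tfe.metrics.Accuracy()
```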