"git@developer.sourcefind.cn:orangecat/ollama.git" did not exist on "12ab8f8f5f9a1e206d57cb7ce664bffff5faeed3"
Commit a03c1ac1 authored by Mark Daoust

Fix review comments.

Add note about keras layers and models.
parent e1be4e9d
@@ -385,7 +385,7 @@
 "with tf.Graph().as_default(): \n",
 "  with tf.Session():\n",
 "    try:\n",
-"      print(tf_f(tf.constant(0)).eval())\n",
+"      print(f(tf.constant(0)).eval())\n",
 "    except tf.errors.InvalidArgumentError as e:\n",
 "      print('Got error message:\\n %s' % e.message)"
 ],
@@ -451,7 +451,8 @@
 "def f(n):\n",
 "  z = []\n",
 "  # We ask you to tell us the element dtype of the list\n",
-"  z = autograph.utils.set_element_type(z, tf.int32)\n",
+"  autograph.utils.set_element_type(z, tf.int32)\n",
+"  \n",
 "  for i in range(n):\n",
 "    z.append(i)\n",
 "  # when you're done with the list, stack it\n",
@@ -474,7 +475,7 @@
 },
 "cell_type": "markdown",
 "source": [
-"### Nested If statement"
+"### Nested if statements"
 ]
 },
 {
@@ -571,6 +572,16 @@
 "execution_count": 0,
 "outputs": []
 },
+{
+"metadata": {
+"id": "hy99pRWpMcuN",
+"colab_type": "text"
+},
+"cell_type": "markdown",
+"source": [
+""
+]
+},
 {
 "metadata": {
 "id": "4LfnJjm0Bm0B",
@@ -578,10 +589,14 @@
 },
 "cell_type": "markdown",
 "source": [
-"## Advanced example: A training, loop in-graph\n",
+"## Advanced example: A training loop in-graph\n",
 "\n",
 "Writing control flow in AutoGraph is easy, so running a training loop in a TensorFlow graph should be easy as well! \n",
 "\n",
+"<!--TODO(markdaoust) link to examples showing autograph **in** keras models when ready-->\n",
+"\n",
+"Important: While this example wraps a `tf.keras.Model` using autograph, `tf.contrib.autograph` is fully compatible with `tf.keras` and can be used in the definitions of [custom keras layers and models](http://tensorflow.org/guide/keras#build_advanced_models). The easiest way is to `@autograph.convert()` the `call` method.\n",
+"\n",
 "Here, we show an example of training a simple Keras model on MNIST, where the entire training process -- loading batches, calculating gradients, updating parameters, calculating validation accuracy, and repeating until convergence -- is done in-graph."
 ]
 },
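The added note is terse, so here is a rough illustration of what it describes: a subclassed `tf.keras.Model` whose `call` method is decorated with `@autograph.convert()` so that plain Python control flow inside it becomes graph ops. The model (`ClippedDense`) and its logic are hypothetical, chosen only to show the decorator placement; only `autograph.convert()` and the `tf.keras` subclassing API come from the note itself.

```python
# Illustrative sketch only; the class and its clipping logic are hypothetical.
import tensorflow as tf
from tensorflow.contrib import autograph

class ClippedDense(tf.keras.Model):
  def __init__(self, units):
    super(ClippedDense, self).__init__()
    self.dense = tf.keras.layers.Dense(units)

  @autograph.convert()  # the "easiest way" mentioned in the note above
  def call(self, inputs):
    x = self.dense(inputs)
    # Plain Python `if` on a Tensor; AutoGraph rewrites it into a tf.cond.
    if tf.reduce_mean(x) > 0:
      x = tf.minimum(x, 1.0)
    else:
      x = tf.maximum(x, -1.0)
    return x

model = ClippedDense(10)
outputs = model(tf.random_normal([4, 8]))  # builds graph ops from the converted call
```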
@@ -700,20 +715,20 @@
 "  # to convert these lists into their graph equivalent,\n",
 "  # we need to specify the element type of the lists.\n",
 "  train_losses = []\n",
-"  train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
+"  autograph.utils.set_element_type(train_losses, tf.float32)\n",
 "  test_losses = []\n",
-"  test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
+"  autograph.utils.set_element_type(test_losses, tf.float32)\n",
 "  train_accuracies = []\n",
-"  train_accuracies = autograph.utils.set_element_type(train_accuracies, tf.float32)\n",
+"  autograph.utils.set_element_type(train_accuracies, tf.float32)\n",
 "  test_accuracies = []\n",
-"  test_accuracies = autograph.utils.set_element_type(test_accuracies, tf.float32)\n",
+"  autograph.utils.set_element_type(test_accuracies, tf.float32)\n",
 "  \n",
 "  # This entire training loop will be run in-graph.\n",
 "  i = tf.constant(0)\n",
 "  while i < hp.max_steps:\n",
 "    train_x, train_y = get_next_batch(train_ds)\n",
 "    test_x, test_y = get_next_batch(test_ds)\n",
-"    # add get next\n",
+"\n",
 "    step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
 "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
 "    if i % (hp.max_steps // 10) == 0:\n",
@@ -790,19 +805,6 @@
 ],
 "execution_count": 0,
 "outputs": []
-},
-{
-"metadata": {
-"id": "ZpEIfs5jn6jw",
-"colab_type": "code",
-"colab": {}
-},
-"cell_type": "code",
-"source": [
-""
-],
-"execution_count": 0,
-"outputs": []
 }
 ]
 }
\ No newline at end of file