Commit 741690f9 authored by Dan Moldovan

Fix a couple of bugs: replaced the fizzbuzz call whose signature no longer matched, and removed the Flatten layer, which seems to be buggy. Also includes minor edits to match the style guide.
parent 22e248ce
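Since the Flatten layer is dropped from the Sequential model, the input is now flattened at the call site instead (see the mlp_model and predict hunks below). A minimal sketch of that pattern, assuming the TF 1.x-style APIs used in this notebook; the dummy batch shape mirrors the notebook's train_batch and is illustrative only:

import numpy as np
import tensorflow as tf

def mlp_model(input_shape):
  # The Flatten layer is gone, so the model expects already-flattened inputs.
  return tf.keras.Sequential((
      tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),
      tf.keras.layers.Dense(100, activation='relu'),
      tf.keras.layers.Dense(10, activation='softmax')))

with tf.Graph().as_default():
  m = mlp_model((28 * 28,))
  x = tf.constant(np.random.randn(64, 28, 28, 1).astype(np.float32))
  y_p = m(tf.reshape(x, (-1, 28 * 28)))  # flatten at the call site instead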
@@ -593,19 +593,19 @@
 "cell_type": "code",
 "source": [
 "@autograph.convert()\n",
-"def fizzbuzz_each(nums):\n",
+"def squares(nums):\n",
 "\n",
 "  result = []\n",
-"  autograph.set_element_type(result, tf.string)\n",
+"  autograph.set_element_type(result, tf.int64)\n",
 "\n",
 "  for num in nums: \n",
-"    result.append(fizzbuzz(num))\n",
+"    result.append(num * num)\n",
 "    \n",
 "  return autograph.stack(result)\n",
 "  \n",
 "with tf.Graph().as_default(): \n",
 "  with tf.Session() as sess:\n",
-"    print(sess.run(fizzbuzz_each(tf.constant(np.arange(10)))))"
+"    print(sess.run(squares(tf.constant(np.arange(10)))))"
 ],
 "execution_count": 0,
 "outputs": []
@@ -679,24 +679,24 @@
 "\n",
 "@autograph.convert()\n",
 "def collatz(x):\n",
-"  x=tf.reshape(x,())\n",
-"  assert x>0\n",
+"  x = tf.reshape(x,())\n",
+"  assert x > 0\n",
 "  n = tf.convert_to_tensor((0,)) \n",
-"  while not tf.equal(x,1):\n",
-"    n+=1\n",
+"  while not tf.equal(x, 1):\n",
+"    n += 1\n",
 "    if tf.equal(x%2, 0):\n",
-"      x = x//2\n",
+"      x = x // 2\n",
 "    else:\n",
-"      x = 3*x+1\n",
+"      x = 3 * x + 1\n",
 "    \n",
 "  return n\n",
 "\n",
 "with tf.Graph().as_default():\n",
 "  model = tf.keras.Sequential([\n",
-"    tf.keras.layers.Lambda(collatz, input_shape=(1,), output_shape=(), )\n",
+"    tf.keras.layers.Lambda(collatz, input_shape=(1,), output_shape=())\n",
 "  ])\n",
 "  \n",
-"result = model.predict(np.array([6171])) #261\n",
+"result = model.predict(np.array([6171]))\n",
 "result"
 ],
 "execution_count": 0,
@@ -738,7 +738,7 @@
 "  def build(self,input_shape):\n",
 "    super().build(input_shape.as_list())\n",
 "    self.depth = len(self.layers)\n",
-"    self.plims = np.linspace(self.pfirst, self.plast, self.depth+1)[:-1]\n",
+"    self.plims = np.linspace(self.pfirst, self.plast, self.depth + 1)[:-1]\n",
 "  \n",
 "  @autograph.convert()\n",
 "  def call(self, inputs):\n",
@@ -749,7 +749,7 @@
 "    \n",
 "    p = tf.random_uniform((self.depth,))\n",
 "    \n",
-"    keeps = p<=self.plims\n",
+"    keeps = (p <= self.plims)\n",
 "    x = inputs\n",
 "    \n",
 "    count = tf.reduce_sum(tf.cast(keeps, tf.int32))\n",
@@ -781,7 +781,7 @@
 },
 "cell_type": "code",
 "source": [
-"train_batch = np.random.randn(64, 28,28,1).astype(np.float32)"
+"train_batch = np.random.randn(64, 28, 28, 1).astype(np.float32)"
 ],
 "execution_count": 0,
 "outputs": []
@@ -811,9 +811,9 @@
 "  for n in range(20):\n",
 "    model.add(\n",
 "        layers.Conv2D(filters=16, activation=tf.nn.relu,\n",
-"                      kernel_size=(3,3), padding='same'))\n",
+"                      kernel_size=(3, 3), padding='same'))\n",
 "\n",
-"  model.build(tf.TensorShape((None, None, None,1)))\n",
+"  model.build(tf.TensorShape((None, None, None, 1)))\n",
 "  \n",
 "  init = tf.global_variables_initializer()"
 ],
@@ -918,7 +918,6 @@
 "source": [
 "def mlp_model(input_shape):\n",
 "  model = tf.keras.Sequential((\n",
-"      tf.keras.layers.Flatten(),\n",
 "      tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
 "      tf.keras.layers.Dense(100, activation='relu'),\n",
 "      tf.keras.layers.Dense(10, activation='softmax')))\n",
@@ -927,7 +926,7 @@
 "\n",
 "\n",
 "def predict(m, x, y):\n",
-"  y_p = m(x)\n",
+"  y_p = m(tf.reshape(x, (-1, 28 * 28)))\n",
 "  losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
 "  l = tf.reduce_mean(losses)\n",
 "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
@@ -959,7 +958,7 @@
 "def get_next_batch(ds):\n",
 "  itr = ds.make_one_shot_iterator()\n",
 "  image, label = itr.get_next()\n",
-"  x = tf.to_float(image)/255.0\n",
+"  x = tf.to_float(image) / 255.0\n",
 "  y = tf.one_hot(tf.squeeze(label), 10)\n",
 "  return x, y "
 ],