diff --git a/site/en/tutorials/keras/overfit_and_underfit.ipynb b/site/en/tutorials/keras/overfit_and_underfit.ipynb
index 87c80de937..29fbfc49c8 100644
--- a/site/en/tutorials/keras/overfit_and_underfit.ipynb
+++ b/site/en/tutorials/keras/overfit_and_underfit.ipynb
@@ -543,10 +543,10 @@
     "  model.summary()\n",
     "\n",
     "  history = model.fit(\n",
-    "    train_ds,\n",
+    "    train_ds.map(lambda x, y: (x, tf.expand_dims(y, axis=-1))),\n",
     "    steps_per_epoch = STEPS_PER_EPOCH,\n",
     "    epochs=max_epochs,\n",
-    "    validation_data=validate_ds,\n",
+    "    validation_data=validate_ds.map(lambda x, y: (x, tf.expand_dims(y, axis=-1))),\n",
     "    callbacks=get_callbacks(name),\n",
     "    verbose=0)\n",
     "  return history"
@@ -977,7 +977,7 @@
    "source": [
     "`l2(0.001)` means that every coefficient in the weight matrix of the layer will add `0.001 * weight_coefficient_value**2` to the total **loss** of the network.\n",
     "\n",
-    "That is why we're monitoring the `binary_crossentropy` directly. Because it doesn't have this regularization component mixed in.\n",
+    "That is why you need to monitor the `binary_crossentropy` directly: it doesn't have this regularization component mixed in.\n",
     "\n",
     "So, that same `\"Large\"` model with an `L2` regularization penalty performs much better:\n"
    ]
@@ -1228,10 +1228,9 @@
   }
  ],
 "metadata": {
-  "accelerator": "GPU",
  "colab": {
   "name": "overfit_and_underfit.ipynb",
-   "toc_visible": true
+   "toc_visible": true
  },
 "kernelspec": {
  "display_name": "Python 3",
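
For context on the first hunk: the tutorial's pipeline yields rank-1 labels, while the model ends in a single-unit Dense layer whose output has shape (batch, 1). The added .map(...) gives the labels a matching trailing axis. Below is a minimal sketch of the effect; the tensors here are made-up stand-ins, not the notebook's real train_ds (which is built from the Higgs dataset earlier in the tutorial).

    import tensorflow as tf

    # Stand-in for the notebook's dataset: 28 features, scalar 0/1 labels.
    features = tf.random.normal([500, 28])
    labels = tf.cast(tf.random.uniform([500]) > 0.5, tf.float32)
    ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(100)

    for _, y in ds.take(1):
      print(y.shape)  # (100,) -- rank-1 labels

    # The patched fit() call applies this same map, adding a trailing axis
    # so labels line up with the (batch, 1) output of the final Dense(1).
    ds_expanded = ds.map(lambda x, y: (x, tf.expand_dims(y, axis=-1)))

    for _, y in ds_expanded.take(1):
      print(y.shape)  # (100, 1) -- now aligned with the model's output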
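For the second hunk's point that the regularization penalty is folded into the total loss: a small sketch (arbitrary input width, not from the notebook) confirming that l2(0.001) adds 0.001 * w**2 per weight, which is exactly why the plain binary_crossentropy metric is the fairer quantity to monitor.

    import tensorflow as tf

    # A Dense layer with the same penalty as the tutorial's "Large" L2 model.
    layer = tf.keras.layers.Dense(
        1, kernel_regularizer=tf.keras.regularizers.l2(0.001))
    _ = layer(tf.ones([1, 4]))  # call once so the kernel gets built

    # Keras exposes the penalty via layer.losses; recompute it by hand.
    manual = 0.001 * tf.reduce_sum(tf.square(layer.kernel))
    print(float(layer.losses[0]), float(manual))  # the two values match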