From 2eb7d1e4c0ec1343d58ba6e310c28e7804e16e1f Mon Sep 17 00:00:00 2001
From: Hossein Sheikhi
Date: Tue, 19 May 2020 15:09:18 -0700
Subject: [PATCH 1/2] Modify tf2 linear regression loss function

---
 .../notebooks/2_BasicModels/linear_regression.ipynb | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb b/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb
index 17b57b8a..83ad9a53 100644
--- a/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb
+++ b/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb
@@ -56,8 +56,7 @@
     "X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,\n",
     "              7.042,10.791,5.313,7.997,5.654,9.27,3.1])\n",
     "Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,\n",
-    "              2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n",
-    "n_samples = X.shape[0]"
+    "              2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n"
    ]
   },
   {
@@ -76,7 +75,7 @@
     "\n",
     "# Mean square error.\n",
     "def mean_square(y_pred, y_true):\n",
-    "    return tf.reduce_sum(tf.pow(y_pred-y_true, 2)) / (2 * n_samples)\n",
+    "    return tf.reduce_mean(tf.square(y_pred - y_true))\n",
     "\n",
     "# Stochastic Gradient Descent Optimizer.\n",
     "optimizer = tf.optimizers.SGD(learning_rate)"

From 855709fa85925afb4d0e444e0aad9b78ebf3e8f6 Mon Sep 17 00:00:00 2001
From: Hossein Sheikhi
Date: Wed, 20 May 2020 13:33:37 -0700
Subject: [PATCH 2/2] Fix syntax error in neural_network.ipynb

---
 tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb
index 77926535..9ecf0f2c 100644
--- a/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb
+++ b/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb
@@ -116,7 +116,7 @@
     "    # Set forward pass.\n",
     "    def call(self, x, is_training=False):\n",
     "        x = self.fc1(x)\n",
-    "        x = self.fc2(x)\n"
+    "        x = self.fc2(x)\n",
     "        x = self.out(x)\n",
     "        if not is_training:\n",
     "            # tf cross entropy expect logits without softmax, so only\n",
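
Note on PATCH 1/2: the removed expression, tf.reduce_sum(tf.pow(y_pred - y_true, 2)) / (2 * n_samples), is the "half mean squared error" convention (the 1/2 cancels against the exponent when differentiating), while tf.reduce_mean(tf.square(y_pred - y_true)) is the conventional MSE. The two differ only by a constant factor of 2, which SGD absorbs into the effective learning rate, so the minimizer is unchanged, and the new form no longer needs the n_samples variable that the patch also removes from the data cell. A minimal sketch checking that relationship, assuming TensorFlow 2.x (the values are illustrative, not the notebook's full dataset):

    import numpy as np
    import tensorflow as tf

    y_true = tf.constant([1.7, 2.76, 2.09], dtype=tf.float32)
    y_pred = tf.constant([1.5, 2.90, 2.00], dtype=tf.float32)
    n_samples = int(y_true.shape[0])

    # Old formulation: half the summed squared error, scaled by 1/n.
    old_loss = tf.reduce_sum(tf.pow(y_pred - y_true, 2)) / (2 * n_samples)

    # New formulation: the conventional mean squared error.
    new_loss = tf.reduce_mean(tf.square(y_pred - y_true))

    # The two agree up to a constant factor of 2.
    assert np.isclose(new_loss.numpy(), 2 * old_loss.numpy())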
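
Note on PATCH 2/2: the "syntax error" lives in the notebook's JSON source, not in the Python code. In an .ipynb file each code line is one string in the cell's "source" list, and the line "        x = self.fc2(x)\n" was missing its trailing comma, so the whole notebook failed to parse as JSON. A quick way to confirm the fix, assuming a checkout of the repository (standard library only; the path is taken from the diff above):

    import json

    # json.load raises json.JSONDecodeError if a comma is still missing
    # anywhere in the notebook file.
    path = "tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb"
    with open(path) as f:
        nb = json.load(f)

    # Each cell's "source" should be a list of strings, one per line.
    assert all(isinstance(line, str)
               for cell in nb["cells"] for line in cell["source"])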