Skip to content
Permalink
Browse files

tensorflow2.0

  • Loading branch information...
czy36mengfei committed Mar 11, 2019
1 parent ebf68b0 commit 6a32897ef1f72642047c31480abc0416c6f3ed9b
@@ -543,6 +543,91 @@
"vae.fit(x_train, x_train, epochs=3, batch_size=64)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"自己编写训练方法"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start of epoch 0\n",
"step 0: mean loss = tf.Tensor(213.26726, shape=(), dtype=float32)\n",
"step 100: mean loss = tf.Tensor(6.5270114, shape=(), dtype=float32)\n",
"step 200: mean loss = tf.Tensor(3.3300452, shape=(), dtype=float32)\n",
"step 300: mean loss = tf.Tensor(2.2522914, shape=(), dtype=float32)\n",
"step 400: mean loss = tf.Tensor(1.7097591, shape=(), dtype=float32)\n",
"step 500: mean loss = tf.Tensor(1.3835965, shape=(), dtype=float32)\n",
"step 600: mean loss = tf.Tensor(1.1659158, shape=(), dtype=float32)\n",
"step 700: mean loss = tf.Tensor(1.0099952, shape=(), dtype=float32)\n",
"step 800: mean loss = tf.Tensor(0.89288116, shape=(), dtype=float32)\n",
"step 900: mean loss = tf.Tensor(0.8014734, shape=(), dtype=float32)\n",
"Start of epoch 1\n",
"step 0: mean loss = tf.Tensor(0.77191323, shape=(), dtype=float32)\n",
"step 100: mean loss = tf.Tensor(0.70431703, shape=(), dtype=float32)\n",
"step 200: mean loss = tf.Tensor(0.6486862, shape=(), dtype=float32)\n",
"step 300: mean loss = tf.Tensor(0.60191154, shape=(), dtype=float32)\n",
"step 400: mean loss = tf.Tensor(0.56213117, shape=(), dtype=float32)\n",
"step 500: mean loss = tf.Tensor(0.52777255, shape=(), dtype=float32)\n",
"step 600: mean loss = tf.Tensor(0.49796674, shape=(), dtype=float32)\n",
"step 700: mean loss = tf.Tensor(0.47174037, shape=(), dtype=float32)\n",
"step 800: mean loss = tf.Tensor(0.4485459, shape=(), dtype=float32)\n",
"step 900: mean loss = tf.Tensor(0.4277973, shape=(), dtype=float32)\n",
"Start of epoch 2\n",
"step 0: mean loss = tf.Tensor(0.42051753, shape=(), dtype=float32)\n",
"step 100: mean loss = tf.Tensor(0.40269083, shape=(), dtype=float32)\n",
"step 200: mean loss = tf.Tensor(0.38661462, shape=(), dtype=float32)\n",
"step 300: mean loss = tf.Tensor(0.3719676, shape=(), dtype=float32)\n",
"step 400: mean loss = tf.Tensor(0.35864368, shape=(), dtype=float32)\n",
"step 500: mean loss = tf.Tensor(0.3463759, shape=(), dtype=float32)\n",
"step 600: mean loss = tf.Tensor(0.33514142, shape=(), dtype=float32)\n",
"step 700: mean loss = tf.Tensor(0.3247494, shape=(), dtype=float32)\n",
"step 800: mean loss = tf.Tensor(0.3151487, shape=(), dtype=float32)\n",
"step 900: mean loss = tf.Tensor(0.3061987, shape=(), dtype=float32)\n"
]
}
],
"source": [
"train_dataset = tf.data.Dataset.from_tensor_slices(x_train)\n",
"train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)\n",
"\n",
"original_dim = 784\n",
"vae = VAE(original_dim, 64, 32)\n",
"\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
"mse_loss_fn = tf.keras.losses.MeanSquaredError()\n",
"\n",
"loss_metric = tf.keras.metrics.Mean()\n",
"\n",
"# Iterate over epochs.\n",
"for epoch in range(3):\n",
" print('Start of epoch %d' % (epoch,))\n",
"\n",
" # Iterate over the batches of the dataset.\n",
" for step, x_batch_train in enumerate(train_dataset):\n",
" with tf.GradientTape() as tape:\n",
" reconstructed = vae(x_batch_train)\n",
" # Compute reconstruction loss\n",
" loss = mse_loss_fn(x_batch_train, reconstructed)\n",
" loss += sum(vae.losses) # Add KLD regularization loss\n",
" \n",
" grads = tape.gradient(loss, vae.trainable_variables)\n",
" optimizer.apply_gradients(zip(grads, vae.trainable_variables))\n",
" \n",
" loss_metric(loss)\n",
" \n",
" if step % 100 == 0:\n",
" print('step %s: mean loss = %s' % (step, loss_metric.result()))"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -0,0 +1,123 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# tensorflow2教程-keras模型保存和序列化\n",
"\n",
"## 1.保存序列模型和函数模型"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"3_layer_mlp\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"digits (InputLayer) [(None, 784)] 0 \n",
"_________________________________________________________________\n",
"dense_1 (Dense) (None, 64) 50240 \n",
"_________________________________________________________________\n",
"dense_2 (Dense) (None, 64) 4160 \n",
"_________________________________________________________________\n",
"predictions (Dense) (None, 10) 650 \n",
"=================================================================\n",
"Total params: 55,050\n",
"Trainable params: 55,050\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"60000/60000 [==============================] - 1s 23us/sample - loss: 0.3136\n"
]
}
],
"source": [
"# 构建一个简单的模型并训练\n",
"from __future__ import absolute_import, division, print_function\n",
"import tensorflow as tf\n",
"tf.keras.backend.clear_session()\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"\n",
"inputs = keras.Input(shape=(784,), name='digits')\n",
"x = layers.Dense(64, activation='relu', name='dense_1')(inputs)\n",
"x = layers.Dense(64, activation='relu', name='dense_2')(x)\n",
"outputs = layers.Dense(10, activation='softmax', name='predictions')(x)\n",
"\n",
"model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_mlp')\n",
"model.summary()\n",
"(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n",
"x_train = x_train.reshape(60000, 784).astype('float32') / 255\n",
"x_test = x_test.reshape(10000, 784).astype('float32') / 255\n",
"\n",
"model.compile(loss='sparse_categorical_crossentropy',\n",
" optimizer=keras.optimizers.RMSprop())\n",
"history = model.fit(x_train, y_train,\n",
" batch_size=64,\n",
" epochs=1)\n",
"\n",
"predictions = model.predict(x_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1.1保存全模型\n",
"可以对整个模型进行保存,其保存的内容包括:\n",
"- 该模型的架构\n",
"- 模型的权重(在训练期间学到的)\n",
"- 模型的训练配置(你传递给编译的),如果有的话\n",
"- 优化器及其状态(如果有的话)(这使您可以从中断的地方重新启动训练)\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"model.save('the_save_model.h5')\n",
"new_model = keras.models.load_model('the_save_model.h5')\n",
"new_prediction = new_model.predict(x_test)\n",
"np.testing.assert_allclose(predictions, new_prediction, atol=1e-6) # 预测结果一样"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Oops, something went wrong.

0 comments on commit 6a32897

Please sign in to comment.
You can’t perform that action at this time.