Trying to Analyze the MNIST Dataset Through TensorBoard
I'm running the code below and hit an error on this line:
tb.configure(argv=[None, '6006', 'C:/Users/ryans/'])
The error message is:
tb.configure(argv=[None, '6006', 'C:/Users/ryans/'])
usage: tensorboard [-h] [--helpfull] [--logdir PATH] [--host ADDR]
[--port PORT] [--purge_orphaned_data BOOL]
[--reload_interval SECONDS] [--db URI] [--db_import]
[--db_import_use_op] [--inspect] [--version_tb] [--tag TAG]
[--event_file PATH] [--path_prefix PATH]
[--window_title TEXT] [--max_reload_threads COUNT]
[--reload_task TYPE]
[--samples_per_plugin SAMPLES_PER_PLUGIN]
[--debugger_data_server_grpc_port PORT]
[--debugger_port PORT] [--master_tpu_unsecure_channel ADDR]
tensorboard: error: unrecognized arguments: 6006 C:/Users/ryans/
An exception has occurred, use %tb to see the full traceback.
Here is my code:
# my code ...
import os
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(style="darkgrid")
import datetime
from tensorboard import program

tb = program.TensorBoard()
tb.configure(argv=[None, '6006', 'C:/Users/ryans/'])
url = tb.launch()

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

def create_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax')
    ])

model = create_model()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

log_dir = "C:/Users/ryans/logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

model.fit(x=x_train,
          y=y_train,
          epochs=5,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard_callback])

train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.shuffle(60000).batch(64)
test_dataset = test_dataset.batch(64)

# The training code follows the advanced quickstart tutorial, but shows how
# to log metrics to TensorBoard. Choose loss and optimizer:
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()

# Create stateful metrics that can be used to accumulate values during
# training and logged at any point. Define our metrics:
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')

def train_step(model, optimizer, x_train, y_train):
    with tf.GradientTape() as tape:
        predictions = model(x_train, training=True)
        loss = loss_object(y_train, predictions)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(loss)
    train_accuracy(y_train, predictions)

def test_step(model, x_test, y_test):
    predictions = model(x_test)
    loss = loss_object(y_test, predictions)
    test_loss(loss)
    test_accuracy(y_test, predictions)

# Set up summary writers to write the summaries to disk in a different
# logs directory:
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)

# Start training. Use tf.summary.scalar() to log metrics (loss and accuracy)
# during training/testing within the scope of the summary writers to write
# the summaries to disk. You have control over which metrics to log and how
# often to do it. Other tf.summary functions enable logging other types of data.
model = create_model()  # reset our model
EPOCHS = 5

for epoch in range(EPOCHS):
    for (x_train, y_train) in train_dataset:
        train_step(model, optimizer, x_train, y_train)
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', train_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)

    for (x_test, y_test) in test_dataset:
        test_step(model, x_test, y_test)
    with test_summary_writer.as_default():
        tf.summary.scalar('loss', test_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))

    # Reset metrics every epoch
    train_loss.reset_states()
    test_loss.reset_states()
    train_accuracy.reset_states()
    test_accuracy.reset_states()
All of the code comes from this link:
https://www.tensorflow.org/tensorboard/get_started
I want to be able to go to http://localhost:6006/ and view TensorBoard. How can I do that? Thanks!!
I think you should check your argument vector. configure() parses argv the same way the tensorboard command line does, which is why '6006' and 'C:/Users/ryans/' were rejected as "unrecognized arguments": every value needs a named flag in front of it. This should be the correct syntax:
tb.configure(argv=[None, '--logdir', 'C:/Users/ryans/logs/fit/'])
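
For completeness, here's a minimal sketch of the same fix (assuming the TensorBoard 2.x program API, where argv[0] is a throwaway program-name slot and every option is passed as a named flag). The --port flag is optional and defaults to 6006; launch() returns the URL the server is listening on:

from tensorboard import program

tb = program.TensorBoard()
# Point --logdir at the parent folder your Keras callback writes into;
# TensorBoard picks up the timestamped run subdirectories beneath it.
tb.configure(argv=[None, '--logdir', 'C:/Users/ryans/logs/fit/', '--port', '6006'])
url = tb.launch()
print(url)  # should print something like http://localhost:6006/

Equivalently, you can skip the in-process API and start the server from a terminal with: tensorboard --logdir C:/Users/ryans/logs/fit/ --port 6006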