I recently went through some commonly used TensorFlow 2.0 functions; a rough summary follows:

install

pip install tensorflow==2.0.0-beta1
pip install tensorflow-gpu==2.0.0-beta0

import tensorflow as tf

tf.test.is_gpu_available()

tf.keras

tf.keras.Sequential()
tf.keras.Input()
tf.keras.utils.to_categorical() #one-hot encoding
tf.keras.Model(inputs=inputs, outputs=[out1, out2])
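
A minimal functional-API sketch matching the Model(...) call above; the input width and the two head sizes are illustrative assumptions:

import tensorflow as tf

inputs = tf.keras.Input(shape=(32,))                                     # assumed 32-dim input
x = tf.keras.layers.Dense(64, activation='relu')(inputs)
out1 = tf.keras.layers.Dense(1, activation='sigmoid', name='out1')(x)   # binary head
out2 = tf.keras.layers.Dense(10, activation='softmax', name='out2')(x)  # 10-class head
model = tf.keras.Model(inputs=inputs, outputs=[out1, out2])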

# tf.keras.layers
tf.keras.layers.Flatten()
tf.keras.layers.Dense()
tf.keras.layers.concatenate()
tf.keras.layers.Conv2D()
tf.keras.layers.BatchNormalization()
tf.keras.layers.MaxPooling2D()
tf.keras.layers.Dropout()
tf.keras.layers.GlobalAveragePooling2D()
tf.keras.layers.Conv2DTranspose() #transposed convolution, used in FCN-style networks (semantic image segmentation)
tf.keras.layers.LSTM()
tf.keras.layers.GRU()
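
A small CNN assembled from the layers above, just to show how they chain together in a Sequential model (the 28x28x1 input and 10 classes are assumptions):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()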

# tf.keras.preprocessing
tf.keras.preprocessing.sequence.pad_sequences() #pad text sequences to a uniform length
tf.keras.preprocessing.image.array_to_img() #convert an array to a PIL image for display with plt.imshow()
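
For example, pad_sequences aligns variable-length integer sequences (the values below are illustrative):

import tensorflow as tf

seqs = [[1, 2, 3], [4, 5]]
padded = tf.keras.preprocessing.sequence.pad_sequences(seqs, maxlen=4, padding='post')
# -> [[1 2 3 0]
#     [4 5 0 0]]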

# tf.keras.optimizers
tf.keras.optimizers.Adam()

# tf.keras.losses
tf.keras.losses.SparseCategoricalCrossentropy() #multi-class with integer labels; returns a callable, loss_func(y_true, y_pred)
tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=False)
tf.keras.losses.BinaryCrossentropy() #binary classification
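
The class form returns a callable loss object; a tiny demo with made-up labels and probabilities:

import tensorflow as tf

loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
y_true = tf.constant([1, 2])                 # integer class ids
y_pred = tf.constant([[0.1, 0.8, 0.1],
                      [0.2, 0.2, 0.6]])      # predicted probabilities
print(loss_func(y_true, y_pred).numpy())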

# tf.keras.metrics
tf.keras.metrics.Mean('train_loss')
tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')

# tf.keras.callbacks
tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
tf.keras.callbacks.LearningRateScheduler()
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                   monitor='val_loss',
                                   save_best_only=False, #True: keep only the checkpoint that scores best on the monitored metric
                                   save_weights_only=True,
                                   mode='auto',
                                   save_freq='epoch',
                                   verbose=0)
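
A sketch of wiring these callbacks into training; the log/checkpoint paths and the decay schedule are assumptions:

import tensorflow as tf

def schedule(epoch):
    return 1e-3 * 0.1 ** (epoch // 10)   # drop the learning rate every 10 epochs

callbacks = [
    tf.keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1),
    tf.keras.callbacks.LearningRateScheduler(schedule),
    tf.keras.callbacks.ModelCheckpoint(filepath='./ckpt/cp.ckpt',
                                       save_weights_only=True),
]
# model.fit(ds_train, epochs=30, callbacks=callbacks)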
# tf.keras.applications
tf.keras.applications.xception.Xception(include_top=False,
                                        weights='imagenet',
                                        input_shape=(224,224,3),
                                        pooling='avg')
tf.keras.applications.VGG16(include_top=False,
                            weights='imagenet',
                            input_shape=(256,256,3))
tf.keras.applications.MobileNetV2(include_top=False,
                                  weights=None, #architecture only, no pretrained weights
                                  input_shape=(224,224,3))
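
A typical transfer-learning use of these backbones: freeze the pretrained base and stack a new head on top (the class count is an assumption):

import tensorflow as tf

base = tf.keras.applications.MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=(224, 224, 3),
                                         pooling='avg')
base.trainable = False                                  # freeze the backbone
model = tf.keras.Sequential([
    base,
    tf.keras.layers.Dense(10, activation='softmax'),    # assumed 10 classes
])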

model

model.add()
model.summary()
model.compile()
model.fit()
model.evaluate()
model.predict()
y_ = model(x) #call the model directly as a function
model.trainable_variables #the model's trainable parameters
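
A compile/fit/evaluate round trip with random toy data so the snippet runs end to end (shapes and hyperparameters are assumptions):

import numpy as np
import tensorflow as tf

train_x = np.random.rand(256, 32).astype('float32')     # toy features
train_y = np.random.randint(0, 10, size=(256,))         # toy integer labels

model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
model.fit(train_x, train_y, epochs=2, batch_size=64)
model.evaluate(train_x, train_y)
y_ = model(train_x)                                     # functional-style call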

model save and reload

# whole model
model.save('./my_model.h5')  #save the whole model
new_model = tf.keras.models.load_model('./my_model.h5') #load it back

# model architecture
json_config = model.to_json() #get the architecture as JSON
model = tf.keras.models.model_from_json(json_config) #rebuild the model from the architecture

# model weights
weights = model.get_weights() #get the weights in memory
model.set_weights(weights) #set the weights
model.save_weights('./my_weights.h5') #save the weights to disk
model.load_weights('./my_weights.h5') #load the weights from disk

# checkpoint files
model = tf.keras.models.load_model(checkpoint_path) #load a full-model checkpoint
model.load_weights(checkpoint_path) #load only the weights from a checkpoint

checkpoint

checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) #create the checkpoint object
checkpoint.save(file_prefix=cp_prefix) #save a checkpoint, e.g. once per epoch
checkpoint.restore(tf.train.latest_checkpoint(cp_dir)) #restore the latest checkpoint
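
Putting the three calls together in a self-contained sketch (the model, optimizer, and directory are assumptions):

import os
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.Adam()
cp_dir = './tf_ckpts'
cp_prefix = os.path.join(cp_dir, 'ckpt')

checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
for epoch in range(3):
    # ... one epoch of training here ...
    checkpoint.save(file_prefix=cp_prefix)              # writes ckpt-1, ckpt-2, ...
checkpoint.restore(tf.train.latest_checkpoint(cp_dir))  # pick up the newest one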

pre-train

pre_train_model.get_layer(layer_name).output
pre_train_model.trainable = False
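
A feature-extraction sketch: tap an intermediate layer of a frozen pretrained model (the layer name below is an illustrative assumption; check model.summary() for real names):

import tensorflow as tf

pre_train_model = tf.keras.applications.MobileNetV2(include_top=False,
                                                    weights='imagenet',
                                                    input_shape=(224, 224, 3))
pre_train_model.trainable = False
feature_out = pre_train_model.get_layer('block_13_expand_relu').output  # assumed layer name
feature_extractor = tf.keras.Model(inputs=pre_train_model.input,
                                   outputs=feature_out)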

model.fit

history = model.fit(ds_train, 
                    epochs=5, 
                    steps_per_epoch=train_image.shape[0]//64,
                    validation_data=ds_test, 
                    validation_steps=test_image.shape[0]//64,
                    callbacks=[my_callback])
history.epoch
history.history.get('acc')
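
Assuming the history object from the fit call above, the curves plot directly (the 'acc'/'val_acc' key names depend on the metrics passed to compile()):

import matplotlib.pyplot as plt

plt.plot(history.epoch, history.history.get('acc'), label='acc')
plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
plt.legend()
plt.show()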

tf.GradientTape

with tf.GradientTape() as t:
    predictions = model(images)
    loss_step = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)(labels, predictions)

grads = t.gradient(loss_step, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))

train_loss = tf.keras.metrics.Mean('train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')

train_loss(loss_step)
train_accuracy(labels, predictions) 

train_loss.result()
train_accuracy.result()

train_loss.reset_states()
train_accuracy.reset_states()
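
The pieces above fit naturally into one train step; decorating it with @tf.function (optional) compiles the step into a graph for speed. model and optimizer are assumed to exist from earlier:

import tensorflow as tf

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as t:
        predictions = model(images)
        loss_step = loss_object(labels, predictions)
    grads = t.gradient(loss_step, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(loss_step)                   # accumulate the metrics
    train_accuracy(labels, predictions)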

tf.data

tf.data.Dataset.from_tensor_slices()
tf.data.Dataset.zip()
AUTOTUNE = tf.data.experimental.AUTOTUNE

datasets

dataset.shuffle()
dataset.repeat()
dataset.batch()
dataset.map(func, num_parallel_calls=AUTOTUNE)
dataset.skip()
dataset.take()
dataset.prefetch(AUTOTUNE)
next(iter(dataset)) #inspect one batch of data
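
Chained together, a typical input pipeline looks like this (the random tensors and batch size are assumptions):

import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE
images = tf.random.normal((100, 28, 28, 1))
labels = tf.random.uniform((100,), maxval=10, dtype=tf.int32)
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
dataset = dataset.shuffle(100).repeat().batch(32).prefetch(AUTOTUNE)
imgs, lbls = next(iter(dataset))            # inspect one batch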

tf.image

tf.image.decode_jpeg()
tf.image.decode_png()
tf.image.resize()
tf.image.random_crop()
tf.image.random_flip_left_right(image)
tf.image.random_flip_up_down(image)
tf.image.random_brightness(image, 0.5)
tf.image.random_contrast(image, 0, 1)
tf.image.random_hue(image, max_delta=0.3)
tf.image.random_saturation(image, lower=0.2, upper=1.0)
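
A load-and-augment function chaining the ops above (target sizes and augmentation strengths are assumptions):

import tensorflow as tf

def load_image(img_path):
    image = tf.io.read_file(img_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, (256, 256))
    image = tf.image.random_crop(image, size=(224, 224, 3))
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, 0.5)
    return tf.cast(image, tf.float32) / 255.0           # scale to [0, 1]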

tf.summary

train_writer = tf.summary.create_file_writer(train_log_dir)

with train_writer.as_default():
    tf.summary.scalar('loss', train_loss.result(), step=epoch)

tf.config

tf.config.experimental.list_physical_devices()
tf.config.experimental.set_visible_devices()
tf.config.experimental.set_memory_growth()
tf.config.experimental.set_virtual_device_configuration()
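
A common startup snippet: pick the first GPU and enable memory growth so TensorFlow does not grab all GPU memory up front:

import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)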

tf misc

tf.io.read_file(img_path)
tf.cast()
tf.expand_dims()
tf.reshape()
tf.argmax()
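
A tiny demo of these tensor utilities (the values are illustrative):

import tensorflow as tf

pred = tf.constant([[0.1, 0.7, 0.2]])
cls = tf.argmax(pred, axis=-1)              # -> [1]
img = tf.zeros((28, 28))
img = tf.expand_dims(img, -1)               # add a channel axis -> (28, 28, 1)
img = tf.reshape(img, (1, 28, 28, 1))       # add a batch axis
img = tf.cast(img, tf.uint8)                # change dtype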

plt

plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0]))
plt.plot()
plt.legend()

display

import IPython.display as display
display.display(display.Image(image_path))

pathlib

data_root = pathlib.Path(data_dir)
data_root.iterdir()
list(data_root.glob('*/*'))
sorted(item.name for item in data_root.glob('*/'))
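
The classic "folder name = class label" pattern built from these calls (the dataset directory is an assumption):

import pathlib

data_root = pathlib.Path('./dataset')       # assumed layout: ./dataset/<label>/<image>
all_image_paths = [str(p) for p in data_root.glob('*/*')]
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_to_index = {name: i for i, name in enumerate(label_names)}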

glob

glob.glob('./test/*.jpg')

etree

from lxml import etree
xml = open(path).read()
sel = etree.HTML(xml)
width = int(sel.xpath('//size/width/text()')[0])
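
Extended to the other fields of a Pascal VOC style annotation (the file path is an assumption; the xpath keys follow the standard VOC layout):

from lxml import etree

xml = open('./annotations/001.xml').read()  # assumed path
sel = etree.HTML(xml)
width = int(sel.xpath('//size/width/text()')[0])
height = int(sel.xpath('//size/height/text()')[0])
xmin = int(sel.xpath('//bndbox/xmin/text()')[0])
ymin = int(sel.xpath('//bndbox/ymin/text()')[0])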