Three Ways to Build a Model


The high-level API of TensorFlow is mainly tensorflow.keras.models.

This chapter covers the following topics related to tensorflow.keras.models in detail:

  • Model building (Sequential, the functional API, Model subclassing)

  • Model training (the built-in fit method, the built-in train_on_batch method, custom training loops, training on a single GPU, training on multiple GPUs, training on TPUs)

  • Model deployment (serving models with TensorFlow Serving, calling TensorFlow models from Spark (Scala))

0. Overview of Model Building Methods

There are three ways to build a model: stack layers in order with Sequential, build a model of arbitrary structure with the functional API, or build a fully custom model by subclassing the Model base class.

For models with a purely sequential structure, prefer the Sequential approach.

If the model has multiple inputs or outputs, shares weights between layers, or has a non-sequential topology such as residual connections, the functional API is the recommended choice.

Unless there is a specific need, avoid building models by subclassing Model: this approach offers enormous flexibility, but also a much larger chance of making mistakes.

Below, the IMDB movie review classification problem is used as an example to demonstrate the three ways of creating a model.


          
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
from tensorflow.keras import *


train_token_path = "./data/imdb/train_token.csv"
test_token_path = "./data/imdb/test_token.csv"

MAX_WORDS = 10000  # We will only consider the top 10,000 words in the dataset
MAX_LEN = 200      # We will cut reviews after 200 words
BATCH_SIZE = 20

# Build the data pipeline
def parse_line(line):
    t = tf.strings.split(line, "\t")
    label = tf.reshape(tf.cast(tf.strings.to_number(t[0]), tf.int32), (-1,))
    features = tf.cast(tf.strings.to_number(tf.strings.split(t[1], " ")), tf.int32)
    return (features, label)

ds_train = tf.data.TextLineDataset(filenames=[train_token_path]) \
    .map(parse_line, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
    .shuffle(buffer_size=1000).batch(BATCH_SIZE) \
    .prefetch(tf.data.experimental.AUTOTUNE)

ds_test = tf.data.TextLineDataset(filenames=[test_token_path]) \
    .map(parse_line, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
    .shuffle(buffer_size=1000).batch(BATCH_SIZE) \
    .prefetch(tf.data.experimental.AUTOTUNE)
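
Before building any model, it helps to sanity-check the pipeline by peeking at one batch. This is a minimal sketch; it assumes the token files exist locally and that every line holds a label, a tab, and MAX_LEN space-separated token ids:

# Take a single batch from the pipeline and print its shapes
for features, labels in ds_train.take(1):
    print(features.shape)  # expected: (BATCH_SIZE, MAX_LEN) = (20, 200)
    print(labels.shape)    # expected: (BATCH_SIZE, 1) = (20, 1)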

      

1. Building a Model Layer by Layer with Sequential


          
tf.keras.backend.clear_session()

model = models.Sequential()

model.add(layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN))
model.add(layers.Conv1D(filters=64, kernel_size=5, activation="relu"))
model.add(layers.MaxPool1D(2))
model.add(layers.Conv1D(filters=32, kernel_size=3, activation="relu"))
model.add(layers.MaxPool1D(2))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation="sigmoid"))

model.compile(optimizer='Nadam',
              loss='binary_crossentropy',
              metrics=['accuracy', "AUC"])

model.summary()
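
For reference, the same stack can equivalently be passed as a list to the Sequential constructor. The sketch below builds an identical architecture and is purely a matter of style (model_alt is just an illustrative name and is not used later):

# The same model, defined by passing the layer list to the Sequential constructor
model_alt = models.Sequential([
    layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN),
    layers.Conv1D(filters=64, kernel_size=5, activation="relu"),
    layers.MaxPool1D(2),
    layers.Conv1D(filters=32, kernel_size=3, activation="relu"),
    layers.MaxPool1D(2),
    layers.Flatten(),
    layers.Dense(1, activation="sigmoid"),
])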

      



          
import datetime

# Treat "AUC" as a stateful metric so BaseLogger does not average it over batches
baselogger = callbacks.BaseLogger(stateful_metrics=["AUC"])
logdir = "./data/keras_model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit(ds_train, validation_data=ds_test,
                    epochs=6, callbacks=[baselogger, tensorboard_callback])
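
The training curves can also be browsed interactively in TensorBoard by pointing it at the log directory. In a notebook, a minimal sketch (assuming the tensorboard package is installed) looks like this:

# Load the TensorBoard notebook extension and open it on the Keras log directory
%load_ext tensorboard
%tensorboard --logdir ./data/keras_model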

      



          
%matplotlib inline
%config InlineBackend.figure_format = 'svg'

import matplotlib.pyplot as plt

def plot_metric(history, metric):
    train_metrics = history.history[metric]
    val_metrics = history.history['val_' + metric]
    epochs = range(1, len(train_metrics) + 1)
    plt.plot(epochs, train_metrics, 'bo--')
    plt.plot(epochs, val_metrics, 'ro-')
    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, 'val_' + metric])
    plt.show()
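
For example, to draw the AUC curves collected during fit (the exact key under which Keras records the metric can vary across versions, so check history.history.keys() if the call raises a KeyError):

# Plot training vs. validation AUC over the 6 epochs
plot_metric(history, "auc")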

      


2. Building Models of Arbitrary Structure with the Functional API


          
tf.keras.backend.clear_session()

inputs = layers.Input(shape=[MAX_LEN])
x = layers.Embedding(MAX_WORDS, 7)(inputs)

branch1 = layers.SeparableConv1D(64, 3, activation="relu")(x)
branch1 = layers.MaxPool1D(3)(branch1)
branch1 = layers.SeparableConv1D(32, 3, activation="relu")(branch1)
branch1 = layers.GlobalMaxPool1D()(branch1)

branch2 = layers.SeparableConv1D(64, 5, activation="relu")(x)
branch2 = layers.MaxPool1D(5)(branch2)
branch2 = layers.SeparableConv1D(32, 5, activation="relu")(branch2)
branch2 = layers.GlobalMaxPool1D()(branch2)

branch3 = layers.SeparableConv1D(64, 7, activation="relu")(x)
branch3 = layers.MaxPool1D(7)(branch3)
branch3 = layers.SeparableConv1D(32, 7, activation="relu")(branch3)
branch3 = layers.GlobalMaxPool1D()(branch3)

concat = layers.Concatenate()([branch1, branch2, branch3])
outputs = layers.Dense(1, activation="sigmoid")(concat)

model = models.Model(inputs=inputs, outputs=outputs)

model.compile(optimizer='Nadam',
              loss='binary_crossentropy',
              metrics=['accuracy', "AUC"])

model.summary()
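
Because the three-branch topology is easier to grasp as a diagram, the functional model can optionally be rendered with tf.keras.utils.plot_model. This assumes the pydot and graphviz packages are installed; the output file name below is arbitrary:

# Draw the model graph with layer output shapes
tf.keras.utils.plot_model(model, to_file="imdb_functional_model.png", show_shapes=True)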

      



          
import datetime

logdir = "./data/keras_model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit(ds_train, validation_data=ds_test, epochs=6, callbacks=[tensorboard_callback])

      


3. Building a Custom Model by Subclassing Model


          
# First define a residual block as a custom Layer
class ResBlock(layers.Layer):
    def __init__(self, kernel_size, **kwargs):
        super(ResBlock, self).__init__(**kwargs)
        self.kernel_size = kernel_size

    def build(self, input_shape):
        self.conv1 = layers.Conv1D(filters=64, kernel_size=self.kernel_size,
                                   activation="relu", padding="same")
        self.conv2 = layers.Conv1D(filters=32, kernel_size=self.kernel_size,
                                   activation="relu", padding="same")
        self.conv3 = layers.Conv1D(filters=input_shape[-1],
                                   kernel_size=self.kernel_size, activation="relu", padding="same")
        self.maxpool = layers.MaxPool1D(2)
        super(ResBlock, self).build(input_shape)  # equivalent to setting self.built = True

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.conv2(x)
        x = self.conv3(x)
        x = layers.Add()([inputs, x])
        x = self.maxpool(x)
        return x

    # To make a custom Layer serializable when it is composed into a model via the
    # functional API, a get_config method must be defined.
    def get_config(self):
        config = super(ResBlock, self).get_config()
        config.update({'kernel_size': self.kernel_size})
        return config
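
As a quick sanity check, the block can be built on a dummy input shape and probed for its output shape. Since conv3 restores the input channel count and MaxPool1D(2) halves the sequence length, a (None, 200, 7) input is expected to yield (None, 100, 7); this is a minimal sketch:

# Instantiate the block and verify its output shape on a dummy input spec
resblock = ResBlock(kernel_size=3)
resblock.build(input_shape=(None, 200, 7))
print(resblock.compute_output_shape(input_shape=(None, 200, 7)))  # expected: (None, 100, 7)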

      



          
# Custom model; in fact, Sequential or the functional API could also be used here
class ImdbModel(models.Model):
    def __init__(self):
        super(ImdbModel, self).__init__()

    def build(self, input_shape):
        self.embedding = layers.Embedding(MAX_WORDS, 7)
        self.block1 = ResBlock(7)
        self.block2 = ResBlock(5)
        self.dense = layers.Dense(1, activation="sigmoid")
        super(ImdbModel, self).build(input_shape)

    def call(self, x):
        x = self.embedding(x)
        x = self.block1(x)
        x = self.block2(x)
        x = layers.Flatten()(x)
        x = self.dense(x)
        return x

      

          
tf.keras.backend.clear_session()

model = ImdbModel()
model.build(input_shape=(None, 200))
model.summary()

model.compile(optimizer='Nadam',
              loss='binary_crossentropy',
              metrics=['accuracy', "AUC"])

      



          
import datetime

logdir = "./tflogs/keras_model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit(ds_train, validation_data=ds_test,
                    epochs=6, callbacks=[tensorboard_callback])
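
After training, the subclassed model can be evaluated on the test pipeline just like the other two variants; a minimal sketch:

# Report loss, accuracy and AUC on the test set
model.evaluate(ds_test)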

      



Reply with the keyword tensorflow in the backend of the official account to get this book's GitHub project source code and the corresponding datasets!
