基于tensorflow2.x版本python代码实现深度学习回归预测(以lstm为例)
代码实现(能直接跑通本文中的代码)
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import *
from sklearn import datasets
from sklearn.model_selection import train_test_split

from general import *

# Fix random seeds so runs are reproducible.
tf.random.set_seed(22)
np.random.seed(22)
assert tf.__version__.startswith('2.')
# Build the regression training set.
def generate_regression_train_data():
    """Return ``(x_train, y_train, x_test, y_test)`` numpy arrays for MPG regression.

    Downloads the UCI auto-mpg dataset, drops rows with missing values,
    one-hot encodes the categorical ``Origin`` column, splits 80/20 into
    train/test, and z-normalizes the features using statistics computed on
    the training split only (so no information leaks from the test set).
    """
    dataset_path = keras.utils.get_file(
        "auto-mpg.data",
        "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
    column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                    'Acceleration', 'Model Year', 'Origin']
    raw_dataset = pd.read_csv(dataset_path, names=column_names,
                              na_values="?", comment='\t',
                              sep=" ", skipinitialspace=True)
    dataset = raw_dataset.copy()
    # '?' entries were parsed as NaN above; drop those rows.
    dataset = dataset.dropna()
    # One-hot encode 'Origin' (1=USA, 2=Europe, 3=Japan).
    origin = dataset.pop('Origin')
    dataset['USA'] = (origin == 1) * 1.0
    dataset['Europe'] = (origin == 2) * 1.0
    dataset['Japan'] = (origin == 3) * 1.0
    # 80/20 split with a fixed seed for reproducibility.
    train_dataset = dataset.sample(frac=0.8, random_state=0)
    test_dataset = dataset.drop(train_dataset.index)
    # Normalization statistics come from the training split only.
    train_stats = train_dataset.describe()
    train_stats.pop("MPG")
    train_stats = train_stats.transpose()
    y_train = train_dataset.pop('MPG')
    y_test = test_dataset.pop('MPG')

    def norm(x):
        # z-score normalization with training-set mean/std.
        return (x - train_stats['mean']) / train_stats['std']

    x_train = np.array(norm(train_dataset))
    x_test = np.array(norm(test_dataset))
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    return x_train, y_train, x_test, y_test
# Functional (subclassing) build style.
class FuncMRegressor(keras.Model):
    """Two-layer stacked-LSTM regressor built with the Keras subclassing API.

    The first LSTM uses ``return_sequences=True`` so its 3-D output
    ``[batch, time, units]`` can feed the second LSTM; the second returns
    only the final hidden state ``[batch, units]``, which a ``Dense(1)``
    head maps to the scalar regression output.
    """

    def __init__(self, units, num_classes, num_layers):
        # ``num_classes`` and ``num_layers`` are kept for interface
        # compatibility; this implementation uses a fixed two-LSTM stack
        # with ``units`` cells per layer.
        super(FuncMRegressor, self).__init__()
        # Non-final LSTM layers need return_sequences=True so the next LSTM
        # still receives the required 3-D [batch, time, features] input.
        self.lstm1 = keras.layers.LSTM(units, return_sequences=True)
        # Final LSTM (return_sequences=False by default): returns only the
        # last time step's hidden state, i.e. a 2-D [batch, units] tensor.
        self.lstm2 = keras.layers.LSTM(units)
        # Regression head: one output unit (for an N-class problem the final
        # Dense layer would have N units instead).
        self.fc3 = keras.layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        """Forward pass; ``inputs`` must be 3-D [batch, time steps, features]."""
        y = self.lstm1(inputs)
        y = self.lstm2(y)
        # Map the final hidden state to the scalar prediction.
        y = self.fc3(y)
        return y
# Sequential build style (recommended).
class SequeRegressor():
    """LSTM regressor built with the Keras Sequential API.

    NOTE(review): the original source for this class was corrupted; the
    layer stack below is reconstructed to mirror ``FuncMRegressor``
    (two stacked LSTMs + Dense(1)) — confirm against the original article.
    """

    def __init__(self, units):
        # Number of LSTM cells per recurrent layer.
        self.units = units

    # Build the network structure, then configure loss/optimizer/metrics.
    def build_model(self, loss, optimizer, metrics):
        self.model = keras.Sequential([
            # Non-final LSTM must return sequences to keep the data 3-D.
            keras.layers.LSTM(self.units, return_sequences=True),
            # Final LSTM returns only the last hidden state (2-D output).
            keras.layers.LSTM(self.units),
            # Single output unit for regression.
            keras.layers.Dense(1),
        ])
        self.model.compile(loss=loss,
                           optimizer=optimizer,  # optimizer choice matters
                           metrics=metrics)
if __name__ == "__main__":
    print("**********************【Func搭建⽅式】********************")
    # Get the 2-D feature matrix (features already extracted).
    x_train, y_train, x_test, y_test = generate_regression_train_data()
    # LSTM expects 3-D input: add a trailing feature axis with np.newaxis.
    x_train = x_train[:, :, np.newaxis]
    x_test = x_test[:, :, np.newaxis]
    # Training hyper-parameters.
    units = 64        # number of LSTM cells per layer
    num_classes = 1   # regression -> single output
    batch_size = 32
    epochs = 35
    model = FuncMRegressor(units, num_classes, num_layers=2)
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae', 'mse'])
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(x_test, y_test), verbose=1)
    # predict() returns only y_pred.
    out = model.predict(x_train)
    # evaluate() reports loss/metrics on held-out data, not predictions.
    scores = model.evaluate(x_test, y_test, batch_size, verbose=1)
    print("Final test loss and accuracy :", scores)

    print("**********************【Seque搭建⽅式】********************")
    # 1. Rebuild the training set and reshape to the 3-D input format.
    x_train, y_train, x_test, y_test = generate_regression_train_data()
    x_train = x_train[:, :, np.newaxis]
    x_test = x_test[:, :, np.newaxis]
    # 2. Configure the model: structure, loss, optimizer, metrics.
    units = 64                 # number of LSTM cells
    loss = "mse"               # loss function
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    metrics = ['mae', 'mse']   # evaluation metrics
    srlstm = SequeRegressor(units)
    srlstm.build_model(loss, optimizer, metrics)
    # 3. Train. NOTE(review): the fit() call was lost in the corrupted
    # original; reconstructed to match the Func-style training above.
    epochs = 35
    batch_size = 32
    srlstm.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                     validation_data=(x_test, y_test), verbose=1)
    # 4. Evaluate on the held-out test set.
    score = srlstm.model.evaluate(x_test, y_test, batch_size=16)
    print("model score:", score)
    # 5. Apply the model: predict.
    prediction = srlstm.model.predict(x_test)

版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。