沈阳高端网页/seo企业优化顾问
环境准备:
1、anaconda官网下载
下载地址
https://www.anaconda.com/distribution/
注意选用该电脑相应的系统和64/32位。
已安装Python使用环境的请跳过此步骤。
2、pytorch安装
https://pytorch.org/get-started/previous-versions/
基础框架:
1、数据准备
假设我们现在用开盘价、收盘价、最高价和最低价来预测下一天涨幅。其中,开盘价、收盘价、最高价、最低价使用连续5天的数据,那么模型输入数据为4x5的矩阵。矩阵的每一行分别对应开盘价、收盘价、最高价、最低价。模型输出为下一天涨幅。下面采用随机的方法生成10个样本数据,输入为维度为10x4x5,目标涨幅为10x1。
# 构建输入数据集
class Data_set(Dataset):def __init__(self, transform=None):self.transform = transformself.x_data = np.random.randint(0, 10, (10,4,5))self.y_data = np.random.randint(0, 10, (10,1))def __getitem__(self, index):x, y = self.pull_item(index)return x, ydef __len__(self):return self.x_data.shape[0] def pull_item(self, index):return self.x_data[index, :, :], self.y_data[index, :]
2、定义神经网络模型
这里简单定义一个两层模型,第一层为卷积层,第二层为全连接层。
class MyModel(nn.Module):def __init__(self, num_classes=10):super(MyModel, self).__init__()self.model_name = "test"self.conv = nn.Conv1d(4, 1, 3, 1, 1)self.fc = nn.Linear(5, 1)def forward(self, x):x = self.conv(x)x = x.view(-1, 5)return self.fc(x)
3、损失函数定义
损失函数用于计算模型输出与真实结果之间的误差,可以自己定义,也可以直接使用pytorch中的损失函数。
class MyLoss(nn.Module):def __init__(self):super().__init__()def forward(self, x, y):return torch.mean(torch.pow((x - y), 2))
4、完整代码
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 20:30:41 2020

@author: yehx
"""
import argparse
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable

# 构建输入数据集
class Data_set(Dataset):
    """Toy dataset: 10 random samples of shape (4, 5) with (1,) targets."""

    def __init__(self, transform=None):
        self.transform = transform
        # Rows of each sample: open / close / high / low over 5 consecutive days.
        self.x_data = np.random.randint(0, 10, (10, 4, 5))
        self.y_data = np.random.randint(0, 10, (10, 1))

    def __getitem__(self, index):
        x, y = self.pull_item(index)
        return x, y

    def __len__(self):
        return self.x_data.shape[0]

    def pull_item(self, index):
        # Slice out one (4, 5) input matrix and its (1,) target.
        return self.x_data[index, :, :], self.y_data[index, :]


class MyModel(nn.Module):
    """Two-layer net: Conv1d(4 -> 1, kernel 3, padding 1) then Linear(5 -> 1)."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__()
        self.model_name = "test"
        # padding=1 with kernel 3 and stride 1 keeps the length-5 sequence intact.
        self.conv = nn.Conv1d(4, 1, 3, 1, 1)
        self.fc = nn.Linear(5, 1)

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 5)
        return self.fc(x)


class MyLoss(nn.Module):
    """Mean squared error between prediction x and target y."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return torch.mean(torch.pow(x - y, 2))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='基础模型参数配置')
    parser.add_argument('--batch_size', default=2, type=int,
                        help='Batch size for training')
    args = parser.parse_args()

    dataset = Data_set()
    data_loader = DataLoader(dataset, args.batch_size, shuffle=True)

    net = MyModel()
    criterion = MyLoss()
    # criterion = nn.MSELoss()  # the built-in pytorch loss works the same way
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001)

    net.train()
    loss_list = []
    num_epoches = 20
    for epoch in range(num_epoches):
        for i, data in enumerate(data_loader):
            inputs, labels = data
            # Variable is deprecated since pytorch 0.4: plain tensors carry
            # autograd state, so converting to float is all that is needed.
            inputs, labels = inputs.float(), labels.float()
            out = net(inputs)
            loss = criterion(out, labels)   # compute the error
            optimizer.zero_grad()           # clear stale gradients
            loss.backward()
            optimizer.step()
            loss_list.append(loss.item())
        if (epoch + 1) % 10 == 0:
            print('[INFO] {}/{}: Loss: {:.4f}'.format(epoch + 1, num_epoches, loss.item()))

    # Plot how the loss evolves across iterations.
    plt.plot(loss_list, label='loss for every epoch')
    plt.legend()
    plt.show()

    # Show the trained parameters.
    print('[INFO] 训练后模型的参数:')
    for name, parameters in net.named_parameters():
        print(name, ':', parameters)

    # Run the model on one fresh random sample.
    print('[INFO] 计算某个样本模型运算结果:')
    net.eval()
    x_data = np.random.randint(0, 10, (4, 5))
    x_data = torch.tensor(x_data).float()
    x_data = x_data.unsqueeze(0)  # add batch dimension -> (1, 4, 5)
    y_data = net(x_data)
    print(y_data.item())

    # Save the whole model object, then reload it and verify the output matches.
    torch.save(net, 'model0.pth')
    print('[INFO] 验证模型加载运算结果:')
    model0 = torch.load('model0.pth')
    y_data = model0(x_data)
    print(y_data.item())