Shandong University Brain-Inspired Computing Lab 4: Implementing an LSTM

Overview

This lab implements a simple LSTM from scratch in PyTorch and trains it to add two 8-bit binary integers.

Lab Task

Using your knowledge of the LSTM model, implement a simple LSTM model in Python.

Lab Requirements

(1) Randomly generate two 8-bit binary integers between 0 and 127 as one
pair of inputs, and take their sum as the label; these three numbers form
one training example (see the worked example below). Generate as many
examples as possible.
(2) Build the LSTM network.
(3) The network must add the two 8-bit binary integers and output the
correct result.
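
As an illustrative example, the pair a = 19 and b = 44 is encoded most-significant-bit first as 00010011 and 00101100, and the label is their sum 63, encoded as 00111111. At each of the 8 time steps the network is fed one bit of a together with the corresponding bit of b.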

requirements:

numpy==1.20
torch==1.7.1

(The latest versions will probably work as well, but a future major release could break compatibility.)

import math

import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader

np.random.seed(1)


# 1. Generate training data

def convert(x):
    """
    Two-way format converter: a scalar becomes its 8-bit binary
    representation (most significant bit first), and a bit sequence
    becomes the corresponding integer.
    :param x: int in [0, 255], or a length-8 bit sequence
    :return: np.ndarray of 8 bits, or the decoded number
    """
    if np.array(x).shape == ():  # scalar -> bits
        x_ = np.array([[x]], dtype=np.uint8)
        return np.unpackbits(x_)
    else:  # bits -> scalar
        res = 0
        for idx, _ in enumerate(np.array(x)):
            res += (2 ** (7 - idx)) * _  # index 0 holds the most significant bit
        return res
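
# For example (illustrative): convert(5) -> array([0, 0, 0, 0, 0, 1, 0, 1], dtype=uint8),
# and convert([0, 0, 0, 0, 0, 1, 0, 1]) -> 5, so the two directions round-trip.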


data = []
labels = []
data_test = []
labels_test = []
for i in range(500):
    # 500 examples: the first 400 for training, the last 100 for testing.
    # The spec calls for operands in [0, 127]; their sum still fits in 8 bits.
    a_ = np.random.randint(128)
    b_ = np.random.randint(128)
    c_ = convert(a_ + b_)  # 8-bit label: the sum
    x_ = list(zip(list(convert(a_)), list(convert(b_))))  # 8 steps of (bit_a, bit_b)
    if i < 400:
        data.append(x_)
        labels.append(c_)
    else:
        data_test.append(x_)
        labels_test.append(c_)

data = torch.tensor(data, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.float32)
data_test = torch.tensor(data_test, dtype=torch.float32)
labels_test = torch.tensor(labels_test, dtype=torch.float32)
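
# Resulting shapes: data (400, 8, 2), labels (400, 8);
# data_test (100, 8, 2), labels_test (100, 8).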


# 2. Build the network
class LSTM(nn.Module):
    def __init__(self, input_sz, hidden_sz):
        super().__init__()
        # (nn.LSTM(input_size=input_sz, hidden_size=hidden_sz) is the built-in equivalent)
        self.input_size = input_sz
        self.hidden_size = hidden_sz
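
        # i_t: input gate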
        self.U_i = nn.Parameter(torch.Tensor(input_sz, hidden_sz))
        self.V_i = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz))
        self.b_i = nn.Parameter(torch.Tensor(hidden_sz))

        # f_t: forget gate
        self.U_f = nn.Parameter(torch.Tensor(input_sz, hidden_sz))
        self.V_f = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz))
        self.b_f = nn.Parameter(torch.Tensor(hidden_sz))

        # g_t: candidate cell state
        self.U_c = nn.Parameter(torch.Tensor(input_sz, hidden_sz))
        self.V_c = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz))
        self.b_c = nn.Parameter(torch.Tensor(hidden_sz))

        # o_t: output gate
        self.U_o = nn.Parameter(torch.Tensor(input_sz, hidden_sz))
        self.V_o = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz))
        self.b_o = nn.Parameter(torch.Tensor(hidden_sz))

        self.init_weights()

    def init_weights(self):
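        # Uniform init in [-1/sqrt(hidden), 1/sqrt(hidden)], the same default
        # scheme PyTorch uses for its built-in nn.LSTM.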
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, x, init_states=None):
        # x: (batch, seq_len, input_size)
        bs, seq_sz, _ = x.size()
        hidden_seq = []

        if init_states is None:
            h_t, c_t = (
                torch.zeros(bs, self.hidden_size).to(x.device),
                torch.zeros(bs, self.hidden_size).to(x.device)
            )
        else:
            h_t, c_t = init_states
        for t in range(seq_sz):
            x_t = x[:, t, :]

            i_t = torch.sigmoid(x_t @ self.U_i + h_t @ self.V_i + self.b_i)  # input gate
            f_t = torch.sigmoid(x_t @ self.U_f + h_t @ self.V_f + self.b_f)  # forget gate
            g_t = torch.tanh(x_t @ self.U_c + h_t @ self.V_c + self.b_c)     # candidate state
            o_t = torch.sigmoid(x_t @ self.U_o + h_t @ self.V_o + self.b_o)  # output gate
            c_t = f_t * c_t + i_t * g_t        # cell state update
            h_t = o_t * torch.tanh(c_t)        # new hidden state

            hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)  # (seq_len, batch, hidden)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()  # -> (batch, seq_len, hidden)
        return hidden_seq, (h_t, c_t)


class data_set(Dataset):
    def __init__(self, flag="train"):
        if flag == "train":
            self.data_ = data
            self.data_labels_ = labels
        else:
            self.data_ = data_test
            self.data_labels_ = labels_test

    def __len__(self):
        return len(self.data_)

    def __getitem__(self, item):
        return self.data_[item], self.data_labels_[item]


# 3. Train and evaluate
train_dataset = data_set(flag='train')
train_dataloader = DataLoader(dataset=train_dataset, batch_size=1, shuffle=True)
test_dataset = data_set(flag='test')
test_dataloader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True)
lstm = LSTM(2, 8)  # 2 input bits per step, 8 hidden units (one per output bit)
optimizer = torch.optim.Adam(lstm.parameters(), lr=0.01)
criterion = torch.nn.MSELoss()
training_loss = []
training_acc = []
for epoch in range(200):
    print("epoch ====================", epoch)
    lstm.train()
    # Training pass
    epoch_loss = []
    for idx, (data_x, data_y) in enumerate(train_dataloader):
        outputs, (h, c) = lstm(data_x)
        optimizer.zero_grad()
        # MSE between the final hidden state (8 values) and the 8 label bits
        loss = criterion(outputs[:, -1], data_y)
        epoch_loss.append(loss.item())
        loss.backward()
        optimizer.step()
    training_loss.append(np.mean(epoch_loss))
    # Evaluation pass
    lstm.eval()
    acc = 0
    with torch.no_grad():
        for idx, (data_x, data_y) in enumerate(test_dataloader):
            outputs, (h, c) = lstm(data_x)
            pred = round(convert(np.round(outputs[0, -1].tolist())))
            truth = round(convert(data_y.squeeze().tolist()))
            print(truth, "pred >", pred)
            if truth == pred:
                acc += 1
    acc /= len(test_dataset)
    training_acc.append(acc)
    print("epoch ", epoch, " acc :", acc)

# 4. Plot loss and accuracy curves
import matplotlib.pyplot as plt

plt.figure()
plt.plot(training_loss)
plt.plot(training_acc)
plt.legend(["training_loss", "acc"])
plt.show()
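
With training finished, the snippet below is a minimal sanity-check sketch, not part of the lab spec: it assumes the `lstm` model and `convert` helper defined above are still in scope, and the operands 25 and 17 are arbitrary values in the training range.

# Illustrative sketch: add two fixed numbers with the trained network.
a, b = 25, 17  # arbitrary operands in [0, 127]
x = torch.tensor([list(zip(list(convert(a)), list(convert(b))))],
                 dtype=torch.float32)
lstm.eval()
with torch.no_grad():
    out, _ = lstm(x)
pred = round(convert(np.round(out[0, -1].tolist())))
print(a, "+", b, "=", pred)  # should print 42 once the model has converged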
