Neural Networks for Text Classification

I've recently been experimenting with AI for text classification. (I'm not a professional; I'm just learning to build neural networks by following similar examples, and I can only manage fairly simple ones.) Since I collected and labeled the dataset myself and my time was limited, the dataset is small and the model ended up overfitting. Because the final results weren't good, I'm not releasing the full project code, only the code for the neural network models, both as a personal record and as a reference for learning.

BiLSTM

import torch
import torch.nn.functional as F


class BiLSTM(torch.nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, num_classes):
        super().__init__()

        self.embedding = torch.nn.Embedding(vocab_size, embed_size)
        self.lstm = torch.nn.LSTM(embed_size, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True)
        self.linear = torch.nn.Linear(hidden_size, num_classes)
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, inputs):
        emb = self.embedding(inputs)
        # [batch_size, time_steps, embed_size]

        output, (hidden, _) = self.lstm(emb)
        # output: [batch_size, time_steps, num_directions*hidden_size]
        # hidden: [num_layers*num_directions, batch_size, hidden_size]

        hidden = hidden.permute(1, 0, 2)
        # [batch_size, num_layers*num_directions, hidden_size]

        hidden = torch.sum(hidden, dim=1, keepdim=True)
        # [batch_size, 1, hidden_size]

        hidden = hidden.squeeze(dim=1)
        # [batch_size, hidden_size]

        out = F.softmax(self.linear(hidden), dim=1)
        # [batch_size, num_classes]

        return out
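
A quick shape check with some made-up hyperparameters (these are illustration values only, not the ones from my project):

import torch

# made-up hyperparameters, chosen only to illustrate the tensor shapes
model = BiLSTM(vocab_size=5000, embed_size=128, hidden_size=64, num_layers=2, num_classes=3)

# a dummy batch: 8 sequences of 20 token ids each
tokens = torch.randint(0, 5000, (8, 20))
probs = model(tokens)
print(probs.shape)       # torch.Size([8, 3])
print(probs.sum(dim=1))  # each row sums to 1 because of the final softmax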

BiLSTM-Attention

import torch
import torch.nn.functional as F


class BiLSTMAttn(torch.nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, num_classes):
        super(BiLSTMAttn, self).__init__()

        self.embedding = torch.nn.Embedding(vocab_size, embed_size)
        self.lstm = torch.nn.LSTM(embed_size, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True)
        self.attention_weights_layer = torch.nn.Sequential(
            torch.nn.Linear(hidden_size, hidden_size),
            torch.nn.ReLU(inplace=True)
        )
        self.linear = torch.nn.Linear(hidden_size, num_classes)
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, inputs):
        emb = self.embedding(inputs)
        # [batch_size, time_steps, embed_size]

        output, (hidden, _) = self.lstm(emb)
        # output: [batch_size, time_steps, num_directions*hidden_size]
        # hidden: [num_layers*num_directions, batch_size, hidden_size]

        # split the bidirectional LSTM output into forward and backward halves
        (forward_out, backward_out) = torch.chunk(output, 2, dim=2)
        out = forward_out + backward_out
        # [batch_size, time_steps, hidden_size]

        hidden = hidden.permute(1, 0, 2)
        # [batch_size, num_layers*num_directions, hidden_size]

        hidden = torch.sum(hidden, dim=1, keepdim=True)
        # [batch_size, 1, hidden_size]

        hidden = hidden.squeeze(dim=1)
        # [batch_size, hidden_size]

        # use hidden to generate the attention weights
        attention_w = self.attention_weights_layer(hidden)
        # [batch_size, hidden_size]
        attention_w = attention_w.unsqueeze(dim=1)
        # [batch_size, 1, hidden_size]

        attention_context = torch.bmm(attention_w, out.transpose(1, 2))
        softmax_w = F.softmax(attention_context, dim=-1)
        # [batch_size, 1, time_steps]

        out = torch.bmm(softmax_w, out)
        # [batch_size, 1, hidden_size]

        out = out.squeeze(dim=1)
        # [batch_size, hidden_size]

        out = F.softmax(self.linear(out), dim=1)
        # [batch_size, num_classes]

        return out
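
The attention version takes the same inputs and produces the same output shape, so the same kind of check applies (again with made-up values):

import torch

model = BiLSTMAttn(vocab_size=5000, embed_size=128, hidden_size=64, num_layers=2, num_classes=3)

tokens = torch.randint(0, 5000, (8, 20))  # dummy batch of token ids
probs = model(tokens)
print(probs.shape)  # torch.Size([8, 3])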

What some of these functions do

  • torch.mm, torch.bmm, torch.matmul

    Summary (a small sketch follows below):

    • torch.mm only does plain matrix multiplication: an (n×m) tensor multiplied by an (m×p) tensor gives an (n×p) tensor
    • torch.bmm does batched matrix multiplication, where the first dimension is batch_size: a (batch_size×n×m) tensor multiplied by a (batch_size×m×p) tensor gives a (batch_size×n×p) tensor
    • torch.matmul is the more general tensor multiplication: with 2-D inputs it behaves like mm, and with batched inputs it broadcasts over the batch dimensions
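
    A minimal sketch of the shapes described above (the sizes here are arbitrary examples, not values from the project):

    import torch

    a = torch.randn(2, 3)            # (n×m)
    b = torch.randn(3, 4)            # (m×p)
    print(torch.mm(a, b).shape)      # torch.Size([2, 4]), i.e. (n×p)

    a = torch.randn(5, 2, 3)         # (batch_size×n×m)
    b = torch.randn(5, 3, 4)         # (batch_size×m×p)
    print(torch.bmm(a, b).shape)     # torch.Size([5, 2, 4]), i.e. (batch_size×n×p)

    # matmul handles both cases and also broadcasts batch dimensions
    a = torch.randn(5, 2, 3)
    b = torch.randn(3, 4)
    print(torch.matmul(a, b).shape)  # torch.Size([5, 2, 4])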

  • torch.transpose, torch.permute

    Summary (a small sketch follows below):

    • torch.transpose swaps two dimensions of a tensor
    • torch.permute rearranges all of a tensor's dimensions into the given order
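
    A minimal sketch of the difference (again, arbitrary example sizes):

    import torch

    x = torch.randn(2, 3, 4)

    # transpose swaps exactly two dimensions
    print(x.transpose(0, 2).shape)        # torch.Size([4, 3, 2])

    # permute reorders all dimensions at once
    print(x.permute(2, 0, 1).shape)       # torch.Size([4, 2, 3])

    # the same move as in the models above:
    # [num_layers*num_directions, batch_size, hidden_size]
    # -> [batch_size, num_layers*num_directions, hidden_size]
    hidden = torch.randn(4, 8, 64)
    print(hidden.permute(1, 0, 2).shape)  # torch.Size([8, 4, 64])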
