import os
from pathlib import Path
from PIL import Image
from torch.utils.data import DataLoader, Dataset
import numpy as np
from torchvision import transforms, models
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import random
import time
%matplotlib inline

# Define cat as 0 and dog as 1
label_dict = {'cat': 0, 'dog': 1}

path = Path(r'D:\图像数据集\kaggle_cat_vs_dog')
os.listdir(path)  # two folders, train and test1; test1 holds the test images and is not used in this experiment

# Collect all file names
all_file_name = os.listdir(path / 'train')
# Shuffle them
random.shuffle(all_file_name)
# Derive the labels from the shuffled file names (e.g. "cat.123.jpg" -> 'cat')
all_labels = [label_dict[i.split('.')[0]] for i in all_file_name]

# Train/validation split
Xtrain, Xvalid, Ytrain, Yvalid = train_test_split(all_file_name, all_labels, test_size=0.3)
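The split above is purely random, which is usually fine because the Kaggle training set is roughly balanced between cats and dogs. As a small sketch of my own (not part of the original note), the class balance can be checked with collections.Counter, and train_test_split also accepts a stratify argument if an exact ratio is wanted:

from collections import Counter

# Sanity-check the class balance of the random split
print(Counter(Ytrain), Counter(Yvalid))  # both should be roughly 50/50 cats vs dogs

# Optional: keep the cat/dog ratio identical in both splits
# Xtrain, Xvalid, Ytrain, Yvalid = train_test_split(
#     all_file_name, all_labels, test_size=0.3, stratify=all_labels)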
# Image preprocessing
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

# Build the Dataset
class CatvsDogDataset(Dataset):
    def __init__(self, path, mode, file_names, labels):
        '''
        mode: 1. train (train and valid)  2. test1 (test)
        '''
        self.data_list = [path / mode / file for file in file_names]
        self.labels = labels
        self.mode = mode

    def __getitem__(self, index):
        img = Image.open(self.data_list[index])
        label = self.labels[index]
        if self.mode == 'train':
            return train_transforms(img), torch.LongTensor([label])
        else:
            return test_transforms(img)

    def __len__(self):
        return len(self.labels)

# Take a look at what a sample looks like
# img = Image.open(path / 'train' / Xtrain[0])
# print(img.size)
# print(train_transforms(img).shape)
# print(train_transforms(img).numpy().shape)
# print(np.transpose(train_transforms(img).numpy(), [1, 2, 0]).shape)
# print((train_transforms(img).permute(1, 2, 0)).shape)
# img = np.array(img)
# plt.imshow(img)

# Build the DataLoaders
train_loader = DataLoader(CatvsDogDataset(path, 'train', Xtrain, Ytrain)
                          , shuffle=True
                          , batch_size=32
                          # , pin_memory=True
                          # , num_workers=2
                          )
valid_loader = DataLoader(CatvsDogDataset(path, 'train', Xvalid, Yvalid)
                          , shuffle=True
                          , batch_size=32
                          # , pin_memory=True
                          # , num_workers=2
                          )

# Check that the dataloader works
# tmpx, tmpy = next(iter(train_loader))
# print(tmpx.shape)
# print(tmpy.shape)
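To eyeball what the augmentation pipeline actually produces, one batch can be pulled from train_loader, un-normalized and plotted. This is a sketch of my own (not in the original note); it assumes the same mean/std values used in Normalize above:

# Visualize a few augmented training images (undoes the Normalize transform for display)
imgs, labels = next(iter(train_loader))
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img, label in zip(axes, imgs, labels):
    img = (img * std + mean).clamp(0, 1)        # undo normalization
    ax.imshow(img.permute(1, 2, 0).numpy())     # CHW -> HWC for matplotlib
    ax.set_title('dog' if label.item() == 1 else 'cat')
    ax.axis('off')
plt.show()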
# Pretrained network used for feature extraction
class Feature_net(nn.Module):
    def __init__(self, model):
        super().__init__()
        if model == 'vgg':
            vgg = models.vgg19(pretrained=True)  # use the ready-made pretrained weights
            # take the model's feature-extraction layers (all the convolution-related layers)
            self.feature = nn.Sequential(*list(vgg.children())[:-2])
            # self.feature.add_module(name='global average', module=nn.AvgPool2d(9))
        elif model == 'inception_v3':
            inception = models.inception_v3(pretrained=True)
            self.feature = nn.Sequential(*list(inception.children())[:-1])  # drop the final fc layer
            self.feature._modules.pop('13')  # drop module '13' (the auxiliary classifier branch)
            self.feature.add_module('global average', nn.AvgPool2d(35))
        elif model == 'resnet152':
            resnet = models.resnet152(pretrained=True)
            self.feature = nn.Sequential(*list(resnet.children())[:-1])

    def forward(self, x):
        x = self.feature(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        return x

# Scratch notes for inspecting the torchvision models:
# nn.AvgPool2d()
# nn.AdaptiveAvgPool2d()  # adaptive average pooling layer
# model = models.vgg16(pretrained=True)
# print(model)
# print(nn.Sequential(*list(model.children())[:-2]))
# model = models.inception_v3(pretrained=True)
# print(model)
# model = models.resnet152(pretrained=True)
# print(model)
# model.add_module()
# model._modules.pop('avgpool')
# model._modules
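The classifier head defined next has to accept the flattened VGG19 feature vector, whose length (25088) can be verified with a dummy forward pass rather than treated as a magic number. A small sketch of my own, not part of the original note:

# Sanity-check the flattened feature size the classifier head must accept (reuses the cached VGG19 weights)
with torch.no_grad():
    dummy = torch.zeros(1, 3, 224, 224)
    print(Feature_net('vgg')(dummy).shape)  # torch.Size([1, 25088]) = 512 channels * 7 * 7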
# Define our own fully connected classifier head
class Classifier(nn.Module):
    def __init__(self, dim, n_class):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(dim, 1000)
            , nn.ReLU(inplace=True)
            , nn.Dropout(0.5)
            , nn.Linear(1000, n_class)
        )

    def forward(self, x):
        x = self.fc(x)
        return x

model_fe = Feature_net('vgg')
for param in model_fe.parameters():
    param.requires_grad = False  # freeze the convolutional layers' parameters
model_clf = Classifier(25088, 2)

# if torch.cuda.is_available():
#     model_fe = model_fe.cuda()
#     model_clf = model_clf.cuda()
model_fe = model_fe.cuda()   # the note assumes a CUDA GPU is available
model_clf = model_clf.cuda()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model_clf.parameters())  # only the classifier head is trained

now = lambda: time.time()
begin_time = now()

###################
# The machine is underpowered and the dataloader is IO-bound, so only one epoch is trained here
model_clf.train()
for epoch in range(1):
    sum_loss = 0.0
    for i, data in enumerate(train_loader):
        imgs, labels = data
        imgs, labels = imgs.cuda(), labels.cuda()
        x = model_fe(imgs)
        x = model_clf(x)
        loss = criterion(x, labels.squeeze())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # print(loss)
        sum_loss += loss.item()
        if i % 100 == 99:
            print('[%d,%d] loss:%.03f' % (epoch + 1, i + 1, sum_loss / 100))
            sum_loss = 0.0
##################
print(f'Elapsed: {now() - begin_time}s')

# Quick check on one validation batch
tmpx, tmpy = next(iter(valid_loader))
print(tmpx.shape)
with torch.no_grad():
    model_fe.eval()
    model_clf.eval()
    out = model_fe(tmpx.cuda())
    out = model_clf(out)
print(torch.max(out, 1)[1])
print(tmpy.squeeze())
print(sum(torch.max(out, 1)[1].cpu() == tmpy.squeeze()).numpy() / 32)

# Evaluate the model on the whole validation set
model_fe.eval()
model_clf.eval()
with torch.no_grad():
    eval_acc = 0
    for data in valid_loader:
        img, label = data
        if torch.cuda.is_available():
            img = img.cuda()
            label = label.cuda()
        out = model_fe(img)
        out = model_clf(out)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label.squeeze()).sum()
        eval_acc += num_correct.item()
    print(f'Acc: {eval_acc / len(Yvalid)}')
# output: Acc 0.934
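The test1 folder that the note sets aside could be scored with the same pieces. The sketch below is my own addition, not part of the original note: it reuses CatvsDogDataset in 'test1' mode (which returns only the transformed image), passes dummy labels just to satisfy __len__, and saves the trained classifier head; the file name catdog_clf.pth is made up for illustration.

# Predict on the unused test1 folder and save the trained head (illustrative sketch)
test_files = os.listdir(path / 'test1')
test_ds = CatvsDogDataset(path, 'test1', test_files, labels=[0] * len(test_files))  # dummy labels
test_loader = DataLoader(test_ds, shuffle=False, batch_size=32)

model_fe.eval()
model_clf.eval()
predictions = []
with torch.no_grad():
    for imgs in test_loader:
        imgs = imgs.cuda()                      # same GPU assumption as above
        out = model_clf(model_fe(imgs))
        predictions.extend(torch.max(out, 1)[1].cpu().tolist())  # 0 = cat, 1 = dog

# Only the classifier head was trained, so that is all that needs saving
torch.save(model_clf.state_dict(), 'catdog_clf.pth')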
Finally
That's all for this note on PyTorch transfer learning for the Kaggle cats-vs-dogs (猫狗大战) task.