# --------------------------------------------------------
# References:
# MAE: /~https://github.com/facebookresearch/mae
# timm: /~https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: /~https://github.com/facebookresearch/deit
# --------------------------------------------------------
from functools import partial
import numpy as np
import math
import torch
from torch import nn, einsum
from einops import rearrange
import itertools
import timm_utils.models.vision_transformer
from util.pos_embed import get_2d_sincos_pos_embed
class VisionTransformer(timm_utils.models.vision_transformer.VisionTransformer):
    """ Vision Transformer with support for global average pooling
    """
    def __init__(self, global_pool=False, **kwargs):
        super(VisionTransformer, self).__init__(**kwargs)

        # Added by Samar, need default pos embedding:
        # overwrite the learned positional embedding with a fixed 2-D
        # sin-cos embedding (cls token included).
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches ** .5),
                                            cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        # Precompute the 2-D ALiBi attention bias, shape (1, heads, patches, patches).
        self.attn_bias = get_alibi(attention_heads=self.attention_heads,
                                   num_patches=self.num_patches)

        self.global_pool = global_pool
        if self.global_pool:
            norm_layer = kwargs['norm_layer']
            embed_dim = kwargs['embed_dim']
            self.fc_norm = norm_layer(embed_dim)

            del self.norm  # remove the original norm

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for blk in self.blocks:
            x = blk(x)

        if self.global_pool:
            x = x[:, 1:, :].mean(dim=1)  # global pool without cls token
            outcome = self.fc_norm(x)
        else:
            x = self.norm(x)
            outcome = x[:, 0]

        return outcome
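

# Illustrative usage sketch, not part of the original file: with global_pool=True
# the class mean-pools the patch tokens and applies fc_norm, otherwise it returns
# the normalised cls token; either way forward_features yields a (batch, embed_dim)
# tensor. The helper below is a hedged example and assumes the local timm_utils
# VisionTransformer accepts the usual timm constructor arguments and exposes the
# attention_heads / num_patches attributes used in __init__ above.
def _example_forward_features(global_pool=True):
    model = VisionTransformer(
        img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12,
        num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), global_pool=global_pool)
    feats = model.forward_features(torch.rand(2, 3, 224, 224))
    return feats.shape  # torch.Size([2, 768]) in both pooling modes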
def get_alibi(attention_heads, num_patches):
    """Build a 2-D ALiBi-style additive attention bias of shape
    (1, attention_heads, num_patches, num_patches): each head penalises
    attention in proportion to the Euclidean distance between patches."""
    points = list(itertools.product(range(int(math.sqrt(num_patches))), range(int(math.sqrt(num_patches)))))

    def get_slopes(n):
        def get_slopes_power_of_2(n):
            start = (2 ** (-2 ** -(math.log2(n) - 3)))
            ratio = start
            return [start * ratio ** i for i in range(n)]

        if math.log2(n).is_integer():
            return get_slopes_power_of_2(n)
        else:
            closest_power_of_2 = 2 ** math.floor(math.log2(n))
            return get_slopes_power_of_2(closest_power_of_2) + get_slopes(2 * closest_power_of_2)[0::2][
                :n - closest_power_of_2]

    slopes = torch.Tensor(get_slopes(attention_heads)).unsqueeze(1)
    idxs = []
    for p1 in points:
        for p2 in points:
            dist = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
            idxs.append(dist * slopes * -1)
    all_bias = torch.cat(idxs, dim=1)
    return all_bias.view(1, attention_heads, num_patches, num_patches)
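

# Shape sketch for get_alibi, added for illustration: for H heads on an N-patch
# square grid the bias is (1, H, N, N), and entry [0, h, i, j] equals
# -slope_h * euclidean_distance(patch_i, patch_j), so every entry is <= 0 and
# attention decays with 2-D distance. The check below is a hedged example, not
# part of the original module.
def _check_alibi_shape(attention_heads=12, num_patches=196):
    bias = get_alibi(attention_heads=attention_heads, num_patches=num_patches)
    assert bias.shape == (1, attention_heads, num_patches, num_patches)
    assert torch.all(bias <= 0)
    return bias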
def vit_base_patch16(**kwargs):
    model = VisionTransformer(
        embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_large_patch16(**kwargs):
    model = VisionTransformer(
        embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_huge_patch14(**kwargs):
    model = VisionTransformer(
        embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_base_patch16_128(**kwargs):
    model = VisionTransformer(
        img_size=80, patch_size=8, in_chans=12,
        embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_base_patch8_120(**kwargs):
    model = VisionTransformer(
        img_size=120, patch_size=8, in_chans=12,
        embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_large_patch16_128(**kwargs):
    model = VisionTransformer(
        img_size=128, patch_size=8, in_chans=12,
        embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
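

# Illustrative note, not part of the original file: the ALiBi bias built in
# __init__ grows with the square of the patch count, which itself follows from
# img_size and patch_size, e.g. vit_base_patch8_120 above uses a
# (120 // 8) ** 2 = 225-patch grid. The helper below is a hedged sketch for
# checking that relationship on any of the factories in this module.
def _check_patch_grid(factory=vit_base_patch8_120, img_size=120, patch_size=8):
    model = factory()
    assert model.patch_embed.num_patches == (img_size // patch_size) ** 2
    return model.patch_embed.num_patches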
if __name__ == '__main__':
    import time

    # input = torch.rand(1, 12, 128, 128)  # 128 x 128 variant (unused here)
    # 120 x 120 input with 12 channels, matching vit_base_patch8_120 below.
    input = torch.rand(1, 12, 120, 120)
    model = vit_base_patch8_120()

    t1 = time.time()
    output = model(input)
    t2 = time.time()
    t = t2 - t1
    print(t)
    # print(output.shape)

    # from fvcore.nn import FlopCountAnalysis, parameter_count_table
    #
    # # model = resnet101()
    # model = vit_base_patch16_128()
    # tensor = (torch.rand(1, 12, 128, 128),)
    # flops = FlopCountAnalysis(model, tensor)
    # print(flops.total())
    # print(parameter_count_table(model))