core_compression.py
import torch
import torch.nn as nn
from loguru import logger

from modelutils import find_layers
from matq import TensorQ
from quant import Quantizer


@torch.no_grad()
def opt_delta_lr(
    model,
    delta_model,
    dataloader,
    nsamples,
    wbits,
    sym,
    trits,
    rank,
    args
):
    device = model.device
    logger.info("Starting LR quantizer initialization...")

    use_cache = model.config.use_cache
    model.config.use_cache = False

    layers = model.model.decoder.layers
    delta_layers = delta_model.model.decoder.layers

    # Move the embeddings (and optional input/output projections) to the device
    # so the calibration batches can be embedded before the first decoder layer.
    model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(device)
    model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(device)
    if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
        model.model.decoder.project_out = model.model.decoder.project_out.to(device)
    if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
        model.model.decoder.project_in = model.model.decoder.project_in.to(device)
    layers[0] = layers[0].to(device)

    dtype = next(iter(model.parameters())).dtype
    inps = torch.zeros(
        (nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=device
    )
    cache = {'i': 0, 'attention_mask': None}

    class Catcher(nn.Module):
        """Captures the inputs to the first decoder layer, then aborts the forward pass."""

        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps[cache['i']] = inp
            cache['i'] += 1
            cache['attention_mask'] = kwargs['attention_mask']
            raise ValueError

    layers[0] = Catcher(layers[0])
    for batch in dataloader:
        try:
            model(batch[0].to(device))
        except ValueError:
            pass
    layers[0] = layers[0].module

    # Calibration inputs are collected; move the embeddings back to the CPU.
    model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.cpu()
    model.model.decoder.embed_positions = model.model.decoder.embed_positions.cpu()
    if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
        model.model.decoder.project_out = model.model.decoder.project_out.cpu()
    if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
        model.model.decoder.project_in = model.model.decoder.project_in.cpu()
    torch.cuda.empty_cache()

    outs = torch.zeros_like(inps)
    original_outs = torch.zeros_like(inps)
    attention_mask = cache['attention_mask']

    logger.info("Ready, creating lr quantizers...")
    quantizers = {}
    l_quantizers = {}
    lr_tensors = {}

    for i in range(len(delta_layers)):
        layer = delta_layers[i].to(device)
        original_layer = layers[i].to(device)
        subset = find_layers(layer)

        # Wrap each linear sublayer in a low-rank quantizer with separate
        # quantizers for the R and L factors.
        lr_gptq = {}
        for name in subset:
            lr_gptq[name] = TensorQ(subset[name], rank)
            lr_gptq[name].quantizer = Quantizer()
            lr_gptq[name].quantizer.configure(
                wbits,
                perchannel=True,
                sym=sym,
                mse=False,
                trits=trits,
            )
            lr_gptq[name].l_quantizer = Quantizer()
            lr_gptq[name].l_quantizer.configure(
                wbits,
                perchannel=True,
                sym=sym,
                mse=False,
                trits=trits,
            )

        def add_batch(name):
            def temp(_, inp, out):
                lr_gptq[name].add_batch_lr(inp[0].data, out.data)
            return temp

        # Run the calibration samples through the layer once to accumulate
        # the statistics needed for quantization.
        handles = []
        for name in subset:
            handles.append(subset[name].register_forward_hook(add_batch(name)))
        for j in range(nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
            original_outs[j] = original_layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
        for h in handles:
            h.remove()

        for name in subset:
            logger.info(f"Quantizing {name}...")
            lr_gptq[name].lr_quant(
                percdamp=args['percdamp'],
                groupsize=args['groupsize'],
                actorder=args['actorder'],
            )
            lr_tensors[f'<R>.model.decoder.layers.{i}.{name}'] = lr_gptq[name].R
            lr_tensors[f'<L>.model.decoder.layers.{i}.{name}'] = lr_gptq[name].L
            quantizers[f'model.decoder.layers.{i}.{name}'] = lr_gptq[name].quantizer
            l_quantizers[f'model.decoder.layers.{i}.{name}'] = lr_gptq[name].l_quantizer
            lr_gptq[name].free()

        # Recompute the layer outputs after quantization; the original layer's
        # outputs become the inputs to the next decoder block.
        for j in range(nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
            original_outs[j] = original_layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]

        # Offload the processed layer and free GPU memory before the next block.
        layers[i] = layer.cpu()
        del layer
        del lr_gptq
        torch.cuda.empty_cache()

        inps, outs = original_outs, inps

    model.config.use_cache = use_cache
    return quantizers, l_quantizers, lr_tensors
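

# ---------------------------------------------------------------------------
# Hypothetical usage sketch (an assumption, not part of the original pipeline):
# it loads a small OPT checkpoint twice to stand in for the base and the delta
# model, builds a random-token calibration set, and runs opt_delta_lr on it.
# The checkpoint name, sample count, bit width, rank, and args values below are
# illustrative placeholders, not settings taken from this repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import OPTForCausalLM

    base = OPTForCausalLM.from_pretrained("facebook/opt-125m")
    delta = OPTForCausalLM.from_pretrained("facebook/opt-125m")
    # opt_delta_lr expects a seqlen attribute on the base model.
    base.seqlen = base.config.max_position_embeddings

    nsamples = 8
    calibration = [
        (torch.randint(0, base.config.vocab_size, (1, base.seqlen)),)
        for _ in range(nsamples)
    ]

    quantizers, l_quantizers, lr_tensors = opt_delta_lr(
        base,
        delta,
        calibration,
        nsamples,
        wbits=4,
        sym=False,
        trits=False,
        rank=64,
        args={'percdamp': 0.01, 'groupsize': -1, 'actorder': False},
    )
    logger.info(f"Collected low-rank factors for {len(lr_tensors) // 2} sublayers")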