import os
import sys
with open(sys.argv[0]) as f:
code = f.read() # read the code of this file ASAP, for logging
import uuid
import glob
import time
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
# Use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
flex_attention = torch.compile(flex_attention, dynamic=False)
create_block_mask = torch.compile(create_block_mask, dynamic=False)
# -----------------------------------------------------------------------------
# Muon optimizer
def zeropower_via_svd(G, steps=None):
U, S, V = G.svd()
return U @ V.T
@torch.compile
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
"""
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
"""
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
X /= (X.norm() + eps) # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
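# --- illustrative sketch (not called during the run): applying the iteration above
# to a random matrix should drive its singular values toward ~1 (per the docstring,
# roughly Uniform(0.5, 1.5)). Assumes a CUDA device, since the function is compiled
# and runs in bfloat16.
def _demo_newtonschulz():
    G = torch.randn(256, 128, device='cuda')
    X = zeropower_via_newtonschulz5(G, steps=10)
    S = torch.linalg.svdvals(X.float())  # cast up: svdvals does not support bfloat16
    print(f"singular values after orthogonalization: min={S.min():.3f} max={S.max():.3f}")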
zeropower_backends = dict(svd=zeropower_via_svd, newtonschulz5=zeropower_via_newtonschulz5)
class Muon(torch.optim.Optimizer):
"""
Muon - MomentUm Orthogonalized by Newton-schulz
Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
the advantage that it can be stably run in bfloat16 on the GPU.
Some warnings:
- This optimizer assumes that all parameters passed in are 2D.
- It should not be used for the embedding layer, the final fully connected layer, or any {0,1}-D
parameters; those should all be optimized by a standard method (e.g., AdamW).
- To use it with 4D convolutional filters, it works well to just flatten their last 3 dimensions.
- We believe it is unlikely to work well for training with small batch size.
- We believe it may not work well for finetuning pretrained models, but we haven't tested this.
- We have not yet tried this optimizer for training scenarios larger than NanoGPT (124M).
Arguments:
lr: The learning rate used by the internal SGD.
momentum: The momentum used by the internal SGD.
nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
backend: The chosen backend for the orthogonalization step. (recommended: 'newtonschulz5')
backend_steps: The number of iteration steps to use in the backend, if it is iterative.
"""
def __init__(self, params, lr=0.02, momentum=0.95, nesterov=True,
backend='newtonschulz5', backend_steps=5):
defaults = dict(lr=lr, momentum=momentum, nesterov=nesterov, backend=backend, backend_steps=backend_steps)
super().__init__(params, defaults)
def step(self):
for group in self.param_groups:
lr = group['lr']
momentum = group['momentum']
zeropower_backend = zeropower_backends[group['backend']]
# generate weight updates in distributed fashion
total_params = sum(p.numel() for p in group['params'])
updates_flat = torch.zeros(total_params, device='cuda', dtype=torch.bfloat16)
curr_idx = 0
for i, p in enumerate(group['params']):
# luckily this will perfectly distribute a transformer with a multiple-of-4 layer count across 8 GPUs
if i % int(os.environ['WORLD_SIZE']) == int(os.environ['RANK']):
g = p.grad
assert g is not None
state = self.state[p]
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(g)
buf = state['momentum_buffer']
buf.mul_(momentum).add_(g)
g = g.add(buf, alpha=momentum) if group['nesterov'] else buf
g = zeropower_backend(g, steps=group['backend_steps'])
g *= max(1, g.size(0)/g.size(1))**0.5
updates_flat[curr_idx:curr_idx+p.numel()] = g.flatten()
curr_idx += p.numel()
# sync updates across devices. we are not memory-constrained so we can do this simple flat-buffer serialization
dist.all_reduce(updates_flat, op=dist.ReduceOp.SUM)
# deserialize and apply updates
curr_idx = 0
for p in group['params']:
g = updates_flat[curr_idx:curr_idx+p.numel()].view_as(p.data).type_as(p.data)
p.data.add_(g, alpha=-lr)
curr_idx += p.numel()
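# --- illustrative sketch (not called during the run): the per-parameter Muon update
# from step() above, with the distributed flatten/all_reduce plumbing stripped out.
# _muon_update_single is a hypothetical helper for single-GPU reading, not part of
# the optimizer's API.
def _muon_update_single(p, buf, lr=0.02, momentum=0.95, nesterov=True, steps=5):
    g = p.grad
    buf.mul_(momentum).add_(g)                           # SGD momentum accumulation
    g = g.add(buf, alpha=momentum) if nesterov else buf  # Nesterov lookahead
    g = zeropower_via_newtonschulz5(g, steps=steps)      # orthogonalize the update
    g *= max(1, g.size(0) / g.size(1)) ** 0.5            # rescale tall matrices
    p.data.add_(g, alpha=-lr)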
# -----------------------------------------------------------------------------
# PyTorch nn.Module definitions for the GPT-2 model
def norm(x):
return F.rms_norm(x, (x.size(-1),))
class CastedLinear(nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
def forward(self, x):
return F.linear(x, self.weight.to(x.dtype))
class Rotary(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
self.register_buffer('inv_freq', (1 / base) ** (torch.arange(0, dim, 2) / dim))
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
def forward(self, x):
seq_len = x.shape[1]
if seq_len != self.seq_len_cached:
t = torch.arange(seq_len, device=x.device)
freqs = torch.outer(t, self.inv_freq)
self.seq_len_cached = seq_len
self.cos_cached = freqs.cos()
self.sin_cached = freqs.sin()
cos, sin = self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]
# apply_rotary_emb(x, cos, sin)
x1, x2 = x.chunk(2, dim=3)
y1 = x1 * cos + x2 * sin
y2 = x1 * (-sin) + x2 * cos
return torch.cat((y1, y2), 3).type_as(x)
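# --- illustrative sketch (not called during the run): the rotary embedding applies a
# per-position rotation to feature pairs, so it preserves per-token norms exactly.
def _demo_rotary():
    rot = Rotary(dim=64)           # dim = head_dim
    x = torch.randn(1, 16, 4, 64)  # (batch, seq_len, n_head, head_dim)
    y = rot(x)
    print(torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-5))  # -> True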
class CausalSelfAttention(nn.Module):
def __init__(self, dim, n_head):
super().__init__()
assert dim % n_head == 0
self.n_head = n_head
self.c_q = CastedLinear(dim, dim)
self.c_k = CastedLinear(dim, dim)
self.c_v = CastedLinear(dim, dim)
# value residual lambda
self.lamb = nn.Parameter(torch.tensor(0.5)) # @Grad62304977
# rotary embeddings
self.rotary = Rotary(dim // n_head) # dim // n_head = head_dim
# output projection
self.c_proj = CastedLinear(dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x, vi, block_mask):
B, T = x.size(0), x.size(1) # batch size, sequence length
assert B == 1, "Must use batch size = 1 for FlexAttention"
q = self.c_q(x).view(B, T, self.n_head, -1)
k = self.c_k(x).view(B, T, self.n_head, -1)
v = self.c_v(x).view(B, T, self.n_head, -1)
v = (1 - self.lamb) * v + self.lamb * vi.view_as(v) # @Grad62304977
q, k = norm(q), norm(k) # QK norm suggested by @Grad62304977
q, k = self.rotary(q), self.rotary(k)
y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask)
y = y.transpose(1, 2).contiguous().view_as(x) # re-assemble all head outputs side by side
y = self.c_proj(y)
return y
class MLP(nn.Module):
def __init__(self, dim):
super().__init__()
self.c_fc = CastedLinear(dim, 4 * dim)
self.c_proj = CastedLinear(4 * dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x):
x = self.c_fc(x)
x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
x = self.c_proj(x)
return x
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = CausalSelfAttention(config.n_embd, config.n_head)
self.mlp = MLP(config.n_embd)
self.lambdas = nn.Parameter(torch.tensor([1., 0.]))
def forward(self, x, vi, x0, block_mask):
x = self.lambdas[0] * x + self.lambdas[1] * x0
x = x + self.attn(norm(x), vi, block_mask)
x = x + self.mlp(norm(x))
return x
# -----------------------------------------------------------------------------
# The main GPT-2 model
@dataclass
class GPTConfig:
vocab_size : int = 50304
n_layer : int = 12
n_head : int = 6 # head dim 128 suggested by @Grad62304977
n_embd : int = 768
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
# U-net design by @brendanh0gan
self.num_encoder_layers = config.n_layer // 2 # Half of the layers for encoder
self.num_decoder_layers = config.n_layer - self.num_encoder_layers # Remaining for decoder
# Add learnable skip connection weights for decoder layers
self.skip_weights = nn.Parameter(torch.ones(self.num_decoder_layers))
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
# token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual learning
vte = nn.Embedding(config.vocab_size, config.n_embd*12),
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
))
self.lm_head = CastedLinear(config.n_embd, config.vocab_size)
self.lm_head.weight.data.zero_() # @Grad62304977
def forward(self, idx, target, attn_blocksize):
docs = (idx == 50256).cumsum(0)
def document_causal_mask(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
document_mask = docs[q_idx] == docs[kv_idx]
window_mask = q_idx - kv_idx < attn_blocksize
return causal_mask & document_mask & window_mask
S = len(idx)
block_mask = create_block_mask(document_causal_mask, None, None, S, S, device="cuda", _compile=True)
# forward the GPT model itself
x = self.transformer.wte(idx[None]) # token embeddings of shape (b, t, n_embd)
x = norm(x) # @Grad62304977
x0 = x
vi = self.transformer.vte(idx[None]).chunk(12, dim=-1)
# Store outputs for U-Net skip connections
skip_connections = []
# Encoder pass - process only the first half of the blocks
for i in range(self.num_encoder_layers):
x = self.transformer.h[i](x, vi[i], x0, block_mask)
skip_connections.append(x)
# Decoder pass - process the remaining blocks with weighted skip connections
for i in range(self.num_decoder_layers):
x = x + self.skip_weights[i] * skip_connections.pop()
x = self.transformer.h[self.num_encoder_layers + i](x, vi[self.num_encoder_layers+i], x0, block_mask)
x = norm(x)
logits = self.lm_head(x)
logits = 30 * torch.tanh(logits / 30) # @Grad62304977
logits = logits.float()
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1))
return loss
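# --- illustrative sketch (not called during the run): the three terms combined in
# document_causal_mask above, on a toy token stream. 50256 is GPT-2's <|endoftext|>
# delimiter, so docs increments at each document boundary.
def _demo_document_causal_mask(blocksize=2):
    idx = torch.tensor([11, 12, 50256, 13, 14])  # two documents
    docs = (idx == 50256).cumsum(0)
    q = torch.arange(len(idx))[:, None]
    kv = torch.arange(len(idx))[None, :]
    causal, same_doc, window = q >= kv, docs[q] == docs[kv], q - kv < blocksize
    print((causal & same_doc & window).int())  # 1 where attention is allowed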
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _peek_data_shard(filename):
# only reads the header, returns header data
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
if header[0] != 20240520:
print("ERROR: magic number mismatch in the data .bin file!")
print("---> HINT: Are you passing in a correct file with --input_bin?")
print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
exit(1)
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
return ntok # for now just return the number of tokens
def _load_data_shard(filename):
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
assert header[0] == 20240520, "magic number mismatch in the data .bin file"
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
# the rest of it are tokens, stored as uint16
tokens = np.frombuffer(f.read(), dtype=np.uint16)
assert len(tokens) == ntok, "number of tokens read does not match header?"
return tokens
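# --- illustrative sketch (not called during the run): writing a shard in the format
# the two readers above expect -- a 256-int32 header (magic 20240520, version 1,
# token count), followed by the tokens as uint16.
def _demo_write_shard(filename, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520      # magic number checked by _peek/_load_data_shard
    header[1] = 1             # version
    header[2] = len(tokens)   # claimed token count
    with open(filename, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())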
class DistributedDataLoader:
def __init__(self, filename_pattern, T, process_rank, num_processes):
self.process_rank = process_rank
self.num_processes = num_processes
self.T = T
# glob files that match the pattern
self.files = sorted(glob.glob(filename_pattern))
assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"
# load and validate all data shards, count number of tokens in total
ntok_total = 0
for fname in self.files:
shard_ntok = _peek_data_shard(fname)
assert shard_ntok >= num_processes * T + 1
ntok_total += int(shard_ntok)
self.ntok_total = ntok_total
self.reset()
def reset(self):
self.current_shard = -1
self.advance()
def advance(self): # advance to next data shard
self.current_shard = (self.current_shard + 1) % len(self.files)
self.current_position = self.process_rank * self.T
self.tokens = _load_data_shard(self.files[self.current_shard])
def next_batch(self):
batch_size = self.T * self.num_processes
buf = self.tokens[self.current_position:self.current_position+self.T+1]
buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
x = buf[:-1] # inputs
y = buf[1:] # targets
# advance current position and load next shard if necessary
self.current_position += batch_size
if self.current_position + batch_size >= len(self.tokens):
self.advance()
return x.cuda(), y.cuda()
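# --- illustrative sketch (not called during the run): single-process usage of the
# loader above. Assumes the FineWeb shards from this run exist locally and a CUDA
# device is available (next_batch moves tensors to the GPU).
def _demo_loader():
    loader = DistributedDataLoader('data/fineweb10B/fineweb_val_*.bin', T=1024,
                                   process_rank=0, num_processes=1)
    x, y = loader.next_batch()  # x, y: (T,) long tensors on CUDA, y shifted by one token
    print(x.shape, y.shape, loader.ntok_total)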
# -----------------------------------------------------------------------------
# int main
@dataclass
class Hyperparameters:
# data hyperparams
input_bin : str = 'data/fineweb10B/fineweb_train_*.bin' # input .bin to train on
input_val_bin : str = 'data/fineweb10B/fineweb_val_*.bin' # input .bin to eval validation loss on
# optimization hyperparams
batch_size : int = 8 # batch size, in sequences, across all devices
sequence_length : int = 64*1024 # sequence length, in tokens
num_iterations : int = 1530 # number of iterations to run
warmup_iters : int = 0
cooldown_iters : int = 600 # number of iterations of linear warmup/cooldown for triangular or trapezoidal schedule
weight_decay : float = 0
# evaluation and logging hyperparams
val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
val_tokens : int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()
# set up DDP (distributed data parallel). torchrun sets this env variable
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.
# begin logging
logfile = None
if master_process:
run_id = str(uuid.uuid4())
logdir = 'logs/%s/' % run_id
os.makedirs(logdir, exist_ok=True)
logfile = 'logs/%s.txt' % run_id
# create the log file
with open(logfile, "w") as f:
# begin the log by printing this file (the Python code)
f.write(code)
f.write('='*100 + '\n')
def print0(s, logonly=False):
if master_process:
with open(logfile, "a") as f:
if not logonly:
print(s)
f.write(s+'\n')
# log information about the hardware/software environment this is running on
# and print the full `nvidia-smi` to file
print0(f"Running pytorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}\nnvidia-smi:")
import subprocess
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
print0(f'{result.stdout}', logonly=True)
print0('='*100, logonly=True)
# convenience variables
T = args.sequence_length
# calculate the number of steps to take in the val loop.
assert args.val_tokens % (T * ddp_world_size) == 0
val_steps = args.val_tokens // (T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (ddp_world_size) == 0
train_accumulation_steps = args.batch_size // ddp_world_size
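# worked example of the arithmetic above for this run's 8-GPU setup: the global batch
# is 8 sequences x 64Ki tokens = 524288 tokens/step, val_steps = 10485760 // (65536 * 8)
# = 20, and train_accumulation_steps = 8 // 8 = 1, i.e. one micro-batch per GPU per step.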
# load tokens
train_loader = DistributedDataLoader(args.input_bin, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, T, ddp_rank, ddp_world_size)
print0(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
print0(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
print0('='*100, logonly=True)
x, y = train_loader.next_batch()
# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304
model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=12, n_head=6, n_embd=768))
model = model.cuda().bfloat16()
for m in model.modules():
if isinstance(m, CastedLinear):
m.float()
if hasattr(config, "coordinate_descent_tuning"):
config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
# init the optimizer(s)
optimizer1 = torch.optim.Adam([raw_model.transformer.wte.weight, raw_model.transformer.vte.weight], lr=0.6, betas=(0.8, 0.95), fused=True)
optimizer2 = torch.optim.Adam([raw_model.lm_head.weight], lr=0.008, betas=(0.8, 0.95), fused=True)
params = list(raw_model.transformer.h.parameters())
matrix_params = [p for p in params if p.ndim == 2]
scalar_params = [p for p in params if p.ndim < 2] + [raw_model.skip_weights]
optimizer3 = Muon(matrix_params, lr=0.05, momentum=0.95)
optimizer4 = torch.optim.Adam(scalar_params, lr=0.04, betas=(0.8, 0.95), fused=True) # note that this learning rate is neither sensitive nor tuned
optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]
# learning rate decay scheduler (linear warmup and cooldown)
def get_lr(it):
assert it <= args.num_iterations
# 1) linear warmup for warmup_iters steps
if it < args.warmup_iters:
return (it+1) / args.warmup_iters
# 2) constant lr for a while
elif it < args.num_iterations - args.cooldown_iters:
return 1.0
# 3) linear cooldown
else:
decay_ratio = (args.num_iterations - it) / args.cooldown_iters
return decay_ratio
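# --- illustrative sketch (not called during the run): the trapezoidal shape of get_lr
# for this run's settings (warmup_iters=0, num_iterations=1530, cooldown_iters=600):
# flat at 1.0 through step 930, then linear decay to 0.
def _demo_schedule():
    for it in (0, 500, 930, 1230, 1530):
        print(it, get_lr(it))  # -> 1.0, 1.0, 1.0, 0.5, 0.0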
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
# Start training loop
training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
for step in range(args.num_iterations + 1):
last_step = (step == args.num_iterations)
# This effectively ignores the timing of the first 10 steps, which are slower for weird reasons.
# Alternately, and slightly more correctly in terms of benchmarking, we could do 10
# steps with dummy data first, and then re-initialize the model and reset the loader.
if step == 10:
training_time_ms = 0
t0 = time.time()
timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val
# Set the attention blocksize for the current step, in chunks of 64. By @fernbear.bsky.social
attn_blocksize = torch.tensor(64*((step/args.num_iterations * (1792 - 64) + 64)//64), dtype=torch.int, device='cuda')
# once in a while evaluate the validation dataset
if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# run validation batches
model.eval()
val_loader.reset()
val_loss = 0.0
for _ in range(val_steps):
with torch.no_grad():
x_val, y_val = val_loader.next_batch()
val_loss += model(x_val, y_val, attn_blocksize=attn_blocksize)
dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
val_loss /= val_steps
# log val loss to console and to logfile
print0(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# save the state of the training process
log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
torch.save(log, 'logs/%s/state_step%06d.pt' % (run_id, step))
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
# bit confusing: we want to make sure to eval on 0th iteration
# but also after the very last iteration. so we loop for step <= num_iterations
# instead of just < num_iterations (one extra due to <=), only to do
# the validation/sampling one last time, and then we break right here as we're done.
if last_step:
break
# --------------- TRAINING SECTION BEGIN -----------------
model.train()
for i in range(1, train_accumulation_steps+1):
ctx = model.no_sync() if i < train_accumulation_steps else contextlib.nullcontext()
with ctx: # there's no need to sync gradients every accumulation step
# forward pass
loss = model(x, y, attn_blocksize=attn_blocksize)
# advance the dataset for the next batch
x, y = train_loader.next_batch()
# backward pass
loss.backward()
train_loss = loss.detach()
for p in model.parameters():
p.grad /= train_accumulation_steps
# momentum warmup for Muon
frac = min(step/300, 1)
optimizer3.param_groups[0]['momentum'] = (1 - frac) * 0.85 + frac * 0.95
# step the optimizers and schedulers
for opt, sched in zip(optimizers, schedulers):
opt.step()
sched.step()
# null the gradients
model.zero_grad(set_to_none=True)
# --------------- TRAINING SECTION END -------------------
# everything that follows now is just diagnostics, prints, logging, etc.
#dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
approx_time = training_time_ms + 1000 * (time.time() - t0)
print0(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
if master_process:
print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")
# -------------------------------------------------------------------------
# clean up nice
dist.destroy_process_group()
====================================================================================================
Running pytorch 2.6.0.dev20241203+cu124 compiled for CUDA 12.4
nvidia-smi:
Thu Dec 5 03:41:24 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA H100 80GB HBM3 On | 00000000:19:00.0 Off | 0 |
| N/A 39C P0 75W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA H100 80GB HBM3 On | 00000000:3B:00.0 Off | 0 |
| N/A 31C P0 115W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA H100 80GB HBM3 On | 00000000:4C:00.0 Off | 0 |
| N/A 31C P0 118W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA H100 80GB HBM3 On | 00000000:5D:00.0 Off | 0 |
| N/A 38C P0 119W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA H100 80GB HBM3 On | 00000000:9B:00.0 Off | 0 |
| N/A 39C P0 123W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA H100 80GB HBM3 On | 00000000:BB:00.0 Off | 0 |
| N/A 30C P0 110W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA H100 80GB HBM3 On | 00000000:CB:00.0 Off | 0 |
| N/A 39C P0 127W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA H100 80GB HBM3 On | 00000000:DB:00.0 Off | 0 |
| N/A 30C P0 119W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
====================================================================================================
Training DataLoader: total number of tokens: 1100000000 across 11 files
Validation DataLoader: total number of tokens: 100000000 across 1 files
====================================================================================================
step:0/1530 val_loss:10.8258 train_time:0ms step_avg:nanms
step:1/1530 train_loss:10.8258 train_time:31472ms step_avg:nanms
step:2/1530 train_loss:10.0847 train_time:31583ms step_avg:nanms
step:3/1530 train_loss:8.4120 train_time:31743ms step_avg:nanms
step:4/1530 train_loss:7.5646 train_time:31903ms step_avg:nanms
step:5/1530 train_loss:7.4415 train_time:32064ms step_avg:nanms
step:6/1530 train_loss:6.9575 train_time:32224ms step_avg:nanms
step:7/1530 train_loss:7.1982 train_time:32384ms step_avg:nanms
step:8/1530 train_loss:6.7300 train_time:32546ms step_avg:nanms
step:9/1530 train_loss:6.6212 train_time:32705ms step_avg:nanms
step:10/1530 train_loss:6.5043 train_time:32865ms step_avg:nanms
step:11/1530 train_loss:6.4671 train_time:114ms step_avg:nanms
step:12/1530 train_loss:6.3423 train_time:274ms step_avg:nanms
step:13/1530 train_loss:6.2625 train_time:434ms step_avg:144.65ms
step:14/1530 train_loss:6.1986 train_time:593ms step_avg:148.31ms
step:15/1530 train_loss:6.1737 train_time:753ms step_avg:150.52ms
step:16/1530 train_loss:6.0992 train_time:914ms step_avg:152.32ms
step:17/1530 train_loss:6.1649 train_time:1075ms step_avg:153.51ms
step:18/1530 train_loss:5.9431 train_time:1235ms step_avg:154.32ms
step:19/1530 train_loss:6.0127 train_time:1395ms step_avg:155.05ms
step:20/1530 train_loss:5.6685 train_time:1556ms step_avg:155.57ms
step:21/1530 train_loss:5.9396 train_time:1717ms step_avg:156.07ms
step:22/1530 train_loss:6.1910 train_time:1876ms step_avg:156.37ms
step:23/1530 train_loss:5.8741 train_time:2038ms step_avg:156.75ms
step:24/1530 train_loss:6.0123 train_time:2198ms step_avg:156.97ms
step:25/1530 train_loss:5.6859 train_time:2356ms step_avg:157.10ms
step:26/1530 train_loss:5.5911 train_time:2518ms step_avg:157.40ms
step:27/1530 train_loss:5.7918 train_time:2680ms step_avg:157.66ms
step:28/1530 train_loss:5.3947 train_time:2840ms step_avg:157.78ms
step:29/1530 train_loss:5.6829 train_time:3001ms step_avg:157.97ms
step:30/1530 train_loss:5.4682 train_time:3162ms step_avg:158.09ms
step:31/1530 train_loss:5.4392 train_time:3322ms step_avg:158.20ms
step:32/1530 train_loss:5.2740 train_time:3482ms step_avg:158.25ms
step:33/1530 train_loss:5.5741 train_time:3642ms step_avg:158.35ms
step:34/1530 train_loss:5.4848 train_time:3802ms step_avg:158.40ms
step:35/1530 train_loss:5.6208 train_time:3962ms step_avg:158.50ms
step:36/1530 train_loss:5.5445 train_time:4123ms step_avg:158.60ms
step:37/1530 train_loss:5.4549 train_time:4283ms step_avg:158.62ms
step:38/1530 train_loss:5.2936 train_time:4444ms step_avg:158.70ms
step:39/1530 train_loss:5.3191 train_time:4604ms step_avg:158.75ms
step:40/1530 train_loss:5.2258 train_time:4764ms step_avg:158.80ms
step:41/1530 train_loss:5.2244 train_time:4925ms step_avg:158.86ms
step:42/1530 train_loss:5.1653 train_time:5084ms step_avg:158.87ms
step:43/1530 train_loss:5.2563 train_time:5244ms step_avg:158.92ms
step:44/1530 train_loss:5.2355 train_time:5405ms step_avg:158.96ms
step:45/1530 train_loss:5.3789 train_time:5565ms step_avg:159.00ms
step:46/1530 train_loss:5.1873 train_time:5725ms step_avg:159.04ms
step:47/1530 train_loss:5.0905 train_time:5885ms step_avg:159.04ms
step:48/1530 train_loss:5.2136 train_time:6045ms step_avg:159.09ms
step:49/1530 train_loss:5.1603 train_time:6205ms step_avg:159.11ms
step:50/1530 train_loss:5.2518 train_time:6366ms step_avg:159.15ms
step:51/1530 train_loss:5.1337 train_time:6526ms step_avg:159.18ms
step:52/1530 train_loss:5.0206 train_time:6686ms step_avg:159.20ms
step:53/1530 train_loss:5.1746 train_time:6847ms step_avg:159.24ms
step:54/1530 train_loss:5.0075 train_time:7008ms step_avg:159.27ms
step:55/1530 train_loss:5.4065 train_time:7169ms step_avg:159.30ms
step:56/1530 train_loss:5.0048 train_time:7329ms step_avg:159.32ms
step:57/1530 train_loss:4.8780 train_time:7490ms step_avg:159.36ms
step:58/1530 train_loss:5.0496 train_time:7651ms step_avg:159.39ms
step:59/1530 train_loss:5.0372 train_time:7811ms step_avg:159.40ms
step:60/1530 train_loss:5.1790 train_time:7972ms step_avg:159.44ms
step:61/1530 train_loss:4.8739 train_time:8133ms step_avg:159.48ms
step:62/1530 train_loss:4.9735 train_time:8293ms step_avg:159.48ms
step:63/1530 train_loss:4.9723 train_time:8453ms step_avg:159.50ms
step:64/1530 train_loss:4.9348 train_time:8614ms step_avg:159.52ms
step:65/1530 train_loss:4.7893 train_time:8774ms step_avg:159.53ms
step:66/1530 train_loss:4.8957 train_time:8936ms step_avg:159.57ms
step:67/1530 train_loss:4.8038 train_time:9096ms step_avg:159.57ms
step:68/1530 train_loss:5.0879 train_time:9255ms step_avg:159.58ms
step:69/1530 train_loss:4.7200 train_time:9415ms step_avg:159.57ms
step:70/1530 train_loss:4.8256 train_time:9575ms step_avg:159.58ms
step:71/1530 train_loss:4.9615 train_time:9736ms step_avg:159.61ms
step:72/1530 train_loss:4.8625 train_time:9897ms step_avg:159.63ms
step:73/1530 train_loss:4.7636 train_time:10057ms step_avg:159.63ms
step:74/1530 train_loss:4.9036 train_time:10219ms step_avg:159.67ms
step:75/1530 train_loss:4.8196 train_time:10379ms step_avg:159.67ms
step:76/1530 train_loss:4.7850 train_time:10539ms step_avg:159.69ms
step:77/1530 train_loss:4.9021 train_time:10701ms step_avg:159.72ms
step:78/1530 train_loss:5.1422 train_time:10861ms step_avg:159.73ms
step:79/1530 train_loss:4.8201 train_time:11023ms step_avg:159.75ms
step:80/1530 train_loss:4.8538 train_time:11183ms step_avg:159.76ms
step:81/1530 train_loss:4.6345 train_time:11343ms step_avg:159.77ms
step:82/1530 train_loss:4.8062 train_time:11504ms step_avg:159.78ms
step:83/1530 train_loss:4.7621 train_time:11664ms step_avg:159.78ms
step:84/1530 train_loss:4.7519 train_time:11824ms step_avg:159.79ms
step:85/1530 train_loss:4.6049 train_time:11984ms step_avg:159.78ms
step:86/1530 train_loss:4.8165 train_time:12144ms step_avg:159.79ms
step:87/1530 train_loss:4.7392 train_time:12304ms step_avg:159.79ms
step:88/1530 train_loss:4.7296 train_time:12464ms step_avg:159.80ms
step:89/1530 train_loss:4.6796 train_time:12626ms step_avg:159.82ms
step:90/1530 train_loss:4.6292 train_time:12785ms step_avg:159.81ms
step:91/1530 train_loss:4.6072 train_time:12946ms step_avg:159.83ms
step:92/1530 train_loss:4.7553 train_time:13106ms step_avg:159.83ms
step:93/1530 train_loss:4.5767 train_time:13267ms step_avg:159.84ms
step:94/1530 train_loss:4.6367 train_time:13428ms step_avg:159.86ms
step:95/1530 train_loss:4.6698 train_time:13588ms step_avg:159.86ms
step:96/1530 train_loss:4.5816 train_time:13749ms step_avg:159.88ms
step:97/1530 train_loss:4.6178 train_time:13910ms step_avg:159.88ms
step:98/1530 train_loss:4.5652 train_time:14071ms step_avg:159.90ms
step:99/1530 train_loss:4.6409 train_time:14232ms step_avg:159.91ms
step:100/1530 train_loss:4.6639 train_time:14392ms step_avg:159.91ms
step:101/1530 train_loss:4.5264 train_time:14553ms step_avg:159.92ms
step:102/1530 train_loss:4.6928 train_time:14714ms step_avg:159.93ms
step:103/1530 train_loss:4.5440 train_time:14875ms step_avg:159.94ms
step:104/1530 train_loss:4.5322 train_time:15035ms step_avg:159.95ms
step:105/1530 train_loss:4.5263 train_time:15195ms step_avg:159.95ms
step:106/1530 train_loss:4.5925 train_time:15355ms step_avg:159.94ms
step:107/1530 train_loss:4.4933 train_time:15514ms step_avg:159.94ms
step:108/1530 train_loss:4.3545 train_time:15675ms step_avg:159.95ms
step:109/1530 train_loss:4.4786 train_time:15836ms step_avg:159.96ms
step:110/1530 train_loss:4.4725 train_time:15996ms step_avg:159.96ms
step:111/1530 train_loss:4.4171 train_time:16156ms step_avg:159.96ms
step:112/1530 train_loss:4.5869 train_time:16317ms step_avg:159.97ms
step:113/1530 train_loss:4.4810 train_time:16477ms step_avg:159.97ms
step:114/1530 train_loss:4.3606 train_time:16638ms step_avg:159.98ms
step:115/1530 train_loss:4.4890 train_time:16802ms step_avg:160.02ms
step:116/1530 train_loss:4.4560 train_time:16966ms step_avg:160.06ms
step:117/1530 train_loss:4.3467 train_time:17129ms step_avg:160.09ms
step:118/1530 train_loss:4.5728 train_time:17293ms step_avg:160.12ms
step:119/1530 train_loss:4.4398 train_time:17457ms step_avg:160.16ms
step:120/1530 train_loss:4.3074 train_time:17623ms step_avg:160.21ms
step:121/1530 train_loss:4.2856 train_time:17786ms step_avg:160.23ms
step:122/1530 train_loss:4.4427 train_time:17949ms step_avg:160.26ms
step:123/1530 train_loss:4.2666 train_time:18113ms step_avg:160.29ms
step:124/1530 train_loss:4.5795 train_time:18278ms step_avg:160.33ms
step:125/1530 train_loss:4.4444 train_time:18442ms step_avg:160.37ms
step:125/1530 val_loss:4.3937 train_time:18488ms step_avg:160.77ms
step:126/1530 train_loss:4.4071 train_time:18608ms step_avg:160.42ms
step:127/1530 train_loss:4.4183 train_time:18774ms step_avg:160.46ms
step:128/1530 train_loss:4.3646 train_time:18939ms step_avg:160.50ms
step:129/1530 train_loss:4.6829 train_time:19104ms step_avg:160.54ms
step:130/1530 train_loss:4.3541 train_time:19267ms step_avg:160.56ms
step:131/1530 train_loss:4.3859 train_time:19431ms step_avg:160.58ms
step:132/1530 train_loss:4.3253 train_time:19595ms step_avg:160.61ms
step:133/1530 train_loss:4.4365 train_time:19759ms step_avg:160.64ms
step:134/1530 train_loss:4.2533 train_time:19924ms step_avg:160.68ms
step:135/1530 train_loss:4.4335 train_time:20088ms step_avg:160.70ms
step:136/1530 train_loss:4.2009 train_time:20251ms step_avg:160.72ms
step:137/1530 train_loss:4.3579 train_time:20416ms step_avg:160.75ms
step:138/1530 train_loss:4.2661 train_time:20580ms step_avg:160.78ms
step:139/1530 train_loss:4.3713 train_time:20744ms step_avg:160.81ms
step:140/1530 train_loss:4.4664 train_time:20908ms step_avg:160.83ms
step:141/1530 train_loss:4.3061 train_time:21071ms step_avg:160.85ms
step:142/1530 train_loss:4.3099 train_time:21236ms step_avg:160.88ms
step:143/1530 train_loss:4.2497 train_time:21401ms step_avg:160.91ms
step:144/1530 train_loss:4.3401 train_time:21565ms step_avg:160.93ms
step:145/1530 train_loss:4.2916 train_time:21729ms step_avg:160.95ms
step:146/1530 train_loss:4.1612 train_time:21892ms step_avg:160.97ms
step:147/1530 train_loss:4.3137 train_time:22056ms step_avg:160.99ms
step:148/1530 train_loss:4.3458 train_time:22221ms step_avg:161.02ms
step:149/1530 train_loss:4.3023 train_time:22386ms step_avg:161.05ms
step:150/1530 train_loss:4.4265 train_time:22550ms step_avg:161.07ms
step:151/1530 train_loss:4.2615 train_time:22715ms step_avg:161.10ms
step:152/1530 train_loss:4.2749 train_time:22879ms step_avg:161.12ms
step:153/1530 train_loss:4.3659 train_time:23043ms step_avg:161.14ms
step:154/1530 train_loss:4.3700 train_time:23206ms step_avg:161.16ms
step:155/1530 train_loss:4.2594 train_time:23369ms step_avg:161.17ms
step:156/1530 train_loss:4.3416 train_time:23533ms step_avg:161.19ms
step:157/1530 train_loss:4.3876 train_time:23698ms step_avg:161.21ms
step:158/1530 train_loss:4.2391 train_time:23862ms step_avg:161.23ms
step:159/1530 train_loss:4.3154 train_time:24026ms step_avg:161.25ms
step:160/1530 train_loss:4.1197 train_time:24190ms step_avg:161.26ms
step:161/1530 train_loss:4.3440 train_time:24353ms step_avg:161.28ms
step:162/1530 train_loss:4.3582 train_time:24517ms step_avg:161.29ms
step:163/1530 train_loss:4.3335 train_time:24680ms step_avg:161.31ms
step:164/1530 train_loss:4.1841 train_time:24844ms step_avg:161.32ms
step:165/1530 train_loss:4.2849 train_time:25008ms step_avg:161.34ms
step:166/1530 train_loss:4.3382 train_time:25171ms step_avg:161.35ms
step:167/1530 train_loss:4.2029 train_time:25335ms step_avg:161.37ms
step:168/1530 train_loss:4.2887 train_time:25500ms step_avg:161.39ms
step:169/1530 train_loss:4.1627 train_time:25664ms step_avg:161.41ms
step:170/1530 train_loss:4.0208 train_time:25829ms step_avg:161.43ms
step:171/1530 train_loss:4.1886 train_time:25992ms step_avg:161.44ms
step:172/1530 train_loss:4.2115 train_time:26155ms step_avg:161.45ms
step:173/1530 train_loss:4.2756 train_time:26319ms step_avg:161.46ms
step:174/1530 train_loss:4.4339 train_time:26482ms step_avg:161.47ms
step:175/1530 train_loss:4.2472 train_time:26645ms step_avg:161.48ms
step:176/1530 train_loss:4.0950 train_time:26808ms step_avg:161.49ms
step:177/1530 train_loss:4.0616 train_time:26969ms step_avg:161.49ms
step:178/1530 train_loss:4.1714 train_time:27133ms step_avg:161.50ms
step:179/1530 train_loss:4.1136 train_time:27297ms step_avg:161.52ms
step:180/1530 train_loss:4.1041 train_time:27459ms step_avg:161.52ms
step:181/1530 train_loss:4.2900 train_time:27623ms step_avg:161.54ms
step:182/1530 train_loss:4.1465 train_time:27785ms step_avg:161.54ms
step:183/1530 train_loss:4.1238 train_time:27947ms step_avg:161.55ms
step:184/1530 train_loss:4.1221 train_time:28111ms step_avg:161.56ms
step:185/1530 train_loss:4.2006 train_time:28274ms step_avg:161.57ms
step:186/1530 train_loss:4.1656 train_time:28437ms step_avg:161.57ms
step:187/1530 train_loss:4.2341 train_time:28600ms step_avg:161.58ms
step:188/1530 train_loss:4.1727 train_time:28897ms step_avg:162.35ms
step:189/1530 train_loss:4.1120 train_time:29230ms step_avg:163.30ms
step:190/1530 train_loss:4.2119 train_time:29392ms step_avg:163.29ms
step:191/1530 train_loss:4.0788 train_time:29553ms step_avg:163.28ms
step:192/1530 train_loss:4.0312 train_time:29718ms step_avg:163.29ms
step:193/1530 train_loss:4.2559 train_time:29882ms step_avg:163.29ms
step:194/1530 train_loss:4.1701 train_time:30044ms step_avg:163.29ms
step:195/1530 train_loss:4.3565 train_time:30208ms step_avg:163.28ms
step:196/1530 train_loss:4.1807 train_time:30370ms step_avg:163.28ms
step:197/1530 train_loss:4.0407 train_time:30533ms step_avg:163.28ms
step:198/1530 train_loss:4.1757 train_time:30698ms step_avg:163.29ms
step:199/1530 train_loss:4.0383 train_time:30861ms step_avg:163.28ms
step:200/1530 train_loss:4.1076 train_time:31024ms step_avg:163.29ms
step:201/1530 train_loss:3.9940 train_time:31186ms step_avg:163.28ms
step:202/1530 train_loss:4.2426 train_time:31349ms step_avg:163.27ms
step:203/1530 train_loss:4.0614 train_time:31511ms step_avg:163.27ms
step:204/1530 train_loss:4.1827 train_time:31675ms step_avg:163.27ms
step:205/1530 train_loss:4.2401 train_time:31839ms step_avg:163.28ms
step:206/1530 train_loss:3.9473 train_time:32002ms step_avg:163.28ms
step:207/1530 train_loss:4.0813 train_time:32165ms step_avg:163.27ms
step:208/1530 train_loss:4.0948 train_time:32328ms step_avg:163.27ms
step:209/1530 train_loss:4.2357 train_time:32490ms step_avg:163.27ms
step:210/1530 train_loss:4.1754 train_time:32654ms step_avg:163.27ms
step:211/1530 train_loss:4.0578 train_time:32817ms step_avg:163.27ms
step:212/1530 train_loss:4.1118 train_time:32981ms step_avg:163.27ms
step:213/1530 train_loss:4.0528 train_time:33143ms step_avg:163.27ms
step:214/1530 train_loss:4.1181 train_time:33307ms step_avg:163.27ms
step:215/1530 train_loss:3.9619 train_time:33469ms step_avg:163.26ms
step:216/1530 train_loss:4.0044 train_time:33632ms step_avg:163.26ms
step:217/1530 train_loss:4.0215 train_time:33796ms step_avg:163.26ms
step:218/1530 train_loss:4.0830 train_time:33958ms step_avg:163.26ms
step:219/1530 train_loss:4.0748 train_time:34123ms step_avg:163.27ms
step:220/1530 train_loss:4.0815 train_time:34286ms step_avg:163.26ms
step:221/1530 train_loss:4.0859 train_time:34448ms step_avg:163.26ms
step:222/1530 train_loss:3.9963 train_time:34612ms step_avg:163.26ms
step:223/1530 train_loss:3.9865 train_time:34775ms step_avg:163.26ms
step:224/1530 train_loss:4.2910 train_time:34937ms step_avg:163.26ms
step:225/1530 train_loss:3.9183 train_time:35101ms step_avg:163.26ms
step:226/1530 train_loss:3.9880 train_time:35264ms step_avg:163.26ms
step:227/1530 train_loss:3.9710 train_time:35427ms step_avg:163.26ms
step:228/1530 train_loss:4.1510 train_time:35591ms step_avg:163.26ms
step:229/1530 train_loss:3.9352 train_time:35757ms step_avg:163.28ms
step:230/1530 train_loss:4.0309 train_time:35925ms step_avg:163.29ms
step:231/1530 train_loss:3.9017 train_time:36091ms step_avg:163.31ms
step:232/1530 train_loss:3.9660 train_time:36258ms step_avg:163.32ms
step:233/1530 train_loss:4.0832 train_time:36425ms step_avg:163.34ms
step:234/1530 train_loss:4.0267 train_time:36591ms step_avg:163.35ms
step:235/1530 train_loss:3.9028 train_time:36759ms step_avg:163.37ms
step:236/1530 train_loss:4.0842 train_time:36926ms step_avg:163.39ms
step:237/1530 train_loss:4.0839 train_time:37091ms step_avg:163.40ms
step:238/1530 train_loss:3.9413 train_time:37258ms step_avg:163.41ms
step:239/1530 train_loss:4.0780 train_time:37425ms step_avg:163.43ms
step:240/1530 train_loss:4.1136 train_time:37591ms step_avg:163.44ms
step:241/1530 train_loss:3.9656 train_time:37758ms step_avg:163.45ms
step:242/1530 train_loss:4.1461 train_time:37925ms step_avg:163.47ms
step:243/1530 train_loss:4.0128 train_time:38091ms step_avg:163.48ms
step:244/1530 train_loss:4.0827 train_time:38257ms step_avg:163.49ms
step:245/1530 train_loss:4.1406 train_time:38423ms step_avg:163.50ms
step:246/1530 train_loss:4.0583 train_time:38589ms step_avg:163.51ms
step:247/1530 train_loss:4.0064 train_time:38754ms step_avg:163.52ms
step:248/1530 train_loss:4.1036 train_time:38922ms step_avg:163.54ms
step:249/1530 train_loss:3.9220 train_time:39087ms step_avg:163.54ms
step:250/1530 train_loss:3.9725 train_time:39253ms step_avg:163.55ms
step:250/1530 val_loss:4.0045 train_time:39302ms step_avg:163.76ms
step:251/1530 train_loss:4.0787 train_time:39423ms step_avg:163.58ms
step:252/1530 train_loss:4.1886 train_time:39592ms step_avg:163.60ms
step:253/1530 train_loss:3.9362 train_time:39758ms step_avg:163.61ms
step:254/1530 train_loss:3.8792 train_time:39924ms step_avg:163.62ms
step:255/1530 train_loss:4.0767 train_time:40090ms step_avg:163.63ms
step:256/1530 train_loss:3.9888 train_time:40254ms step_avg:163.64ms
step:257/1530 train_loss:3.9924 train_time:40420ms step_avg:163.65ms
step:258/1530 train_loss:3.9815 train_time:40588ms step_avg:163.66ms
step:259/1530 train_loss:4.0262 train_time:40753ms step_avg:163.67ms
step:260/1530 train_loss:4.0512 train_time:40919ms step_avg:163.68ms
step:261/1530 train_loss:4.0245 train_time:41089ms step_avg:163.70ms
step:262/1530 train_loss:3.9946 train_time:41254ms step_avg:163.71ms
step:263/1530 train_loss:3.8928 train_time:41421ms step_avg:163.72ms
step:264/1530 train_loss:3.9874 train_time:41588ms step_avg:163.73ms
step:265/1530 train_loss:3.8753 train_time:41754ms step_avg:163.74ms
step:266/1530 train_loss:3.9242 train_time:41920ms step_avg:163.75ms
step:267/1530 train_loss:3.9368 train_time:42088ms step_avg:163.77ms
step:268/1530 train_loss:3.9642 train_time:42254ms step_avg:163.77ms
step:269/1530 train_loss:3.8591 train_time:42419ms step_avg:163.78ms
step:270/1530 train_loss:4.0960 train_time:42586ms step_avg:163.79ms
step:271/1530 train_loss:3.9701 train_time:42752ms step_avg:163.80ms
step:272/1530 train_loss:3.9242 train_time:42918ms step_avg:163.81ms
step:273/1530 train_loss:3.9399 train_time:43086ms step_avg:163.82ms
step:274/1530 train_loss:4.0330 train_time:43252ms step_avg:163.83ms
step:275/1530 train_loss:4.0602 train_time:43418ms step_avg:163.84ms
step:276/1530 train_loss:4.2247 train_time:43587ms step_avg:163.86ms
step:277/1530 train_loss:4.0347 train_time:43752ms step_avg:163.87ms
step:278/1530 train_loss:4.0849 train_time:43919ms step_avg:163.88ms
step:279/1530 train_loss:4.0039 train_time:44087ms step_avg:163.89ms
step:280/1530 train_loss:4.1819 train_time:44253ms step_avg:163.90ms
step:281/1530 train_loss:3.9719 train_time:44419ms step_avg:163.91ms
step:282/1530 train_loss:3.9401 train_time:44589ms step_avg:163.93ms
step:283/1530 train_loss:3.9124 train_time:44756ms step_avg:163.94ms
step:284/1530 train_loss:4.0448 train_time:44922ms step_avg:163.95ms
step:285/1530 train_loss:4.0598 train_time:45089ms step_avg:163.96ms
step:286/1530 train_loss:4.0902 train_time:45253ms step_avg:163.96ms
step:287/1530 train_loss:3.9030 train_time:45419ms step_avg:163.97ms
step:288/1530 train_loss:4.0183 train_time:45586ms step_avg:163.98ms
step:289/1530 train_loss:3.8735 train_time:45751ms step_avg:163.98ms
step:290/1530 train_loss:3.8605 train_time:45916ms step_avg:163.99ms
step:291/1530 train_loss:3.9066 train_time:46082ms step_avg:163.99ms
step:292/1530 train_loss:3.8673 train_time:46246ms step_avg:163.99ms
step:293/1530 train_loss:3.9015 train_time:46411ms step_avg:164.00ms
step:294/1530 train_loss:3.9320 train_time:46576ms step_avg:164.00ms
step:295/1530 train_loss:3.8398 train_time:46741ms step_avg:164.00ms
step:296/1530 train_loss:3.8609 train_time:46907ms step_avg:164.01ms
step:297/1530 train_loss:3.8730 train_time:47073ms step_avg:164.02ms
step:298/1530 train_loss:3.9728 train_time:47237ms step_avg:164.02ms
step:299/1530 train_loss:3.8202 train_time:47403ms step_avg:164.02ms
step:300/1530 train_loss:3.9686 train_time:47568ms step_avg:164.03ms
step:301/1530 train_loss:3.9657 train_time:47732ms step_avg:164.03ms
step:302/1530 train_loss:3.9319 train_time:47898ms step_avg:164.03ms
step:303/1530 train_loss:3.9729 train_time:48065ms step_avg:164.04ms
step:304/1530 train_loss:3.9669 train_time:48229ms step_avg:164.05ms
step:305/1530 train_loss:4.4489 train_time:48394ms step_avg:164.05ms
step:306/1530 train_loss:3.9328 train_time:48559ms step_avg:164.05ms
step:307/1530 train_loss:3.8366 train_time:48725ms step_avg:164.06ms
step:308/1530 train_loss:3.9734 train_time:48890ms step_avg:164.06ms
step:309/1530 train_loss:3.8731 train_time:49055ms step_avg:164.06ms
step:310/1530 train_loss:4.0853 train_time:49220ms step_avg:164.07ms
step:311/1530 train_loss:3.9268 train_time:49387ms step_avg:164.08ms
step:312/1530 train_loss:3.8697 train_time:49552ms step_avg:164.08ms
step:313/1530 train_loss:3.9362 train_time:49717ms step_avg:164.08ms
step:314/1530 train_loss:4.0613 train_time:49883ms step_avg:164.09ms
step:315/1530 train_loss:3.9446 train_time:50048ms step_avg:164.09ms
step:316/1530 train_loss:3.8004 train_time:50213ms step_avg:164.09ms
step:317/1530 train_loss:3.8774 train_time:50378ms step_avg:164.10ms
step:318/1530 train_loss:3.9260 train_time:50543ms step_avg:164.10ms
step:319/1530 train_loss:3.8858 train_time:50708ms step_avg:164.10ms
step:320/1530 train_loss:4.0097 train_time:50873ms step_avg:164.11ms
step:321/1530 train_loss:3.9568 train_time:51039ms step_avg:164.11ms
step:322/1530 train_loss:3.9364 train_time:51205ms step_avg:164.12ms
step:323/1530 train_loss:4.0140 train_time:51371ms step_avg:164.12ms
step:324/1530 train_loss:3.9482 train_time:51535ms step_avg:164.12ms
step:325/1530 train_loss:4.0160 train_time:51700ms step_avg:164.13ms
step:326/1530 train_loss:3.8956 train_time:51868ms step_avg:164.14ms
step:327/1530 train_loss:4.3992 train_time:52033ms step_avg:164.14ms
step:328/1530 train_loss:4.0804 train_time:52199ms step_avg:164.15ms
step:329/1530 train_loss:3.7943 train_time:52367ms step_avg:164.16ms
step:330/1530 train_loss:3.7485 train_time:52531ms step_avg:164.16ms
step:331/1530 train_loss:3.9750 train_time:52696ms step_avg:164.16ms
step:332/1530 train_loss:3.9175 train_time:52861ms step_avg:164.17ms
step:333/1530 train_loss:3.8851 train_time:53027ms step_avg:164.17ms
step:334/1530 train_loss:3.8413 train_time:53193ms step_avg:164.17ms
step:335/1530 train_loss:4.0190 train_time:53357ms step_avg:164.18ms
step:336/1530 train_loss:3.9568 train_time:53522ms step_avg:164.18ms
step:337/1530 train_loss:4.4309 train_time:53690ms step_avg:164.19ms
step:338/1530 train_loss:3.9375 train_time:53855ms step_avg:164.19ms
step:339/1530 train_loss:3.8632 train_time:54021ms step_avg:164.20ms
step:340/1530 train_loss:3.9409 train_time:54188ms step_avg:164.21ms
step:341/1530 train_loss:3.8529 train_time:54355ms step_avg:164.21ms
step:342/1530 train_loss:3.8112 train_time:54522ms step_avg:164.22ms
step:343/1530 train_loss:3.8382 train_time:54692ms step_avg:164.24ms
step:344/1530 train_loss:3.9968 train_time:54859ms step_avg:164.25ms
step:345/1530 train_loss:3.8188 train_time:55029ms step_avg:164.27ms
step:346/1530 train_loss:3.7653 train_time:55197ms step_avg:164.28ms
step:347/1530 train_loss:3.8010 train_time:55366ms step_avg:164.29ms
step:348/1530 train_loss:3.8600 train_time:55533ms step_avg:164.30ms
step:349/1530 train_loss:3.8265 train_time:55702ms step_avg:164.31ms
step:350/1530 train_loss:3.5748 train_time:55872ms step_avg:164.33ms
step:351/1530 train_loss:3.8274 train_time:56039ms step_avg:164.34ms
step:352/1530 train_loss:4.1844 train_time:56208ms step_avg:164.35ms
step:353/1530 train_loss:3.6567 train_time:56376ms step_avg:164.36ms
step:354/1530 train_loss:3.9234 train_time:56544ms step_avg:164.37ms
step:355/1530 train_loss:3.7827 train_time:56713ms step_avg:164.38ms
step:356/1530 train_loss:3.8858 train_time:56878ms step_avg:164.39ms
step:357/1530 train_loss:3.7630 train_time:57049ms step_avg:164.41ms
step:358/1530 train_loss:3.8606 train_time:57218ms step_avg:164.42ms
step:359/1530 train_loss:3.7748 train_time:57389ms step_avg:164.44ms
step:360/1530 train_loss:3.4332 train_time:57558ms step_avg:164.45ms
step:361/1530 train_loss:4.0286 train_time:57726ms step_avg:164.46ms
step:362/1530 train_loss:3.9170 train_time:57894ms step_avg:164.47ms
step:363/1530 train_loss:3.8414 train_time:58062ms step_avg:164.48ms
step:364/1530 train_loss:3.7544 train_time:58230ms step_avg:164.49ms
step:365/1530 train_loss:3.9149 train_time:58397ms step_avg:164.50ms
step:366/1530 train_loss:3.8713 train_time:58566ms step_avg:164.51ms
step:367/1530 train_loss:3.8573 train_time:58734ms step_avg:164.52ms
step:368/1530 train_loss:3.8502 train_time:58901ms step_avg:164.53ms
step:369/1530 train_loss:3.7469 train_time:59071ms step_avg:164.54ms
step:370/1530 train_loss:3.8768 train_time:59238ms step_avg:164.55ms
step:371/1530 train_loss:3.7307 train_time:59408ms step_avg:164.56ms
step:372/1530 train_loss:3.6971 train_time:59577ms step_avg:164.58ms
step:373/1530 train_loss:3.9092 train_time:59744ms step_avg:164.58ms
step:374/1530 train_loss:3.8318 train_time:59912ms step_avg:164.59ms
step:375/1530 train_loss:3.8046 train_time:60080ms step_avg:164.60ms
step:375/1530 val_loss:3.8271 train_time:60129ms step_avg:164.74ms