code:gpt_example

Differences

This shows the differences between the two selected revisions of the page.

code:gpt_example [2020/08/04 21:42] rex8312 → code:gpt_example [2024/03/23 02:42] (current) – external edit 127.0.0.1
Line 1: Line 1:
-====== [example] GPT ======
+====== Example: GPT ======
  
   * References
 +    * https://github.com/karpathy/minGPT
     * https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_gpt2.py
     * https://github.com/openai/finetune-transformer-lm
Line 7: Line 8:
     * https://github.com/Andras7/gpt2-pytorch
  
 +===== V2 =====
 +<code python gpt_v2.py>
 +
 +import argparse
 +import math
 +
 +import numpy as np
 +import plotille
 +import torch
 +import torch.nn as nn
 +import torch.nn.functional as F
 +import torch.optim as optim
 +import tqdm
 +from gr.pygr import mlab
 +from IPython import embed
 +from torch.utils.data import Dataset
 +from torch.utils.data.dataloader import DataLoader
 +
 +
 +def parse_args():
 +    parser = argparse.ArgumentParser()
 +    parser.add_argument('--dropout', type=float, default=0.1)
 +    parser.add_argument('--lr', type=float, default=0.0001)
 +    parser.add_argument('--max_epoch', type=int, default=200)
 +    parser.add_argument('--batch_size', type=int, default=128)
 +    parser.add_argument('--data_repeat', type=int, default=1)
 +    parser.add_argument('--device', type=str, default='cuda')
 +    parser.add_argument('--block_size', type=int, default=32)
 +    parser.add_argument('--test_steps', type=int, default=512)
 +    parser.add_argument('--n_workers', type=int, default=1)
 +    parser.add_argument('--weight_decay', type=float, default=0.1)
 +    parser.add_argument('--noise_scale', type=float, default=0.1)
 +    parser.add_argument('--max_grad_norm', type=float, default=1.0)
 +    parser.add_argument('--dataset', choices=['BasicDataset', 'MotionDataset'], default='MotionDataset')
 +    return parser.parse_args()
 +
 +args = parse_args()
 +
 +
 +class CausalSelfAttention(nn.Module):
 +    """
 +    https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
 +    """
 +
 +    def __init__(self, d_model, n_head, block_size, dropout):
 +        super().__init__()
 +        assert d_model % n_head == 0
 +        # key, query, value projections for all heads
 +        self.key = nn.Linear(d_model, d_model)
 +        self.query = nn.Linear(d_model, d_model)
 +        self.value = nn.Linear(d_model, d_model)
 +        # regularization
 +        self.attn_drop = nn.Dropout(dropout)
 +        self.resid_drop = nn.Dropout(dropout)
 +        # output projection
 +        self.proj = nn.Linear(d_model, d_model)
 +        # causal mask to ensure that attention is only applied to the left in the input sequence
 +        self.register_buffer(
 +            "mask", 
 +            torch.tril(torch.ones(block_size, block_size)).view(1, 1, block_size, block_size)
 +        )
 +        self.n_head = n_head
 +
 +    def forward(self, x, layer_past=None):
 +        B, T, C = x.size()
 +
 +        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
 +        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
 +        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
 +        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
 +
 +        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
 +        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
 +        att = att.masked_fill(self.mask[:,:,:T,:T] == 0, -1e10) # todo: just use float('-inf') instead?
 +        att = F.softmax(att, dim=-1)
 +        att = self.attn_drop(att)
 +        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
 +        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
 +
 +        # output projection
 +        y = self.resid_drop(self.proj(y))
 +        return y
 +
 +class Block(nn.Module):
 +    """ an unassuming Transformer block """
 +
 +    def __init__(self, d_model, n_head, block_size, dropout):
 +        super().__init__()
 +        self.ln1 = nn.LayerNorm(d_model)
 +        self.ln2 = nn.LayerNorm(d_model)
 +        self.attn = CausalSelfAttention(d_model, n_head, block_size, dropout)
 +        self.mlp = nn.Sequential(
 +            nn.Linear(d_model, 4 * d_model),
 +            nn.GELU(),
 +            nn.Linear(4 * d_model, d_model),
 +            nn.Dropout(dropout),
 +        )
 +
 +    def forward(self, x):
 +        x = x + self.attn(self.ln1(x))
 +        x = x + self.mlp(self.ln2(x))
 +        return x
 +
 +
 +class GPTModel(nn.Module):
 +    def __init__(self, input_dims, output_dims, block_size):
 +        super().__init__()
 +        self.n_layers = 6
 +        self.n_heads = 8
 +        self.d_model = 512
 +        self.block_size = block_size
 +
 +        self.we = nn.Linear(input_dims, self.d_model, bias=True)
 +        self.wp = nn.Parameter(torch.zeros(1, self.block_size, self.d_model))
 +        self.blocks = nn.Sequential(*[
 +            Block(self.d_model, self.n_heads, self.block_size, args.dropout) 
 +            for _ in range(self.n_layers)
 +        ])
 +        self.norm = nn.LayerNorm(self.d_model)
 +        self.wd = nn.Linear(self.d_model, output_dims, bias=True)
 +
 +        self.apply(self._init_weights)
 +        print(f'n_params: {sum(p.numel() for p in self.parameters())}')
 +
 +    def _init_weights(self, module):
 +        if isinstance(module, (nn.Linear, nn.Embedding)):
 +            module.weight.data.normal_(mean=0.0, std=0.02)
 +            if isinstance(module, nn.Linear) and module.bias is not None:
 +                module.bias.data.zero_()
 +        elif isinstance(module, nn.LayerNorm):
 +            module.bias.data.zero_()
 +            module.weight.data.fill_(1.0)
 +
 +    def forward(self, src):
 +        B, T, C = src.size()
 +        src_embed = self.we(src)
 +        pos_embed = self.wp[:, :T, :]
 +        hx = src_embed + pos_embed
 +        hx = self.blocks(hx)
 +        hx = self.norm(hx)
 +        out = self.wd(hx)
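 +        # shift the conditioning window one step to the left and append the newest prediction,
 +        # so the caller can feed it straight back in for autoregressive generation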
 +        src = torch.cat([src[:, 1:, :], out[:, -1:, :]], dim=1).detach()
 +        return out, src
 +
 +
 +class BasicDataset(Dataset):
 +
 +    def __init__(self, block_size, repeat, noise_scale):
 +        self.block_size = block_size
 +
 +        self.data = np.sin(np.arange(10240) / 10.)
 +        # self.data = np.sin(np.arange(10240) / 10.) * 0.5 + 2.5
 +        # self.data = np.abs(np.sin(np.arange(10240) / 10.))
 +        # data = np.sin(np.arange(10240) / 10.) * (np.sin(np.arange(10240) / 10.) > 0.0)
 +        self.data = self.data.astype(np.float32)
 +        self.data = self.data.reshape(-1, 1)
 +        self.data_std = self.data.std(0)
 +        self.repeat = repeat
 +        self.noise_scale = noise_scale
 +    
 +    def __len__(self):
 +        # return math.ceil(len(self.data) / (self.block_size + 1))
 +        return len(self.data) * self.repeat
 +
 +    def __getitem__(self, idx):
 +        # we're actually going to "cheat" and pick a spot in the dataset at random
 +        i = np.random.randint(0, len(self.data) - (self.block_size + 1))
 +        chunk = self.data[i: i+self.block_size+1].copy()  # copy, so adding noise does not modify self.data in place
 +        chunk += np.random.normal(0, self.noise_scale, chunk.shape) * self.data_std
 +        x = torch.tensor(chunk[:-1], dtype=torch.float32)
 +        y = torch.tensor(chunk[1:], dtype=torch.float32)
 +        return x, y
 +
 +    def get_test_data(self, test_steps, device):
 +        i = np.random.randint(0, len(self.data) - (test_steps + 1))
 +        idx = np.arange(i, i+test_steps)
 +        data = self.data[idx].reshape(1, -1, 1)
 +        tgt = torch.tensor(data, device=device)
 +        src = tgt[:, :args.block_size]
 +        gen = tgt[:, :args.block_size]
 +        return tgt, src, gen
 +
 +
 +class MotionDataset(Dataset):
 +    
 +    def __init__(self, block_size, repeat, noise_scale):
 +        self.block_size = block_size
 +
 +        import urllib.request, json
 +        url = "https://raw.githubusercontent.com/xbpeng/DeepMimic/master/data/motions/humanoid3d_backflip.txt"
 +        self.data = json.loads(urllib.request.urlopen(url).read())['Frames']
 +        self.data = np.array(self.data, dtype=np.float32)
 +        self.data = np.hstack([self.data[:, 3:4], self.data])
 +        self.data = np.tile(self.data, (100, 1))
 +        self.dims = self.data.shape[-1]
 +        self.data_mean = self.data.mean(0, keepdims=True)
 +        self.data_std = self.data.std(0, keepdims=True)
 +        self.data = (self.data - self.data_mean) / self.data_std
 +
 +        self.data = self.data.astype(np.float32)
 +        self.repeat = repeat
 +        self.noise_scale = noise_scale
 +    
 +    def __len__(self):
 +        # return math.ceil(len(self.data) / (self.block_size + 1))
 +        return len(self.data) * self.repeat
 +
 +    def __getitem__(self, idx):
 +        # we're actually going to "cheat" and pick a spot in the dataset at random
 +        i = np.random.randint(0, len(self.data) - (self.block_size + 1))
 +        chunk = self.data[i: i+self.block_size+1].copy()  # copy, so adding noise does not modify self.data in place
 +        chunk += np.random.normal(0, self.noise_scale, chunk.shape)
 +        x = torch.tensor(chunk[:-1], dtype=torch.float32)
 +        y = torch.tensor(chunk[1:], dtype=torch.float32)
 +        return x, y
 +
 +    def get_test_data(self, test_steps, device):
 +        i = np.random.randint(0, len(self.data) - (test_steps + 1))
 +        idx = np.arange(i, i+test_steps)
 +        data = self.data[idx].reshape(1, -1, self.dims)
 +        tgt = torch.tensor(data, device=device)
 +        src = tgt[:, :args.block_size]
 +        gen = tgt[:, :args.block_size]
 +        return tgt, src, gen
 +
 +
 +if __name__ == '__main__':
 +
 +    # create the dataloader
 +    Dataset = globals()[args.dataset]
 +    dataset = Dataset(args.block_size, args.data_repeat, args.noise_scale)
 +    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.n_workers)
 +    
 +    # create the model
 +    dim = dataset.data.shape[-1]
 +    model = GPTModel(dim, dim, args.block_size).to(args.device)
 +
 +    # create the optimizer
 +    no_decay = ["bias", "LayerNorm.weight"]
 +    params_decay = [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)]
 +    params_nodecay = [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]
 +    optim_groups = [
 +        {"params": params_decay, "weight_decay": args.weight_decay},
 +        {"params": params_nodecay, "weight_decay": 0.0},
 +    ]
 +    optimizer = optim.AdamW(optim_groups, lr=args.lr, betas=(0.9, 0.95))
 +
 +    def warmup_cosine(optimizer, lr_max, epoch, warmup=1.0):
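 +        # linear warm-up while epoch <= warmup; afterwards a cosine factor 0.5 * (1 + cos(pi * epoch)),
 +        # which cycles with a period of two epochs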
 +        s = float(epoch <= warmup)
 +        w = s*(epoch / warmup) + (1-s)*(0.5 * (1 + np.cos(np.pi * epoch)))
 +        for param_group in optimizer.param_groups:
 +            param_group['lr'] = w * lr_max
 +
 +    step = 0
 +    train_loss_list = list()
 +    test_score_list = list()
 +    
 +    for epoch in tqdm.trange(args.max_epoch):
 +        # fitting
 +        model.train()
 +        for i, (src, tgt) in tqdm.tqdm(enumerate(loader), total=len(loader), leave=False):
 +            src, tgt = src.to(args.device), tgt.to(args.device)
 +
 +            gen, _ = model(src)
 +
 +            optimizer.zero_grad()
 +            loss = (0.5 * (tgt - gen) ** 2).mean()
 +            loss.backward()
 +            nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
 +            optimizer.step()
 +            warmup_cosine(optimizer, args.lr, epoch + i / len(loader))
 +
 +            step += 1 / len(loader)
 +            train_loss_list.append((step, loss.item()))
 +
 +        tqdm.tqdm.write(plotille.scatter(*zip(*train_loss_list[-1000:]), height=25))
 +
 +        # eval
 +        model.eval()
 +        tgt, src, gen = dataset.get_test_data(args.test_steps, args.device)
 +
 +        with torch.no_grad():
 +            for i in range(args.test_steps - args.block_size):
 +                gen_, src = model(src)
 +                gen = torch.cat([gen, gen_[:, -1:, :]], dim=1)
 +        
 +        loss = (0.5 * (tgt - gen) ** 2).mean()
 +        score = (-loss).exp()
 +        test_score_list.append((step, score.item()))
 +
 +        mlab.plot(tgt.cpu().numpy()[0, :, 0])
 +        mlab.oplot(gen.cpu().numpy()[0, :, 0])
 +        tqdm.tqdm.write(plotille.scatter(*zip(*test_score_list[-1000:]), height=25))
 +        tqdm.tqdm.write(str(args))
 +
 +    embed()
 +
 +</code>
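 +
 +A quick smoke test of the V2 model above (a minimal sketch, assuming the script is saved as gpt_v2.py; parse_args() runs at import time, so import it from a process started without extra CLI flags, or simply run the file directly, e.g. python gpt_v2.py --dataset BasicDataset):
 +
 +<code python>
 +import torch
 +from gpt_v2 import GPTModel  # argparse defaults are picked up at import time
 +
 +block_size = 32
 +model = GPTModel(input_dims=1, output_dims=1, block_size=block_size)
 +x = torch.randn(4, block_size, 1)   # (batch, time, feature)
 +out, shifted = model(x)             # forward() also returns the input shifted by one step
 +print(out.shape, shifted.shape)     # torch.Size([4, 32, 1]) torch.Size([4, 32, 1])
 +</code>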
 +
 +===== V1 =====
 <code python gpt.py>
  
Line 25: Line 326:
    parser.add_argument('--custom_mha', type=lambda x: x in ('1', 'true'), default=False)
    parser.add_argument('--custom_block', type=lambda x: x in ('1', 'true'), default=True)
 +    parser.add_argument('--dropout', type=float, default=0.1)
 +    parser.add_argument('--lr', type=float, default=0.00025)
     return parser.parse_args()
  
Line 70: Line 373:
  
 class MHA(nn.Module):
-    def __init__(self, embed_dim, num_heads):
+    def __init__(self, embed_dim, num_heads, dropout):
         super().__init__()
         self.n_heads = num_heads
Line 77: Line 380:
             self.attn = MultiheadAttention(embed_dim, num_heads)
         else:
-            self.attn = nn.MultiheadAttention(embed_dim, num_heads)
+            self.attn = nn.MultiheadAttention(embed_dim, num_heads, dropout)
         self.out = nn.Linear(embed_dim, embed_dim)
  
Line 111: Line 414:
  
 class CustomBlock(nn.Module):
-    def __init__(self, embed_dim, num_heads):
+    def __init__(self, embed_dim, num_heads, dropout=0.1):
         super().__init__()
         self.ln_1 = nn.LayerNorm(embed_dim)
-        self.attn = MHA(embed_dim, num_heads)
+        self.attn = MHA(embed_dim, num_heads, dropout)
         self.ln_2 = nn.LayerNorm(embed_dim)
         self.mlp = MLP(embed_dim)
Line 152: Line 455:
         if args.custom_block:
             self.blocks = nn.ModuleList([
-                CustomBlock(self.d_model, self.n_heads) for _ in range(self.n_layers)
+                CustomBlock(self.d_model, self.n_heads, dropout=args.dropout) for _ in range(self.n_layers)
             ])
         else:
             self.blocks = nn.ModuleList([
-                Block(self.d_model, self.n_heads) for _ in range(self.n_layers)
+                Block(self.d_model, self.n_heads, dropout=args.dropout) for _ in range(self.n_layers)
             ])
  
Line 192: Line 495:
  
     n_epochs = 2500
-    prev_steps = 64
+    prev_steps = 32
-    next_steps = 64
+    next_steps = 2
     test_steps = 512
-    bsz = 8  # 4  # 128  # batch size must be small
+    bsz = 32  # 8  # 4  # 128
     device = 'cuda'
 
-    dataset = np.sin(np.arange(4096) / 10.)
+    dataset = np.sin(np.arange(10240) / 10.) * 0.5 + 2.5
 
     model = GPTModel(1, 1, prev_steps + next_steps).to(device)
-    optimizer = optim.Adam(model.parameters(), lr=0.000001, betas=(0.9, 0.95), eps=1e-8)
+    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.95), eps=1e-8)
-    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
+    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
 + 
 +    def warmup_cosine(optimizer, lr_max, epoch, warmup=1.0): 
 +        s = float(epoch <= warmup) 
 +        w = s*(epoch / warmup) + (1-s)*(0.5 * (1 + np.cos(np.pi * epoch))) 
 +        for param_group in optimizer.param_groups: 
 +            param_group['lr'] = w * lr_max
  
     step = 0
Line 230: Line 539:
             loss.backward()
             optimizer.step()
-            scheduler.step(epoch + i / len(idxes))
+            scheduler.step(epoch + i / len(idxes))
 +            warmup_cosine(optimizer, args.lr, epoch + i / len(idxes))
  
             step += 1 / len(idxes)
Line 262: Line 572:
  
     embed()
- 
 </code>
  
 {{tag>GPT example}}