Benchmarking nn.Transformer on blueqat cloud: V100 vs. CPU

Yuichiro Minato

2023/02/18 17:13

Hello. On the GPUs we use for quantum computer simulators such as cuQuantum, we would also like to make good use of the time by running deep learning. This time, I tried running a classical Transformer model on a GPU.

I looked through various tutorials, but in the end the model from the official PyTorch tutorial worked well, so that is what I used:

https://pytorch.org/tutorials/beginner/transformer_tutorial.html
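Since the whole point of the benchmark is GPU vs. CPU, it is worth confirming that PyTorch actually sees the GPU before training. A minimal check, using only standard PyTorch calls (nothing here is specific to blueqat cloud):

import torch

# Report whether CUDA is available and which device PyTorch will use.
if torch.cuda.is_available():
    print('CUDA device:', torch.cuda.get_device_name(0))
else:
    print('CUDA not available, running on CPU')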

import math
import os
from tempfile import TemporaryDirectory
from typing import Tuple

import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.utils.data import dataset

class TransformerModel(nn.Module):

    def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
                 nlayers: int, dropout: float = 0.5):
        super().__init__()
        self.model_type = 'Transformer'
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(ntoken, d_model)
        self.d_model = d_model
        self.decoder = nn.Linear(d_model, ntoken)

        self.init_weights()

    def init_weights(self) -> None:
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src: Tensor, src_mask: Tensor) -> Tensor:
        """
        Args:
            src: Tensor, shape [seq_len, batch_size]
            src_mask: Tensor, shape [seq_len, seq_len]

        Returns:
            output Tensor of shape [seq_len, batch_size, ntoken]
        """
        src = self.encoder(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, src_mask)
        output = self.decoder(output)
        return output

def generate_square_subsequent_mask(sz: int) -> Tensor:
    """Generates an upper-triangular matrix of -inf, with zeros on diag."""
    return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)

class PositionalEncoding(nn.Module):

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:x.size(0)]
        return self.dropout(x)

from torchtext.datasets import WikiText2
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

train_iter = WikiText2(split='train')
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(map(tokenizer, train_iter), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])

def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
    """Converts raw text into a flat Tensor."""
    data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))

# train_iter was "consumed" by the process of building the vocab,
# so we have to create it again
train_iter, val_iter, test_iter = WikiText2()
train_data = data_process(train_iter)
val_data = data_process(val_iter)
test_data = data_process(test_iter)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def batchify(data: Tensor, bsz: int) -> Tensor:
    """Divides the data into bsz separate sequences, removing extra elements
    that wouldn't cleanly fit.

    Args:
        data: Tensor, shape [N]
        bsz: int, batch size

    Returns:
        Tensor of shape [N // bsz, bsz]
    """
    seq_len = data.size(0) // bsz
    data = data[:seq_len * bsz]
    data = data.view(bsz, seq_len).t().contiguous()
    return data.to(device)

batch_size = 20
eval_batch_size = 10
train_data = batchify(train_data, batch_size) # shape [seq_len, batch_size]
val_data = batchify(val_data, eval_batch_size)
test_data = batchify(test_data, eval_batch_size)

bptt = 35
def get_batch(source: Tensor, i: int) -> Tuple[Tensor, Tensor]:
    """
    Args:
        source: Tensor, shape [full_seq_len, batch_size]
        i: int

    Returns:
        tuple (data, target), where data has shape [seq_len, batch_size] and
        target has shape [seq_len * batch_size]
    """
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i:i+seq_len]
    target = source[i+1:i+1+seq_len].reshape(-1)
    return data, target

ntokens = len(vocab) # size of vocabulary
emsize = 200 # embedding dimension
d_hid = 200 # dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 2 # number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2 # number of heads in nn.MultiheadAttention
dropout = 0.2 # dropout probability
model = TransformerModel(ntokens, emsize, nhead, d_hid, nlayers, dropout).to(device)

import copy
import time

criterion = nn.CrossEntropyLoss()
lr = 5.0 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)

def train(model: nn.Module) -> None:
    model.train()  # turn on train mode
    total_loss = 0.
    log_interval = 200
    start_time = time.time()
    src_mask = generate_square_subsequent_mask(bptt).to(device)

    num_batches = len(train_data) // bptt
    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
        data, targets = get_batch(train_data, i)
        seq_len = data.size(0)
        if seq_len != bptt:  # only on last batch
            src_mask = src_mask[:seq_len, :seq_len]
        output = model(data, src_mask)
        loss = criterion(output.view(-1, ntokens), targets)

        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        total_loss += loss.item()
        if batch % log_interval == 0 and batch > 0:
            lr = scheduler.get_last_lr()[0]
            ms_per_batch = (time.time() - start_time) * 1000 / log_interval
            cur_loss = total_loss / log_interval
            ppl = math.exp(cur_loss)
            print(f'| epoch {epoch:3d} | {batch:5d}/{num_batches:5d} batches | '
                  f'lr {lr:02.2f} | ms/batch {ms_per_batch:5.2f} | '
                  f'loss {cur_loss:5.2f} | ppl {ppl:8.2f}')
            total_loss = 0
            start_time = time.time()

def evaluate(model: nn.Module, eval_data: Tensor) -> float:
    model.eval()  # turn on evaluation mode
    total_loss = 0.
    src_mask = generate_square_subsequent_mask(bptt).to(device)
    with torch.no_grad():
        for i in range(0, eval_data.size(0) - 1, bptt):
            data, targets = get_batch(eval_data, i)
            seq_len = data.size(0)
            if seq_len != bptt:
                src_mask = src_mask[:seq_len, :seq_len]
            output = model(data, src_mask)
            output_flat = output.view(-1, ntokens)
            total_loss += seq_len * criterion(output_flat, targets).item()
    return total_loss / (len(eval_data) - 1)

best_val_loss = float('inf')
epochs = 3

with TemporaryDirectory() as tempdir:
    best_model_params_path = os.path.join(tempdir, "best_model_params.pt")

    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train(model)
        val_loss = evaluate(model, val_data)
        val_ppl = math.exp(val_loss)
        elapsed = time.time() - epoch_start_time
        print('-' * 89)
        print(f'| end of epoch {epoch:3d} | time: {elapsed:5.2f}s | '
              f'valid loss {val_loss:5.2f} | valid ppl {val_ppl:8.2f}')
        print('-' * 89)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), best_model_params_path)

        scheduler.step()
    model.load_state_dict(torch.load(best_model_params_path))  # load best model states

I benchmarked the training on both GPU and CPU. The GPU is an NVIDIA V100 32GB.

For the CPU, I used an ordinary, not particularly fast one: a single-core Intel(R) CPU @ 2.50GHz.
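If you want to confirm what CPU your instance actually provides, reading /proc/cpuinfo works; a quick sketch, assuming a Linux environment:

import os

# Print the CPU model string and logical core count (Linux only).
with open('/proc/cpuinfo') as f:
    models = {line.split(':', 1)[1].strip() for line in f if line.startswith('model name')}
print('CPU:', ', '.join(sorted(models)))
print('logical cores:', os.cpu_count())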

Here are the per-epoch execution times.

GPU

| end of epoch  1 | time: 72.37s | valid loss 5.80 | valid ppl  331.84

CPU

| end of epoch 1 | time: 3313.80s | valid loss 5.82 | valid ppl 336.48

In terms of execution speed, the GPU came out roughly 46x faster. I also tried another CPU: on Google Colab's standard CPU runtime an epoch took about 3,000 seconds, a similar result. Running this on a CPU is clearly impractical, so I plan to keep using GPUs for training.
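The roughly 46x figure follows directly from the two epoch times above:

# Speedup computed from the measured per-epoch times.
gpu_time = 72.37     # V100, seconds per epoch
cpu_time = 3313.80   # 1-core CPU, seconds per epoch
print(f'speedup: {cpu_time / gpu_time:.1f}x')  # ~45.8x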

(Figure: execution time per epoch, with the CPU normalized to 1 on the vertical axis.)

Going forward there is little point in timing CPUs, so next I would like to compare GPUs against each other. That's all for now.
