Single-Machine Single-GPU Training Mode
# GPU settings: whether to use the GPU and which GPU to use
if config.use_gpu and torch.cuda.is_available():
    device = torch.device('cuda', config.gpu_id)
else:
    device = torch.device('cpu')
# Check whether a GPU is available
print('GPU available: ' + str(torch.cuda.is_available()))
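For completeness, here is a minimal sketch of how the rest of a single-GPU run would use this device. The toy model and random tensors are placeholders for illustration only, not project code; the sketch reuses the `device` chosen above.
import torch
import torch.nn as nn

# Toy example only: a linear layer and a random batch, placed on the chosen device
model = nn.Linear(16, 2).to(device)            # move the model parameters onto the device
inputs = torch.randn(8, 16).to(device)         # every tensor in a step must live on the same device
labels = torch.randint(0, 2, (8,)).to(device)
loss = nn.CrossEntropyLoss()(model(inputs), labels)
loss.backward()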
Single-Machine Multi-GPU Training Mode
- Single Machine Data Parallel (single-machine multi-GPU mode): this approach is already obsolete
from torch.nn.parallel import DataParallel
device_id=[0,1,2,3]
device=torch.device('cuda:{}'.format(device_id[0])) # GPU 0 is the primary GPU
model=model.to(device)
model=DataParallel(model,device_ids=device_id,output_device=device)
The data is first scattered across the GPUs in the list for the forward pass, and the outputs are then gathered back onto the primary GPU to compute the loss.
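A sketch of one DataParallel training step under this scheme; the toy model and random batch are stand-ins for the real model and DataLoader, reusing `device` and `device_id` from above.
import torch
import torch.nn as nn

# Toy example only: wrap a small model with DataParallel
toy_model = DataParallel(nn.Linear(16, 2).to(device), device_ids=device_id, output_device=device)
optimizer = torch.optim.Adam(toy_model.parameters())
criterion = nn.CrossEntropyLoss()

inputs = torch.randn(32, 16).to(device)        # one full batch, placed on the primary GPU
labels = torch.randint(0, 2, (32,)).to(device)
outputs = toy_model(inputs)                    # scattered and run in parallel on all GPUs in device_id
loss = criterion(outputs, labels)              # outputs are gathered on the primary GPU before the loss
optimizer.zero_grad()
loss.backward()                                # gradients end up on the primary GPU's parameter copy
optimizer.step()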
- DistributedDataParallel (DDP for short, multi-process multi-GPU training)
Steps to convert the code:
- 1. Initialize the process group
torch.distributed.init_process_group(backend="nccl", world_size=n_gpus, rank=args.local_rank)
# backend: the communication backend (nccl for NVIDIA GPUs)
# world_size: total number of processes, i.e. how many GPUs take part in the job
# rank: the rank of the current process, i.e. which GPU this process runs on
- 2. Pin each process to its GPU (the CUDA_VISIBLE_DEVICES environment variable controls which GPUs are visible)
torch.cuda.set_device(args.local_rank)
- 3. Wrap the model
model = DistributedDataParallel(model.cuda(args.local_rank), device_ids=[args.local_rank])
- 4. Split the data across the GPUs
train_sampler = DistributedSampler(train_dataset)
# the source lives in torch/utils/data/distributed.py
- 5. Pass the sampler to the DataLoader; the data then no longer needs to be shuffled there
- 6. Copy the data to the GPU
data=data.cuda(args.local_rank)
- 7. Launch command (DDP training has to be started through the launcher)
python -m torch.distributed.launch --nproc_per_node=n_gpu train.py
- 8. Save the model
torch.save: save only where local_rank == 0, and remember to save model.module.state_dict() rather than the wrapped model
torch.load: pay attention to map_location (see the sketch below)
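A minimal save/load sketch following these two points; the checkpoint path ckpt.pt is only an example name.
import torch

# Save once, on the rank-0 process, and save the inner module rather than the DDP wrapper
if args.local_rank == 0:
    torch.save(model.module.state_dict(), "ckpt.pt")

# When loading, map the checkpoint onto this process's own GPU
state_dict = torch.load("ckpt.pt", map_location=torch.device("cuda", args.local_rank))
model.module.load_state_dict(state_dict)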
Notes:
- train.py must accept a local_rank argument; the launcher passes it in
- each process's batch_size should be the batch size needed by a single GPU
- call train_sampler.set_epoch(epoch) at the start of every epoch so the data gets properly reshuffled
- once the sampler is in place, do not set shuffle=True in the DataLoader
Complete code
# System imports
import argparse
import os
# Framework imports
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn
# Custom project packages
from BruceNRE.config import config
from BruceNRE.utils import make_seed,load_pkl
from BruceNRE.process import process
from BruceNRE.dataset import CustomDataset,collate_fn
from BruceNRE import models
from BruceNRE.trainer import train,validate
# Distributed-training dependencies
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
__Models__={
"BruceCNN":models.BruceCNN
}
parser = argparse.ArgumentParser(description="Relation extraction")
parser.add_argument("--model_name", type=str, default='BruceCNN', help='model name')
parser.add_argument('--local_rank', type=int, default=1, help='local device id on the current node')
args = parser.parse_args()
if __name__ == "__main__":
    # ==================== key code ==================================
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    # Initialize distributed training
    torch.distributed.init_process_group(backend="nccl")
    # Restrict the current process to its own GPU
    torch.cuda.set_device(args.local_rank)
    # Single machine, multiple GPUs: how many GPUs take part in the job
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    # Rank of the current process, used for inter-process communication
    args.global_rank = dist.get_rank()
    # =============================================================
    model_name = args.model_name if args.model_name else config.model_name
    # Fix the random seed so that every training run is reproducible
    make_seed(config.seed)
    # Data preprocessing
    process(config.data_path, config.out_path, file_type='csv')
    # Load the data
    vocab_path = os.path.join(config.out_path, 'vocab.pkl')
    train_data_path = os.path.join(config.out_path, 'train.pkl')
    test_data_path = os.path.join(config.out_path, 'test.pkl')
    vocab = load_pkl(vocab_path, 'vocab')
    vocab_size = len(vocab.word2idx)
    # CustomDataset subclasses torch.utils.data.Dataset and handles data loading; see Dataset for details
    train_dataset = CustomDataset(train_data_path, 'train-data')
    test_dataset = CustomDataset(test_data_path, 'test-data')
    # Instantiate the CNN model
    model = __Models__[model_name](vocab_size, config)
    print(model)
    # ===================== key code =================================
    # Determine this process's device and move the model onto its GPU
    local_rank = torch.distributed.get_rank()
    torch.cuda.set_device(local_rank)
    global device
    device = torch.device("cuda", local_rank)
    # Move the model to the GPU before handing it to the DistributedDataParallel API
    model.to(device)
    # Wrap the model for multi-GPU training
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank,
                                                      find_unused_parameters=True)
    # Build a sampler for each dataset
    train_sample = DistributedSampler(train_dataset)
    test_sample = DistributedSampler(test_dataset)
    # With distributed training, shuffle must stay False: the DistributedSampler already shuffles the data
    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        drop_last=True,
        collate_fn=collate_fn,
        sampler=train_sample
    )
    test_dataloader = DataLoader(
        dataset=test_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        drop_last=True,
        collate_fn=collate_fn,
        sampler=test_sample
    )
    # =============================================
    # Build the optimizer
    optimizer = optim.Adam(model.parameters(), lr=config.learing_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=config.decay_rate, patience=config.decay_patience)
    # Loss function: cross entropy
    loss_fn = nn.CrossEntropyLoss()
    # Evaluation metrics: micro average and macro average
    best_macro_f1, best_macro_epoch = 0, 1
    best_micro_f1, best_micro_epoch = 0, 1
    best_macro_model, best_micro_model = '', ''
    print("*************************** start training *******************************")
    for epoch in range(1, config.epoch + 1):
        train_sample.set_epoch(epoch)  # reshuffle so that every GPU sees different data each epoch
        train(epoch, device, train_dataloader, model, optimizer, loss_fn, config)
        macro_f1, micro_f1 = validate(test_dataloader, device, model, config)
        model_name = model.module.save(epoch=epoch)  # per the note above, ideally save only when local_rank == 0
        scheduler.step(macro_f1)
        if macro_f1 > best_macro_f1:
            best_macro_f1 = macro_f1
            best_macro_epoch = epoch
            best_macro_model = model_name
        if micro_f1 > best_micro_f1:
            best_micro_f1 = micro_f1
            best_micro_epoch = epoch
            best_micro_model = model_name
    print("========================= training finished ==================================")
    print(f'best macro f1: {best_macro_f1:.4f}', f'in epoch: {best_macro_epoch}, saved in: {best_macro_model}')
    print(f'best micro f1: {best_micro_f1:.4f}', f'in epoch: {best_micro_epoch}, saved in: {best_micro_model}')
Finally, run it from the shell in the background with the command below (for now this is the only way I have found to launch it; other options remain to be explored):
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 main.py
Where:
- torch.distributed.launch starts the training in distributed mode
- nproc_per_node is the number of processes to start on this node; set it to the number of GPUs
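As a quick sanity check (a hypothetical snippet, not part of the project code), printing the ranks inside main.py after init_process_group shows that the launcher really starts nproc_per_node processes, each with its own local_rank:
import torch.distributed as dist

# Each of the 4 launched processes prints a different pair of ranks
print(f"global rank {dist.get_rank()} / world size {dist.get_world_size()}, local_rank {args.local_rank}")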