From 803d1f1b72fecfcfd0ea56614c9bdae92ce02f9d Mon Sep 17 00:00:00 2001
From: Jax922 <1322037892@qq.com>
Date: Mon, 12 May 2025 17:46:18 +0800
Subject: [PATCH] Investigate the causes of the slow training speed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 model/dataset.py  |   2 +-
 train_pretrain.py | 169 ++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 155 insertions(+), 16 deletions(-)

diff --git a/model/dataset.py b/model/dataset.py
index d67cb8c..14acc6c 100644
--- a/model/dataset.py
+++ b/model/dataset.py
@@ -10,7 +10,7 @@ from sklearn.model_selection import train_test_split
 import os
 import ast
 
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
+os.environ["TOKENIZERS_PARALLELISM"] = "true"
 
 
 class PretrainDataset(Dataset):
diff --git a/train_pretrain.py b/train_pretrain.py
index c4c86e7..4eeabbd 100644
--- a/train_pretrain.py
+++ b/train_pretrain.py
@@ -42,18 +42,64 @@ def train_epoch(epoch, wandb):
     start_time = time.time()
     # Define moe_path at the start of the function to avoid referencing an undefined variable in the exception handlers
     moe_path = '_moe' if lm_config.use_moe else ''
-    for step, (X, Y, loss_mask) in enumerate(train_loader):
+
+    # CUDA events for profiling each phase of the training step
+    if args.profile and (not ddp or dist.get_rank() == 0):
+        data_start = torch.cuda.Event(enable_timing=True)
+        data_end = torch.cuda.Event(enable_timing=True)
+        forward_start = torch.cuda.Event(enable_timing=True)
+        forward_end = torch.cuda.Event(enable_timing=True)
+        backward_start = torch.cuda.Event(enable_timing=True)
+        backward_end = torch.cuda.Event(enable_timing=True)
+        optimizer_start = torch.cuda.Event(enable_timing=True)
+        optimizer_end = torch.cuda.Event(enable_timing=True)
+
+    # Data prefetching
+    prefetch_factor = 2  # number of batches to prefetch
+    data_iter = iter(train_loader)
+    prefetch_batches = []
+
+    # Prefetch the initial batches
+    for _ in range(min(prefetch_factor, len(train_loader))):
         try:
-            # Move the data to the device
-            X = X.to(args.device)
-            Y = Y.to(args.device)
-            loss_mask = loss_mask.to(args.device)
+            batch = next(data_iter)
+            prefetch_batches.append([t.to(args.device, non_blocking=True) for t in batch])
+        except StopIteration:
+            break
+
+    for step in range(len(train_loader)):
+        try:
+            # Time the data loading
+            if args.profile and (not ddp or dist.get_rank() == 0):
+                data_start.record()
+
+            # Use a prefetched batch
+            if prefetch_batches:
+                X, Y, loss_mask = prefetch_batches.pop(0)
+            else:
+                # If the prefetch queue is empty, load directly
+                X, Y, loss_mask = [t.to(args.device) for t in next(data_iter)]
+
+            # Asynchronously prefetch the next batch
+            if step + prefetch_factor < len(train_loader):
+                try:
+                    batch = next(data_iter)
+                    prefetch_batches.append([t.to(args.device, non_blocking=True) for t in batch])
+                except StopIteration:
+                    pass
+
+            if args.profile and (not ddp or dist.get_rank() == 0):
+                data_end.record()
 
             # Update the learning rate
             lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)
             for param_group in optimizer.param_groups:
                 param_group['lr'] = lr
 
+            # Time the forward pass
+            if args.profile and (not ddp or dist.get_rank() == 0):
+                forward_start.record()
+
             with ctx:
                 res = model(X)
                 loss = loss_fct(
@@ -77,6 +123,10 @@ def train_epoch(epoch, wandb):
                 # On error, skip the auxiliary loss
 
             loss = loss / args.accumulation_steps
+            if args.profile and (not ddp or dist.get_rank() == 0):
+                forward_end.record()
+                backward_start.record()
+
             # Print data types for debugging
             if step == 0 and (not ddp or dist.get_rank() == 0):  # Print only for the first step of the first epoch on the main process
                 Logger("---- Data Type Check ----")
@@ -89,9 +139,24 @@ def train_epoch(epoch, wandb):
                 Logger(f"loss.dtype: {loss.dtype}")
                 Logger("-------------------------")
 
+            # Backward pass
             scaler.scale(loss).backward()
 
+            if args.profile and (not ddp or dist.get_rank() == 0):
+                backward_end.record()
+
+                # Profile every step, not only when gradient accumulation completes
+                if (step + 1) % args.profile_interval == 0:
+                    # Record the optimizer time (if this is a gradient-accumulation step)
+                    if (step + 1) % args.accumulation_steps == 0:
+                        optimizer_start.record()
+
+            # Optimizer step
             if (step + 1) % args.accumulation_steps == 0:
+                if args.profile and (not ddp or dist.get_rank() == 0):
+                    if (step + 1) % args.profile_interval != 0:
+                        optimizer_start.record()
+
                 scaler.unscale_(optimizer)
                 torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
 
@@ -100,6 +165,40 @@ def train_epoch(epoch, wandb):
 
                 optimizer.zero_grad(set_to_none=True)
 
+                if args.profile and (not ddp or dist.get_rank() == 0):
+                    optimizer_end.record()
+
+            # Profiling output (every profile_interval steps)
+            if args.profile and (not ddp or dist.get_rank() == 0) and (step + 1) % args.profile_interval == 0:
+                # Synchronize CUDA events to get accurate timings
+                torch.cuda.synchronize()
+
+                # Time spent in each phase
+                data_time = data_start.elapsed_time(data_end)
+                forward_time = forward_start.elapsed_time(forward_end)
+                backward_time = backward_start.elapsed_time(backward_end)
+
+                # The optimizer time only exists when a gradient-accumulation step has completed
+                if (step + 1) % args.accumulation_steps == 0:
+                    optimizer_time = optimizer_start.elapsed_time(optimizer_end)
+                    total_compute_time = forward_time + backward_time + optimizer_time
+                    Logger(f"Profiling - step {step+1}:")
+                    Logger(f"  Data loading time: {data_time:.2f} ms")
+                    Logger(f"  Forward time: {forward_time:.2f} ms")
+                    Logger(f"  Backward time: {backward_time:.2f} ms")
+                    Logger(f"  Optimizer time: {optimizer_time:.2f} ms")
+                    Logger(f"  Total compute time: {total_compute_time:.2f} ms")
+                    Logger(f"  Compute/data ratio: {total_compute_time / data_time:.2f}")
+                else:
+                    # Not a gradient-accumulation step, so there is no optimizer time
+                    total_compute_time = forward_time + backward_time
+                    Logger(f"Profiling - step {step+1} (accumulating gradients):")
+                    Logger(f"  Data loading time: {data_time:.2f} ms")
+                    Logger(f"  Forward time: {forward_time:.2f} ms")
+                    Logger(f"  Backward time: {backward_time:.2f} ms")
+                    Logger(f"  Total compute time: {total_compute_time:.2f} ms")
+                    Logger(f"  Compute/data ratio: {total_compute_time / data_time:.2f}")
+
             # Print the training log
             if step % args.log_interval == 0:
                 spend_time = time.time() - start_time
@@ -114,9 +213,37 @@ def train_epoch(epoch, wandb):
                     spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60))
 
             if (wandb is not None) and (not ddp or dist.get_rank() == 0):
-                wandb.log({"loss": loss.item() * args.accumulation_steps,
-                           "lr": optimizer.param_groups[-1]['lr'],
-                           "epoch_Time": spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60})
+                log_dict = {
+                    "loss": loss.item() * args.accumulation_steps,
+                    "lr": optimizer.param_groups[-1]['lr'],
+                    "epoch_Time": spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60
+                }
+
+                # If profiling is enabled, also log the performance metrics
+                if args.profile and (step + 1) % args.profile_interval == 0:
+                    # Basic performance metrics
+                    perf_dict = {
+                        "data_time_ms": data_time,
+                        "forward_time_ms": forward_time,
+                        "backward_time_ms": backward_time
+                    }
+
+                    # The optimizer time only exists when a gradient-accumulation step has completed
+                    if (step + 1) % args.accumulation_steps == 0:
+                        total_compute_time = forward_time + backward_time + optimizer_time
+                        perf_dict.update({
+                            "optimizer_time_ms": optimizer_time,
+                            "compute_time_ms": total_compute_time
+                        })
+                    else:
+                        total_compute_time = forward_time + backward_time
+                        perf_dict.update({
+                            "compute_time_ms": total_compute_time
+                        })
+
+                    log_dict.update(perf_dict)
+
+                wandb.log(log_dict)
 
             # Save the model
             if (step + 1) % args.save_interval == 0 and (not ddp or dist.get_rank() == 0):
@@ -194,28 +321,33 @@ if __name__ == "__main__":
     parser.add_argument("--out_dir", type=str, default="out")
     # To get a zero model trained as fast as possible, set epochs to 1; otherwise train 2-6 epochs to make the most of the limited data.
     parser.add_argument("--epochs", type=int, default=3)
-    parser.add_argument("--batch_size", type=int, default=8)
+    parser.add_argument("--batch_size", type=int, default=24)
     parser.add_argument("--learning_rate", type=float, default=2e-4)
     parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu")  # Use the GPU if available, otherwise fall back to the CPU.
     parser.add_argument("--dtype", type=str, default="bfloat16")
     parser.add_argument("--use_wandb", default=True, action="store_true")
     parser.add_argument("--wandb_project", type=str, default="MiniMind-Pretrain")
-    parser.add_argument("--num_workers", type=int, default=8)
+    parser.add_argument("--num_workers", type=int, default=48)
     parser.add_argument("--ddp", action="store_true")
-    parser.add_argument("--accumulation_steps", type=int, default=64)  # Gradient accumulation steps; controls how often gradients are applied.
+    parser.add_argument("--accumulation_steps", type=int, default=32)  # Gradient accumulation steps; controls how often gradients are applied.
     parser.add_argument("--grad_clip", type=float, default=1.0)  # Gradient clipping threshold; guards against exploding gradients.
     parser.add_argument("--warmup_iters", type=int, default=0)  # Number of warmup iterations for the learning-rate schedule.
     parser.add_argument("--log_interval", type=int, default=100)  # Logging interval in steps.
     parser.add_argument("--save_interval", type=int, default=10000)  # Checkpoint save interval in steps.
     parser.add_argument('--local_rank', type=int, default=-1)  # Local process rank for distributed training.
-    parser.add_argument('--dim', default=2048, type=int)  # Model hidden dimension; controls model size.
+    parser.add_argument('--dim', default=1024, type=int)  # Model hidden dimension; controls model size.
     parser.add_argument('--n_layers', default=32, type=int)  # Number of layers.
     parser.add_argument('--max_seq_len', default=1024, type=int)  # Maximum input sequence length.
     parser.add_argument('--use_moe', default=False, type=bool)  # Whether to use MoE.
     parser.add_argument('--disable_db', action='store_true', help="Disable the database feature and use a fixed value of 1e-4 instead")  # Disable the database feature, enabling a special mode
     parser.add_argument("--data_path", type=str, default="./dataset/pretrain_hq.jsonl")  # Path to the training dataset.
     parser.add_argument("--pretrained_embedding_path", type=str, default=None, help="Path to pretrained token embedding weights (.pth file)")
+    # Profiling-related arguments
+    parser.add_argument("--profile", action="store_true", default=True, help="Enable profiling")
+    parser.add_argument("--profile_interval", type=int, default=100, help="Profiling print interval (in steps)")
     args = parser.parse_args()
+
+    print(args)
 
     lm_config = LMConfig(
         dim=args.dim,
@@ -267,24 +399,31 @@ if __name__ == "__main__":
     model, tokenizer = init_model(lm_config, args.pretrained_embedding_path)
     train_ds = PretrainDataset(args.data_path, tokenizer, max_length=lm_config.max_seq_len)
     train_sampler = DistributedSampler(train_ds) if ddp else None
+    # Optimized DataLoader configuration
     train_loader = DataLoader(
         train_ds,
         batch_size=args.batch_size,
         pin_memory=True,
+        pin_memory_device=f"cuda:{ddp_local_rank}" if ddp else "cuda:0",  # specify the pin_memory device
        drop_last=False,
         shuffle=False,
         num_workers=args.num_workers,
-        sampler=train_sampler
+        sampler=train_sampler,
+        persistent_workers=True if args.num_workers > 0 else False,  # keep worker processes alive between epochs
+        prefetch_factor=2 if args.num_workers > 0 else None  # prefetch factor per worker
     )
 
-    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16']))
+    # Enable GradScaler only for float16; bfloat16 does not need loss scaling
+    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype == 'float16'))
     optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)
 
     if ddp:
         model._ddp_params_and_buffers_to_ignore = {"pos_cis"}
-        # Add find_unused_parameters=True to work around the unused-parameter errors
+        # Keep find_unused_parameters=True, since the model really does have unused parameters
         model = DistributedDataParallel(model, device_ids=[ddp_local_rank], find_unused_parameters=True)
 
+    # Keep set_detect_anomaly for now to aid debugging
+    # Comment this out once training is stable to regain speed
     torch.autograd.set_detect_anomaly(True)
     iter_per_epoch = len(train_loader)
     for epoch in range(args.epochs):
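
A note on the CUDA-event timing this patch adds around the data, forward, backward, and optimizer phases: Event.record() only enqueues a marker on the current CUDA stream, and elapsed_time() is valid only after both events have completed, which is why the patch calls torch.cuda.synchronize() before reading any timer. A minimal sketch of the same pattern, with a matmul standing in for the real work (illustrative only, not code from this repo):

    import torch

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    x = torch.randn(4096, 4096, device="cuda")

    start.record()              # marker enqueued on the current stream
    y = x @ x                   # the GPU work being timed
    end.record()

    torch.cuda.synchronize()    # wait for both events to complete
    print(f"matmul: {start.elapsed_time(end):.2f} ms")  # elapsed_time() returns milliseconds

The synchronize itself stalls the pipeline, which is one reason the patch only reads the timers every profile_interval steps.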
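The manual prefetch queue in train_epoch moves the next couple of batches to the GPU with non_blocking=True while the current batch computes; that copy only overlaps with compute when the source tensors live in pinned memory, which is what the pin_memory=True and pin_memory_device changes provide. A stripped-down sketch of the same idea as a generator; iterate_prefetched and depth are illustrative names, not part of this patch:

    import torch
    from collections import deque

    def iterate_prefetched(loader, device, depth=2):
        # Keep `depth` batches already copied (asynchronously) to `device`.
        queue = deque()
        it = iter(loader)
        for _ in range(depth):
            try:
                queue.append([t.to(device, non_blocking=True) for t in next(it)])
            except StopIteration:
                break
        while queue:
            batch = queue.popleft()
            try:
                # Refill the queue while the caller computes on `batch`.
                queue.append([t.to(device, non_blocking=True) for t in next(it)])
            except StopIteration:
                pass
            yield batch

The patch inlines this logic instead of wrapping it in a generator, and additionally guards the refill with step + prefetch_factor < len(train_loader).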
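On the GradScaler change: loss scaling exists because float16 has only 5 exponent bits, so small gradients underflow to zero; bfloat16 keeps float32's 8 exponent bits and needs no scaler. Since GradScaler(enabled=False) turns scale(), step(), and update() into pass-throughs, one code path serves both dtypes. A self-contained sketch of that pattern (the toy model and shapes are illustrative):

    import torch

    dtype = "bfloat16"  # or "float16"
    amp_dtype = torch.float16 if dtype == "float16" else torch.bfloat16

    model = torch.nn.Linear(512, 512).cuda()
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)
    scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16"))  # no-op for bfloat16

    x = torch.randn(8, 512, device="cuda")
    with torch.autocast(device_type="cuda", dtype=amp_dtype):
        loss = model(x).float().pow(2).mean()

    scaler.scale(loss).backward()  # scales the loss only when enabled
    scaler.step(optimizer)         # unscales gradients and steps (plain step when disabled)
    scaler.update()
    optimizer.zero_grad(set_to_none=True)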
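Finally, for a patch hunting slowness, the torch.autograd.set_detect_anomaly(True) left enabled at the bottom is itself a likely culprit: it records a creation traceback for every autograd node and checks outputs for NaNs, which can slow training substantially. If it must stay during debugging, the scoped context-manager form confines the overhead to the code under suspicion (a sketch; model and x stand in for the real training step):

    import torch

    model = torch.nn.Linear(16, 16)
    x = torch.randn(4, 16)

    # Scoped alternative to the global set_detect_anomaly(True):
    with torch.autograd.detect_anomaly():
        loss = model(x).sum()
        loss.backward()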