From c4f682267b78d7a0e02cf4e5558868601c213de4 Mon Sep 17 00:00:00 2001
From: Changqi Lu
Date: Thu, 25 Jul 2024 08:54:45 +0800
Subject: [PATCH] [benchmark]: llama add tokens metrics

When benchmarking an LLM, tokens/s is also an important metric, so this
patch adds it to the llama benchmark.

Signed-off-by: Changqi Lu
---
 examples/language/performance_evaluator.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/examples/language/performance_evaluator.py b/examples/language/performance_evaluator.py
index ca4a02cd2981..1fbd4c56278d 100644
--- a/examples/language/performance_evaluator.py
+++ b/examples/language/performance_evaluator.py
@@ -108,6 +108,7 @@ def __init__(
         self.num_samples: int = 0
         self.flop_megatron = 0
         self.flop: int = 0
+        self.num_tokens: int = 0
 
     def on_step_start(self, step: int) -> None:
         self.disable = self.ignore_steps > 0 and step < self.ignore_steps
@@ -125,6 +126,7 @@ def on_step_end(self, input_ids: Tensor, **kwargs) -> None:
         batch_size, seq_len = input_ids.shape
 
         self.num_samples += batch_size
+        self.num_tokens += batch_size * seq_len
         checkpoint_activations_factor = 3 + int(self.enable_grad_checkpoint)
         self.flop_megatron += (
             24 * checkpoint_activations_factor * batch_size * seq_len * self.num_layers * (self.hidden_size**2)
@@ -135,14 +137,15 @@ def on_step_end(self, input_ids: Tensor, **kwargs) -> None:
 
     def on_fit_end(self) -> None:
         avg_duration = all_reduce_mean(self.timer.duration, self.coordinator.world_size)
-        avg_throughput = self.num_samples * self.dp_world_size / (avg_duration + 1e-12)
+        avg_throughput_samples = self.num_samples * self.dp_world_size / (avg_duration + 1e-12)
+        avg_throughput_tokens = self.num_tokens * self.dp_world_size / (avg_duration + 1e-12)
         mp_world_size = self.coordinator.world_size // self.dp_world_size
         avg_tflops_per_gpu_megatron = self.flop_megatron / 1e12 / (avg_duration + 1e-12) / mp_world_size
         avg_tflops_per_gpu = self.flop / 1e12 / (avg_duration + 1e-12) / mp_world_size
         self.coordinator.print_on_master(
-            f"num_samples: {self.num_samples}, dp_world_size: {self.dp_world_size}, flop_megatron: {self.flop_megatron}, flop: {self.flop}, avg_duration: {avg_duration}, "
-            f"avg_throughput: {avg_throughput}"
+            f"num_samples: {self.num_samples}, num_tokens: {self.num_tokens}, dp_world_size: {self.dp_world_size}, flop_megatron: {self.flop_megatron}, flop: {self.flop}, avg_duration: {avg_duration}, "
+            f"avg_throughput_samples: {avg_throughput_samples}, avg_throughput_tokens: {avg_throughput_tokens}"
         )
         self.coordinator.print_on_master(
-            f"Throughput: {avg_throughput:.2f} samples/sec, TFLOPS per GPU by Megatron: {avg_tflops_per_gpu_megatron:.2f}, TFLOPS per GPU: {avg_tflops_per_gpu:.2f}"
+            f"Throughput_Samples: {avg_throughput_samples:.2f} samples/sec, Throughput_Tokens: {avg_throughput_tokens:.2f} tokens/sec, TFLOPS per GPU by Megatron: {avg_tflops_per_gpu_megatron:.2f}, TFLOPS per GPU: {avg_tflops_per_gpu:.2f}"
        )
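
Note (not part of the patch itself): the tokens/s figure reported here is simply the accumulated token count, scaled by the data-parallel world size, divided by the averaged measured step time. A minimal standalone sketch of that computation, using hypothetical numbers in place of the counters PerformanceEvaluator accumulates:

    # Minimal sketch of the tokens/sec computation introduced by this patch.
    # All values below are made up for illustration only.
    num_tokens = 8 * 4096 * 100      # batch_size * seq_len summed over 100 measured steps (hypothetical)
    dp_world_size = 4                # data-parallel ranks, each consuming its own batches (hypothetical)
    avg_duration = 95.0              # averaged wall-clock seconds for those steps (hypothetical)

    avg_throughput_tokens = num_tokens * dp_world_size / (avg_duration + 1e-12)
    print(f"Throughput_Tokens: {avg_throughput_tokens:.2f} tokens/sec")  # ~137970.53 tokens/sec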