Commit

pre-commit: running and fixing...
github-actions[bot] committed Mar 21, 2024
1 parent 4596a67 commit 605e027
Showing 1 changed file with 3 additions and 5 deletions.
thunder/benchmarks/benchmark_litgpt.py: 8 changes (3 additions, 5 deletions)
@@ -286,7 +286,7 @@ def train(self):
 iter_times = []
 if i == self.warmup_iter: # warmup
     t0 = iter_t0
-    iter_times = [] #reset the iter time list
+    iter_times = [] # reset the iter time list

 for step_idx in range(self.gradient_accumulation_steps):
     input_ids, targets = next(self.train_data_iter)
@@ -328,9 +328,7 @@ def train(self):
 t1 = time.perf_counter()
 if global_rank in [0, None]:
     iter_time = (t1 - iter_t0) * 1000
-    print(
-        f"iter {i}: loss {loss_item:.4f}, iter time: {iter_time:.2f}ms, t: {input_ids.size(1)}"
-    )
+    print(f"iter {i}: loss {loss_item:.4f}, iter time: {iter_time:.2f}ms, t: {input_ids.size(1)}")
     iter_times.append(iter_time)

 # if global_rank in [0, None] and i >=warmup_iter:
@@ -348,7 +346,7 @@ def train(self):

 if global_rank in [0, None]:
     self.perf_metrics["average_iter_time"] = ((t1 - t0) * 1000) / (self.max_iters - self.warmup_iter)
-    self.perf_metrics["median_iter_time"] = np.median(iter_times) #To avoid outliers
+    self.perf_metrics["median_iter_time"] = np.median(iter_times) # To avoid outliers

 def add_perf_metrics(self):
     # tokens_per_sec = total number of benchmarked iterations x global BS x block_size / total elapsed time (s)
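For context on the metrics this diff touches, here is a minimal sketch of how the average/median iteration times and the tokens-per-second formula quoted in the comment above could be computed. The function name summarize_perf and its parameters are illustrative assumptions, not part of benchmark_litgpt.py:

import numpy as np

def summarize_perf(iter_times_ms, n_iters, global_batch_size, block_size, elapsed_s):
    """Illustrative sketch of perf metrics similar to those updated in this diff.

    iter_times_ms: per-iteration times in milliseconds (post-warmup)
    n_iters: number of benchmarked (post-warmup) iterations
    elapsed_s: total elapsed wall time for those iterations, in seconds
    """
    metrics = {}
    # Average iteration time in ms over the benchmarked iterations.
    metrics["average_iter_time"] = (elapsed_s * 1000) / n_iters
    # Median is less sensitive to outlier iterations than the mean.
    metrics["median_iter_time"] = float(np.median(iter_times_ms))
    # tokens_per_sec = benchmarked iterations x global batch size x block_size / elapsed time (s)
    metrics["tokens_per_sec"] = n_iters * global_batch_size * block_size / elapsed_s
    return metrics

Reporting the median alongside the average keeps a single unusually slow iteration from skewing the headline number, which appears to be the intent of the "# To avoid outliers" comment in the diff.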
