3 changes: 1 addition & 2 deletions examples/nehar/benchmark.py
@@ -53,8 +53,7 @@
 benchmark = Benchmark(
     model, test_set_loader, [], postprocessors, [static_metrics, workload_metrics]
 )
-results = benchmark.run(verbose=False)
-print(results)
+results = benchmark.run(verbose=True)

 results_path = os.path.join(file_path, "results")
 benchmark.save_benchmark_results(results_path)
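With this PR, `run(verbose=True)` drives a rich live display and the benchmark prints its own formatted results table, so the explicit `print(results)` is dropped. A minimal sketch of the updated flow, assuming `model`, `test_set_loader`, `postprocessors`, `static_metrics`, `workload_metrics`, and `file_path` are defined earlier in the example script:

import os
from neurobench.benchmarks import Benchmark

benchmark = Benchmark(
    model, test_set_loader, [], postprocessors, [static_metrics, workload_metrics]
)
# verbose=True enables the per-batch live display; the final results
# table is rendered by the benchmark itself, so print(results) is gone.
results = benchmark.run(verbose=True)
benchmark.save_benchmark_results(os.path.join(file_path, "results"))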
157 changes: 110 additions & 47 deletions neurobench/benchmarks/benchmark.py
@@ -1,7 +1,5 @@
 import sys
 from contextlib import redirect_stdout
-from os import mkdir
-from tqdm import tqdm
 from snntorch import utils
 from neurobench.metrics.manager.static_manager import StaticMetricManager
 from neurobench.metrics.manager.workload_manager import WorkloadMetricManager
@@ -16,13 +14,18 @@
 import json
 import csv
 import os
-from typing import Literal, List, Type, Optional, Dict, Any, Callable, Tuple
+from typing import Literal, List, Type, Optional, Dict, Any
 import pathlib
 import snntorch
 from torch import Tensor

+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
 import torch
 import nir
+from rich.live import Live
+import rich
+from .utils import make_layout, create_progress_bar, create_content_panel


 class Benchmark:
@@ -36,25 +39,18 @@ def __init__(
         self,
         model: NeuroBenchModel,
         dataloader: Optional[DataLoader],
-        preprocessors: Optional[
-            List[
-                NeuroBenchPreProcessor
-                | Callable[[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]
-            ]
-        ],
-        postprocessors: Optional[
-            List[NeuroBenchPostProcessor | Callable[[Tensor], Tensor]]
-        ],
+        preprocessors: Optional[List[NeuroBenchPreProcessor]],
+        postprocessors: Optional[List[NeuroBenchPostProcessor]],
         metric_list: List[List[Type[StaticMetric | WorkloadMetric]]],
     ):
         """
         Args:
             model: A NeuroBenchModel.
             dataloader: A PyTorch DataLoader.
-            preprocessors: A list of NeuroBenchPreProcessors or callable functions (e.g. lambda) with matching interfaces.
-            postprocessors: A list of NeuroBenchPostProcessors or callable functions (e.g. lambda) with matching interfaces.
-            metric_list: A list of lists of StaticMetric and WorkloadMetric classes of metrics to run.
-                First item is StaticMetrics, second item is WorkloadMetrics.
+            preprocessors: A list of NeuroBenchPreProcessors.
+            postprocessors: A list of NeuroBenchPostProcessors.
+            metric_list: A list of lists of StaticMetric and WorkloadMetric classes to run.
+                First item is static metrics, second item is workload metrics.
         """

         self.model = model
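The simplified signature drops the Callable unions, so only NeuroBenchPreProcessor/NeuroBenchPostProcessor instances are typed as accepted. A hedged sketch of constructing a Benchmark against the new signature; the metric and postprocessor import paths are assumptions, and my_model/test_set are placeholders:

from torch.utils.data import DataLoader

from neurobench.benchmarks import Benchmark
# the import paths below are assumptions, not verified against this branch
from neurobench.metrics.static import Footprint
from neurobench.metrics.workload import ClassificationAccuracy
from neurobench.postprocessing import choose_max_count

benchmark = Benchmark(
    model=my_model,                   # a NeuroBenchModel (placeholder)
    dataloader=DataLoader(test_set),  # test_set is a placeholder dataset
    preprocessors=[],                 # NeuroBenchPreProcessor instances only
    postprocessors=[choose_max_count],
    metric_list=[[Footprint], [ClassificationAccuracy]],  # [static, workload]
)
results = benchmark.run(verbose=True)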
@@ -70,13 +66,8 @@ def run(
         quiet: bool = False,
         verbose: bool = False,
         dataloader: Optional[DataLoader] = None,
-        preprocessors: Optional[
-            NeuroBenchPreProcessor
-            | Callable[[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]
-        ] = None,
-        postprocessors: Optional[
-            NeuroBenchPostProcessor | Callable[[Tensor], Tensor]
-        ] = None,
+        preprocessors: Optional[NeuroBenchPreProcessor] = None,
+        postprocessors: Optional[NeuroBenchPostProcessor] = None,
         device: Optional[str] = None,
     ) -> Dict[str, Any]:
         """
@@ -96,7 +87,6 @@

         """
         with redirect_stdout(None if quiet else sys.stdout):
-            print("Running benchmark")

             self.results = None
             results = self.static_metric_manager.run_metrics(self.model)
@@ -116,40 +106,78 @@
                 self.model.__net__().to(device)

             batch_num = 0
-            for data in tqdm(dataloader, total=len(dataloader), disable=quiet):
-                # convert data to tuple
-                data = tuple(data) if not isinstance(data, tuple) else data
-
-                if device is not None:
-                    data = (data[0].to(device), data[1].to(device))
-
-                batch_size = data[0].size(0)
-
-                # Preprocessing data
-                input, target = self.processor_manager.preprocess(data)
-
-                # Run model on test data
-                preds = self.model(input)
-
-                # Postprocessing data
-                preds = self.processor_manager.postprocess(preds)
-
-                # Data metrics
-                batch_results = self.workload_metric_manager.run_metrics(
-                    self.model, preds, data, batch_size, dataset_len
-                )
-                self.workload_metric_manager.reset_hooks(self.model)
-
-                if verbose:
-                    results.update(batch_results)
-                    print(f"\nBatch num {batch_num + 1}/{len(dataloader)}")
-                    print(dict(results))
-
-                batch_num += 1
+            print("\n")
+
+            progress = create_progress_bar(total=len(dataloader))
+            layout = make_layout(verbose)
+            layout["progress"].update(Panel(progress))
+            if verbose:
+                layout["content"].update(Panel(""))
+
+            console = Console()
+            console.clear()
+
+            with Live(
+                layout,
+                refresh_per_second=10,
+                vertical_overflow="visible",
+                screen=True,  # This will clear the screen and create a new canvas
+                console=console,
+            ) as live:
+                task = progress.add_task(
+                    "[cyan]Running Benchmark...", total=len(dataloader)
+                )
+
+                for data in dataloader:
+                    # convert data to tuple
+                    data = tuple(data) if not isinstance(data, tuple) else data
+
+                    if device is not None:
+                        data = (data[0].to(device), data[1].to(device))
+
+                    batch_size = data[0].size(0)
+
+                    # Preprocessing data
+                    data = self.processor_manager.preprocess(data)
+
+                    # Run model on test data
+                    preds = self.model(data[0])
+
+                    # Postprocessing data
+                    preds = self.processor_manager.postprocess(preds)
+
+                    # Data metrics
+                    batch_results = self.workload_metric_manager.run_metrics(
+                        self.model, preds, data, batch_size, dataset_len
+                    )
+                    self.workload_metric_manager.reset_hooks(self.model)
+
+                    progress.update(task, advance=1)
+
+                    if verbose:
+                        # Create a list of panels for each metric
+                        content_panel = create_content_panel(
+                            progress, batch_results, verbose
+                        )
+                        layout["content"].update(
+                            Panel(
+                                content_panel,
+                                title="Batch History",
+                                border_style="cyan",
+                            )
+                        )
+
+                    batch_num += 1
+
+                # Keep the progress bar visible at the end without collapsing it
+                progress.update(task, completed=len(dataloader))
+                live.refresh()
+                live.stop()

             results.update(self.workload_metric_manager.results)
             self.workload_metric_manager.clean_results()
             self.results = dict(results)
+            self._show_results(console)
             return self.results

     def save_benchmark_results(
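The rewritten run() loop follows the standard rich pattern of a Layout holding a Progress panel inside a Live context. A self-contained sketch of that pattern, with a sleep standing in for per-batch inference (not the PR's exact code):

import time
from rich.console import Console
from rich.layout import Layout
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, BarColumn, TextColumn

batches = range(20)  # stand-in for a DataLoader

progress = Progress(
    TextColumn("[bold cyan]Processing[/bold cyan]"),
    BarColumn(),
    TextColumn("{task.completed}/{task.total} batches"),
)
layout = Layout()
layout.split(Layout(name="progress", size=3), Layout(name="content"))
layout["progress"].update(Panel(progress))
layout["content"].update(Panel(""))

with Live(layout, refresh_per_second=10, screen=True, console=Console()) as live:
    task = progress.add_task("Running...", total=len(batches))
    for batch in batches:
        time.sleep(0.1)  # model inference and metrics would run here
        layout["content"].update(Panel(f"last batch: {batch}", title="Batch History"))
        progress.update(task, advance=1)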
@@ -274,3 +302,38 @@ def to_onnx(self, dummy_input: Tensor, filename: str, **kwargs) -> None:
         self.model.__net__().eval()
         torch.onnx.export(self.model.__net__(), dummy_input, filename, **kwargs)
         print(f"Model exported to {filename}")
+
+    def _show_results(self, console):
+        """Print the results of the benchmark."""
+        if self.results is None:
+            raise ValueError("No results to show. Run the benchmark first.")
+
+        # Build the results table
+        table = Table(
+            title="[bold magenta]\nBenchmark Results:[/bold magenta]",
+            show_lines=True,
+            expand=True,
+            header_style="bold cyan",
+            title_justify="center",
+            title_style="bold magenta",
+            border_style="cyan",
+            box=rich.box.ROUNDED,
+        )
+
+        # Define columns
+        table.add_column("Metric", style="bold magenta", justify="left")
+        table.add_column("Value", style="bold yellow", justify="right")
+
+        # Add metrics to the table
+        for key, value in self.results.items():
+            if isinstance(value, dict):
+                table.add_section()
+                table.add_row(f"[bold green]{key}[/]", "")
+                for sub_key, sub_value in value.items():
+                    table.add_row(f"  {sub_key}", f"{sub_value}")
+            else:
+                table.add_row(key, f"{value}")
+
+        # Print the table
+        console.print(table)
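_show_results turns a results dict with nested per-metric dicts into a sectioned rich Table. The same technique in a standalone sketch, with hypothetical metric values:

from rich import box
from rich.console import Console
from rich.table import Table

# hypothetical results shaped like Benchmark.results
results = {
    "footprint": 304044,
    "classification_accuracy": {"top1": 0.91, "top5": 0.99},
}

table = Table(title="Benchmark Results", box=box.ROUNDED, show_lines=True)
table.add_column("Metric", justify="left")
table.add_column("Value", justify="right")

for key, value in results.items():
    if isinstance(value, dict):  # nested metric: header row plus one row per sub-metric
        table.add_section()
        table.add_row(f"[bold]{key}[/]", "")
        for sub_key, sub_value in value.items():
            table.add_row(f"  {sub_key}", f"{sub_value}")
    else:
        table.add_row(key, f"{value}")

Console().print(table)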
85 changes: 85 additions & 0 deletions neurobench/benchmarks/utils.py
@@ -0,0 +1,85 @@
from rich.layout import Layout
from rich.progress import (
    Progress,
    SpinnerColumn,
    BarColumn,
    TextColumn,
    TimeRemainingColumn,
)
from rich.panel import Panel
from rich.columns import Columns


def make_layout(verbose: bool = False) -> Layout:
    """Create a layout for the benchmark runner."""
    layout = Layout()
    if verbose:
        layout.split(
            Layout(name="progress", size=3),
            Layout(name="content"),
        )
    else:
        layout.split(
            Layout(name="progress", size=3),
        )

    return layout


def create_progress_bar(total: int) -> Progress:
    """Create a progress bar."""
    progress = Progress(
        SpinnerColumn(spinner_name="dots"),
        TextColumn("[bold cyan]Processing[/bold cyan]"),
        BarColumn(bar_width=40, style="magenta", complete_style="cyan"),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        TextColumn("•"),
        TextColumn("[bold blue]{task.completed}/{task.total}[/bold blue] batches"),
        TextColumn("•"),
        TimeRemainingColumn(),
        expand=True,
        transient=False,
    )

    return progress


def create_content_panel(progress, batch_results, verbose):
    """Render the latest batch results as nested rich panels."""
    panels = []
    if verbose:
        for key, value in batch_results.items():
            if isinstance(value, dict):
                sub_panels = [
                    Panel(
                        f"Value: {sub_value}",
                        title=sub_key,
                        border_style="green",
                    )
                    for sub_key, sub_value in value.items()
                ]
                panels.append(
                    Panel(
                        Columns(sub_panels),
                        title=key,
                        border_style="blue",
                    )
                )
            else:
                panels.append(
                    Panel(
                        f"Value: {value}",
                        title=key,
                        border_style="blue",
                    )
                )
    # Create a structured panel for the content section
    content_panel = Panel(
        Columns(panels),
        title=f"Processing batch {progress.tasks[0].completed}/{progress.tasks[0].total}",
        title_align="left",
        border_style="cyan",
    )

    # Return the panel holding the history of metric panels
    return content_panel
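A minimal sketch of wiring the three helpers together, assuming a neurobench checkout with this PR applied; the batch_results contents are hypothetical placeholders:

from rich.console import Console
from rich.live import Live
from rich.panel import Panel

from neurobench.benchmarks.utils import (
    make_layout,
    create_progress_bar,
    create_content_panel,
)

progress = create_progress_bar(total=5)
layout = make_layout(verbose=True)
layout["progress"].update(Panel(progress))
layout["content"].update(Panel(""))

with Live(layout, refresh_per_second=10, console=Console()):
    task = progress.add_task("demo", total=5)
    for _ in range(5):
        # per-batch metrics, shaped like the workload metric manager's output
        batch_results = {"accuracy": 0.9, "synaptic_ops": {"Effective_MACs": 1234}}
        panel = create_content_panel(progress, batch_results, verbose=True)
        layout["content"].update(Panel(panel, title="Batch History"))
        progress.update(task, advance=1)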