import argparse
import importlib.util
import json
import math
import os
import sys
import tempfile
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Optional

HERE = Path(__file__).resolve().parent
RESOURCES_DIR = HERE / "resources"
# Make the bundled helpers in resources/ importable below.
sys.path.insert(0, str(RESOURCES_DIR))

# benchmark and baseline live in resources/ and resolve via the sys.path entry
# above; importing torch and triton here surfaces a missing GPU stack early.
from benchmark import run_benchmark
from baseline import cross_entropy as baseline_cross_entropy
import torch
import triton

DEFAULT_SPEC = RESOURCES_DIR / "submission_spec.json"
ARTIFACT_PATH = Path("./output_ans").resolve()
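
# The submitted solution communicates with this evaluator through a single JSON
# artifact written to ARTIFACT_PATH. It must contain either the generated kernel
# source under a "code" key or a file location under a "program_path" key;
# load_cross_entropy_from_artifact() accepts exactly these two layouts.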


def load_solution_module(solution_path: Path) -> ModuleType:
    """Load the solution module from the given path."""
    if not solution_path.exists():
        raise FileNotFoundError(f"solution.py not found at {solution_path}")
    spec = importlib.util.spec_from_file_location("submitted_solution", solution_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Failed to load spec for {solution_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


def materialize_artifact(result: Any, solution_path: Path) -> Path:
    """Materialize the solution result into the JSON artifact file."""
    ARTIFACT_PATH.parent.mkdir(parents=True, exist_ok=True)
    if isinstance(result, dict):
        with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
            json.dump(result, fout)
        return ARTIFACT_PATH
    if isinstance(result, str):
        # Short, single-line strings may be a path to an existing program file.
        is_possible_path = len(result) < 4096 and "\n" not in result
        if is_possible_path:
            candidate = Path(result)
            try:
                if candidate.is_file():
                    with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
                        json.dump({"program_path": str(candidate.resolve())}, fout)
                    return ARTIFACT_PATH
            except OSError:
                # The string could not be checked as a path; fall through and
                # treat it as source code.
                pass
        # Otherwise treat the string as kernel source and wrap it so the
        # artifact stays valid JSON for load_cross_entropy_from_artifact().
        with ARTIFACT_PATH.open("w", encoding="utf-8") as fout:
            json.dump({"code": result}, fout)
        return ARTIFACT_PATH
    raise TypeError(
        "Solution.solve() must return a dict/path-string/code-string; got "
        f"{type(result)!r}."
    )
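
# Illustrative artifact layouts (the cross_entropy body is a placeholder):
#   {"code": "def cross_entropy(...):\n    ..."}
#   {"program_path": "/absolute/path/to/kernel.py"}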


def load_cross_entropy_from_artifact(artifact_path: Path) -> Any:
    """Load the cross_entropy function described by the artifact."""
    with artifact_path.open("r", encoding="utf-8") as fin:
        artifact = json.load(fin)

    if "code" in artifact:
        # Write the submitted source to a temporary file so it can be imported
        # like a regular module, then clean the file up afterwards.
        temp_file = None
        try:
            with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
                f.write(artifact["code"])
                temp_file = f.name

            spec = importlib.util.spec_from_file_location("temp_cross_entropy_module", temp_file)
            if spec is None or spec.loader is None:
                raise ImportError(f"Failed to load spec for {temp_file}")
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            if not hasattr(module, "cross_entropy"):
                raise ValueError("Code must define a 'cross_entropy' function")

            return module.cross_entropy
        finally:
            if temp_file is not None:
                try:
                    os.unlink(temp_file)
                except OSError:
                    pass

    if "program_path" in artifact:
        program_path = Path(artifact["program_path"])
        if not program_path.exists():
            raise FileNotFoundError(f"Program file not found: {program_path}")

        spec = importlib.util.spec_from_file_location("submitted_program", program_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Failed to load spec for {program_path}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        if not hasattr(module, "cross_entropy"):
            raise ValueError("Program must define a 'cross_entropy' function")
        return module.cross_entropy

    raise ValueError("Artifact must contain either 'code' or 'program_path'")


def evaluate_kernel_performance(cross_entropy_func: Any, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Evaluate the correctness and performance of a Triton kernel implementation."""
    try:
        result = run_benchmark(cross_entropy_func, baseline_cross_entropy, print_output=False, metadata=metadata)
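        # The benchmark result is expected to expose aggregate speedups
        # ("geometric_mean_speedup", "arithmetic_mean_speedup", "median_speedup"),
        # per-test "rows" with a "close_passed" flag, an overall "pass_all" flag,
        # and optionally geometric-mean wall times ("geo_mean_*_time"); the code
        # below reads exactly these keys.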

        geometric_mean_speedup = result["geometric_mean_speedup"]
        arithmetic_mean_speedup = result["arithmetic_mean_speedup"]
        median_speedup = result["median_speedup"]
        pass_all = result["pass_all"]

        if not pass_all:
            return {
                "error": "Correctness not 100% across all tests",
                "geometric_mean_speedup": geometric_mean_speedup,
                "arithmetic_mean_speedup": arithmetic_mean_speedup,
                "median_speedup": median_speedup,
                "score": 0,
                "pass_all": False,
                "total_tests": len(result["rows"]),
                "passed_tests": sum(1 for r in result["rows"] if r["close_passed"]),
            }
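
        # Scoring: when geometric-mean wall times are available, interpolate
        # linearly between the baseline GPU time (score 0) and one third of it,
        # i.e. a 3x speedup (score 100). Otherwise fall back to mapping the
        # geometric-mean speedup from the 1.0x-3.0x range onto 0-100.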
        geo_mean_cpu_time = result.get("geo_mean_cpu_time", 0.0)
        geo_mean_gpu_time = result.get("geo_mean_gpu_time", 0.0)
        geo_mean_answer_time = result.get("geo_mean_answer_time", 0.0)

        if geo_mean_cpu_time > 0 and geo_mean_gpu_time > 0 and geo_mean_answer_time > 0:
            target_time_100 = geo_mean_gpu_time / 3.0
            target_time_0 = geo_mean_gpu_time

            if geo_mean_answer_time >= target_time_0:
                score = 0.0
            elif geo_mean_answer_time <= target_time_100:
                score = 100.0
            else:
                score = 100.0 * (target_time_0 - geo_mean_answer_time) / (target_time_0 - target_time_100)
        else:
            raw_score = min(geometric_mean_speedup, 3.0)
            score = max(0, (raw_score - 1.0) / 2.0 * 100)

        return {
            "geometric_mean_speedup": geometric_mean_speedup,
            "arithmetic_mean_speedup": arithmetic_mean_speedup,
            "median_speedup": median_speedup,
            "score": score,
            "pass_all": pass_all,
            "total_tests": len(result["rows"]),
            "passed_tests": sum(1 for r in result["rows"] if r["close_passed"]),
        }

    except Exception as e:
        return {
            "error": str(e),
            "score": 0,
            "pass_all": False,
        }


def evaluate(solution_path: Path, spec_path: Path) -> dict:
    """Main evaluation function: load the solution, run it, and score the result."""
    try:
        module = load_solution_module(solution_path)

        if not hasattr(module, "Solution"):
            raise ValueError("Solution module must define a 'Solution' class")

        solution_class = module.Solution
        solution_instance = solution_class()

        if not hasattr(solution_instance, "solve"):
            raise ValueError("Solution class must have a 'solve' method")

        # Optional benchmark metadata may be carried by the spec file.
        metadata = None
        if spec_path.exists():
            with spec_path.open("r", encoding="utf-8") as f:
                spec = json.load(f)
            metadata = spec.get("metadata", None)

        result = solution_instance.solve(str(spec_path))
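
        # solve() is handed the spec path as a string and may return a dict,
        # a path to a generated program, or raw kernel source; see
        # materialize_artifact() for how each form is persisted.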

        artifact_path = materialize_artifact(result, solution_path)
        cross_entropy_func = load_cross_entropy_from_artifact(artifact_path)
        evaluation_result = evaluate_kernel_performance(cross_entropy_func, metadata=metadata)

        return {
            "status": "success",
            "artifact_path": str(artifact_path),
            **evaluation_result,
        }

    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "score": 0,
        }


def main():
    parser = argparse.ArgumentParser(description="Evaluate Cross Entropy optimization solutions")
    parser.add_argument(
        "--solution-path",
        type=Path,
        default=Path("./solution.py"),
        help="Path to solution.py file",
    )
    parser.add_argument(
        "--spec-path",
        type=Path,
        default=DEFAULT_SPEC,
        help="Path to specification file",
    )
    parser.add_argument(
        "--output-path",
        type=Path,
        default=Path("./result.json"),
        help="Path to output result file",
    )

    args = parser.parse_args()
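
    # Example invocation (substitute this file's actual name for evaluate.py):
    #   python evaluate.py --solution-path ./solution.py --output-path ./result.json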

    result = evaluate(args.solution_path, args.spec_path)

    with args.output_path.open("w", encoding="utf-8") as fout:
        json.dump(result, fout, indent=2)

    # The last value printed to stdout is always the numeric score
    # ("0" plus a non-zero exit code when evaluation fails).
    if result["status"] == "success":
        print("Evaluation completed successfully!")
        print(f"Score: {result['score']:.2f}/100")
        if "error" in result:
            print(f"Error: {result['error']}")
        if "geometric_mean_speedup" in result:
            print(f"Geometric mean speedup: {result['geometric_mean_speedup']:.3f}x")
        if "passed_tests" in result and "total_tests" in result:
            print(f"Tests passed: {result['passed_tests']}/{result['total_tests']}")
        print(result["score"])
    else:
        print(f"Evaluation failed: {result['error']}")
        print("0")
        sys.exit(1)


if __name__ == "__main__":
    main()