Formats: parquet
Languages: English
Size: 10K - 100K
Tags: video-understanding, multi-evidence-reasoning, long-video, temporal-reasoning, spatial-reasoning, video-qa
License: CC-BY-NC-SA-4.0
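
A minimal loading sketch (the Hub repo id `DanBenAmi/HERBench` is an assumption based on the homepage; adjust it to the actual repo id, and note that script-based builders require a `datasets` version that still supports them):

    import datasets

    # "lite" is the default config; switch to name="full" for the complete benchmark.
    ds = datasets.load_dataset(
        "DanBenAmi/HERBench", name="lite", split="test", trust_remote_code=True
    )
    print(ds)
    print(ds[0]["question"], ds[0]["choices"])

The loading script below (`herbench.py`) is what the Hub runs to build this schema.
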
| """ | |
| HERBench Hugging Face Datasets loading script. | |
| Why this file exists: | |
| - Hugging Face Dataset Viewer auto-parses JSON files if no loading script is detected. | |
| - Auto-parsing uses pandas->pyarrow inference and can fail when nested fields (like `metadata`) | |
| have inconsistent shapes across rows (common in multi-task benchmarks). | |
| - By providing a proper datasets loading script named after the repo (`herbench.py` for HERBench), | |
| the Hub will use this builder instead, with an explicit, stable schema. | |
| This script ensures streaming compatibility and robust schema handling for the Dataset Viewer. | |
| """ | |
from __future__ import annotations

import json
from typing import Any, Dict, Iterator

import datasets

_DESCRIPTION = """\
HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering.

This dataset contains multiple-choice questions grounded in long videos and designed to
require integration of multiple temporally separated cues (high evidential requirement).
"""

_HOMEPAGE = "https://github.com/DanBenAmi/HERBench"

_LICENSE = "CC-BY-NC-SA-4.0"

_CITATION = """\
@article{herbench2025,
  title={HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering},
  author={Ben-Ami, Dan and Serussi, Gabriele and Cohen, Kobi and Baskin, Chaim},
  journal={arXiv preprint arXiv:XXXX.XXXXX},
  year={2025}
}
"""

_VERSION = "1.0.3"


class HERBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for HERBench."""


class HERBench(datasets.GeneratorBasedBuilder):
    """HERBench Dataset: Multi-Evidence Integration in Video QA."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        HERBenchConfig(
            name="full",
            version=VERSION,
            description="Full HERBench dataset (27,936 questions; 335 videos; ~161GB).",
        ),
        HERBenchConfig(
            name="lite",
            version=VERSION,
            description="HERBench-Lite subset (~5,600 questions; ~67 videos; ~35GB) for quick prototyping.",
        ),
    ]

    # Make the Hub viewer default to the smaller config (faster and less error-prone).
    DEFAULT_CONFIG_NAME = "lite"
    def _info(self) -> datasets.DatasetInfo:
        """
        Define the dataset schema with strict, stable types for all fields.

        IMPORTANT: Keep features stable across all rows.
        `metadata` in the raw JSON varies by task (different keys / nested lists).
        To keep the schema consistent for Arrow + the Dataset Viewer:
        - Expose common metadata fields as flat, typed columns
        - Store the full raw metadata dict as a JSON string in `metadata_json`
        """
        features = datasets.Features(
            {
                # Core fields
                "question_id": datasets.Value("string"),
                "video_id": datasets.Value("string"),
                "video_path": datasets.Value("string"),
                "question": datasets.Value("string"),
                "choices": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "answer_index": datasets.Value("int32"),
                "answer_text": datasets.Value("string"),
                "task_type": datasets.Value("string"),
                # Common metadata fields (flat, typed)
                "source_dataset": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "resolution": datasets.Value("string"),
                # Full raw metadata as a JSON string (stable schema)
                "metadata_json": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
        )
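    # Illustrative consumer-side note (an assumption about downstream use, not builder logic):
    # the full nested metadata of a row can be recovered with
    #     json.loads(example["metadata_json"])
    # where `example` is a hypothetical row loaded from this dataset.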
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """
        Define the data splits. Only the JSON annotations are downloaded, which keeps
        streaming efficient; videos are referenced by path but are not downloaded
        during viewer loading.
        """
        if self.config.name == "lite":
            annotations_file = "data/herbench_annotations_lite.json"
        else:
            annotations_file = "data/herbench_annotations.json"

        # Download only the annotations (not the videos) for Dataset Viewer efficiency.
        data_files = dl_manager.download({"annotations": annotations_file})

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"annotations_file": data_files["annotations"]},
            )
        ]
    def _generate_examples(self, annotations_file: str) -> Iterator[tuple[int, Dict[str, Any]]]:
        """
        Yield examples from the annotations file with robust type handling.

        This method ensures:
        - streaming compatibility (examples are yielded one at a time)
        - a stable schema (all fields have consistent types)
        - defensive parsing (missing or malformed fields fall back to safe defaults)
        """
        with open(annotations_file, encoding="utf-8") as f:
            annotations = json.load(f)

        for idx, ann in enumerate(annotations):
            # Extract and normalize metadata; ensure it is always a dict.
            md = ann.get("metadata")
            if not isinstance(md, dict):
                md = {}

            # Extract common metadata fields with defaults.
            source_dataset = md.get("source_dataset", "unknown")
            duration = md.get("duration", 0.0)
            resolution = md.get("resolution", "unknown")

            # Normalize numeric types to avoid Arrow type-inference issues.
            try:
                duration_f = float(duration) if duration is not None else 0.0
            except (ValueError, TypeError):
                duration_f = 0.0

            # Normalize the choices field.
            choices = ann.get("choices", [])
            if not isinstance(choices, list):
                choices = []

            # Normalize answer_index.
            answer_index = ann.get("answer_index", 0)
            try:
                answer_index = int(answer_index) if answer_index is not None else 0
            except (ValueError, TypeError):
                answer_index = 0

            yield idx, {
                "question_id": str(ann.get("question_id", f"HER_{idx:06d}")),
                "video_id": str(ann.get("video_id", "")),
                "video_path": str(ann.get("video_path", "")),
                "question": str(ann.get("question", "")),
                "choices": [str(x) for x in choices],
                "answer": str(ann.get("answer", "")),
                "answer_index": answer_index,
                "answer_text": str(ann.get("answer_text", "")),
                "task_type": str(ann.get("task_type", "unknown")),
                "source_dataset": str(source_dataset),
                "duration": duration_f,
                "resolution": str(resolution),
                "metadata_json": json.dumps(md, ensure_ascii=False),
            }
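

# ---------------------------------------------------------------------------
# Optional local smoke test: an illustrative sketch, not required by the Hub.
# Assumptions: this script sits next to the `data/` folder containing the
# annotation JSON files, and the installed `datasets` version still supports
# script-based builders (load_dataset with a .py path).
if __name__ == "__main__":
    demo = datasets.load_dataset(
        __file__, name="lite", split="test", trust_remote_code=True
    )
    print(demo)
    print(demo[0]["question_id"], demo[0]["task_type"], demo[0]["question"])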