| """ | |
| HERBench Dataset Loader for Hugging Face | |
| This module provides a Hugging Face datasets loader for HERBench, a benchmark for | |
| multi-evidence integration in video question answering. | |
| Usage: | |
| # Option 1: Load via Hugging Face datasets library | |
| from datasets import load_dataset | |
| dataset = load_dataset("DanBenAmi/HERBench") | |
| print(dataset['test'][0]) | |
| # Option 2: Load locally | |
| from datasets import load_dataset | |
| dataset = load_dataset("path/to/HERBench/herbench_loader.py") | |
| Example: | |
| >>> from datasets import load_dataset | |
| >>> dataset = load_dataset("DanBenAmi/HERBench") | |
| >>> sample = dataset['test'][0] | |
| >>> print(sample['question']) | |
| >>> print(sample['choices']) | |
| >>> print(sample['answer']) | |
| For more information, visit: | |
| - GitHub: https://github.com/DanBenAmi/HERBench | |
| - Paper: https://arxiv.org/abs/XXXX.XXXXX (coming soon) | |
| - Project Page: https://danbenami.github.io/herbench (coming soon) | |
| """ | |
import json
from collections import Counter
from typing import Dict, List, Optional

import datasets
_DESCRIPTION = """\
HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering

HERBench is a challenging benchmark designed to evaluate vision-language models on
multi-evidence integration in long videos. Unlike existing benchmarks, where questions
can often be answered from single frames, HERBench enforces a High Evidential
Requirement (ER): each question requires aggregating at least k ≥ 3 distinct,
temporally separated visual cues.

Key Features:
- 27,936 five-way multiple-choice questions (Full) or ~5,600 questions (Lite)
- 335 unique videos (Full) or ~67 videos (Lite)
- Average video length of 395 seconds (6.6 minutes)
- 12 compositional task types covering temporal, spatial, and causal reasoning
- Mean Minimum Required Frame-Set (MRFS) of 5.49
- Questions designed to prevent single-frame shortcuts
- Comprehensive evaluation of multi-evidence reasoning capabilities

Available in two versions:
- Full: 27,936 questions, 335 videos (~161 GB) - Complete benchmark
- Lite: ~5,600 questions, ~67 videos (~35 GB) - 20% subset for quick prototyping

The benchmark includes videos from diverse sources:
- WildTrack: Multi-camera pedestrian tracking scenes
- HD-EPIC: First-person egocentric videos of daily activities
- PersonPath22: Person tracking in various environments
- Movie Trailers: Narrative story understanding

Each question is carefully designed to require:
1. Multiple pieces of evidence (k ≥ 3 frames)
2. Temporal separation between evidence frames
3. Compositional reasoning across evidence
4. Integration of visual information from different moments
"""
_HOMEPAGE = "https://github.com/DanBenAmi/HERBench"

_LICENSE = "CC-BY-NC-SA-4.0"

_CITATION = """\
@article{herbench2025,
  title={HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering},
  author={Ben-Ami, Dan and Serussi, Gabriele and Cohen, Kobi and Baskin, Chaim},
  journal={arXiv preprint arXiv:XXXX.XXXXX},
  year={2025}
}
"""

_VERSION = "1.0.0"
class HERBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for HERBench."""

    def __init__(self, **kwargs):
        """BuilderConfig for HERBench.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
class HERBench(datasets.GeneratorBasedBuilder):
    """HERBench Dataset: Multi-Evidence Integration in Video QA."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        HERBenchConfig(
            name="full",
            version=VERSION,
            description="Full HERBench dataset with all 27,936 questions and 335 videos (~161 GB)",
        ),
        HERBenchConfig(
            name="lite",
            version=VERSION,
            description="HERBench-Lite: 20% subset with ~5,600 questions and ~67 videos (~35 GB)",
        ),
    ]

    DEFAULT_CONFIG_NAME = "full"
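
    # Selecting a config follows the standard `datasets` API: pass the config
    # name as the second positional argument. A usage sketch (not specific to
    # this loader):
    #
    #     from datasets import load_dataset
    #     full = load_dataset("DanBenAmi/HERBench", "full")  # default config
    #     lite = load_dataset("DanBenAmi/HERBench", "lite")  # ~20% subset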
    def _info(self):
        """Specify the datasets.DatasetInfo object."""
        features = datasets.Features({
            "question_id": datasets.Value("string"),
            "video_id": datasets.Value("string"),
            "video_path": datasets.Value("string"),
            "question": datasets.Value("string"),
            "choices": datasets.Sequence(datasets.Value("string")),
            "answer": datasets.Value("string"),
            "answer_index": datasets.Value("int32"),
            "answer_text": datasets.Value("string"),
            "task_type": datasets.Value("string"),
            "metadata": datasets.Features({
                "source_dataset": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "resolution": datasets.Value("string"),
                "evidence_count": datasets.Value("int32"),
                "difficulty": datasets.Value("string"),
            }),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
        )
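
    # A loaded sample mirrors this schema, with `metadata` surfacing as a
    # nested dict. A sketch of field access (illustrative, not verbatim data):
    #
    #     sample = dataset["test"][0]
    #     print(sample["question"], sample["choices"])
    #     print(sample["metadata"]["duration"], sample["metadata"]["source_dataset"])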
    def _split_generators(self, dl_manager):
        """Return SplitGenerators."""
        # Determine which annotation file to use based on the config
        if self.config.name == "lite":
            annotations_file = "data/herbench_annotations_lite.json"
        else:
            annotations_file = "data/herbench_annotations.json"

        # Download the data files; relative paths are resolved against the
        # dataset repository (on the Hub or in a local copy)
        data_files = dl_manager.download({
            "annotations": annotations_file,
            "task_metadata": "data/task_metadata.json",
            "video_metadata": "data/video_metadata.json",
        })

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotations_file": data_files["annotations"],
                    "task_metadata_file": data_files["task_metadata"],
                    "video_metadata_file": data_files["video_metadata"],
                },
            ),
        ]
    def _generate_examples(self, annotations_file, task_metadata_file, video_metadata_file):
        """Yield examples as (key, example) tuples."""
        # Load annotations (the task/video metadata files arrive via
        # gen_kwargs but are not needed to build the examples themselves)
        with open(annotations_file, encoding="utf-8") as f:
            annotations = json.load(f)

        # Defaults for any missing metadata fields
        metadata_defaults = {
            "source_dataset": "unknown",
            "duration": 0.0,
            "resolution": "unknown",
            "evidence_count": 0,
            "difficulty": "unknown",
        }

        # Yield each annotation, filling in missing metadata fields
        for idx, annotation in enumerate(annotations):
            metadata = annotation.setdefault("metadata", {})
            for key, default in metadata_defaults.items():
                metadata.setdefault(key, default)

            yield idx, {
                "question_id": annotation.get("question_id", f"HER_{idx:06d}"),
                "video_id": annotation.get("video_id", ""),
                "video_path": annotation.get("video_path", ""),
                "question": annotation.get("question", ""),
                "choices": annotation.get("choices", []),
                "answer": annotation.get("answer", ""),
                "answer_index": int(annotation.get("answer_index", 0)),
                "answer_text": annotation.get("answer_text", ""),
                "task_type": annotation.get("task_type", "unknown"),
                "metadata": metadata,
            }
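
# For reference, each record in the annotations JSON is expected to look
# roughly like the following. This is an illustrative sketch inferred from the
# fields read above, not a verbatim sample:
#
#     {
#         "question_id": "HER_000001",
#         "video_id": "cam2_segment_4_180s_240s",
#         "video_path": "videos/cam2_segment_4_180s_240s.mp4",
#         "question": "...",
#         "choices": ["...", "...", "...", "...", "..."],
#         "answer": "A",
#         "answer_index": 0,
#         "answer_text": "...",
#         "task_type": "temporal_reasoning",
#         "metadata": {"source_dataset": "WildTrack", "duration": 60.0,
#                      "resolution": "1920x1080", "evidence_count": 5,
#                      "difficulty": "hard"}
#     }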
# Example usage and helper functions

def load_herbench(cache_dir: Optional[str] = None) -> datasets.DatasetDict:
    """
    Load the HERBench dataset using the Hugging Face datasets library.

    Args:
        cache_dir: Optional directory to cache the dataset.

    Returns:
        DatasetDict with a 'test' split containing all questions.

    Example:
        >>> dataset = load_herbench()
        >>> print(f"Total questions: {len(dataset['test'])}")
        >>> print(dataset['test'][0])
    """
    return datasets.load_dataset(
        "DanBenAmi/HERBench",
        cache_dir=cache_dir,
    )
def get_questions_by_task(dataset, task_type: str) -> List[Dict]:
    """
    Filter questions by task type.

    Args:
        dataset: HERBench dataset or test split.
        task_type: Task type to filter (e.g., 'temporal_reasoning').

    Returns:
        List of questions matching the task type.

    Example:
        >>> dataset = load_herbench()
        >>> temporal_qs = get_questions_by_task(dataset['test'], 'temporal_reasoning')
        >>> print(f"Temporal reasoning questions: {len(temporal_qs)}")
    """
    if isinstance(dataset, datasets.DatasetDict):
        dataset = dataset['test']
    return [q for q in dataset if q['task_type'] == task_type]
def get_questions_by_video(dataset, video_id: str) -> List[Dict]:
    """
    Get all questions for a specific video.

    Args:
        dataset: HERBench dataset or test split.
        video_id: Video identifier.

    Returns:
        List of questions for the specified video.

    Example:
        >>> dataset = load_herbench()
        >>> video_qs = get_questions_by_video(dataset['test'], 'cam2_segment_4_180s_240s')
        >>> print(f"Questions for video: {len(video_qs)}")
    """
    if isinstance(dataset, datasets.DatasetDict):
        dataset = dataset['test']
    return [q for q in dataset if q['video_id'] == video_id]
def print_sample(sample: Dict) -> None:
    """
    Pretty-print a sample from the dataset.

    Args:
        sample: A single sample from HERBench.

    Example:
        >>> dataset = load_herbench()
        >>> print_sample(dataset['test'][0])
    """
    duration = sample['metadata'].get('duration', 0.0)
    print(f"Question ID: {sample['question_id']}")
    print(f"Video: {sample['video_id']} ({duration:.1f}s)")
    print(f"Resolution: {sample['metadata'].get('resolution', 'unknown')}")
    print(f"Task: {sample['task_type']}")
    print(f"\nQuestion: {sample['question']}")
    print("\nChoices:")
    for i, choice in enumerate(sample['choices']):
        marker = "→" if i == sample['answer_index'] else " "
        print(f"  {marker} {choice}")
    print(f"\nCorrect Answer: {sample['answer']} (index: {sample['answer_index']})")
    if sample.get('answer_text'):
        print(f"Answer Text: {sample['answer_text']}")
    print(f"Source: {sample['metadata']['source_dataset']}")
    print("-" * 60)
if __name__ == "__main__":
    # Example usage when run as a script
    print("Loading HERBench dataset...")
    dataset = load_herbench()

    print("\nDataset loaded successfully!")
    print(f"Total questions: {len(dataset['test'])}")

    print("\nFirst sample:")
    print_sample(dataset['test'][0])

    # Show task distribution
    task_counts = Counter(q['task_type'] for q in dataset['test'])
    print("\nTask distribution:")
    for task, count in task_counts.most_common():
        print(f"  {task}: {count}")