DanBenAmi commited on
Commit
d5b91b2
·
1 Parent(s): a6c22d4

Add HERBench.py (capitalized) for HF dataset loader

Browse files

HuggingFace expects the loading script filename to match the dataset name.
Since the repo is DanBenAmi/HERBench, the script should be HERBench.py.

Keeping both herbench.py (lowercase) and HERBench.py (capitalized) for
compatibility.

Files changed (1) hide show
  1. HERBench.py +194 -0
HERBench.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
HERBench Hugging Face Datasets loading script.

Why this file exists:
- Hugging Face Dataset Viewer auto-parses JSON files if no loading script is detected.
- Auto-parsing uses pandas->pyarrow inference and can fail when nested fields (like `metadata`)
  have inconsistent shapes across rows (common in multi-task benchmarks).
- By providing a proper datasets loading script named after the repo (`HERBench.py` for
  DanBenAmi/HERBench), the Hub will use this builder instead, with an explicit, stable schema.

This script ensures streaming compatibility and robust schema handling for the Dataset Viewer.
"""

from __future__ import annotations

import json
from typing import Any, Dict, Iterator

import datasets


# User-facing description surfaced on the Hub dataset card.
_DESCRIPTION = """\
HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering.

This dataset contains multiple-choice questions grounded in long videos and designed to
require integration of multiple temporally separated cues (high evidential requirement).
"""

_HOMEPAGE = "https://github.com/DanBenAmi/HERBench"
_LICENSE = "CC-BY-NC-SA-4.0"

# BibTeX entry shown on the dataset page (arXiv identifier pending).
_CITATION = """\
@article{herbench2025,
  title={HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering},
  author={Ben-Ami, Dan and Serussi, Gabriele and Cohen, Kobi and Baskin, Chaim},
  journal={arXiv preprint arXiv:XXXX.XXXXX},
  year={2025}
}
"""

# Bump this on any schema or data change so cached Arrow builds are invalidated.
_VERSION = "1.0.3"
class HERBenchConfig(datasets.BuilderConfig):
    """Configuration for one HERBench variant (e.g. the "full" or "lite" config)."""
47
+
48
+
class HERBench(datasets.GeneratorBasedBuilder):
    """Builder for HERBench: multi-evidence integration in video question answering."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        HERBenchConfig(
            name="full",
            version=VERSION,
            description="Full HERBench dataset (27,936 questions; 335 videos; ~161GB).",
        ),
        HERBenchConfig(
            name="lite",
            version=VERSION,
            description="HERBench-Lite subset (~5,600 questions; ~67 videos; ~35GB for quick prototyping).",
        ),
    ]

    # Make the Hub viewer default to the smaller config (faster and less error-prone).
    DEFAULT_CONFIG_NAME = "lite"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with an explicit, fixed schema.

        The raw `metadata` dict varies per task (different keys / nested lists),
        which breaks Arrow type inference. To keep the schema stable for the
        Dataset Viewer, a few common metadata keys are promoted to flat typed
        columns and the complete dict is kept verbatim as a JSON string in
        `metadata_json`.
        """
        features = datasets.Features(
            {
                # Core fields
                "question_id": datasets.Value("string"),
                "video_id": datasets.Value("string"),
                "video_path": datasets.Value("string"),
                "question": datasets.Value("string"),
                "choices": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "answer_index": datasets.Value("int32"),
                "answer_text": datasets.Value("string"),
                "task_type": datasets.Value("string"),

                # Common metadata fields (flat, typed)
                "source_dataset": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "resolution": datasets.Value("string"),

                # Full raw metadata as JSON string (stable schema)
                "metadata_json": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Create the single TEST split.

        Only the JSON annotations are downloaded; videos are referenced by
        path but never fetched, so the Dataset Viewer stays lightweight.
        """
        annotations_file = (
            "data/herbench_annotations_lite.json"
            if self.config.name == "lite"
            else "data/herbench_annotations.json"
        )
        # Download only annotations (not videos) for Dataset Viewer efficiency.
        downloaded = dl_manager.download({"annotations": annotations_file})
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"annotations_file": downloaded["annotations"]},
            )
        ]

    @staticmethod
    def _as_float(value: Any) -> float:
        """Coerce *value* to float; fall back to 0.0 on None or malformed input."""
        if value is None:
            return 0.0
        try:
            return float(value)
        except (ValueError, TypeError):
            return 0.0

    @staticmethod
    def _as_int(value: Any) -> int:
        """Coerce *value* to int; fall back to 0 on None or malformed input."""
        if value is None:
            return 0
        try:
            return int(value)
        except (ValueError, TypeError):
            return 0

    def _generate_examples(self, annotations_file: str) -> Iterator[tuple[int, Dict[str, Any]]]:
        """Yield `(key, example)` pairs from the annotations file.

        Guarantees streaming compatibility (one example at a time), a stable
        schema (every field coerced to its declared type), and defensive
        handling of missing or malformed fields.
        """
        with open(annotations_file, encoding="utf-8") as fh:
            records = json.load(fh)

        for key, record in enumerate(records):
            # Metadata may be absent or non-dict in raw JSON; normalize to {}.
            raw_meta = record.get("metadata")
            if not isinstance(raw_meta, dict):
                raw_meta = {}

            # `choices` must always be a list of strings for the Sequence feature.
            options = record.get("choices", [])
            if not isinstance(options, list):
                options = []

            yield key, {
                "question_id": str(record.get("question_id", f"HER_{key:06d}")),
                "video_id": str(record.get("video_id", "")),
                "video_path": str(record.get("video_path", "")),
                "question": str(record.get("question", "")),
                "choices": [str(opt) for opt in options],
                "answer": str(record.get("answer", "")),
                "answer_index": self._as_int(record.get("answer_index", 0)),
                "answer_text": str(record.get("answer_text", "")),
                "task_type": str(record.get("task_type", "unknown")),
                "source_dataset": str(raw_meta.get("source_dataset", "unknown")),
                "duration": self._as_float(raw_meta.get("duration", 0.0)),
                "resolution": str(raw_meta.get("resolution", "unknown")),
                "metadata_json": json.dumps(raw_meta, ensure_ascii=False),
            }