DanBenAmi and Claude Sonnet 4.5 committed
Commit 89563c9 · 1 Parent(s): 38f2cd8

Fix Dataset Viewer and clean up repository


- Update herbench.py to v1.0.2 with robust schema handling
  - Fix metadata field inconsistencies across task types
  - Add proper type normalization for all fields
  - Improve streaming compatibility for Dataset Viewer
  - Store full metadata as JSON string in metadata_json field

- Update README.md with Dataset Viewer information
  - Add note about herbench.py loading script
  - Document stable schema handling

- Update .gitignore
  - Add DEVELOPMENT.md to ignore list (local dev docs only)
  - Keep scripts/ ignored (not needed in HF dataset card)

This commit ensures the HF Dataset Viewer works properly and keeps
the repository clean and professional for the public dataset card.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <[email protected]>
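The core of the fix is the pattern named in the first bullet group: metadata whose keys vary by task cannot be given a single Arrow schema, so common fields are promoted to flat, typed columns and the remainder is serialized to a string. A minimal illustrative sketch of that pattern (the sample rows below are invented for demonstration, not taken from the dataset):

```python
import json

# Invented sample rows: metadata keys differ by task, which breaks
# Arrow's schema inference if stored as a raw dict column.
rows = [
    {"task_type": "counting", "metadata": {"evidence_count": 3, "duration": 12.5}},
    {"task_type": "ordering", "metadata": {"segments": [[0.0, 4.2], [7.1, 9.0]]}},
]

records = [
    {
        "task_type": r["task_type"],
        # Common fields become flat, typed columns with safe defaults...
        "evidence_count": int(r["metadata"].get("evidence_count", 0)),
        "duration": float(r["metadata"].get("duration", 0.0)),
        # ...while the full dict survives as one stable string column.
        "metadata_json": json.dumps(r["metadata"]),
    }
    for r in rows
]

print(records[1]["metadata_json"])  # {"segments": [[0.0, 4.2], [7.1, 9.0]]}
```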

Files changed (3)
  1. .gitignore +1 -0
  2. README.md +2 -0
  3. herbench.py +60 -23
.gitignore CHANGED
```diff
@@ -39,6 +39,7 @@ PROJECT_SUMMARY.md
 QUICK_START.txt
 USAGE_INSTRUCTIONS.md
 upload_videos.sh
+DEVELOPMENT.md
 
 # Ignore README files in subdirectories
 assets/README.md
```
README.md CHANGED
```diff
@@ -194,6 +194,8 @@ HERBench/
 
 **Archive Structure:** Videos are organized so that Lite videos are in the first archive parts (00-03), and Full-only videos are in the remaining parts. This allows efficient downloading of either version without duplication.
 
+**Dataset Viewer:** The HF Dataset Viewer uses [herbench.py](herbench.py) to load and preview the dataset. The script defines a stable schema that handles the varying metadata structures across different task types, ensuring efficient streaming and compatibility with Arrow/Parquet format.
+
 ---
 
 ### Annotation Format
```
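For readers of the dataset card, the streaming path this README note refers to can also be exercised locally. A minimal sketch, assuming the repo id `DanBenAmi/HERBench` (inferred from the committer, not stated in the diff) and a split named `test`:

```python
from datasets import load_dataset

# Streaming fetches only the JSON annotations, mirroring what the
# loading script does for the Dataset Viewer (videos are never downloaded).
stream = load_dataset(
    "DanBenAmi/HERBench",    # assumed repo id
    "lite",                  # default config per herbench.py
    split="test",            # split name assumed; check the dataset card
    streaming=True,
    trust_remote_code=True,  # required for script-backed datasets in recent `datasets`
)

for example in stream.take(3):
    print(example["question_id"], example["task_type"], example["duration"])
```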
herbench.py CHANGED
```diff
@@ -7,12 +7,14 @@ Why this file exists:
   have inconsistent shapes across rows (common in multi-task benchmarks).
 - By providing a proper datasets loading script named after the repo (`herbench.py` for HERBench),
   the Hub will use this builder instead, with an explicit, stable schema.
+
+This script ensures streaming compatibility and robust schema handling for the Dataset Viewer.
 """
 
 from __future__ import annotations
 
 import json
-from typing import Any, Dict, Iterator, Optional
+from typing import Any, Dict, Iterator
 
 import datasets
 
@@ -36,11 +38,12 @@ _CITATION = """\
 }
 """
 
-_VERSION = "1.0.1"
+_VERSION = "1.0.2"
 
 
 class HERBenchConfig(datasets.BuilderConfig):
     """BuilderConfig for HERBench."""
+    pass
 
 
 class HERBench(datasets.GeneratorBasedBuilder):
@@ -52,12 +55,12 @@ class HERBench(datasets.GeneratorBasedBuilder):
         HERBenchConfig(
             name="full",
             version=VERSION,
-            description="Full HERBench dataset (all questions; large video collection).",
+            description="Full HERBench dataset (27,936 questions; 335 videos; ~161GB).",
         ),
         HERBenchConfig(
             name="lite",
             version=VERSION,
-            description="HERBench-Lite subset (smaller for quick prototyping and the Dataset Viewer).",
+            description="HERBench-Lite subset (~5,600 questions; ~67 videos; ~35GB for quick prototyping).",
         ),
     ]
 
@@ -65,14 +68,18 @@ class HERBench(datasets.GeneratorBasedBuilder):
     DEFAULT_CONFIG_NAME = "lite"
 
     def _info(self) -> datasets.DatasetInfo:
-        # IMPORTANT: Keep features stable across all rows.
-        #
-        # `metadata` in the raw JSON varies by task (different keys / nested lists).
-        # To keep the schema consistent for Arrow + Dataset Viewer:
-        #   - expose a few common metadata fields as flat columns
-        #   - store the full raw metadata dict as a JSON string in `metadata_json`
+        """
+        Define the dataset schema with strict, stable types for all fields.
+
+        IMPORTANT: Keep features stable across all rows.
+        `metadata` in the raw JSON varies by task (different keys / nested lists).
+        To keep the schema consistent for Arrow + Dataset Viewer:
+        - Expose common metadata fields as flat, typed columns
+        - Store the full raw metadata dict as a JSON string in `metadata_json`
+        """
         features = datasets.Features(
             {
+                # Core fields
                 "question_id": datasets.Value("string"),
                 "video_id": datasets.Value("string"),
                 "video_path": datasets.Value("string"),
@@ -82,13 +89,15 @@
                 "answer_index": datasets.Value("int32"),
                 "answer_text": datasets.Value("string"),
                 "task_type": datasets.Value("string"),
-                # Common metadata (flat)
+
+                # Common metadata fields (flat, typed)
                 "source_dataset": datasets.Value("string"),
                 "duration": datasets.Value("float32"),
                 "resolution": datasets.Value("string"),
                 "evidence_count": datasets.Value("int32"),
                 "difficulty": datasets.Value("string"),
-                # Full raw metadata as JSON string (stable column type)
+
+                # Full raw metadata as JSON string (stable schema)
                 "metadata_json": datasets.Value("string"),
             }
         )
@@ -103,11 +112,16 @@
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
+        """
+        Define data splits. Downloads only the JSON annotations for streaming efficiency.
+        Videos are referenced by path but not downloaded during viewer loading.
+        """
         if self.config.name == "lite":
             annotations_file = "data/herbench_annotations_lite.json"
         else:
             annotations_file = "data/herbench_annotations.json"
 
+        # Download only annotations (not videos) for Dataset Viewer efficiency
        data_files = dl_manager.download(
             {
                 "annotations": annotations_file,
@@ -122,39 +136,62 @@
         ]
 
     def _generate_examples(self, annotations_file: str) -> Iterator[tuple[int, Dict[str, Any]]]:
+        """
+        Yield examples from the annotations file with robust type handling.
+
+        This method ensures:
+        - Streaming compatibility (processes one example at a time)
+        - Stable schema (all fields have consistent types)
+        - Defensive parsing (handles missing/malformed fields gracefully)
+        """
         with open(annotations_file, encoding="utf-8") as f:
             annotations = json.load(f)
 
         for idx, ann in enumerate(annotations):
-            md = ann.get("metadata") or {}
-            if not isinstance(md, dict):
-                # Very defensive: keep schema stable even if a row has unexpected metadata type.
-                md = {"_raw_metadata": md}
+            # Extract and normalize metadata
+            md = ann.get("metadata")
+            if md is None or not isinstance(md, dict):
+                # Defensive: ensure metadata is always a dict
+                md = {}
 
+            # Extract common metadata fields with defaults
             source_dataset = md.get("source_dataset", "unknown")
             duration = md.get("duration", 0.0)
             resolution = md.get("resolution", "unknown")
             evidence_count = md.get("evidence_count", 0)
             difficulty = md.get("difficulty", "unknown")
 
-            # Normalize numeric types (avoid Arrow type inference issues)
+            # Normalize numeric types to avoid Arrow type inference issues
             try:
-                duration_f = float(duration)
-            except Exception:
+                duration_f = float(duration) if duration is not None else 0.0
+            except (ValueError, TypeError):
                 duration_f = 0.0
+
             try:
-                evidence_i = int(evidence_count)
-            except Exception:
+                evidence_i = int(evidence_count) if evidence_count is not None else 0
+            except (ValueError, TypeError):
                 evidence_i = 0
 
+            # Normalize choices field
+            choices = ann.get("choices", [])
+            if not isinstance(choices, list):
+                choices = []
+
+            # Normalize answer_index
+            answer_index = ann.get("answer_index", 0)
+            try:
+                answer_index = int(answer_index) if answer_index is not None else 0
+            except (ValueError, TypeError):
+                answer_index = 0
+
             yield idx, {
                 "question_id": str(ann.get("question_id", f"HER_{idx:06d}")),
                 "video_id": str(ann.get("video_id", "")),
                 "video_path": str(ann.get("video_path", "")),
                 "question": str(ann.get("question", "")),
-                "choices": [str(x) for x in (ann.get("choices") or [])],
+                "choices": [str(x) for x in choices],
                 "answer": str(ann.get("answer", "")),
-                "answer_index": int(ann.get("answer_index", 0) or 0),
+                "answer_index": answer_index,
                 "answer_text": str(ann.get("answer_text", "")),
                 "task_type": str(ann.get("task_type", "unknown")),
                 "source_dataset": str(source_dataset),
```