import json
import os

from PIL import Image

import datasets
from datasets.utils.file_utils import xopen  # streaming-aware open(); handles local paths and URLs alike
from datasets.utils.logging import get_logger

logger = get_logger(__name__)
logger.setLevel("INFO")

NUM_PARTS = 14
_TAR_URLS = [
    f"https://huggingface.co/datasets/artpods56/EcclesialSchematisms/"
    f"resolve/main/images/images_part{i}.tar"
    for i in range(1, NUM_PARTS + 1)
]
_LABEL_URL = "https://huggingface.co/datasets/artpods56/EcclesialSchematisms/" \
             "resolve/main/labels/train.jsonl"

class EcclesiaeSchematisms(datasets.GeneratorBasedBuilder):
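    """Streams page images from the tar shards and joins each one with its
    JSONL annotation record (words, bboxes, labels, confidences)."""
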
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                "image_pil": datasets.Image(decode="pil"),   # keep memory low
                "image":     datasets.Value("string"),
                "width":     datasets.Value("int32"),
                "height":    datasets.Value("int32"),
                "words":     datasets.Sequence(datasets.Value("string")),
                "bboxes":    datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                "labels":    datasets.Sequence(datasets.Value("string")),
                "conf":      datasets.Sequence(datasets.Value("float32")),
            })
        )

    def _split_generators(self, dl_manager):
        images_tars = dl_manager.download(_TAR_URLS)
        labels_file = dl_manager.download(_LABEL_URL)  # small JSONL file; a local path, or a URL in streaming mode

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images_tars": images_tars,
                    "labels": labels_file,
                    "dl_manager": dl_manager,          # pass it forward
                },
            )
        ]

    # NOTE the extra parameter
    def _generate_examples(self, images_tars, labels, dl_manager):
        logger.info("Loading label annotations…")
        annotations = {}
        with xopen(labels, "r") as f:                  # works in streaming
            for line in f:
                ann = json.loads(line)
                annotations[os.path.basename(ann["image_path"])] = ann
        logger.info("Loaded %d annotations.", len(annotations))

        example_id = 0
        for tar_path in images_tars:
            logger.info("Iterating %s", tar_path)
            # iter_archive streams members one-by-one
            for member_name, fobj in dl_manager.iter_archive(tar_path):
                if not member_name.endswith(".jpg"):
                    continue
                img_name = os.path.basename(member_name)
                ann = annotations.get(img_name)
                if ann is None:
                    continue

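                # .convert("RGB") below forces PIL to read the whole member now,
                # before iter_archive advances to the next file in the tar.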
                with Image.open(fobj).convert("RGB") as image:
                    yield example_id, {
                        "image_pil": image,
                        "image": ann["image_path"],
                        "width": ann["width"],
                        "height": ann["height"],
                        "words": ann["words"],
                        "bboxes": ann["bboxes"],
                        "labels": ann["labels"],
                        "conf": ann.get("conf", []),
                    }
                example_id += 1
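
# Usage sketch (assumes this script sits at the root of the
# artpods56/EcclesialSchematisms dataset repo so that load_dataset picks it up;
# adjust the repo id or script path if yours differs). Newer versions of
# `datasets` may also require trust_remote_code=True for script-based datasets.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("artpods56/EcclesialSchematisms", split="train", streaming=True)
#   sample = next(iter(ds))
#   print(sample["image"], sample["width"], sample["height"])
#   print(sample["words"][:5], sample["labels"][:5])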