tgdatasets committed
Commit ca9b2f1 · verified · 1 Parent(s): 686f129

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. Cyoc.sh +463 -0
  3. cyoc-db.tsv +2 -2
  4. cyoc-outline.tsv +3 -0
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
  cyoc-db.tsv filter=lfs diff=lfs merge=lfs -text
+ cyoc-outline.tsv filter=lfs diff=lfs merge=lfs -text
Cyoc.sh ADDED
@@ -0,0 +1,463 @@
+ #clean files
+ removetrash () {
+ rm *.txt *.temp *.html ; rm -d -rf *\(temp\)*
+ rm A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+ }
+
+ #clean
+ removetrash
+
+ #get stories
+ curl "https://www.cyoc.net/interactives/story_index.html" | grep -Po '(?<=href=")[^"]*(?<=/outline.html)[^"]*(?=")' > 01-chapters-00.txt
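+ # 01-chapters-00.txt now holds one outline URL per line, e.g. (illustrative):
+ # https://www.cyoc.net/interactives/story_24/outline.html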
+
+ #data preparator
+ A=$(cat "01-chapters-00.txt")
+ B=$(cat "01-chapters-00.txt" | sed -e 's#.*[a-zA-Z0-9].*# dir=cyoc-outline(temp)#')
+ C=$(cat "01-chapters-00.txt" | sed -e 's#.*/interactives/# out=#g' -e 's#/outline.html#.html#')
+ paste -d'\n' <(echo "$A") <(echo "$B") <(echo "$C") > 01-chapters-01.txt
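+ # paste interleaves the three streams into aria2c's input-file format, one
+ # URI line followed by indented per-download options, e.g. (illustrative):
+ # https://www.cyoc.net/interactives/story_24/outline.html
+ #  dir=cyoc-outline(temp)
+ #  out=story_24.html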
+
+ #download file
+ for i in {1..3}; do :; aria2c -i 01-chapters-01.txt -x1 -j1 --save-session=01-chapters-01.temp ; mv 01-chapters-01.temp 01-chapters-01.txt ; done
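+ # --save-session writes still-unfinished/errored URIs back to the session
+ # file; moving it over the input file makes each pass retry only the
+ # failures, so the loop gives up to three attempts per URL.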
+
+ #add to archive
+ zip -j -r -D "cyoc-outline.zip" "cyoc-outline(temp)/"*
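+ # -j junks directory paths, -D adds no directory entries, -r recurses, so
+ # the archive stores the downloaded outline pages as a flat list of files.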
+
+ #create index
+ python3 << 'EOF'
+ import os
+ import csv
+ import re
+ from bs4 import BeautifulSoup
+ from urllib.parse import urljoin
+
+ BASE_URL = "https://www.cyoc.net"
+
+ def parse_tree_iterative(root_ul, file_path, base_url=BASE_URL, verbose=False):
+     stack = [(root_ul, [], 0)]
+     chapters = []
+
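+     # Depth-first walk over the nested <ul>/<li> outline via an explicit
+     # LIFO stack; each chapter's position is encoded in chapter_path, so the
+     # order in which subtrees are popped does not affect the recorded paths.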
+     while stack:
+         ul_tag, path, depth = stack.pop()
+         for i, li in enumerate(ul_tag.find_all('li', recursive=False)):
+             chapter_path = path + [str(i + 1)]
+             path_string = "-".join(chapter_path)
+
+             a_tag = li.find('a', recursive=False)
+             if a_tag:
+                 title = a_tag.get_text(strip=True) or "N/A"
+                 full_url = urljoin(base_url + '/', a_tag.get('href', ''))
+
+                 li_text = li.get_text(strip=True)
+                 match = re.search(r'Ch\s*(\d+)', li_text)
+                 chapter_number = match.group(1) if match else "N/A"
+
+                 author_name, author_url = "Anonymous", ""
+                 author_a_tag = li.find('a', href=re.compile(r'/user_'))
+                 if author_a_tag:
+                     author_name = author_a_tag.get_text(strip=True) or "Anonymous"
+                     author_url = urljoin(base_url + '/', author_a_tag.get('href', ''))
+
+                 chapters.append({
+                     'filename': os.path.basename(file_path),
+                     'chapter_number': chapter_number,
+                     'story_title': title,
+                     'history_path': path_string,
+                     'url': full_url,
+                     'author_name': author_name,
+                     'author_url': author_url
+                 })
+
+             nested_ul = li.find('ul', recursive=False)
+             if nested_ul:
+                 if verbose:
+                     print(f"Adding nested <ul> at path {path_string} to stack in {file_path}")
+                 stack.append((nested_ul, chapter_path, depth + 1))
+
+     return chapters
+
+ def parse_html_file(file_path, base_url=BASE_URL, verbose=False):
+     if verbose:
+         print(f"\nParsing file: {file_path}")
+     try:
+         with open(file_path, 'r', encoding='latin-1') as f:
+             html_content = f.read()
+     except UnicodeDecodeError as e:
+         print(f"[ERROR] Decoding {file_path}: {e}")
+         return []
+
+     soup = BeautifulSoup(html_content, 'html.parser')
+     subtitle_tag = soup.select_one('.subtitle')
+     subtitle_text = subtitle_tag.get_text(strip=True) if subtitle_tag else ""
+
+     root_ul = soup.find('ul', class_='outline') or soup.find('ul', class_='tree')
+     if not root_ul:
+         if verbose:
+             print(f"No list structure found in {file_path}. Skipping.")
+         return []
+
+     chapters = parse_tree_iterative(root_ul, file_path, base_url, verbose)
+
+     # Add story number prefix to history_path
+     story_number_match = re.search(r'story_(\d+)', file_path)
+     story_number = story_number_match.group(1) if story_number_match else "0"
+
+     for row in chapters:
+         row['subtitle'] = subtitle_text
+         row['history_path'] = f"{story_number}={row['history_path']}"
+
+     if chapters:
+         print(f"[LOG] Extracted: {file_path} ({len(chapters)} chapters)")
+
+     return chapters
+
+ def process_folder_and_save_tsv(folder_path, output_file, base_url=BASE_URL, verbose=False):
+     if not os.path.exists(folder_path):
+         print(f"[ERROR] Folder '{folder_path}' does not exist.")
+         return
+
+     all_chapter_data = []
+     for filename in os.listdir(folder_path):
+         if filename.endswith('.html'):
+             file_path = os.path.join(folder_path, filename)
+             all_chapter_data.extend(parse_html_file(file_path, base_url, verbose))
+
+     if not all_chapter_data:
+         print("No chapter data found. TSV file not created.")
+         return
+
+     headers = [
+         "outline_url",
+         "chapter_number",
+         "story_title",
+         "history_path",
+         "chapter_url",
+         "author_name",
+         "author_url",
+         "subtitle"
+     ]
+
+     with open(output_file, 'w', newline='', encoding='utf-8') as tsv_file:
+         writer = csv.writer(tsv_file, delimiter='\t')
+         writer.writerow(headers)
+         for row in all_chapter_data:
+             base_filename = os.path.splitext(row['filename'])[0]
+             new_filename_url = f"http://www.cyoc.net/interactives/{base_filename}/outline.html"
+             writer.writerow([
+                 new_filename_url,
+                 row['chapter_number'],
+                 row['story_title'],
+                 row['history_path'],
+                 row['url'],
+                 row['author_name'],
+                 row['author_url'],
+                 row['subtitle']
+             ])
+
+     print(f"Processing complete. Data for {len(all_chapter_data)} chapters saved to '{output_file}'.")
+
+ if __name__ == "__main__":
+     folder_to_process = 'cyoc-outline(temp)'
+     output_tsv = 'cyoc-outline.tsv'
+     process_folder_and_save_tsv(folder_to_process, output_tsv, verbose=True)
+ EOF
+
+ #data extractor
+ find "cyoc-outline(temp)" -type f | xargs grep -hoP "(?<=href=\")[^\"]+|(?<=href=')[^']+" | sed -e 's#.*/interactives/#https://www.cyoc.net/interactives/#g' | grep 'chapter_' | sed 's/\.html$//' | sed 's/$/.html/' > 01-chapters-02.txt
+ awk '{print $1}' 01-chapters-02.txt cyoc-db.xml cyoc-db.xml | sed 's/^M$//' | sort -f | uniq -ci | sed -n '/ 1 /s/^ *1 //p' | sponge 01-chapters-02.txt
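+ # Set difference: cyoc-db.xml is listed twice, so any URL already in the
+ # database appears at least twice in the sorted stream; only URLs with a
+ # uniq count of exactly 1 (chapters not yet downloaded) survive.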
+
+ #data preparator
+ A=$(cat "01-chapters-02.txt")
+ B=$(cat "01-chapters-02.txt" | sed -e 's#.*[a-zA-Z0-9].*# dir=story-chapters(temp)#g')
+ C=$(cat "01-chapters-02.txt" | sed -e 's#.*interactives/chapter_# out=#g')
+ paste -d'\n' <(echo "$A") <(echo "$B") <(echo "$C") > 01-chapters-03.txt
+
+ #download file
+ for i in {1..3}; do :; aria2c -i 01-chapters-03.txt --save-session=01-chapters-03.temp ; mv 01-chapters-03.temp 01-chapters-03.txt ; done
+
+ #extract chapters content
+ python3 << 'EOF'
+ import os
+ import json
+ from datetime import datetime
+ import ftfy
+ import chardet
+ import re
+ from bs4 import BeautifulSoup
+ from tqdm import tqdm
+
+ INPUT_FOLDER = 'story-chapters(temp)'
+ OUTPUT_FILE = 'story-chapters(temp).tsv'
+ BATCH_SIZE = 5000
+
+ # --- Story title replacement dictionary ---
+ STORY_TITLE_REPLACEMENTS = {
+     "story_11": "Welcome to ToonWorld",
+     "story_1": "Ty's Power",
+     "story_13": "It began when I made her clothes disappear",
+     "story_18": "The Arcade",
+     "story_20": "The Four Giggling Girls",
+     "story_21": "Transform or Dare?",
+     "story_24": "Chronivac Version 4.0",
+     "story_25": "Altered Fates",
+     "story_26": "Pleasure Island",
+     "story_27": "Stonewood Male Dormitory",
+     "story_29": "The Lightning Strike",
+     "story_30": "The Madame Illusia",
+     "story_31": "CYOTF",
+     "story_33": "Beldazarr, The Djinn of Mind and Body",
+     "story_34": "CYOTF (New)",
+     "story_37": "The Zoo",
+     "story_39": "Spelstorm Manor",
+     "story_40": "A Game of Change",
+     "story_42": "Transformationet",
+     "story_44": "The Ointment Store",
+     "story_47": "CYOTF (PG-13)",
+     "story_48": "Mad Science",
+     "story_50": "CYOTF (Animal)",
+     "story_51": "CYOTF (Human)",
+     "story_7": "The Vials",
+     "story_8": "The Magic Shop"
+ }
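+ # Chapter pages often identify a story only by its story_NN slug; this map
+ # restores display titles for those slugs (an assumption based on the
+ # story-title fallback chain in parse_html_to_tsv_row below).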
+
+ # --- Helper functions ---
+ def read_and_fix_encoding(path):
+     raw = open(path, 'rb').read()
+     tried_encodings = ['utf-8', 'windows-1252', 'latin-1']
+     for enc in tried_encodings:
+         try:
+             text = raw.decode(enc)
+             return ftfy.fix_text(text)
+         except UnicodeDecodeError:
+             continue
+     det = chardet.detect(raw)
+     enc = det.get('encoding') or 'utf-8'
+     try:
+         text = raw.decode(enc)
+     except UnicodeDecodeError:
+         text = raw.decode(enc, errors='surrogateescape')
+     return ftfy.fix_text(text)
+
+ def sanitize_json_text(text):
+     if not text:
+         return ""
+     return text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
+
+ def clean_url(url):
+     """Remove Wayback Machine prefix from URLs if present"""
+     if not url:
+         return url
+     if "web.archive.org" in url and "/http" in url:
+         parts = url.split("/", 5)
+         if len(parts) >= 6:
+             return parts[5]
+     return url
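+ # e.g. clean_url("https://web.archive.org/web/20200101000000/https://www.cyoc.net/interactives/chapter_1.html")
+ # -> "https://www.cyoc.net/interactives/chapter_1.html"; split("/", 5) keeps
+ # everything after the snapshot timestamp intact (timestamp is illustrative).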
+
+ # --- Main parsing function using BeautifulSoup ---
+ def parse_html_to_tsv_row(file_path):
+     html_content = read_and_fix_encoding(file_path)
+     soup = BeautifulSoup(html_content, 'html.parser')
+
+     # --- Story title ---
+     story_title = None
+     story_link = soup.select_one('#story-links a[href*="story_"]')
+     if story_link:
+         m = re.search(r'story_\d+', story_link['href'])
+         if m:
+             story_title = m.group(0)
+
+     if not story_title:
+         story_el = soup.select_one('#toggler-storytitle')
+         if story_el:
+             story_title = story_el.get_text(strip=True)
+
+     if not story_title:
+         story_title_el = (soup.select_one('div.content .subtitle > div') or
+                           soup.select_one('h4.subtitle a'))
+         if story_title_el:
+             story_title = story_title_el.get_text(strip=True)
+
+     if story_title in STORY_TITLE_REPLACEMENTS:
+         story_title = STORY_TITLE_REPLACEMENTS[story_title]
+
+     # --- Chapter title ---
+     chapter_title_el = soup.select_one('h2.title')
+     chapter_title = chapter_title_el.get_text(strip=True) if chapter_title_el else ''
+     chapter_title = chapter_title.replace("Go to:First chapterStory outlineRecent chapters", "").strip()
+
+     # --- Timestamp ---
+     timestamp = None
+     ts_el = soup.select_one('span.chapter-date span.timestamp')
+     if ts_el and ts_el.has_attr('title'):
+         ts_str = ts_el['title']
+         try:
+             dt_obj = datetime.strptime(ts_str, '%m/%d/%Y %H:%M')
+             timestamp = dt_obj.strftime('%Y-%m-%dT%H:%M:%SZ')
+         except ValueError:
+             timestamp = ts_str
+     if not timestamp:
+         raw_date = soup.select_one('span.chapter-date')
+         if raw_date:
+             raw_text = raw_date.get_text(strip=True)
+             try:
+                 dt_obj = datetime.strptime(raw_text, '%m/%d/%y %H:%M')
+                 timestamp = dt_obj.strftime('%Y-%m-%dT%H:%M:%SZ')
+             except ValueError:
+                 timestamp = raw_text
+
+     # --- Author ---
+     author_name, author_url = None, None
+     author_el = soup.select_one('span.chapter-author a') or soup.select_one('.chapter-author a')
+     if author_el:
+         author_name = author_el.get_text(strip=True)
+         author_url = clean_url(author_el.get('href'))
+         if author_url and author_url.startswith("user_"):
+             author_url = "https://www.cyoc.net/interactives/" + author_url
+
+     # --- Tags ---
+     tags_list = [t.get('title') for t in soup.select('div.chapter-info span.tags a abbr.l') if t.has_attr('title')]
+     tags_str = ",".join(sanitize_json_text(t) for t in tags_list)
+
+     # --- Adult content legend ---
+     adult_content_legend_list = []
+     legend_div = soup.select_one('div.adult-content-info')
+     if legend_div:
+         legend_text = legend_div.get_text(strip=True)
+         if legend_text.startswith('Legend:'):
+             items = legend_text.replace('Legend:', '').strip().split(', ')
+             adult_content_legend_list = [sanitize_json_text(i) for i in items]
+         else:
+             imgs = legend_div.select('img[title]')
+             adult_content_legend_list = [sanitize_json_text(img['title']) for img in imgs]
+     adult_content_legend_str = ",".join(adult_content_legend_list)
+
+     # --- Main text with multiple fallbacks ---
+     main_text_el = (soup.select_one('#chapter-text') or
+                     (soup.select_one('.chapter-info') and soup.select_one('.chapter-info').find_next('p')) or
+                     soup.select_one('p.serif') or
+                     soup.select_one('section > .serif') or
+                     soup.select_one('div.content p'))
+     main_text_html = str(main_text_el) if main_text_el else ''
+     main_text_json_str = json.dumps({'html': sanitize_json_text(main_text_html)}, separators=(',', ':'))
+
+     # --- Choices ---
+     options_list = []
+     choices_blocks = soup.select('ul#subchapters li') or soup.select('ul.subchapter-list li')
+     for li in choices_blocks:
+         link = li.select_one('a')
+         if link:
+             option_title = sanitize_json_text(link.get_text(strip=True))
+             option_url = clean_url(link.get('href'))
+             adult_icon = li.select_one('span.sprite-adult')
+             adult_content = adult_icon['title'] if adult_icon and adult_icon.has_attr('title') else "None"
+             option_tags = [t['title'] for t in li.select('span.tags a abbr.l') if t.has_attr('title')]
+             options_list.append({
+                 'title': option_title,
+                 'url': option_url,
+                 'adult_content': adult_content,
+                 'tags': [sanitize_json_text(t) for t in option_tags]
+             })
+     options_json_str = json.dumps(options_list, separators=(',', ':'))
+
+     # --- Suggested options ---
+     suggested_list = []
+     suggested_blocks = soup.select('ul.subchapter-list.suggested-list li') or soup.select('ul#subchapters li')
+     for li in suggested_blocks:
+         link = li.select_one('a')
+         if link:
+             sug_title = sanitize_json_text(link.get_text(strip=True))
+             sug_url = clean_url(link.get('href'))
+             suggested_list.append({'title': sug_title, 'url': sug_url})
+     suggested_json_str = json.dumps(suggested_list, separators=(',', ':'))
+
+     # --- Compose TSV row ---
+     chapter_number = os.path.basename(file_path).split('.')[0].split('_')[-1]
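+     # Columns 1 and 2 are intentionally the same chapter URL here: the awk
+     # "#index creator" step below rewrites column 2 to the chapter's
+     # history_path taken from cyoc-outline.tsv.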
+     columns = [
+         clean_url(f"https://www.cyoc.net/interactives/chapter_{chapter_number}.html"),
+         clean_url(f"https://www.cyoc.net/interactives/chapter_{chapter_number}.html"),
+         sanitize_json_text(story_title),
+         sanitize_json_text(chapter_title),
+         timestamp or '',
+         sanitize_json_text(author_name),
+         author_url or '',
+         main_text_json_str,
+         options_json_str,
+         suggested_json_str,
+         tags_str,
+         adult_content_legend_str
+     ]
+     return columns
+
+ # --- Process folder in batches ---
+ def process_folder_in_batches(input_folder, output_file, batch_size=5000):
+     html_files = sorted([os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.endswith('.html')])
+     total_files = len(html_files)
+     print(f"Found {total_files} HTML files to process...")
+
+     buffer = []
+     with tqdm(total=total_files, desc="Overall Progress", unit="file") as pbar:
+         for idx, html_file in enumerate(html_files, 1):
+             try:
+                 tsv_row = parse_html_to_tsv_row(html_file)
+                 buffer.append("\t".join(tsv_row))
+             except Exception as e:
+                 print(f"Error processing {html_file}: {e}")
+
+             if idx % batch_size == 0 and buffer:
+                 with open(output_file, 'a', encoding='utf-8') as f_out:
+                     f_out.write("\n".join(buffer) + "\n")
+                 buffer = []
+             pbar.update(1)
+
+     if buffer:
+         with open(output_file, 'a', encoding='utf-8') as f_out:
+             f_out.write("\n".join(buffer) + "\n")
+
+ # --- Run ---
+ if __name__ == '__main__':
+     if os.path.exists(OUTPUT_FILE):
+         os.remove(OUTPUT_FILE)
+     process_folder_in_batches(INPUT_FOLDER, OUTPUT_FILE, BATCH_SIZE)
+     print(f"\nAll files processed. TSV saved to '{OUTPUT_FILE}'.")
+ EOF
+
+ #index creator
+ awk -F'\t' '
+ NR==FNR {
+     # First pass: cyoc-outline.tsv, map chapter_url ($5) -> history_path ($4)
+     if ($5 != "") {
+         lookup[$5] = $4
+     }
+     next
+ }
+ {
+     # Second pass: story-chapters(temp).tsv, swap column 2 for the path
+     if ($2 in lookup) {
+         $2 = lookup[$2]
+     }
+     print $0
+ }
+ ' OFS='\t' "cyoc-outline.tsv" "story-chapters(temp).tsv" | sponge "story-chapters(temp).tsv"
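+ # sponge (moreutils) soaks up all input before writing, so the pipeline can
+ # safely rewrite story-chapters(temp).tsv in place.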
+
+ #strip grep and page-chrome noise from the chapter dump, then append it to the database
+ sed -i 's#Binary file (standard input) matches: ##g' cyoc-db.temp
+ sed -i 's#.*You are not logged in. Log in InteractivesSearch.*##g' cyoc-db.temp
+ cat cyoc-db.temp >> cyoc-db.xml
+
+ #prepare archive list (outline URLs + new chapter URLs) for the Wayback Machine
+ cat 01-chapters-00.txt >> 01-chapters-04.txt
+ cat 01-chapters-02.txt >> 01-chapters-04.txt
+
+ #archive stories
+ for i in {1..2}; do :;
+ while :; do
+ for j in {1..3}; do
+ read -r url || break 2
+ aria2c https://web.archive.org/save/""$url --user-agent="Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)" --dry-run ; sleep 0.10 &
+ echo -e "$url Done [1]\n" &
+ done
+ wait
+ done < 01-chapters-04.txt
+ done
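+ # Batches of three requests hit the Wayback Machine save endpoint, then the
+ # script waits; "read -r url || break 2" exits both inner loops at EOF, and
+ # the outer {1..2} pass re-reads the whole list for a second attempt.
+ # --dry-run makes aria2c only probe each URL rather than download it, which
+ # is presumably still enough to trigger a snapshot.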
+
+ removetrash
+
+
cyoc-db.tsv CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6713fdf493409b6cd001058d6d62be92e1b37d11ba5abbb25ef24777823a97a7
- size 910822994
+ oid sha256:b2a2e454440b4baff71703fffa783bef689558596872f812d01fe7b416c8de55
+ size 910824220
cyoc-outline.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6be9ee5baaed5c5125f12fd87e523e323f6a71f83f4958ff267bc440a028850d
+ size 54768915