#!/bin/bash
# Cyoc.sh: scrape CYOC story outlines and chapters, build TSV indexes,
# and push the collected URLs to the Wayback Machine.
#clean files
removetrash () {
rm -f *.txt *.temp *.html
rm -rf *\(temp\)*
rm -f A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
}
#clean
removetrash
#get stories
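# -P enables PCRE: the lookbehind (?<=href=") anchors inside href attributes,
# the inner (?<=/outline.html) keeps only links containing /outline.html, and
# the lookahead (?=") stops at the closing quote, so only the URL is printed.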
curl "https://www.cyoc.net/interactives/story_index.html" | grep -Po '(?<=href=")[^"]*(?<=/outline.html)[^"]*(?=")' > 01-chapters-00.txt
#prepare aria2c input
A=$(cat "01-chapters-00.txt")
B=$(cat "01-chapters-00.txt" | sed -e 's#.*[a-zA-Z0-9].*# dir=cyoc-outline(temp)#')
C=$(cat "01-chapters-00.txt" | sed -e 's#.*/interactives/# out=#g' -e 's#/outline.html#.html#' )
paste -d'\n' <(echo "$A") <(echo "$B") <(echo "$C") > 01-chapters-01.txt
#download outline pages
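# --save-session rewrites the input list with only errored/unfinished
# downloads on exit, so each of the three passes retries just the failures
# left over from the previous one.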
for i in {1..3}; do aria2c -i 01-chapters-01.txt -x1 -j1 --save-session=01-chapters-01.temp ; mv 01-chapters-01.temp 01-chapters-01.txt ; done
#add outline pages to zip archive
zip -j -r -D "cyoc-outline.zip" "cyoc-outline(temp)/"*
#create index
python3 << 'EOF'
import os
import csv
import re
from bs4 import BeautifulSoup
from urllib.parse import urljoin
BASE_URL = "https://www.cyoc.net"
def parse_tree_iterative(root_ul, file_path, base_url=BASE_URL, verbose=False):
stack = [(root_ul, [], 0)]
chapters = []
while stack:
ul_tag, path, depth = stack.pop()
for i, li in enumerate(ul_tag.find_all('li', recursive=False)):
chapter_path = path + [str(i + 1)]
path_string = "-".join(chapter_path)
a_tag = li.find('a', recursive=False)
if a_tag:
title = a_tag.get_text(strip=True) or "N/A"
full_url = urljoin(base_url+'/', a_tag.get('href',''))
li_text = li.get_text(strip=True)
match = re.search(r'Ch\s*(\d+)', li_text)
chapter_number = match.group(1) if match else "N/A"
author_name, author_url = "Anonymous", ""
author_a_tag = li.find('a', href=re.compile(r'/user_'))
if author_a_tag:
author_name = author_a_tag.get_text(strip=True) or "Anonymous"
author_url = urljoin(base_url+'/', author_a_tag.get('href',''))
chapters.append({
'filename': os.path.basename(file_path),
'chapter_number': chapter_number,
'story_title': title,
'history_path': path_string,
'url': full_url,
'author_name': author_name,
'author_url': author_url
})
nested_ul = li.find('ul', recursive=False)
if nested_ul:
if verbose:
print(f"Adding nested <ul> at path {path_string} to stack in {file_path}")
stack.append((nested_ul, chapter_path, depth+1))
return chapters
def parse_html_file(file_path, base_url=BASE_URL, verbose=False):
if verbose:
print(f"\nParsing file: {file_path}")
    try:
        with open(file_path, 'r', encoding='latin-1') as f:
            html_content = f.read()
    except OSError as e:
        # latin-1 can decode any byte sequence, so decoding errors cannot
        # occur here; file-system errors are the realistic failure mode.
        print(f"[ERROR] Reading {file_path}: {e}")
        return []
soup = BeautifulSoup(html_content, 'html.parser')
subtitle_tag = soup.select_one('.subtitle')
subtitle_text = subtitle_tag.get_text(strip=True) if subtitle_tag else ""
root_ul = soup.find('ul', class_='outline') or soup.find('ul', class_='tree')
if not root_ul:
if verbose:
print(f"No list structure found in {file_path}. Skipping.")
return []
chapters = parse_tree_iterative(root_ul, file_path, base_url, verbose)
# Add story number prefix to history_path
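    # e.g. a chapter of story_11 at tree path "1-3-2" gets history_path "11=1-3-2".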
story_number_match = re.search(r'story_(\d+)', file_path)
story_number = story_number_match.group(1) if story_number_match else "0"
for row in chapters:
row['subtitle'] = subtitle_text
row['history_path'] = f"{story_number}={row['history_path']}"
if chapters:
print(f"[LOG] Correctly extracted: {file_path} ({len(chapters)} chapters)")
return chapters
def process_folder_and_save_tsv(folder_path, output_file, base_url=BASE_URL, verbose=False):
if not os.path.exists(folder_path):
print(f"[ERROR] Folder '{folder_path}' does not exist.")
return
all_chapter_data = []
for filename in os.listdir(folder_path):
if filename.endswith('.html'):
file_path = os.path.join(folder_path, filename)
all_chapter_data.extend(parse_html_file(file_path, base_url, verbose))
if not all_chapter_data:
print("No chapter data found. TSV file not created.")
return
headers = [
"outline_url",
"chapter_number",
"story_title",
"history_path",
"chapter_url",
"author_name",
"author_url",
"subtitle"
]
with open(output_file, 'w', newline='', encoding='utf-8') as tsv_file:
writer = csv.writer(tsv_file, delimiter='\t')
writer.writerow(headers)
for row in all_chapter_data:
base_filename = os.path.splitext(row['filename'])[0]
new_filename_url = f"http://www.cyoc.net/interactives/{base_filename}/outline.html"
writer.writerow([
new_filename_url,
row['chapter_number'],
row['story_title'],
row['history_path'],
row['url'],
row['author_name'],
row['author_url'],
row['subtitle']
])
print(f"Processing complete. Data for {len(all_chapter_data)} chapters saved to '{output_file}'.")
if __name__ == "__main__":
folder_to_process = 'cyoc-outline(temp)'
output_tsv = 'cyoc-outline.tsv'
process_folder_and_save_tsv(folder_to_process, output_tsv, verbose=True)
EOF
#data extractor
find "cyoc-outline(temp)" -type f | xargs grep -hoP "(?<=href=\")[^\"]+|(?<=href=')[^']+"| sed -e 's#.*/interactives/#https://www.cyoc.net/interactives/#g' | grep 'chapter_' | sed 's/\.html$//' | sed 's/$/.html/' > 01-chapters-02.txt
awk '{print $1}' 01-chapters-02.txt cyoc-db.xml cyoc-db.xml | sed 's/\r$//' | sort -f | uniq -ci | sed -n '/ 1 /s/^ *1 //p' | sponge 01-chapters-02.txt
#prepare aria2c input
A=$(cat "01-chapters-02.txt")
B=$(cat "01-chapters-02.txt" | sed -e 's#.*[a-zA-Z0-9].*# dir=story-chapters(temp)#g' )
C=$(cat "01-chapters-02.txt" | sed -e 's#.*interactives/chapter_# out=#g' )
paste -d'\n' <(echo "$A") <(echo "$B") <(echo "$C") > 01-chapters-03.txt
#download chapter pages
for i in {1..3}; do aria2c -i 01-chapters-03.txt --save-session=01-chapters-03.temp ; mv 01-chapters-03.temp 01-chapters-03.txt ; done
#extract chapter content
python3 << 'EOF'
import os
import json
from datetime import datetime
import ftfy
import chardet
import re
from bs4 import BeautifulSoup
from tqdm import tqdm
INPUT_FOLDER = 'story-chapters(temp)'
OUTPUT_FILE = 'story-chapters(temp).tsv'
BATCH_SIZE = 5000
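# Output rows are buffered and flushed every BATCH_SIZE files so large crawls
# never hold the whole TSV in memory at once.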
# --- Story title replacement dictionary ---
STORY_TITLE_REPLACEMENTS = {
"story_11": "Welcome to ToonWorld",
"story_1": "Ty's Power",
"story_13": "It began when I made her clothes disappear",
"story_18": "The Arcade",
"story_20": "The Four Giggling Girls",
"story_21": "Transform or Dare?",
"story_24": "Chronivac Version 4.0",
"story_25": "Altered Fates",
"story_26": "Pleasure Island",
"story_27": "Stonewood Male Dormitory",
"story_29": "The Lightning Strike",
"story_30": "The Madame Illusia",
"story_31": "CYOTF",
"story_33": "Beldazarr, The Djinn of Mind and Body",
"story_34": "CYOTF (New)",
"story_37": "The Zoo",
"story_39": "Spelstorm Manor",
"story_40": "A Game of Change",
"story_42": "Transformationet",
"story_44": "The Ointment Store",
"story_47": "CYOTF (PG-13)",
"story_48": "Mad Science",
"story_50": "CYOTF (Animal)",
"story_51": "CYOTF (Human)",
"story_7": "The Vials",
"story_8": "The Magic Shop"
}
# --- Helper functions ---
def read_and_fix_encoding(path):
    with open(path, 'rb') as f:
        raw = f.read()
    tried_encodings = ['utf-8', 'windows-1252', 'latin-1']
    for enc in tried_encodings:
        try:
            text = raw.decode(enc)
            return ftfy.fix_text(text)
        except UnicodeDecodeError:
            continue
    # latin-1 decodes any byte sequence, so the loop above always returns;
    # the chardet fallback below is kept purely as a safety net.
    det = chardet.detect(raw)
    enc = det.get('encoding') or 'utf-8'  # chardet may report None
    try:
        text = raw.decode(enc)
    except UnicodeDecodeError:
        text = raw.decode(enc, errors='surrogateescape')
    return ftfy.fix_text(text)
def sanitize_json_text(text):
if not text:
return ""
return text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
def clean_url(url):
"""Remove Wayback Machine prefix from URLs if present"""
if not url:
return url
if "web.archive.org" in url and "/http" in url:
parts = url.split("/", 5)
if len(parts) >= 6:
return parts[5]
return url
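# e.g. clean_url("https://web.archive.org/web/2020/https://www.cyoc.net/x")
# returns "https://www.cyoc.net/x": the fifth "/" delimits the original URL.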
# --- Main parsing function using BeautifulSoup ---
def parse_html_to_tsv_row(file_path):
html_content = read_and_fix_encoding(file_path)
soup = BeautifulSoup(html_content, 'html.parser')
# --- Story title ---
story_title = None
story_link = soup.select_one('#story-links a[href*="story_"]')
if story_link:
m = re.search(r'story_\d+', story_link['href'])
if m:
story_title = m.group(0)
if not story_title:
story_el = soup.select_one('#toggler-storytitle')
if story_el:
story_title = story_el.get_text(strip=True)
if not story_title:
story_title = (soup.select_one('div.content .subtitle > div') or
soup.select_one('h4.subtitle a'))
if story_title:
story_title = story_title.get_text(strip=True)
if story_title in STORY_TITLE_REPLACEMENTS:
story_title = STORY_TITLE_REPLACEMENTS[story_title]
# --- Chapter title ---
chapter_title_el = soup.select_one('h2.title')
chapter_title = chapter_title_el.get_text(strip=True) if chapter_title_el else ''
chapter_title = chapter_title.replace("Go to:First chapterStory outlineRecent chapters", "").strip()
# --- Timestamp ---
timestamp = None
ts_el = soup.select_one('span.chapter-date span.timestamp')
if ts_el and ts_el.has_attr('title'):
ts_str = ts_el['title']
try:
dt_obj = datetime.strptime(ts_str, '%m/%d/%Y %H:%M')
timestamp = dt_obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        except ValueError:
timestamp = ts_str
if not timestamp:
raw_date = soup.select_one('span.chapter-date')
if raw_date:
raw_text = raw_date.get_text(strip=True)
try:
dt_obj = datetime.strptime(raw_text, '%m/%d/%y %H:%M')
timestamp = dt_obj.strftime('%Y-%m-%dT%H:%M:%SZ')
            except ValueError:
timestamp = raw_text
# --- Author ---
author_name, author_url = None, None
author_el = soup.select_one('span.chapter-author a') or soup.select_one('.chapter-author a')
if author_el:
author_name = author_el.get_text(strip=True)
author_url = clean_url(author_el.get('href'))
if author_url and author_url.startswith("user_"):
author_url = "https://www.cyoc.net/interactives/" + author_url
# --- Tags ---
tags_list = [t.get('title') for t in soup.select('div.chapter-info span.tags a abbr.l') if t.has_attr('title')]
tags_str = ",".join(sanitize_json_text(t) for t in tags_list)
# --- Adult content legend ---
adult_content_legend_list = []
legend_div = soup.select_one('div.adult-content-info')
if legend_div:
legend_text = legend_div.get_text(strip=True)
if legend_text.startswith('Legend:'):
items = legend_text.replace('Legend:', '').strip().split(', ')
adult_content_legend_list = [sanitize_json_text(i) for i in items]
else:
imgs = legend_div.select('img[title]')
adult_content_legend_list = [sanitize_json_text(img['title']) for img in imgs]
adult_content_legend_str = ",".join(adult_content_legend_list)
# --- Main text with multiple fallbacks ---
main_text_el = (soup.select_one('#chapter-text') or
(soup.select_one('.chapter-info') and soup.select_one('.chapter-info').find_next('p')) or
soup.select_one('p.serif') or
soup.select_one('section > .serif') or
soup.select_one('div.content p'))
main_text_html = str(main_text_el) if main_text_el else ''
main_text_json_str = json.dumps({'html': sanitize_json_text(main_text_html)}, separators=(',', ':'))
# --- Choices ---
options_list = []
choices_blocks = soup.select('ul#subchapters li') or soup.select('ul.subchapter-list li')
for li in choices_blocks:
link = li.select_one('a')
if link:
option_title = sanitize_json_text(link.get_text(strip=True))
option_url = clean_url(link.get('href'))
adult_icon = li.select_one('span.sprite-adult')
adult_content = adult_icon['title'] if adult_icon and adult_icon.has_attr('title') else "None"
option_tags = [t['title'] for t in li.select('span.tags a abbr.l') if t.has_attr('title')]
options_list.append({
'title': option_title,
'url': option_url,
'adult_content': adult_content,
'tags': [sanitize_json_text(t) for t in option_tags]
})
options_json_str = json.dumps(options_list, separators=(',', ':'))
# --- Suggested options ---
suggested_list = []
suggested_blocks = soup.select('ul.subchapter-list.suggested-list li') or soup.select('ul#subchapters li')
for li in suggested_blocks:
link = li.select_one('a')
if link:
sug_title = sanitize_json_text(link.get_text(strip=True))
sug_url = clean_url(link.get('href'))
suggested_list.append({'title': sug_title, 'url': sug_url})
suggested_json_str = json.dumps(suggested_list, separators=(',', ':'))
# --- Compose TSV row ---
chapter_number = os.path.basename(file_path).split('.')[0].split('_')[-1]
columns = [
clean_url(f"https://www.cyoc.net/interactives/chapter_{chapter_number}.html"),
clean_url(f"https://www.cyoc.net/interactives/chapter_{chapter_number}.html"),
sanitize_json_text(story_title),
sanitize_json_text(chapter_title),
timestamp or '',
sanitize_json_text(author_name),
author_url or '',
main_text_json_str,
options_json_str,
suggested_json_str,
tags_str,
adult_content_legend_str
]
return columns
# --- Process folder in batches ---
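# No header row is written: the awk join that follows this heredoc addresses
# columns positionally ($2), so the TSV stays headerless.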
def process_folder_in_batches(input_folder, output_file, batch_size=5000):
html_files = sorted([os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.endswith('.html')])
total_files = len(html_files)
print(f"Found {total_files} HTML files to process...")
buffer = []
with tqdm(total=total_files, desc="Overall Progress", unit="file") as pbar:
for idx, html_file in enumerate(html_files, 1):
try:
tsv_row = parse_html_to_tsv_row(html_file)
buffer.append("\t".join(tsv_row))
except Exception as e:
print(f"Error processing {html_file}: {e}")
if idx % batch_size == 0:
with open(output_file, 'a', encoding='utf-8') as f_out:
f_out.write("\n".join(buffer) + "\n")
buffer = []
pbar.update(1)
if buffer:
with open(output_file, 'a', encoding='utf-8') as f_out:
f_out.write("\n".join(buffer) + "\n")
# --- Run ---
if __name__ == '__main__':
if os.path.exists(OUTPUT_FILE):
os.remove(OUTPUT_FILE)
process_folder_in_batches(INPUT_FOLDER, OUTPUT_FILE, BATCH_SIZE)
print(f"\nAll files processed. TSV saved to '{OUTPUT_FILE}'.")
EOF
#index creator
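# Two-file awk join: NR==FNR is true only while the first file
# (cyoc-outline.tsv) is being read, so the first block builds the lookup
# table and the second block rewrites the chapter rows.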
awk -F'\t' '
NR==FNR {
    # First file (cyoc-outline.tsv): map chapter_url ($5) to history_path ($4)
if ($5 != "") {
lookup[$5] = $4
}
next
}
{
    # Second file (story-chapters(temp).tsv): swap the URL in $2 for its history_path
if ($2 in lookup) {
$2 = lookup[$2]
}
print $0
}
' OFS='\t' "cyoc-outline.tsv" "story-chapters(temp).tsv" | sponge "story-chapters(temp).tsv"
sed -i 's#Binary file (standard input) matches: ##g' cyoc-db.temp
sed -i 's#.*You are not logged in. Log in InteractivesSearch.*##g' cyoc-db.temp
cat cyoc-db.temp >> cyoc-db.xml
#combine outline and chapter URLs for archiving
cat 01-chapters-00.txt >> 01-chapters-04.txt
cat 01-chapters-02.txt >> 01-chapters-04.txt
#archive stories
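# Requesting https://web.archive.org/save/<url> is enough to trigger a Wayback
# Machine snapshot, so --dry-run (which only checks that the remote file is
# available) suffices. URLs are fired three at a time, with a short delay and
# a wait between batches as crude rate limiting; the outer loop runs two passes.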
for pass in {1..2}; do
    while :; do
        for j in {1..3}; do
            read -r url || break 2
            aria2c "https://web.archive.org/save/$url" --user-agent="Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)" --dry-run &
            sleep 0.10
            echo -e "$url Done [$pass]\n"
        done
        wait
    done < 01-chapters-04.txt
done
removetrash