# leaves-of-grass / generate_data.py
import re
import json
def clean_text_file(input_file, output_file):
"""
Cleans the text file by:
- Removing lines that start with 'BOOK' or contain only numbers,
- Substituting two leading spaces with a single tab,
- Normalizing all leading whitespace to no more than two tabs of indentation,
- Splitting the text into chunks separated by at least 3 newlines.
"""
with open(input_file, "r", encoding="utf-8") as infile:
lines = infile.readlines()
cleaned_lines = []
for line in lines:
stripped_line = line.strip()
if stripped_line.startswith("BOOK") or stripped_line.isdigit():
continue
line = re.sub(r"^ ", "\t", line)
line = re.sub(r"^[\t ]+", normalize_whitespace, line)
# Remove a single leading tab if present
if line.startswith("\t"):
line = line[1:]
cleaned_lines.append(line)
cleaned_text = "".join(cleaned_lines)
chunks = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]
with open(output_file, "w", encoding="utf-8") as outfile:
outfile.writelines(cleaned_lines)
return chunks
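# Illustrative sketch of the cleaning rules above (hypothetical input, not run
# here): a heading such as "BOOK I." or a bare stanza number like "3" is
# dropped; leading whitespace is normalized to tabs (at most two) and the first
# tab is then stripped, so a two-space indent ends up flush left; and a run of
# three newlines separates the chunks/poems that are returned.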
def normalize_whitespace(match):
    # Treat a tab as two spaces, then cap the indentation at two tabs
    whitespace = match.group(0).replace("\t", "  ")
    tab_count = len(whitespace) // 2
    return "\t" * min(tab_count, 2)
def chunk_poem_by_lines(lines, max_chunk_lines, overlap_lines):
"""
Split a poem into chunks of manageable size, preserving lines and overlap for continuity.
"""
chunks = []
for i in range(0, len(lines), max_chunk_lines - overlap_lines):
chunk = lines[i : i + max_chunk_lines]
chunks.append(chunk)
return chunks
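# A small illustration of the overlap behaviour (hypothetical lines, safe to
# run): with max_chunk_lines=3 and overlap_lines=1 the window advances two
# lines at a time, so consecutive chunks share one boundary line.
_demo_lines = ["one\n", "two\n", "three\n", "four\n", "five\n"]
assert chunk_poem_by_lines(_demo_lines, max_chunk_lines=3, overlap_lines=1) == [
    ["one\n", "two\n", "three\n"],
    ["three\n", "four\n", "five\n"],
    ["five\n"],
]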
def generate_training_pairs(
    poems, max_context_lines=10, max_chunk_lines=20, overlap_lines=10
):
    """
    Generate input-output training pairs for poetry generation, using line-based chunking.

    max_context_lines: The maximum number of lines to use as context for the next line.
    max_chunk_lines: The maximum number of lines in a single chunk.
    overlap_lines: The number of lines to overlap between chunks for continuity.
    """
    training_data = []
    for poem in poems:
        lines = poem.splitlines(keepends=True)

        # Chunk the poem into manageable pieces
        chunks = chunk_poem_by_lines(
            lines, max_chunk_lines=max_chunk_lines, overlap_lines=overlap_lines
        )

        # Within each chunk, every line after the first becomes a target,
        # with up to max_context_lines preceding lines as its input
        for chunk in chunks:
            for i in range(1, len(chunk)):
                input_lines = "".join(chunk[max(0, i - max_context_lines) : i])
                output_line = chunk[i]  # The next line to predict
                training_data.append({"input": input_lines, "output": output_line})

    return training_data
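# A minimal sketch of the resulting pair format (hypothetical poem, safe to
# run): with a context window of two lines, every line after the first becomes
# an "output" and the preceding lines (up to the window) become its "input".
_demo_pairs = generate_training_pairs(["a\nb\nc\n"], max_context_lines=2)
assert _demo_pairs == [
    {"input": "a\n", "output": "b\n"},
    {"input": "a\nb\n", "output": "c\n"},
]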
# Example usage
poems = clean_text_file("leaves-of-grass-original.txt", "leaves-of-grass-cleaned.txt")
# 400 poems
print(f"Number of poems: {len(poems)}")
# poems = poems[25:26] # For testing purposes. Long poem.
# Compact Poetry Model
max_context_lines = 5
max_chunk_lines = 10
overlap_lines = 2
"""
# Narrative Poetry Model
max_context_lines = 10
max_chunk_lines = 20
overlap_lines = 5
"""
"""
# Epic or Free Verse Poetry
max_context_lines = 20
max_chunk_lines = 50
overlap_lines = 10
"""
training_data = generate_training_pairs(
    poems,
    max_context_lines=max_context_lines,
    max_chunk_lines=max_chunk_lines,
    overlap_lines=overlap_lines,
)
print(f"Number of training pairs: {len(training_data)}")
# Write the training data to a file (UTF-8, since ensure_ascii=False keeps
# non-ASCII characters in the output)
with open("train.json", "w", encoding="utf-8") as outfile:
    json.dump(training_data, outfile, indent=2, ensure_ascii=False)