import re
import json


def clean_text_file(input_file, output_file):
    """
    Cleans the text file by:
    - Removing lines that start with 'BOOK' or contain only numbers,
    - Substituting two leading spaces with a single tab,
    - Normalizing all leading whitespace to no more than two tabs of indentation,
    - Splitting the text into chunks separated by at least 3 newlines.
    """
    with open(input_file, "r", encoding="utf-8") as infile:
        lines = infile.readlines()

    cleaned_lines = []

    for line in lines:
        stripped_line = line.strip()
        # Skip section headers ('BOOK ...') and lines that are bare numbers.
        if stripped_line.startswith("BOOK") or stripped_line.isdigit():
            continue

        # Turn two leading spaces into a tab, then cap the remaining leading
        # whitespace at two tabs of indentation.
        line = re.sub(r"^  ", "\t", line)
        line = re.sub(r"^[\t ]+", normalize_whitespace, line)

        # Strip one leading tab so the shallowest lines sit flush left.
        if line.startswith("\t"):
            line = line[1:]
        cleaned_lines.append(line)

    cleaned_text = "".join(cleaned_lines)
    # Poems are separated by runs of at least three newlines.
    chunks = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]

    with open(output_file, "w", encoding="utf-8") as outfile:
        outfile.writelines(cleaned_lines)

    return chunks
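
# Illustrative example of the cleaning rules above: an input file containing
#
#     BOOK I.  INSCRIPTIONS
#     1
#       One's-self I sing, a simple separate person,
#
# loses the 'BOOK' header and the bare number, and the remaining line's
# two-space indent becomes one tab, which is then stripped away as the
# base indentation level.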


def normalize_whitespace(match):
    """
    Regex-substitution callback: collapses a run of leading whitespace into
    at most two tabs, counting each pair of spaces (or each tab) as one tab
    stop.
    """
    whitespace = match.group(0).replace("\t", "  ")
    tab_count = len(whitespace) // 2
    return "\t" * min(tab_count, 2)
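
# Quick self-check (illustrative): six leading spaces count as three tab
# stops, and the cap reduces the result to two tabs.
assert re.sub(r"^[\t ]+", normalize_whitespace, "      x") == "\t\tx"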


def chunk_poem_by_lines(lines, max_chunk_lines, overlap_lines):
    """
    Split a poem into chunks of manageable size, preserving whole lines and
    overlapping consecutive chunks for continuity.
    """
    chunks = []
    # Advance by (max_chunk_lines - overlap_lines) so each chunk repeats the
    # last overlap_lines lines of the previous one; the step must be positive,
    # i.e. max_chunk_lines has to exceed overlap_lines.
    for i in range(0, len(lines), max_chunk_lines - overlap_lines):
        chunk = lines[i : i + max_chunk_lines]
        chunks.append(chunk)
    return chunks
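
# Quick self-check (illustrative): five lines chunked three at a time with
# one line of overlap start at indices 0, 2, and 4.
assert chunk_poem_by_lines(list("abcde"), max_chunk_lines=3, overlap_lines=1) == [
    ["a", "b", "c"],
    ["c", "d", "e"],
    ["e"],
]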


def generate_training_pairs(
    poems, max_context_lines=10, max_chunk_lines=20, overlap_lines=10
):
    """
    Generate input-output training pairs for poetry generation, using
    line-based chunking.

    max_context_lines: The maximum number of lines to use as context for
        the next line.
    max_chunk_lines: The maximum number of lines in a single chunk.
    overlap_lines: The number of lines to overlap between chunks for
        continuity.
    """
    training_data = []

    for poem in poems:
        lines = poem.splitlines(keepends=True)
        chunks = chunk_poem_by_lines(
            lines, max_chunk_lines=max_chunk_lines, overlap_lines=overlap_lines
        )

        for chunk in chunks:
            # Every line after the first becomes a target, conditioned on up
            # to max_context_lines of the lines that precede it.
            for i in range(1, len(chunk)):
                input_lines = "".join(chunk[max(0, i - max_context_lines) : i])
                output_line = chunk[i]
                training_data.append({"input": input_lines, "output": output_line})

    return training_data
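
# Quick self-check (illustrative): each pair maps up to max_context_lines
# preceding lines to the single line that follows them.
assert generate_training_pairs(["first line\nsecond line\n"]) == [
    {"input": "first line\n", "output": "second line\n"}
]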


poems = clean_text_file("leaves-of-grass-original.txt", "leaves-of-grass-cleaned.txt")

print(f"Number of poems: {len(poems)}")

# Short Poetry Model (active configuration)
max_context_lines = 5
max_chunk_lines = 10
overlap_lines = 2

"""
# Narrative Poetry Model
max_context_lines = 10
max_chunk_lines = 20
overlap_lines = 5
"""

"""
# Epic or Free Verse Poetry
max_context_lines = 20
max_chunk_lines = 50
overlap_lines = 10
"""


training_data = generate_training_pairs(
    poems,
    max_context_lines=max_context_lines,
    max_chunk_lines=max_chunk_lines,
    overlap_lines=overlap_lines,
)

print(f"Number of training pairs: {len(training_data)}")

# encoding="utf-8" is needed because ensure_ascii=False writes raw Unicode
# characters instead of \u escapes.
with open("train.json", "w", encoding="utf-8") as outfile:
    json.dump(training_data, outfile, indent=2, ensure_ascii=False)
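
# Sanity check (a minimal sketch): re-read the file we just wrote and
# confirm every record carries exactly the "input" and "output" fields.
with open("train.json", "r", encoding="utf-8") as infile:
    pairs = json.load(infile)
assert all(set(pair) == {"input", "output"} for pair in pairs)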
|
|