pszemraj committed on
Commit
ad9ffae
1 Parent(s): a3cabc9

Upload dataset-preprocessor.py

Files changed (1)
  1. dataset-preprocessor.py +54 -0
dataset-preprocessor.py ADDED
@@ -0,0 +1,54 @@
+ from datasets import load_dataset
+ import re
+ import random
+
+ def split_into_paragraphs(text):
+     # Split by markdown headers or double newlines
+     paragraphs = re.split(r'\n\n|(?=^#)', text, flags=re.MULTILINE)
+     return [p.strip() for p in paragraphs if p.strip()]
+
+ def create_input_output_pairs(example):
+     paragraphs = example['paragraphs']
+     n_paragraphs = len(paragraphs)
+
+     # Pick about half of the paragraphs for the input; keep at least one for the target when possible
+     n_input = min(max(1, random.randint(n_paragraphs // 2 - 1, n_paragraphs // 2 + 1)), max(1, n_paragraphs - 1))
+
+     input_paragraphs = paragraphs[:n_input]
+     output_paragraphs = paragraphs[n_input:]
+
+     return {
+         'inputs': ' '.join(input_paragraphs),
+         'targets': ' '.join(output_paragraphs)
+     }
+
+ def preprocess_dataset(dataset_name, text_column='text'):
+     # Load the dataset
+     dataset = load_dataset(dataset_name)
+
+     # Split the text column into paragraphs
+     dataset = dataset.map(
+         lambda example: {'paragraphs': split_into_paragraphs(example[text_column])},
+         remove_columns=[text_column]
+     )
+
+     # Create input-output pairs
+     preprocessed_dataset = dataset.map(
+         create_input_output_pairs,
+         remove_columns=['paragraphs']
+     )
+
+     return preprocessed_dataset
+
+ # Usage example
+ if __name__ == "__main__":
+     # Replace 'your_dataset' with the actual dataset name
+     dataset_name = 'your_dataset'
+
+     preprocessed_dataset = preprocess_dataset(dataset_name)
+
+     # Print some examples
+     print(preprocessed_dataset['train'][:5])
+
+     # Save the preprocessed dataset
+     preprocessed_dataset.save_to_disk("preprocessed_dataset")
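
After running the script, the saved output can be reloaded for a quick sanity check. A minimal sketch, assuming the script above has already run and written its output to the "preprocessed_dataset" directory and that the dataset has a "train" split, as in the usage example:

# Minimal sketch: reload the saved dataset and spot-check one input/target pair.
# Assumes preprocess_dataset() above has already run and saved its output
# to the "preprocessed_dataset" directory (hypothetical local path).
from datasets import load_from_disk

reloaded = load_from_disk("preprocessed_dataset")
example = reloaded["train"][0]
print("INPUT:\n", example["inputs"][:300])
print("TARGET:\n", example["targets"][:300])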