kirisame committed
Commit 8c941a2 · Parent(s): e2e59fa

Finalize scripts
.gitattributes CHANGED
@@ -1,2 +1,5 @@
 *.txt filter=lfs diff=lfs merge=lfs -text
 *.ERB filter=lfs diff=lfs merge=lfs -text
+*.csv filter=lfs diff=lfs merge=lfs -text
+*.json filter=lfs diff=lfs merge=lfs -text
+
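For reference, the new rules match what the git-lfs CLI would generate (assuming git-lfs is already initialized for this repo):

    git lfs track "*.csv" "*.json"

git lfs track appends the corresponding filter/diff/merge lines to .gitattributes.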
convo-dataset/dataset.py DELETED
@@ -1,36 +0,0 @@
-import abc
-import os
-import json
-
-from tqdm import tqdm
-
-from . import util
-
-class Dataset(abc.ABC):
-    @abc.abstractmethod
-    def name(self):
-        """
-        Returns the name of the dataset
-        """
-        pass
-
-    @abc.abstractmethod
-    def download(self):
-        """
-        Downloads the dataset
-        """
-        pass
-
-    @abc.abstractmethod
-    def clean(self):
-        """
-        Cleans the dataset
-        """
-        pass
-
-    @abc.abstractmethod
-    def size(self):
-        """
-        Returns the size of the dataset
-        """
-        pass
convo-dataset/util.py DELETED
@@ -1,8 +0,0 @@
-import requests
-
-def download_file(url, filename):
-    with requests.get(url, stream=True) as r:
-        r.raise_for_status()
-        with open(filename, 'wb') as f:
-            for chunk in r.iter_content(chunk_size=8192):
-                f.write(chunk)
utils/compile.py ADDED
@@ -0,0 +1,71 @@
+import random
+import glob
+import os
+import json
+
+import argparse
+
+from tqdm import tqdm
+
+import umi_parse
+import cornell_movie_dialogue_parse
+import eratoho_parse
+import valhalla_parse
+import discord_parse
+
+def compile_raw():
+    print('-- Parsing Umineko Data --\n')
+    umi_parse.parse()
+    print('\n-- Parsing Cornell Movie Dialogue Data --\n')
+    cornell_movie_dialogue_parse.parse()
+    print('\n-- Parsing Eratoho Data --\n')
+    eratoho_parse.parse()
+    print('\n-- Parsing Valhalla Data --\n')
+    valhalla_parse.parse()
+    print('\n-- Parsing Discord Data --\n')
+    discord_parse.parse()
+
+def compile_mtf_jax():
+    # compiles the dataset into a single text file to be tokenized by the mtf jax repo
+
+    # get all the files matching *.txt in ./data
+    files = glob.glob('data/*/*.txt')
+    with open('output.txt', 'w', encoding='utf-8') as f:
+        for file in tqdm(files):
+            with open(file, 'r', encoding='utf-8') as f2:
+                f.write(f2.read().replace('\n\n', '\n'))
+            f.write('\n')
+
+    # remove all double newlines
+    lines = ''
+    with open('output.txt', 'r', encoding='utf-8') as f:
+        lines = f.read().replace('\n\n', '\n')
+    with open('output.txt', 'w', encoding='utf-8') as f:
+        f.write(lines)
+
+def compile_gpt_neo():
+    # compile each file into a json lines file
+    files = glob.glob('data/*/*.txt')
+    # shuffle the files
+    random.shuffle(files)
+    with open('output.jsonl', 'w', encoding='utf-8') as f:
+        for file in tqdm(files):
+            with open(file, 'r', encoding='utf-8') as f2:
+                f.write(json.dumps({'text': f2.read().replace('\n\n', '\n')}))
+            f.write('\n')
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Process raw data')
+    parser.add_argument('-d', '--dont_compile', action='store_true', help='dont compile raw data', default=False)
+    parser.add_argument('-m', '--mtf_jax', action='store_true', help='compile raw data into a single text file to be tokenized by the mtf jax repo')
+    parser.add_argument('-g', '--gpt_neo', action='store_true', help='compile raw data into a single json lines file')
+    args = parser.parse_args()
+
+    if not args.dont_compile:
+        compile_raw()
+    if args.mtf_jax:
+        compile_mtf_jax()
+    if args.gpt_neo:
+        compile_gpt_neo()
+
+    print('Done!')
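Typical invocations of the new driver script, assuming the raw sources are already laid out where the individual parsers expect them (a usage sketch, not part of the commit):

    python utils/compile.py          # parse every raw source into ./data
    python utils/compile.py -d -m    # skip re-parsing, build output.txt for the mtf jax tokenizer
    python utils/compile.py -d -g    # skip re-parsing, build output.jsonl for gpt-neo

Note that compile_gpt_neo shuffles the file order before writing, while compile_mtf_jax concatenates in glob order.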
utils/cornell_movie_dialogue_parse.py CHANGED
@@ -1,4 +1,6 @@
 # Parses the Cornell Movie Dialogue Corpus https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html
+import os
+import json
 import argparse
 import re
 import tqdm
@@ -54,7 +56,7 @@ def get_movie_metadata(filename):


 def format_movie_metadata(metadata):
-    return f'[Title: \'{metadata["title"]}\'; Year: {metadata["year"]}; Rating: {metadata["rating"]}; Num_votes: {metadata["num_votes"]}; Genres: {metadata["genres"]};]'
+    return f'[Title: {metadata["title"]}; Genres: {metadata["genres"].replace("[", "").replace("]", "")}]'.replace('\'', '')


 def construct_dialogue(lineIDs, lines):
@@ -65,10 +67,21 @@ def construct_dialogue(lineIDs, lines):
         dialogue.append(lines[lineID])
     return '\n'.join(dialogue)

-
+def dump_stats(length, movie_id):
+    stats = {'Cornell Movie-Dialogue Corpus': {}}
+    if os.path.exists('stats.json'):
+        stats = json.load(open('stats.json', 'r', encoding='utf-8'))
+        if 'Cornell Movie-Dialogue Corpus' not in stats:
+            stats['Cornell Movie-Dialogue Corpus'] = {}
+    stats['Cornell Movie-Dialogue Corpus'][str(movie_id)] = length
+    with open('stats.json', 'w', encoding='utf-8') as f:
+        json.dump(stats, f)
+
 # parses movie_conversations.txt and puts the results in out_filename
-def parse_conversations(conversation_filename, out_filename, lines, movie_metadata):
-    with open(conversation_filename, 'r', encoding='utf-8') as conversation_file, open(out_filename, 'w', encoding='utf-8') as out_file:
+def parse_conversations(conversation_filename, out_dir, lines, movie_metadata, stats=True):
+    if not os.path.exists(out_dir):
+        os.makedirs(out_dir)
+    with open(conversation_filename, 'r', encoding='utf-8') as conversation_file:
         for row in tqdm.tqdm(conversation_file, desc='Constructing dialogue'):
             row = row.split(' +++$+++ ')
             if len(row) != 4:
@@ -80,22 +93,33 @@ def parse_conversations(conversation_filename, out_filename, lines, movie_metadata):
                 dialogue = construct_dialogue(lineIDs, lines)
             except InvalidLineError:
                 continue
-            out_file.write(f'⁂\n{format_movie_metadata(movie_metadata[movieID])}\n⁂\n')
-            out_file.write(f'{dialogue}\n')
-
-
-parser = argparse.ArgumentParser(description='Process Cornell Movie Dialogue Corpus', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser.add_argument('-i', '--in_dir', type=str, help='directory to process', default='.')
-parser.add_argument('-o', '--out_file', type=str, help='file to output', default='cornell_movie_dialogue.txt')
-parser.add_argument('-r', '--raw_name', action='store_true', help='use speaker tags as they appear in the dataset instead of normalized names')
-args = parser.parse_args()
-
-if __name__ == '__main__':
+            with open(f'{out_dir}/{movieID}.txt', 'w', encoding='utf-8') as out_file:
+                out_file.write(f'⁂\n{format_movie_metadata(movie_metadata[movieID])}\n⁂\n')
+                out_file.write(f'{dialogue}\n')
+            if stats:
+                dump_stats(len(dialogue.split('\n')), movieID)
+
+def parse(args = None):
+    if args == None:
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--in_dir', default='./raw/cornell movie-dialogs corpus')
+        parser.add_argument('--out_dir', default='./data/cornell movie-dialogs corpus')
+        parser.add_argument('--raw_name', action='store_true', default=False)
+        parser.add_argument('--stats', action='store_true', default=True)
+        args = parser.parse_args()
     try:
         lines = get_lines(args.in_dir + '/movie_lines.txt', args.raw_name);
         movie_metadata = get_movie_metadata(args.in_dir + '/movie_titles_metadata.txt')
-        parse_conversations(args.in_dir + '/movie_conversations.txt', args.out_file, lines, movie_metadata)
+        parse_conversations(args.in_dir + '/movie_conversations.txt', args.out_dir, lines, movie_metadata)
     except (FileNotFoundError, InvalidFormatError) as e:
         print(e)
         exit(1)

+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Process Cornell Movie Dialogue Corpus', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('-i', '--in_dir', type=str, help='directory to process', default='.')
+    parser.add_argument('-o', '--out_file', type=str, help='file to output', default='cornell_movie_dialogue.txt')
+    parser.add_argument('-r', '--raw_name', action='store_true', help='use speaker tags as they appear in the dataset instead of normalized names')
+    parser.add_argument('-s', '--stats', action='store_true', help='store stats')
+    args = parser.parse_args()
+    parse(None)
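With no arguments, the new parse() falls back to its own ./raw and ./data defaults rather than the __main__ flags, so a bare run (assuming the corpus is extracted under ./raw) writes one file per movie:

    python utils/cornell_movie_dialogue_parse.py
    # reads  ./raw/cornell movie-dialogs corpus/movie_lines.txt, movie_titles_metadata.txt, movie_conversations.txt
    # writes ./data/cornell movie-dialogs corpus/<movieID>.txt and updates stats.json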
utils/discord_parse.py CHANGED
@@ -71,13 +71,15 @@ def dump_stats(s):
     stats = {}
     if os.path.exists('stats.json'):
         stats = json.load(open('stats.json', 'r', encoding='utf-8'))
+        if 'discord' not in stats:
+            stats['discord'] = {}
     else:
         stats = {'discord': {}}
     stats['discord'][s[0]['guild']+' - '+s[0]['name']] = s[1]
     with open('stats.json', 'w', encoding='utf-8') as f:
         json.dump(stats, f)

-if __name__ == '__main__':
+def parse(args=None):
     parser = argparse.ArgumentParser(description='Process Discord JSONs')
     parser.add_argument('-f', '--file', type=str, help='file to process', required=False)
     parser.add_argument('-a', '--anonymous', action='store_true', help='anonymous author')
@@ -85,7 +87,7 @@ if __name__ == '__main__':
     parser.add_argument('-o', '--out_dir', type=str, help='directory to output', required=False, default='./data/discord')
     parser.add_argument('-d', '--dl', type=str, help='json file containing channel IDs to download', required=False)
     parser.add_argument('-t', '--token', type=str, help='discord auth token', required=False)
-    parser.add_argument('-s', '--stats', action='store_true', help='write to stats')
+    parser.add_argument('-s', '--stats', action='store_true', help='write to stats', default=True)
     args = parser.parse_args()

     if args.dl:
@@ -110,4 +112,7 @@
                 dump_stats(s)
             except json.JSONDecodeError:
                 print(f'JSON Validation error in "{file}", skipping.')
-            continue
+            continue
+
+if __name__ == '__main__':
+    parse()
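A sketch of the two entry points after this change (file names and token are placeholders):

    python utils/discord_parse.py -f channel.json -a           # parse one exported channel JSON with anonymized authors
    python utils/discord_parse.py -d channels.json -t TOKEN    # download the listed channel IDs first, then parse

Stats collection now defaults to on, and dump_stats creates the 'discord' key when stats.json exists but lacks it.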
utils/eratoho_parse.py CHANGED
@@ -7,9 +7,6 @@ import re

 from clean import clean

-def format_channel(metadata):
-    return f'[Name: #{metadata["name"]}; Description: {metadata["topic"]}; Guild: {metadata["guild"]}]'
-
 def worker_parse(filename, out_filename=None, **kwargs):
     filtered_lines = []
     with open(filename, 'r', encoding='utf-8') as f:
@@ -72,16 +69,16 @@


 def dump_stats(length, character, index):
-    stats = {}
+    stats = {'eratoho': {}}
     if os.path.exists('stats.json'):
         stats = json.load(open('stats.json', 'r', encoding='utf-8'))
-    else:
-        stats = {'eratoho': {}}
+        if 'eratoho' not in stats:
+            stats['eratoho'] = {}
     stats['eratoho'][str(character)+' - '+str(index)] = length
     with open('stats.json', 'w', encoding='utf-8') as f:
         json.dump(stats, f)

-if __name__ == '__main__':
+def parse():
     parser = argparse.ArgumentParser(description='Process Discord JSONs')
     parser.add_argument('-i', '--in_dir', type=str, help='directory to process', required=False, default='./raw/eratoho')
     parser.add_argument('-o', '--out_dir', type=str, help='directory to output', required=False, default='./data/eratoho')
@@ -102,8 +99,6 @@ if __name__ == '__main__':
         'clownpiece': 'Clownpiece'
     }

-    print('wat')
-
     if args.in_dir and args.out_dir:
         if not os.path.exists(args.out_dir):
             os.mkdir(args.out_dir)
@@ -111,4 +106,7 @@
         for file in files:
             lines = worker_parse(file, out_filename=args.out_dir+'/'+file.split('/')[-1].replace('.ERB', '.txt'), character=names[file.split('/')[-1].split('_')[0]], index=int(file.split('/')[-1].split('_')[1].split('.')[0]))
             if args.stats:
-                dump_stats(lines, names[file.split('/')[-1].split('_')[0]].split('.')[0], file.split('/')[-1].split('_')[1].split('.')[0])
+                dump_stats(lines, names[file.split('/')[-1].split('_')[0]].split('.')[0], file.split('/')[-1].split('_')[1].split('.')[0])
+
+if __name__ == '__main__':
+    parse()
utils/friends_dialogue_parse.py CHANGED
@@ -24,7 +24,7 @@ def valid_csv(dict_reader):


 def format_conversation_metadata(utterance):
-    return f'[Season: {utterance["season_id"]}; Episode: {utterance["episode_id"]}; Conversation: {utterance["scene_id"]};]'
+    return f'[Title: Friends Season {utterance["season_id"]}]'


 def format_utterance(utterance):
utils/hourai_touhoudialogue_parser.py ADDED
@@ -0,0 +1,219 @@
+import os
+import json
+import tqdm
+import re
+
+from clean import clean
+
+authors = {
+    'Alice': 'Alice Margatroid',
+    'Hourai': 'Hourai',
+    'Shanghai': 'Shanghai',
+    'Marisa': 'Marisa Kirisame',
+    'Patchouli': 'Patchouli Knowledge',
+    'Yuuka': 'Yuuka Kazami',
+    'Cirno': 'Cirno',
+    'Remilia': 'Remilia Scarlet',
+    'Koakuma': 'Koakuma',
+    'Medicine': 'Medicine Melancholy',
+    'Shinki': 'Shinki',
+    '???': '???',
+    'Flandre': 'Flandre Scarlet',
+    'Rumia': 'Rumia',
+    'Meiling': 'Hong Meiling',
+    'Sakuya': 'Sakuya Izayoi',
+    'Reimu': 'Reimu Hakurei',
+    'Yukari': 'Yukari Yakumo',
+    'Chen': 'Chen',
+    'Sanae': 'Sanae Kochiya',
+    'Ran': 'Ran Yakumo',
+    'Letty': 'Letty Whiterock',
+    'Lyrica': 'Lyrica Prismriver',
+    'Lunasa': 'Lunasa Prismriver',
+    'Merlin': 'Merlin Prismriver',
+    'Sisters': 'Lyrica, Lunasa, and Merlin',
+    'Youmu': 'Youmu Konpaku',
+    'Yuyuko': 'Yuyuko Saigyouji',
+    'Marisa\nYuyuko': 'Marisa and Yuyuko',
+    'Reimu\nYuyuko': 'Reimu and Yuyuko',
+    'Sakuya\nYuyuko': 'Sakuya and Yuyuko',
+    'Keine': 'Keine Kamishirasawa',
+    'Kaguya': 'Kaguya Houraisan',
+    'Mokou': 'Fujiwara no Mokou',
+    'Wriggle': 'Wriggle Nightbug',
+    'Mystia': 'Mystia Lorelei',
+    'Reisen': 'Reisen Udongein Inaba',
+    'Eirin': 'Eirin Yagokoro',
+    '\xa0': '???',
+    'Aya': 'Aya Shameimaru',
+    'Tewi': 'Tewi Inaba',
+    'Komachi': 'Komachi Onozuka',
+    'Eiki': 'Eiki Shiki',
+    'Kanako': 'Kanako Yasaka',
+    'Suwako': 'Suwako Moriya',
+    'Minoriko': 'Minoriko Aki',
+    'Hina': 'Hina Kagiyama',
+    'Nitori': 'Nitori Kawashiro',
+    'Iku': 'Iku Nagae',
+    'Tenshi': 'Tenshi Hinanawi',
+    'Suika': 'Suika Ibuki',
+    'Koishi': 'Koishi Komeiji',
+    'Satori': 'Satori Komeiji',
+    'Yamame': 'Yamame Kurodani',
+    'Parsee': 'Parsee Mizuhashi',
+    'Yuugi': 'Yuugi Hoshiguma',
+    'Rin': 'Rin Kaenbyou',
+    'Utsuho': 'Utsuho Reiuji',
+    'Kogasa': 'Kogasa Tatara',
+    'Nue': 'Nue Houjuu',
+    'Nazrin': 'Nazrin',
+    'Ichirin': 'Ichirin Kumoi',
+    'Minamitsu': 'Minamitsu Murasa',
+    'Shou': 'Shou Toramaru',
+    'Byakuren': 'Byakuren Hijiri',
+    'Master Big Catfish': 'Master Big Catfish',
+    'Hatate': 'Hatate Himekaidou',
+    'Luna': 'Luna Child',
+    'Star': 'Star Sapphire',
+    'Sunny': 'Sunny Milk',
+    'Mamizou': 'Mamizou Futatsuiwa',
+    'Kyouko': 'Kyouko Kasodani',
+    'Yoshika': 'Yoshika Miyako',
+    'Seiga': 'Seiga Kaku',
+    'Futo': 'Mononobe no Futo',
+    'Miko': 'Toyosatomimi no Miko',
+    'Hijiri': 'Byakuren Hijiri',
+    'Kokoro': 'Hata no Kokoro',
+    '<miko2>': 'Toyosatomimi no Miko\'s Clone',
+    'Benben': 'Benben Tsukumo',
+    'Yatsuhashi': 'Yatsuhashi Tsukumo',
+    'Raiko': 'Raiko Horikawa',
+    'Wakasagihime': 'Wakasagihime',
+    'Sekibanki': 'Sekibanki',
+    'Kagerou': 'Kagerou Imaizumi',
+    'Seija': 'Seija Kijin',
+    'Shinmyoumaru': 'Shinmyoumaru',
+    'Kasen': 'Kasen Ibaraki',
+    'Usami': 'Sumireko Usami',
+    'Doremy': 'Doremy Sweet',
+    'Junko': 'Junko',
+    'Hecatia': 'Hecatia Lapislazuli',
+    'Seiran': 'Seiran',
+    'Ringo': 'Ringo',
+    'Sagume': 'Sagume Kishin',
+    'Clownpiece': 'Clownpiece',
+    'Udonge': 'Reisen Udongein Inaba',
+    'Joon': 'Joon Yorigami',
+    'Shion': 'Shion Yorigami',
+    'Joon\nShion': 'Joon and Shion',
+    'Usami2': 'Sumireko Usami\'s Clone',
+    'Usami3': 'Sumireko Usami\'s Other Clone',
+    'Shinmyoumaru2': 'Shinmyoumaru\'s Clone',
+    'Mai': 'Mai',
+    'Satono': 'Satono Nishida',
+    'Okina': 'Okina Matara',
+    'Eternity': 'Eternity Larva',
+    'Nemuno': 'Nemuno Sakata',
+    'Aunn': 'Aunn Komano',
+    'Narumi': 'Narumi Yatadera',
+    'Sumireko': 'Sumireko Usami',
+    'Kutaka': 'Kutaka Niwatari',
+    'Marisa(Wolf)': 'Marisa Kirisame',
+    'Saki': 'Saki Kurokuma',
+    'Eika': 'Eika Ebisu',
+    'Urumi': 'Urumi Ushizaki',
+    'Yachie': 'Yachie Kicchou',
+    'Mayumi': 'Mayumi Joutouguu',
+    'Keiki': 'Keiki Haniyasushin',
+    'Marisa(Otter)': 'Marisa Kirisame',
+    'Marisa(Eagle)': 'Marisa Kirisame',
+    'Reimu(Wolf)': 'Reimu Hakurei',
+    'Reimu(Otter)': 'Reimu Hakurei',
+    'Reimu(Eagle)': 'Reimu Hakurei',
+    'Youmu(Wolf)': 'Youmu Konpaku',
+    'Youmu(Otter)': 'Youmu Konpaku',
+    'Youmu(Eagle)': 'Youmu Konpaku',
+    'Tsukasa': 'Tsukasa Kudamaki',
+    'Momoyo': 'Momoyo Himemushi',
+    'Misumaru': 'Misumaru Tamatsukuri',
+    'Mike': 'Mike Goutokuji',
+    'Takane': 'Takane Yamashiro',
+    'Sannyo': 'Sannyo Komakusa',
+    'Megumu': 'Megumu Iizunamaru',
+    'Chimata': 'Chimata Tenkyuu',
+}
+
+# regex to remove strings like <balloon$a11x3>
+exp = re.compile(r'<.*?>')
+
+def worker_parse_hourai(filename, out_filename=None):
+    # parse csv file
+    messages = []
+
+    with open(filename, 'r', encoding='utf-8') as f:
+        data = f.read()
+        lines = data.split('\n')
+        for line in lines:
+            if line.startswith('name,line'):
+                continue
+            line = line.split(',')
+            if len(line) < 2:
+                continue
+            author = line[0]
+            text = clean(line[1].replace('\"', ''))
+            messages.append(f'{authors[author]}: {text}')
+
+    # write to file
+    with open(out_filename, 'w', encoding='utf-8') as f:
+        txt = '\n'.join(messages)
+        f.write('⁂\n[Title: Hourai; Genre: Touhou Project]\n⁂\n')
+        f.write(txt)
+
+    return len(messages)
+
+def worker_parse_dialogue(filename, out_filename=None):
+    # open json
+    messages = []
+    with open(filename, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    for message in data:
+        # first, check if author is in dict values
+        author = message['Name']
+        # iterate over values
+        for key, value in authors.items():
+            if value == author:
+                author = key
+                break
+        author = authors[author]
+        text = clean(message['Text']).replace('&#160;?"@', '').replace('&amp;', '').replace('&#160;', '').replace('\ ', '').replace('\. ', '\n')
+        for key, value in authors.items():
+            text = text.replace(key, value)
+        text = exp.sub('', text)
+        messages.append(f'{author}: {text}')
+
+    # write to file
+    with open(out_filename, 'w', encoding='utf-8') as f:
+        txt = '\n'.join(messages)
+        f.write('⁂\n[Title: The Touhou Project]\n⁂\n')
+        f.write(txt.replace('\n\n', '\n'))
+
+    return len(messages)
+
+def dump_stats(length, name):
+    stats = {'misc': {}}
+    if os.path.exists('stats.json'):
+        stats = json.load(open('stats.json', 'r', encoding='utf-8'))
+        if 'misc' not in stats:
+            stats['misc'] = {}
+    stats['misc'][name] = length
+    with open('stats.json', 'w', encoding='utf-8') as f:
+        json.dump(stats, f)
+
+def parse():
+    if not os.path.exists('./data/misc'):
+        os.mkdir('./data/misc')
+    dump_stats(worker_parse_hourai('./raw/misc/lines.csv', './data/misc/lines.txt'), 'hourai')
+    dump_stats(worker_parse_dialogue('./raw/misc/dialogue.json', './data/misc/dialogue.txt'), 'dialogue')
+
+if __name__ == '__main__':
+    parse()
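Both workers emit the dataset's usual framing: a ⁂-delimited metadata header followed by 'Speaker: text' lines. An illustrative (not actual) excerpt of ./data/misc/lines.txt:

    ⁂
    [Title: Hourai; Genre: Touhou Project]
    ⁂
    Marisa Kirisame: ...
    Reimu Hakurei: ...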
utils/stats.py CHANGED
@@ -13,10 +13,14 @@ if __name__ == '__main__':
     print('-- Convo1 Dataset Stats --')
     print(f'Datasets Available: {len(stats)}')

+    total_total = 0
     for dataset in stats.keys():
         total = 0
         print(f'\n-- Dataset Name: {dataset} --')
         for guild in sorted(stats[dataset].items(), key=lambda x: x[1], reverse=True):
             print(f'{guild[0]}: {guild[1]}')
             total += guild[1]
-        print(f'\nTotal Messages in {dataset}: {total}\n')
+        print(f'\nTotal Messages in {dataset}: {total}\n')
+        total_total += total
+
+    print(f'\nTotal Messages in Convo Dataset: {total_total}\n')
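stats.py sums whatever the parsers dumped into stats.json, which after a full run has roughly this shape (keys and counts illustrative):

    {
        "discord": {"SomeGuild - general": 1234},
        "eratoho": {"Cirno - 0": 567},
        "Cornell Movie-Dialogue Corpus": {"m42": 89},
        "misc": {"hourai": 210}
    }

Each leaf value is a line/message count, so the per-dataset totals and the new grand total are plain sums over the leaves.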
utils/{07_parse.py → umi_parse.py} RENAMED
@@ -1,4 +1,4 @@
-import parse
+import parse as _parse
 import os
 import json

@@ -22,7 +22,7 @@ def worker_parse(filename, out_filename=None, stats=True, index=0):

     jap_characters = ['。', '「', '」', '?', '!', '、', '…', 'ー']

-    author = parse.parse('\ufeff== {0} ==\n', lines[0])[0]
+    author = _parse.parse('\ufeff== {0} ==\n', lines[0])[0]

     with open(out_filename, 'w') as f:
         for line in lines:
@@ -33,7 +33,7 @@ def worker_parse(filename, out_filename=None, stats=True, index=0):
                 continue
             if line.startswith('\n==') or line.startswith('=='):
                 try:
-                    author = parse.parse('== {0} ==\n', line)[0]
+                    author = _parse.parse('== {0} ==\n', line)[0]
                 except:
                     author = 'Narrator'
                 continue
@@ -72,13 +72,19 @@ def worker_parse(filename, out_filename=None, stats=True, index=0):
         f.write(metadata)
         f.write(clean_07(lines))

-if __name__ == '__main__':
+def parse():
     files = [
         './raw/visualnovel/ep1-4_EN__JP.txt',
         './raw/visualnovel/ep5-8_EN__JP.txt',
     ]

     output_files = [ i.replace('raw', 'data') for i in files ]
+
+    if not os.path.exists('./data/visualnovel'):
+        os.mkdir('./data/visualnovel')

     for i in range(len(files)):
-        worker_parse(files[i], output_files[i], stats=True, index=i)
+        worker_parse(files[i], output_files[i], stats=True, index=i)
+
+if __name__ == '__main__':
+    parse()
utils/valhalla_parse.py CHANGED
@@ -31,5 +31,8 @@ def worker_parse(filename, out_filename, stats=True):
         dump_stats(len(msgs), day)


+def parse():
+    worker_parse('./raw/visualnovel/valhalla.txt', './data/visualnovel/valhalla.txt')
+
 if __name__ == '__main__':
-    worker_parse('./raw/visualnovel/valhalla.txt', './data/visualnovel/valhalla.txt')
+    parse()