AAOBA committed on
Commit
52c1378
β€’
1 Parent(s): 17bf106

updated info and app

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. info.md +2 -6
app.py CHANGED
@@ -133,7 +133,7 @@ def tts_fn(
133
  if __name__ == "__main__":
134
  parser = argparse.ArgumentParser()
135
  parser.add_argument(
136
- "-m", "--model", default="./models/G_180K.pth", help="path of your model"
137
  )
138
  parser.add_argument(
139
  "-c",
 
133
  if __name__ == "__main__":
134
  parser = argparse.ArgumentParser()
135
  parser.add_argument(
136
+ "-m", "--model", default="./models/G_270K.pth", help="path of your model"
137
  )
138
  parser.add_argument(
139
  "-c",
info.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
  ---------------
4
 
5
- 📅 2023.10.19 📅
6
 
7
- - Updated current Generator to 180K steps' checkpoint
8
 
9
  ------------------
10
 
@@ -57,7 +57,3 @@
57
  ✂ This work **clips gradient value to 10** ✂.
58
 
59
  ⚠ Finetuning the model on **single-speaker datasets separately** will definitely reach better result than training on **a huge dataset comprising of many speakers**. Sharing a same model leads to unexpected mixing of the speaker's voice line. ⚠
60
-
61
- ### TODO:
62
-
63
- πŸ“… Train one more cycle using text preprocessor provided by [AkitoP](https://huggingface.co/AkitoP) with cleaner text inputs and training data of Mejiro Ramonu. πŸ“…
 
2
 
3
  ---------------
4
 
5
+ 📅 2023.10.24 📅
6
 
7
+ - Updated current Generator to 270K steps' checkpoint
8
 
9
  ------------------
10
 
 
57
  ✂ This work **clips gradient value to 10** ✂.
58
 
59
  ⚠ Finetuning the model on **single-speaker datasets separately** will definitely reach better result than training on **a huge dataset comprising of many speakers**. Sharing a same model leads to unexpected mixing of the speaker's voice line. ⚠