|
|
|
from gensim.models import Word2Vec |
|
import gensim |
|
|
|
def tokenize_sentence(sentence):
    """Split a whitespace-delimited sentence into a list of tokens.

    The corpus is assumed to be pre-tokenised, so plain whitespace
    splitting is sufficient here.
    """
    tokens = sentence.split()
    return tokens
|
|
|
# Read the pre-tokenised corpus: one sentence per line.
with open('korpus_malti_tok.txt', 'r', encoding='utf-8') as file:
    sentences = file.read().splitlines()

# One token list per sentence, as gensim expects.
data = [tokenize_sentence(sentence) for sentence in sentences]

# BUG FIX: the original code passed `data` to the Word2Vec constructor,
# which already builds the vocabulary AND runs a full training pass.
# It then called build_vocab() (which resets the model's vocabulary and
# weights, discarding that first pass) and train() again — scanning and
# training over the corpus twice.  Instantiate without a corpus and do a
# single explicit build_vocab + train instead; the resulting model is
# equivalent, at half the cost.
model = Word2Vec(vector_size=300,
                 window=10,
                 min_count=20,       # drop tokens seen fewer than 20 times
                 workers=16,
                 sample=1e-5,        # aggressive downsampling of frequent words
                 alpha=0.03,
                 min_alpha=0.0007,
                 negative=20)        # negative-sampling noise words

model.build_vocab(data, progress_per=1000)
print(model.corpus_count)

model.train(data, total_examples=model.corpus_count, epochs=15)

# Save in the plain-text word2vec format so other tooling can load it.
model.wv.save_word2vec_format('mt_word2vec_2.txt', binary=False)

# BUG FIX: corrected mojibake in the status message ('Émbeddings').
print('Embeddings successfully trained!')
|
|