{ "paper_id": "O15-1016", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T08:10:06.523192Z" }, "title": "Word Co-occurrence Augmented Topic Model in Short Text", "authors": [ { "first": "Guan-Bin", "middle": [], "last": "\u9673\u51a0\u658c", "suffix": "", "affiliation": { "laboratory": "", "institution": "Cheng Kung University", "location": {} }, "email": "" }, { "first": "", "middle": [], "last": "Chen", "suffix": "", "affiliation": { "laboratory": "", "institution": "Cheng Kung University", "location": {} }, "email": "gbchen@ikmlab.csie.ncku.edu.tw" }, { "first": "Hung-Yu", "middle": [], "last": "\u9ad8\u5b8f\u5b87", "suffix": "", "affiliation": { "laboratory": "", "institution": "Cheng Kung University", "location": {} }, "email": "" }, { "first": "", "middle": [], "last": "Kao", "suffix": "", "affiliation": { "laboratory": "", "institution": "Cheng Kung University", "location": {} }, "email": "" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "\u5728\u7db2\u969b\u7db2\u8def\u4e0a\uff0c\u5927\u91cf\u7684\u6587\u5b57\u4f7f\u5f97\u4eba\u5011\u96e3\u4ee5\u5728\u6709\u9650\u7684\u77ed\u6642\u9593\u5167\u52a0\u4ee5\u5438\u6536\u4e26\u4e86\u89e3\uff0c\u4e3b\u984c\u6a21\u578b (\u5982 pLSA \u8207 LDA) \u88ab\u63d0\u51fa\u4f86\u8a66\u5716\u5c0d\u9019\u4e9b\u9577\u6587\u4ef6\u505a\u6458\u8981\u8207\u7e3d\u7d50\u6210\u5e7e\u500b\u4ee3\u8868\u6027\u7684\u4e3b\u984c\u5b57\u3002 \u8fd1\u5e74\u4f86\uff0c\u96a8\u8457\u793e\u7fa4\u7db2\u8def\u7684\u8208\u8d77(\u5982 Twitter) \uff0c\u4f7f\u5f97\u77ed\u6587\u4ef6\u7684\u6578\u91cf\u4e5f\u96a8\u4e4b\u8b8a\u5927\uff0c\u5728\u70ba\u6578\u773e \u591a\u7684\u77ed\u6587\u672c\u4e2d\u5982\u4f55\u826f\u597d\u5730\u505a\u6458\u8981\u8207\u6574\u7406\u4e5f\u8b8a\u6210\u4e00\u5927\u8ab2\u984c\uff0c\u56e0\u800c\u6709\u4e86\u61c9\u7528\u4e3b\u984c\u6a21\u578b\u65bc\u77ed\u6587 \u672c\u7684\u60f3\u6cd5\u3002\u7136\u800c\u76f4\u63a5\u61c9\u7528\u4e3b\u984c\u6a21\u578b\u5230\u9019\u4e9b\u77ed\u6587\u672c\u4e0a\uff0c\u7531\u65bc\u77ed\u6587\u672c\u4e2d\u5b57\u6578\u4e0d\u8db3\u4ee5\u7528\u4f86\u826f\u597d \u5730\u7d71\u8a08\u8a72\u4e3b\u984c\u7684\u5b57\u8a5e\u5171\u73fe\u7279\u6027\uff0c\u6240\u4ee5\u7d93\u5e38\u6703\u5f97\u5230\u4e00\u4e9b\u76f8\u5e72\u5ea6\u4f4e\u7684\u4e3b\u984c\u3002\u6839\u64da\u6211\u5011\u56de\u9867\u7684 \u6587\u737b\uff0c\u96d9\u8a5e\u4e3b\u984c\u6a21\u578b(Bi-term topic model, BTM)\u900f\u904e\u6574\u500b\u8cc7\u6599\u96c6\u4e2d\u7684\u96d9\u8a5e(Bi-term) \uff0c \u76f4\u63a5\u5c0d\u5b57\u8a5e\u5171\u73fe\u7279\u6027\u505a\u5efa\u6a21\uff0c\u80fd\u6709\u6548\u6539\u5584\u55ae\u4e00\u6587\u4ef6\u4e2d\u5b57\u6578\u4e0d\u8db3\u7684\u554f\u984c\u3002\u7136\u800c BTM \u65bc\u7d71 \u8a08\u904e\u7a0b\u4e2d\u53ea\u8003\u616e\u96d9\u8a5e\u7684\u5171\u73fe\u983b\u7387\uff0c\u5c0e\u81f4\u7522\u751f\u7684\u4e3b\u984c\u5f88\u5bb9\u6613\u6703\u88ab\u55ae\u4e00\u9ad8\u983b\u5b57\u6240\u4e3b\u5c0e\u3002 \u672c\u7814\u7a76\u63d0\u51fa\u57fa\u65bc\u5b57\u8a5e\u5171\u73fe\u6027\u7684\u4e3b\u984c\u6a21\u578b\u4f86\u6539\u5584 BTM \u4e2d\u4e3b\u984c\u88ab\u9ad8\u983b\u5b57\u6240\u4e3b\u5c0e\u7684\u554f\u984c\u3002\u5c0d \u65bc BTM \u7684\u554f\u984c\uff0c\u6211\u5011\u63d0\u51fa\u7684 PMI-\u03b2-BTM \u65b9\u6cd5\u5c0e\u5165\u9ede\u5c0d\u9ede\u4ea4\u4e92\u8cc7\u8a0a(pointwise mutual information, PMI)\u5206\u6578\u65bc\u5176\u4e3b\u984c\u5b57\u7684\u4e8b\u524d\u6a5f\u7387\u5206\u5e03\u4e2d\uff0c\u4f86\u964d\u4f4e\u55ae\u4e00\u9ad8\u983b\u5b57\u7684\u5f71\u97ff\u3002\u5be6\u9a57 \u7d50\u679c\u986f\u793a\uff0c\u6211\u5011\u7684 
PMI-\u03b2-BTM \u7121\u8ad6\u662f\u5728\u6b63\u898f\u7684\u65b0\u805e\u6a19\u984c\u4e0a\u6216\u662f\u5728\u96dc\u8a0a\u9ad8\u7684 tweet \u4e0a\u7686 \u6709\u8f03\u597d\u7684\u4e3b\u984c\u6027\u3002\u53e6\u5916\uff0c\u6211\u5011\u6240\u63d0\u51fa\u7684\u65b9\u6cd5\u4e0d\u9700\u4fee\u6539\u539f\u59cb\u4e3b\u984c\u6a21\u578b\uff0c\u56e0\u6b64\u53ef\u76f4\u63a5\u61c9\u7528\u65bc BTM \u7684\u884d\u751f\u6a21\u578b\u4e0a\u3002 \u95dc\u9375\u8a5e\uff1a\u77ed\u6587\u672c\uff0c\u4e3b\u984c\u6a21\u578b\uff0c\u6587\u4ef6\u5206\u985e\uff0c\u6587\u4ef6\u5206\u7fa4", "pdf_parse": { "paper_id": "O15-1016", "_pdf_hash": "", "abstract": [ { "text": "\u5728\u7db2\u969b\u7db2\u8def\u4e0a\uff0c\u5927\u91cf\u7684\u6587\u5b57\u4f7f\u5f97\u4eba\u5011\u96e3\u4ee5\u5728\u6709\u9650\u7684\u77ed\u6642\u9593\u5167\u52a0\u4ee5\u5438\u6536\u4e26\u4e86\u89e3\uff0c\u4e3b\u984c\u6a21\u578b (\u5982 pLSA \u8207 LDA) \u88ab\u63d0\u51fa\u4f86\u8a66\u5716\u5c0d\u9019\u4e9b\u9577\u6587\u4ef6\u505a\u6458\u8981\u8207\u7e3d\u7d50\u6210\u5e7e\u500b\u4ee3\u8868\u6027\u7684\u4e3b\u984c\u5b57\u3002 \u8fd1\u5e74\u4f86\uff0c\u96a8\u8457\u793e\u7fa4\u7db2\u8def\u7684\u8208\u8d77(\u5982 Twitter) \uff0c\u4f7f\u5f97\u77ed\u6587\u4ef6\u7684\u6578\u91cf\u4e5f\u96a8\u4e4b\u8b8a\u5927\uff0c\u5728\u70ba\u6578\u773e \u591a\u7684\u77ed\u6587\u672c\u4e2d\u5982\u4f55\u826f\u597d\u5730\u505a\u6458\u8981\u8207\u6574\u7406\u4e5f\u8b8a\u6210\u4e00\u5927\u8ab2\u984c\uff0c\u56e0\u800c\u6709\u4e86\u61c9\u7528\u4e3b\u984c\u6a21\u578b\u65bc\u77ed\u6587 \u672c\u7684\u60f3\u6cd5\u3002\u7136\u800c\u76f4\u63a5\u61c9\u7528\u4e3b\u984c\u6a21\u578b\u5230\u9019\u4e9b\u77ed\u6587\u672c\u4e0a\uff0c\u7531\u65bc\u77ed\u6587\u672c\u4e2d\u5b57\u6578\u4e0d\u8db3\u4ee5\u7528\u4f86\u826f\u597d \u5730\u7d71\u8a08\u8a72\u4e3b\u984c\u7684\u5b57\u8a5e\u5171\u73fe\u7279\u6027\uff0c\u6240\u4ee5\u7d93\u5e38\u6703\u5f97\u5230\u4e00\u4e9b\u76f8\u5e72\u5ea6\u4f4e\u7684\u4e3b\u984c\u3002\u6839\u64da\u6211\u5011\u56de\u9867\u7684 \u6587\u737b\uff0c\u96d9\u8a5e\u4e3b\u984c\u6a21\u578b(Bi-term topic model, BTM)\u900f\u904e\u6574\u500b\u8cc7\u6599\u96c6\u4e2d\u7684\u96d9\u8a5e(Bi-term) \uff0c \u76f4\u63a5\u5c0d\u5b57\u8a5e\u5171\u73fe\u7279\u6027\u505a\u5efa\u6a21\uff0c\u80fd\u6709\u6548\u6539\u5584\u55ae\u4e00\u6587\u4ef6\u4e2d\u5b57\u6578\u4e0d\u8db3\u7684\u554f\u984c\u3002\u7136\u800c BTM \u65bc\u7d71 \u8a08\u904e\u7a0b\u4e2d\u53ea\u8003\u616e\u96d9\u8a5e\u7684\u5171\u73fe\u983b\u7387\uff0c\u5c0e\u81f4\u7522\u751f\u7684\u4e3b\u984c\u5f88\u5bb9\u6613\u6703\u88ab\u55ae\u4e00\u9ad8\u983b\u5b57\u6240\u4e3b\u5c0e\u3002 \u672c\u7814\u7a76\u63d0\u51fa\u57fa\u65bc\u5b57\u8a5e\u5171\u73fe\u6027\u7684\u4e3b\u984c\u6a21\u578b\u4f86\u6539\u5584 BTM \u4e2d\u4e3b\u984c\u88ab\u9ad8\u983b\u5b57\u6240\u4e3b\u5c0e\u7684\u554f\u984c\u3002\u5c0d \u65bc BTM \u7684\u554f\u984c\uff0c\u6211\u5011\u63d0\u51fa\u7684 PMI-\u03b2-BTM \u65b9\u6cd5\u5c0e\u5165\u9ede\u5c0d\u9ede\u4ea4\u4e92\u8cc7\u8a0a(pointwise mutual information, PMI)\u5206\u6578\u65bc\u5176\u4e3b\u984c\u5b57\u7684\u4e8b\u524d\u6a5f\u7387\u5206\u5e03\u4e2d\uff0c\u4f86\u964d\u4f4e\u55ae\u4e00\u9ad8\u983b\u5b57\u7684\u5f71\u97ff\u3002\u5be6\u9a57 \u7d50\u679c\u986f\u793a\uff0c\u6211\u5011\u7684 PMI-\u03b2-BTM \u7121\u8ad6\u662f\u5728\u6b63\u898f\u7684\u65b0\u805e\u6a19\u984c\u4e0a\u6216\u662f\u5728\u96dc\u8a0a\u9ad8\u7684 tweet \u4e0a\u7686 
", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Topic models learn topics based on the amount of word co-occurrence in documents, where word co-occurrence describes how often two words appear together. BTM discovers topics from the bi-terms in the whole corpus to overcome the lack of local word co-occurrence information in short documents. However, BTM tends to over-represent common words because it identifies word co-occurrence only by the bi-term frequency at the corpus level. Thus, we propose a PMI-β priors method for BTM. Our PMI-β priors method adjusts the co-occurrence score to mitigate the common-word problem. Next, we describe our PMI-β priors method in detail.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "However, considering only the corpus-level bi-term frequency generates topics that contain too many common words. To solve this problem, we adopt pointwise mutual information (PMI) [9], since the PMI score not only considers the co-occurrence frequency of two words but also normalizes it by the frequency of each single word. We therefore apply PMI scores in the original BTM. A suitable way to apply PMI scores is to modify the priors of BTM, because modifying the priors does not increase the complexity of the generative model and is very intuitive. There are two kinds of priors in BTM: the α prior and the β priors. The α prior is a corpus-topic bias that is independent of the data, while the β priors are topic-word biases that are independent of the data. Applying the PMI score to the β priors is the natural choice, because we can adjust the degree of word co-occurrence by modifying the distributions in the β priors. For example, assume a topic contains the three words \"pen\", \"apple\", and \"banana\". With symmetric priors we set <0.1, 0.1, 0.1>, which means no bias among the three words, whereas we can apply <0.1, 0.5, 0.5> to enhance the word co-occurrence of \"apple\" and \"banana\"; the topic will then prefer to put \"apple\" and \"banana\" together in the topic sampling step. Table 1 shows the clustering results on the Twitter2011 dataset when the number of topics is set to 50. As expected, with the symmetric priors <0.1>, BTM is better than the mixture of unigrams model and LDA obtains the worst result. When we apply the PMI-β priors, we obtain a better result than BTM with symmetric priors. Moreover, our baseline method, PCA-β, is better than the original LDA because the PCA-β prior makes up for the lack of global word co-occurrence information in the original LDA.", "cite_spans": [ { "start": 200, "end": 203, "text": "[9]", "ref_id": "BIBREF8" } ], "ref_spans": [ { "start": 1308, "end": 1315, "text": "Table 1", "ref_id": "TABREF0" } ], "eq_spans": [], "section": "", "sec_num": null },
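To make the prior construction concrete, the following is a minimal Python sketch of how corpus-level PMI scores could be folded into asymmetric β priors for BTM. It is not the authors' released implementation: the helper names, the base value 0.1, the boost of 0.4, and the max-aggregation of each word's positive PMI scores are illustrative assumptions; the paper only states that PMI scores are injected into the prior distribution of the topic words.

```python
import itertools
import math
from collections import Counter


def biterms(doc_tokens):
    """All unordered word pairs (bi-terms) from one short document."""
    return itertools.combinations(sorted(set(doc_tokens)), 2)


def pmi_scores(corpus):
    """Corpus-level PMI(w1, w2) = log( p(w1, w2) / (p(w1) * p(w2)) )."""
    word_counts = Counter(w for doc in corpus for w in doc)
    pair_counts = Counter(p for doc in corpus for p in biterms(doc))
    n_words = sum(word_counts.values())
    n_pairs = sum(pair_counts.values())
    pmi = {}
    for (w1, w2), c in pair_counts.items():
        p_pair = c / n_pairs
        p1 = word_counts[w1] / n_words
        p2 = word_counts[w2] / n_words
        pmi[(w1, w2)] = math.log(p_pair / (p1 * p2))
    return pmi


def pmi_beta_priors(corpus, vocab, base=0.1, boost=0.4):
    """Map each word to an asymmetric beta prior driven by its best positive PMI.

    Words that take part in informative (high-PMI) bi-terms get a prior above
    the symmetric base value, so merely frequent words no longer dominate.
    The max-aggregation and linear rescaling used here are assumptions.
    """
    pmi = pmi_scores(corpus)
    best = {w: 0.0 for w in vocab}
    for (w1, w2), score in pmi.items():
        if score > 0:
            if w1 in best:
                best[w1] = max(best[w1], score)
            if w2 in best:
                best[w2] = max(best[w2], score)
    top = max(best.values()) or 1.0
    return {w: base + boost * (best[w] / top) for w in vocab}


if __name__ == "__main__":
    corpus = [["pen", "apple"], ["apple", "banana"], ["apple", "banana", "juice"]]
    vocab = sorted({w for doc in corpus for w in doc})
    print(pmi_beta_priors(corpus, vocab))  # word -> asymmetric beta prior value
```

The resulting word-to-prior map plays the role of the asymmetric <0.1, 0.5, 0.5>-style vector in the example above: it replaces the symmetric β prior before Gibbs sampling, leaving the BTM generative process itself unchanged.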
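Table 1 evaluates document clustering with Purity, NMI, and the Rand Index (RI). The paper does not detail how these scores were computed; the sketch below shows one standard way to obtain them with scikit-learn, under the assumption that each document is assigned to its single most probable topic before scoring.

```python
from itertools import combinations

from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics.cluster import contingency_matrix


def purity(labels_true, labels_pred):
    """Label each cluster with its majority true class; return the fraction
    of documents that end up correctly labeled."""
    m = contingency_matrix(labels_true, labels_pred)
    return m.max(axis=0).sum() / m.sum()


def rand_index(labels_true, labels_pred):
    """Plain (unadjusted) Rand Index over all document pairs."""
    pairs = list(combinations(range(len(labels_true)), 2))
    agree = sum(
        (labels_true[i] == labels_true[j]) == (labels_pred[i] == labels_pred[j])
        for i, j in pairs
    )
    return agree / len(pairs)


if __name__ == "__main__":
    # Toy example: true class per document vs. each document's most probable topic.
    labels_true = ["sports", "sports", "tech", "tech", "tech"]
    labels_pred = [0, 0, 1, 1, 0]
    print(purity(labels_true, labels_pred),
          normalized_mutual_info_score(labels_true, labels_pred),
          rand_index(labels_true, labels_pred))
```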
{ "text": "In this paper, we propose a solution for topic models that enhances the amount of word co-occurrence information available in a short text corpus. First, we observe that BTM identifies word co-occurrence by considering the bi-term frequency at the corpus level, so BTM over-represents common words because the bi-term frequencies come from the whole corpus rather than from a single short document. We propose a PMI-β priors method to overcome this problem. The experimental results show that our PMI-β-BTM achieves the best results on regular short news title text.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "The work reported in this paper was partially supported by the National NEP-II Project MOST 104-3113-F-260-001, 2015.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgement", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Probabilistic latent semantic analysis", "authors": [ { "first": "T", "middle": [], "last": "Hofmann", "suffix": "" } ], "year": 1999, "venue": "Proceedings of the Fifteenth conference on Uncertainty in artificial intelligence", "volume": "", "issue": "", "pages": "289--296", "other_ids": {}, "num": null, "urls": [], "raw_text": "T. Hofmann, \"Probabilistic latent semantic analysis,\" in Proceedings of the Fifteenth conference on Uncertainty in artificial intelligence, pp. 289-296, 1999.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Latent dirichlet allocation", "authors": [ { "first": "D", "middle": [ "M" ], "last": "Blei", "suffix": "" }, { "first": "A", "middle": [ "Y" ], "last": "Ng", "suffix": "" }, { "first": "M", "middle": [ "I" ], "last": "Jordan", "suffix": "" } ], "year": 2003, "venue": "Journal of Machine Learning Research", "volume": "3", "issue": "", "pages": "993--1022", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. M. Blei, A. Y. Ng, and M. I. Jordan, \"Latent dirichlet allocation,\" Journal of Machine Learning Research, vol. 3, pp. 993-1022, 2003.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "A Survey on Topic Modeling", "authors": [ { "first": "M", "middle": [], "last": "Divya", "suffix": "" }, { "first": "K", "middle": [], "last": "Thendral", "suffix": "" }, { "first": "S", "middle": [], "last": "Chitrakala", "suffix": "" } ], "year": 2013, "venue": "International Journal of Recent Advances in Engineering & Technology (IJRAET)", "volume": "1", "issue": "", "pages": "57--61", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Divya, K. Thendral, and S. Chitrakala, \"A Survey on Topic Modeling,\" International Journal of Recent Advances in Engineering & Technology (IJRAET), vol. 1, pp.
57-61, 2013.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Optimizing semantic coherence in topic models", "authors": [ { "first": "D", "middle": [], "last": "Mimno", "suffix": "" }, { "first": "H", "middle": [ "M" ], "last": "Wallach", "suffix": "" }, { "first": "E", "middle": [], "last": "Talley", "suffix": "" }, { "first": "M", "middle": [], "last": "Leenders", "suffix": "" }, { "first": "A", "middle": [], "last": "Mccallum", "suffix": "" } ], "year": 2011, "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "262--272", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Mimno, H. M. Wallach, E. Talley, M. Leenders, and A. McCallum, \"Optimizing semantic coherence in topic models,\" in Proceedings of the Conference on Empirical Methods in Natural Language Processing, pp. 262-272, 2011.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "A biterm topic model for short texts", "authors": [ { "first": "X", "middle": [], "last": "Yan", "suffix": "" }, { "first": "J", "middle": [], "last": "Guo", "suffix": "" }, { "first": "Y", "middle": [], "last": "Lan", "suffix": "" }, { "first": "X", "middle": [], "last": "Cheng", "suffix": "" } ], "year": 2013, "venue": "Proceedings of the 22nd international conference on World Wide Web", "volume": "", "issue": "", "pages": "1445--1456", "other_ids": {}, "num": null, "urls": [], "raw_text": "X. Yan, J. Guo, Y. Lan, and X. Cheng, \"A biterm topic model for short texts,\" in Proceedings of the 22nd international conference on World Wide Web, Rio de Janeiro, Brazil, pp. 1445-1456, 2013.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Text classification from labeled and unlabeled documents using EM", "authors": [ { "first": "K", "middle": [], "last": "Nigam", "suffix": "" }, { "first": "A", "middle": [ "K" ], "last": "Mccallum", "suffix": "" }, { "first": "S", "middle": [], "last": "Thrun", "suffix": "" }, { "first": "T", "middle": [], "last": "Mitchell", "suffix": "" } ], "year": 2000, "venue": "Machine learning", "volume": "39", "issue": "", "pages": "103--134", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. Nigam, A. K. McCallum, S. Thrun, and T. Mitchell, \"Text classification from labeled and unlabeled documents using EM,\" Machine learning, vol. 39, pp. 103-134, 2000.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Comparing twitter and traditional media using topic models", "authors": [ { "first": "W", "middle": [ "X" ], "last": "Zhao", "suffix": "" }, { "first": "J", "middle": [], "last": "Jiang", "suffix": "" }, { "first": "J", "middle": [], "last": "Weng", "suffix": "" }, { "first": "J", "middle": [], "last": "He", "suffix": "" }, { "first": "E.-P", "middle": [], "last": "Lim", "suffix": "" }, { "first": "H", "middle": [], "last": "Yan", "suffix": "" } ], "year": 2011, "venue": "Advances in Information Retrieval", "volume": "", "issue": "", "pages": "338--349", "other_ids": {}, "num": null, "urls": [], "raw_text": "W. X. Zhao, J. Jiang, J. Weng, J. He, E.-P. Lim, H. Yan, et al., \"Comparing twitter and traditional media using topic models,\" in Advances in Information Retrieval, ed: Springer, pp. 
338-349, 2011.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "BTM: Topic Modeling over Short Texts", "authors": [ { "first": "X", "middle": [], "last": "Cheng", "suffix": "" }, { "first": "X", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Y", "middle": [], "last": "Lan", "suffix": "" }, { "first": "J", "middle": [], "last": "Guo", "suffix": "" } ], "year": 2014, "venue": "IEEE Transactions on Knowledge and Data Engineering", "volume": "26", "issue": "", "pages": "2928--2941", "other_ids": {}, "num": null, "urls": [], "raw_text": "X. Cheng, X. Yan, Y. Lan, and J. Guo, \"BTM: Topic Modeling over Short Texts,\" IEEE Transactions on Knowledge and Data Engineering, vol. 26, pp. 2928-2941, 2014.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Word association norms, mutual information, and lexicography", "authors": [ { "first": "K", "middle": [ "W" ], "last": "Church", "suffix": "" }, { "first": "P", "middle": [], "last": "Hanks", "suffix": "" } ], "year": 1990, "venue": "Computational linguistics", "volume": "16", "issue": "", "pages": "22--29", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. W. Church and P. Hanks, \"Word association norms, mutual information, and lexicography,\" Computational linguistics, vol. 16, pp. 22-29, 1990.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "Rethinking LDA: Why priors matter", "authors": [ { "first": "H", "middle": [ "M" ], "last": "Wallach", "suffix": "" }, { "first": "D", "middle": [], "last": "Mimno", "suffix": "" }, { "first": "A", "middle": [], "last": "Mccallum", "suffix": "" } ], "year": 2009, "venue": "Advances in Neural Information Processing Systems", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "H. M. Wallach, D. Mimno, and A. McCallum, \"Rethinking LDA: Why priors matter,\" in Advances in Neural Information Processing Systems, 2009.", "links": null } }, "ref_entries": { "TABREF0": { "type_str": "table", "html": null, "content": "
Model | β priors | Purity | NMI | RI
LDA | <0.100> | 0.4174 | 0.3217 | 0.9127
LDA | PCA-β | 0.4348 | 0.3325 | 0.9266
Mix | <0.100> | 0.4217 | 0.3358 | 0.8687
Mix | PCA-β | 0.3748 | 0.3305 | 0.7550
BTM | <0.100> | 0.4318 | 0.3429 | 0.9092
BTM | PCA-β | 0.4367 | 0.4000 | 0.8665
BTM | PMI-β | 0.4427 | 0.3927 | 0.9284
", "text": "The Clustering Results on the Twitter2011 Dataset", "num": null } } } }