|
{ |
|
"paper_id": "O00-3002", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:59:07.489738Z" |
|
}, |
|
"title": "Design and Evaluation of Approaches to Automatic Chinese Text Categorization", |
|
"authors": [ |
|
{ |
|
"first": "Jyh-Jong", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Tsay", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Chung Cheng University", |
|
"location": { |
|
"postCode": "62107", |
|
"settlement": "Chiayi", |
|
"country": "Taiwan, ROC" |
|
} |
|
}, |
|
"email": "tsay@cs.ccu.edu.tw" |
|
}, |
|
{ |
|
"first": "Jing-Doo", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Chung Cheng University", |
|
"location": { |
|
"postCode": "62107", |
|
"settlement": "Chiayi", |
|
"country": "Taiwan, ROC" |
|
} |
|
}, |
|
"email": "jdwang@cs.ccu.edu.tw" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we propose and evaluate approaches to categorizing Chinese texts, which consist of term extraction, term selection, term clustering and text classification. We propose a scalable approach which uses frequency counts to identify left and right boundaries of possibly significant terms. We used the combination of term selection and term clustering to reduce the dimension of the vector space to a practical level. While the huge number of possible Chinese terms makes most of the machine learning algorithms impractical, results obtained in an experiment on a CAN news collection show that the dimension could be dramatically reduced to 1200 while approximately the same level of classification accuracy was maintained using our approach. We also studied and compared the performance of three well known classifiers, the Rocchio linear classifier, naive Bayes probabilistic classifier and k-nearest neighbors(kNN) classifier, when they were applied to categorize Chinese texts. Overall, kNN achieved the best accuracy, about 78.3%, but required large amounts of computation time and memory when used to classify new texts. Rocchio was very time and memory efficient, and achieved a high level of accuracy, about 75.4%. In practical implementation, Rocchio may be a good choice.", |
|
"pdf_parse": { |
|
"paper_id": "O00-3002", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we propose and evaluate approaches to categorizing Chinese texts, which consist of term extraction, term selection, term clustering and text classification. We propose a scalable approach which uses frequency counts to identify left and right boundaries of possibly significant terms. We used the combination of term selection and term clustering to reduce the dimension of the vector space to a practical level. While the huge number of possible Chinese terms makes most of the machine learning algorithms impractical, results obtained in an experiment on a CAN news collection show that the dimension could be dramatically reduced to 1200 while approximately the same level of classification accuracy was maintained using our approach. We also studied and compared the performance of three well known classifiers, the Rocchio linear classifier, naive Bayes probabilistic classifier and k-nearest neighbors(kNN) classifier, when they were applied to categorize Chinese texts. Overall, kNN achieved the best accuracy, about 78.3%, but required large amounts of computation time and memory when used to classify new texts. Rocchio was very time and memory efficient, and achieved a high level of accuracy, about 75.4%. In practical implementation, Rocchio may be a good choice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, we have seen a tremendous growth in the number of online text documents available on the Internet, in digital libraries and news sources. Effective location of information in these huge resources is difficult without good indexing as well as organization of text collections. Automatic text categorization, which is defined as the task of assigning predefined class (category) labels to free text documents, is one of The objective of this study was to design and evaluate approaches to categorizing Chinese texts. In particular, we implemented and evaluated approaches which consist of the following processes: term extraction, term selection, term clustering and text classification. Note that in Chinese texts, although a sentence is composed of a sequence of terms, no white spaces are inserted to separate terms from each other. Term extraction which segments sentences into term sequences is a difficult task [5] . Several approaches have been proposed to extract terms from Chinese texts [4, 13] . In this paper, we propose a scalable approach [17] which is based on String B-trees proposed in [7] and is capable of handling huge numbers of text documents. Our approach uses frequency counts to identify possible term boundaries as proposed in [13] and is able to identify new terms which occur very often in Chinese texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 932, |
|
"end": 935, |
|
"text": "[5]", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1012, |
|
"end": 1015, |
|
"text": "[4,", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1016, |
|
"end": 1019, |
|
"text": "13]", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1068, |
|
"end": 1072, |
|
"text": "[17]", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1118, |
|
"end": 1121, |
|
"text": "[7]", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1268, |
|
"end": 1272, |
|
"text": "[13]", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "However, the number of terms in Chinese can be very large. It is very easy to encounter 10 6 or even more terms in moderately-sized collections. The huge number of possible terms results in very high dimensionality when documents was presented in a vector space model and makes many machine learning algorithms impractical. To reduce the dimension to a practical level, we propose to perform term selection and term clustering on extracted terms. In particular, we use the \u03c7 2 statistic [16] to select terms that are highly correlated to class categories. In [16] , we presented an extensive comparison of several measures for term selection in Chinese text categorization, such as the odds ratio, information gain, mutual information, and \u03c7 2 statistic. Experimental results shows that the \u03c7 2 statistic approach achieves the best performance. Notice that in term selection, if only a small number of terms is selected, a document may contain very few or even none of the selected terms, and thus will be classified into the default class. On the other hand, a large number of selected terms make automatic categorization computationally impractical. We thus allow a large number of terms to be selected and then perform term clustering to group similar terms into clusters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 92, |
|
"text": "6", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 491, |
|
"text": "[16]", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 563, |
|
"text": "[16]", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "A large number of algorithms for clustering are Available [11] . Most of them are unsupervised and ignore any class labels that are given. In this study, we used distributional clustering [2] , which explicitly takes advantage of the class labels to group terms with similar class distributions into the same cluster. In an experiment on a collection of CNA news [1] articles, the number of terms extracted was 548363.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 62, |
|
"text": "[11]", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 191, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 366, |
|
"text": "[1]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Experimental results show that the level of classification accuracy could be maintained while the dimension was reduced to 1200 by selecting 90000 terms first and then clustering them into 1200 clusters. Notice that term selection and term clustering also can compeensate for imprecision in term extraction as erroneous terms can be dropped out during term selection or grouped with more significant terms through term clustering. In addition to term selection and term clustering algorithms, there are others which can be applied to reduce the level of dimensionality, such as Principle Component Analysis (PCA) [6] . PCA is an unsupervised dimensional reduction technique, whereas distributional clustering is supervised and can take advantage of class labels to concentrate effort on the specific task of categorization. We expect distributional clustering to perform well in the context of text categorization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 613, |
|
"end": 616, |
|
"text": "[6]", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this paper, we also compare extensively three well-known classifiers, including the Rocchio linear classifier [12] , naive Bayes probabilistic classifier, and k-nearest neighbor (kNN) classifier [20] . We observed in an experiment that the classification accuracy of Rocchio and kNN improved slightly as the dimension was reduced to 1200 by means of term selection and term clustering but that the accuracy of the naive Bayes classifier dropped slightly. This might have been due to the fact that term clustering refines the shapes of each cluster but distorts the distribution of each term. Overall, kNN achieved the best accuracy, about 78.3%, but required large amounts of computation time and memory when used to classify new texts. Rocchio is very time and memory efficient, and achieves accuracy of about 75.4%, which is slightly worse than kNN.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 117, |
|
"text": "[12]", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 202, |
|
"text": "[20]", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Recently, Huang et al. [10] evaluated the weight matrix approach, which estimates the relative importance of the keywords in each class and classifies a test news to the class that maximizes the sum of the weights of keywords appearing in that news. Although they achieved about 88% classification accuracy, their experiment was different from ours as well as those used in much related research [3, 22] . First, the training news did not come from the same news source as the test news, but come from a thesaurus [19] that was carefully built by linguistic specialists. Second, the test news was classified by readers who could employ logic that was close to that assumed by the classification algorithms but different from that employed by the editors. Third, a piece of test news could be assigned to multiple classes when it covered topics from different classes. In fact, for a collection of 1136 news items, 1380 class labels were assigned, which indicates that about 20% of the test news iteems had multiple class labels. However, in the CNA news collection used in this study, each news items had exactly one predefined class no matter how many topics it covered. It is not clear whether or not the weight matrix approach can achieve the same performance when all the differences are removed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 27, |
|
"text": "[10]", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 399, |
|
"text": "[3,", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 403, |
|
"text": "22]", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 518, |
|
"text": "[19]", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows. Section 2 sketches the String B-tree approach to term extraction. Section 3 describes the\u03c7 2 statistic approach to term selection. Section 4 describes distributional clustering. Section 5 reviews the classifiers compared in this paper. Section 6 gives experimental results. Section 7 gives conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this paper, we propose a scalable approach [18] to term extraction, which is based on String B-trees (SB-trees) [7] . This approach can handle large text collections and can identify newly created terms frequently found in Chinese. It does not use a dictionary but rather uses frequency counts to identify the boundaries of possible terms as in [13] . We will describe the term extraction method in the following.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 50, |
|
"text": "[18]", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 118, |
|
"text": "[7]", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 352, |
|
"text": "[13]", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Extraction", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Let w be a string. For any character x, let P (wx|w) be the probability that w is followed by x, and let P(xw|w) be the probability that w is preceded by x. We say that w passes right boundary verification if ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Extraction", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "= = \u03b8 \u03b8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Extraction", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": ", which means that w will be identified as a significant term when it has at least two distinct successor and predecessor characters. For each class, we build two SB-trees, one for all the suffixes [8] of the original texts used for right boundary verification, and the other for the suffixes of the reversed texts which is used for left boundary verification. Notice that SB-trees are scalable; they can maintain dynamic collections and identify new terms as new articles are inserted.", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 201, |
|
"text": "[8]", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Extraction", |
|
"sec_num": "2." |
|
}, |
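
{

"text": "The following is a minimal, brute-force Python sketch of the boundary-verification rule above; the in-memory substring scan, the names significant_terms, max_len and min_freq, and the default thresholds are illustrative assumptions, whereas the actual system builds SB-trees over the suffixes of the original and reversed texts.\n\nfrom collections import defaultdict\n\ndef significant_terms(texts, max_len=6, theta=2, min_freq=10):\n    # count distinct successor and predecessor characters of every substring w\n    succ, pred, freq = defaultdict(set), defaultdict(set), defaultdict(int)\n    for text in texts:\n        for i in range(len(text)):\n            for j in range(i + 1, min(i + 1 + max_len, len(text) + 1)):\n                w = text[i:j]\n                freq[w] += 1\n                if j < len(text):\n                    succ[w].add(text[j])\n                if i > 0:\n                    pred[w].add(text[i - 1])\n    # w passes right (left) boundary verification when it has at least theta\n    # distinct successor (predecessor) characters; theta = 2 in this paper\n    return {w for w in freq\n            if freq[w] >= min_freq\n            and len(succ[w]) >= theta\n            and len(pred[w]) >= theta}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Term Extraction",

"sec_num": "2."

},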
|
{ |
|
"text": "Term selection is performed to choose representative terms for each class such that these terms can distinguish one class from the others. After the term extraction process is completed, there are many terms remain that are not informative for categorization. In [16] , we extensively compared several measures used for term selection in Chinese text categorization, such as the odds ratio, information gain, mutual information, and \u03c7 2 statistic. Experimental results show that the \u03c7 2 statistic approach achieves the best performance when combined with the naive Bayes classifier. In this study, we used the \u03c7 2 statistic [21] approach to perform term selection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 267, |
|
"text": "[16]", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 628, |
|
"text": "[21]", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Selection", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "For a term t and a class c, the \u03c7 2 statistic measures the correlation between t and c. Let A be the number of times t and c co-occur, let B be the number of times t occurs without c, let P be the number of times c occurs without t, let Q be the number of times neither t nor c occur, and let N be the total number of documents. The \u03c7 2 statistic is defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Selection", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": ") ( ) ( ) ( ) ( ) ( ) , ( 2 2 Q P B A Q B P A BP AQ N c t + \u00d7 + \u00d7 + \u00d7 + \u2212 \u00d7 = \u03c7 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Selection", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Notice that the \u03c7 2 statistic approach prefers terms that are highly correlated with a particular class. For each term, the \u03c7 2 statistic scores with regard to different classes can be different. In [21] , Yang used the", |
|
"cite_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 203, |
|
"text": "[21]", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Selection", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "average or the maximum of the scores to select representative terms, which may result in a biased distribution of selected terms between classes. To avoid this situation, we select from each class the same number of terms having the largest\u03c7 2 statistic in that class.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Design and Evaluation of Approaches to Automatic Chinese Text Categorization 47", |
|
"sec_num": null |
|
}, |
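
{

"text": "A minimal Python sketch of the \u03c7^2 scoring and per-class selection just described; the contingency counts A, B, P, Q follow the definitions above, while the dictionary layout and the helper name select_per_class are assumptions made for illustration.\n\ndef chi_square(A, B, P, Q):\n    # A: t with c, B: t without c, P: c without t, Q: neither; N is the total number of documents\n    N = A + B + P + Q\n    denom = (A + P) * (B + Q) * (A + B) * (P + Q)\n    return N * (A * Q - B * P) ** 2 / denom if denom else 0.0\n\ndef select_per_class(counts, terms_per_class):\n    # counts[c][t] = (A, B, P, Q); keep the same number of top-scoring terms from every class\n    selected = set()\n    for c, table in counts.items():\n        ranked = sorted(table, key=lambda t: chi_square(*table[t]), reverse=True)\n        selected.update(ranked[:terms_per_class])\n    return selected",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Term Selection",

"sec_num": "3."

},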
|
{ |
|
"text": "We perform term clustering to further reduce the dimension of the vector space after the term selection process. In order to avoid the situation in which a document contains none of the selected terms, in term selection, we select a suitable large set of terms which may require a large amount of computation time and memory for classification. Term clustering groups similar terms into one cluster that no longer distinguishes between constituent terms. In this study, we used distributional clustering [2] , which groups terms with similar distributions over classes into the same cluster. Note that distributional clustering can compensate for the drawback of term extraction, where incomplete terms are clustered into the group containing their original terms. On the other hand, when training data is sparse, performance may be improved by averaging statistics of similar words together so that the resulting estimates are more robust. We describe distributional clustering [2] in more detail in the following.", |
|
"cite_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 507, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 979, |
|
"end": 982, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Term clustering algorithms define a similarity measure between terms and group similar terms into term clusters. In distributional clustering, the difference between two term distributions is measured by Kullback-Leibler (KL) divergence. For term i t and term j t , the KL divergence, denoted as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "( ( | ) || ( | )) i j D P C t P C t , is defined as ) | ( ) | ( log ) | ( | | 1 j k i k C k i k t C P t C P t C P \u2211 = \u2212", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ", where |C| is the number of classes and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ") | ( i k t C P", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "is the probability of class k C given term i t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ". To avoid the odd properties of KL divergence, such as asymmetry, we use the average KL divergence defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": ")) | ( || ) | ( ( ) ( ) ( )) | ( || ) | ( ( ) ( ) ( j i j j i j j i i j i i t t C P t C P D t t P t P t t C P t C P D t t P t P \u2228 \u22c5 \u2228 + \u2228 \u22c5 \u2228 , where j i t t \u2228 represents", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "clustering of term i t and term j t into one group. Based on the average KL divergence, we apply a simple greedy agglomerative algorithm to cluster terms as follows. Let M be the number of final clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Initially, M terms are selected as seeds. Each term represents a singleton cluster. The following process is repeated until all the terms have been added: the two most similar clusters are merged into one cluster, and then the term that has the highest \u03c7 2 statistic measure among the remaining terms is added as a singleton cluster. The initial M seeding terms are uniformly selected from all classes. That is, from each class, the", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "| | C M", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "terms that have the highest \u03c7 2 statistic measure are selected as initial seeds. This avoids the problem of bias [2] , where the M initial clusters may prefer some classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 116, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "4" |
|
}, |
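
{

"text": "A compact Python sketch of the greedy agglomerative procedure just described, using the average KL divergence between class distributions as the merge criterion; the array-based representation, the smoothing constant and the function names are assumptions made for illustration only.\n\nimport numpy as np\n\ndef avg_kl(p_i, p_j, w_i, w_j):\n    # p_i, p_j: P(C | cluster) vectors; w_i, w_j: cluster priors P(cluster)\n    merged = (w_i * p_i + w_j * p_j) / (w_i + w_j)\n    kl = lambda p, q: float(np.sum(p * np.log((p + 1e-12) / (q + 1e-12))))\n    return (w_i * kl(p_i, merged) + w_j * kl(p_j, merged)) / (w_i + w_j)\n\ndef distributional_clustering(terms, class_dist, prior, M):\n    # terms: ranked by chi-square score; class_dist[t]: P(C | t) vector; prior[t]: P(t)\n    clusters = [([t], class_dist[t], prior[t]) for t in terms[:M]]\n    for t in terms[M:]:\n        # merge the two most similar clusters, then add the next term as a singleton\n        pairs = [(i, j) for i in range(len(clusters)) for j in range(i + 1, len(clusters))]\n        i, j = min(pairs, key=lambda ij: avg_kl(clusters[ij[0]][1], clusters[ij[1]][1],\n                                                clusters[ij[0]][2], clusters[ij[1]][2]))\n        mi, pi, wi = clusters[i]\n        mj, pj, wj = clusters[j]\n        merged = (mi + mj, (wi * pi + wj * pj) / (wi + wj), wi + wj)\n        clusters = [c for k, c in enumerate(clusters) if k not in (i, j)] + [merged]\n        clusters.append(([t], class_dist[t], prior[t]))\n    return clusters",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Term Clustering",

"sec_num": "4"

},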
|
{ |
|
"text": "In this paper, we compare three wellknown classifiers, including the Rocchio linear classifier, naive Bayes (NB) probabilistic classifier and k-nearest neighbor (kNN) classifier, which are reviewed in the following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The Rocchio algorithm is a training algorithm [12] ", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 50, |
|
"text": "[12]", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rocchio Linear Classifier", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "| | | | k C D D i k C D i i C D D C D G k i k i \u2212 \u2212 = \u2211 \u2211 \u2212 \u2208 \u2208 \u03b7 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rocchio Linear Classifier", |
|
"sec_num": "5.1" |
|
}, |
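
{

"text": "A short Python sketch of Rocchio prototype training (the formula above) and of classifying a request document by cosine similarity with the prototypes; the dense TF-IDF matrix interface and the default \u03b7 = 0 (the value reported as best in the authors' experiments) are assumptions made for illustration.\n\nimport numpy as np\n\ndef train_rocchio(X, y, classes, eta=0.0):\n    # X: (num_docs, num_terms) TF-IDF matrix; y: array of class labels\n    # prototype of a class = centroid of its positive examples minus eta times\n    # the centroid of its negative examples\n    prototypes = [X[y == c].mean(axis=0) - eta * X[y != c].mean(axis=0) for c in classes]\n    return np.vstack(prototypes)\n\ndef classify_rocchio(x, prototypes, classes):\n    # assign the class whose prototype has the highest cosine similarity with x\n    sims = prototypes @ x / (np.linalg.norm(prototypes, axis=1) * np.linalg.norm(x) + 1e-12)\n    return classes[int(np.argmax(sims))]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rocchio Linear Classifier",

"sec_num": "5.1"

},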
|
{ |
|
"text": "The Naive Bayes (NB) probabilistic classifiers have been studied for application to machine learning [14] . The basic idea in NB is to use the joint probabilities of terms and classes to estimate the probabilities of classes given a document. The naive part is the assumption of term independence, i.e., the conditional probability of a term, given a class, is assumed to be independent from the conditional probabilities of other words given that class. This assumption makes computation for NB classifiers far more efficient than that for the non-naive Bayes approaches [20] whose time complexity are exponential.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 105, |
|
"text": "[14]", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 576, |
|
"text": "[20]", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Let X be a request document; NB assigns to X the most probable class NB", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "C defined as arg max ( | ) k NB c c k C P C X \u2208 =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": ". By Bayes' theorem,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2211 \u2208 = C c i i k k k i C P C X P C P C X P X C P ) ( ) | ( ) ( ) | ( ) | (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": ". Due to the assumption of term independence,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": ") | ( ) | ( | | 1 k j X j k C t P C X P = \u03a0 = , where ) | ( k j C t P", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "is the conditional probability of term j t given class k C . Notice that the above equation works well when every term appears in every document. However, the product becomes 0 when some terms do not appear in the given document. We use", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2211 + + = | | ) , ( | | ) , ( 1 ) | ( T j k j k j k j C t TF T C t TF C t P in order to approximate ) ( | j k P t C", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "to avoid the possibility that the product will become 0, where ) , ( ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": ") | ( ) ( ) | ( ) ( ) | ( X t TF i j X t i i X t TF k j X t k k j j j j C t P C P C t P C P X C P \u2208 \u2208 \u03a0 \u03a0 = \u2211 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Naive Bayes (NB) Classifier", |
|
"sec_num": "5.2" |
|
}, |
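
{

"text": "A minimal Python sketch of the naive Bayes training and prediction equations above, including the add-one smoothing of P(t_j | C_k); the list-of-term-lists interface and the function names are assumptions made for illustration.\n\nimport math\nfrom collections import Counter, defaultdict\n\ndef train_nb(docs, labels):\n    # docs: list of term lists; returns log P(C_k) and smoothed log P(t_j | C_k)\n    class_counts, term_counts = Counter(labels), defaultdict(Counter)\n    for terms, c in zip(docs, labels):\n        term_counts[c].update(terms)\n    vocab = {t for counts in term_counts.values() for t in counts}\n    log_prior = {c: math.log(n / len(docs)) for c, n in class_counts.items()}\n    log_cond = {c: {t: math.log((term_counts[c][t] + 1) /\n                                (sum(term_counts[c].values()) + len(vocab)))\n                    for t in vocab}\n                for c in class_counts}\n    return log_prior, log_cond, vocab\n\ndef classify_nb(terms, log_prior, log_cond, vocab):\n    # arg max over classes of log P(C_k) plus, for every term occurrence, log P(t_j | C_k)\n    scores = {c: lp + sum(log_cond[c][t] for t in terms if t in vocab)\n              for c, lp in log_prior.items()}\n    return max(scores, key=scores.get)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Naive Bayes (NB) Classifier",

"sec_num": "5.2"

},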
|
{ |
|
"text": "Given an arbitrary request document X, kNN ranks its nearest neighbors among the training documents and uses the classes of the k top-ranking neighbors to predict the classes of X. The similarity score of each neighbor document when it is compared to X is used as the weight of the class of the neighboring document, and the sum of the class weights over the k nearest neighbors is used to perform class ranking [20] . , respectively. To conduct categorization, the cosine similarity between each i D and X is calculated. The training documents are sorted using the cosine similarity metric in descending order. Then the k top-ranking documents are selected. The final score of the request document X when compared to each class is calculated by summing the cosine similarity metric of these k selected documents and their class association. The class with the highest score is assigned to X. We have performed an experiment using different values of k, including 5, 10, 15, 20, 30, 50, 100, 150, 200 and 300. The best choice of k in our experiment is 15 when n = 90000 and is 10 when n = 1200.", |
|
"cite_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 416, |
|
"text": "[20]", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "k-Nearest Neighbor (kNN) Classifier", |
|
"sec_num": "5.3" |
|
}, |
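
{

"text": "A short Python sketch of the kNN scoring procedure above: the k most cosine-similar training documents vote for their classes, each weighted by its similarity to the request document; the dense-matrix interface and the default k = 15 (the best value found for n = 90000) are assumptions made for illustration.\n\nimport numpy as np\nfrom collections import defaultdict\n\ndef knn_classify(x, X_train, y_train, k=15):\n    # cosine similarity between the request document x and every training document\n    sims = X_train @ x / (np.linalg.norm(X_train, axis=1) * np.linalg.norm(x) + 1e-12)\n    top = np.argsort(-sims)[:k]\n    # each of the k nearest neighbors votes for its class with its similarity as weight\n    scores = defaultdict(float)\n    for i in top:\n        scores[y_train[i]] += sims[i]\n    return max(scores, key=scores.get)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "k-Nearest Neighbor (kNN) Classifier",

"sec_num": "5.3"

},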
|
{ |
|
"text": "In our experiment, we used Chinese news articles from the Central News Agency (CNA) [1] . We used news articles spanning a period of one year, from 1/1/1991 to 12/31/1991, to extract terms. News articles from the six-month period 8/1/1991 to 1/31/1992 were used as training data to train classifiers. The testing data consisted of news articles from the one-month period 2/1/1992 to 2/28/1992. All the news articles were preclassified into 12 classes, listted in Figure 1 . Note that the number of texts used was far larger than that employed in previous related researches [10, 22] . As a result, the conclusions drawn based on our experimental results are believed to be more reliable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 87, |
|
"text": "[1]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 578, |
|
"text": "[10,", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 579, |
|
"end": 582, |
|
"text": "22]", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 463, |
|
"end": 471, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "The news articles were not uniformly distributed over the classes, as shown in Figure 1 . We, thus, measure the classification accuracy at both micro and macro levels. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 87, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 1 The distribution of CAN news articles.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We performed term extraction, term selection and term clustering to reduce the dimension. Both the space and time required to classify new documents could be reduced as the dimension of the vector space was reduced. Figure 2 shows the time needed to classify new documents, measured on a PC with a Pentium II 233 CPU, 128MB RAM and an IDE HardDisk, for dimension n = 90000 and 1200, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 224, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dimension Reduction", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In the term extraction process, terms that appeared fewer than 10 times or in only one document were dropped out. We then used frequency counts to identify significant terms. The number of significant terms extracted was 548363. Term selection was then performed to select a subset of most representative terms. In order to find an appropriate number p of selected terms, we experimented for different values of p, including 12000, 36000, 60000, 90000 and 120000. We choose a p value of 90000 because kNN and NB achieved the best MicroAccuracy results of 77.12% and 76.45%, respectively, when p was 90000, as indicated in Figure 3 . The selected terms were clustered using distributional clustering into term clusters. To choose a suitable number c of term clusters, we experimented with different values of c, including 120, 240, 360, 600, 900, 1200, 1800, 2400, 3600 and 4800. We choose a c value of 1200 because kNN and Rocchio achieved the best performance when c was 1200, as shown in Figure 4 . Figure 5 shows some examples of term groups. In addition to clustering similar terms to reduce the dimension, term clustering can also cluster redundant substrings that are erroneously identified during term extraction into the group that contains their original terms. For example, as shown in Figure6,\"\u4e8c\u5c46\u570b\" and \"\u4e8c\u5c46\u570b\u4ee3\" are clustered into group 12; \"\u8b49\u5238\u4ea4\uf9e0\u6240\" and \"\u5238\u4ea4\uf9e0\u6240\" are clustered into group 300. On the other hand, the averaging statistics of similar words may result in more robust estimates. For example, \"\uf983\ufa08\u696d\"(a travel agent) and \"\uf983\u904a\u5354\u6703\"(a travel agency association) are similar words and are clustered into group 100.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 622, |
|
"end": 630, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 990, |
|
"end": 998, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1001, |
|
"end": 1009, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dimension Reduction", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In [2] , Baker claimed that performance can be improved by means of term clustering when training data is sparse because by averaging statistics of similar words, more robust estimates can be obtained. This was confirmed by our experiment. Note that our training data was quite sparse as the average number of none-zero items in training vectors was 106 when n is 90000, and was 79 when c is 1200. The memory space could be reduced by 25% ", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 6, |
|
"text": "[2]", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dimension Reduction", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Overall, kNN achieved the best MicroAccuracy results, and Rocchio achieved slightly worse results, as shown in Figure 7 and Figure 8 . Note that the MicroAccuracy results for Rocchio and kNN improved slightly from 75.24% and 77.12% to 75.39% and 78.33%, respectively, when the dimension of the vector space was reduced from 90000 to 1200 by means of distributional clustering. However, the performance of na\u00efve Bayes dropped when terms were clustered. This might have been due to the fact that naive Bayes is more sensitive to term distributions which might be distorted by term clustering. kNN prefered large classes as its MacroAccuracy result, 73.88%, was the lowest, but its MicroAccuracy result, 77.12%, was the best, as indicated in Figure 7 . For highly related classes, kNN may prefer a larger class as the probability that the k nearest neighbors will belong to the larger class is higher. kNN achieved much better recall results than Rocchio for the class Politics (\u653f\u6cbb), which was the largest class in our news collections. However, Rocchio achieved much better recall results than kNN did for the class Military (\u8ecd\u4e8b). Note that the class Politics (\u653f\u6cbb) and the class Military (\u8ecd\u4e8b) were highly correlated, as observed in [17] , and that the class Politics (\u653f\u6cbb) was 5 times larger than the class Military (\u8ecd\u4e8b).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1230, |
|
"end": 1234, |
|
"text": "[17]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 119, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF10" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 132, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF11" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 747, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classifiers Comparison", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In practical implementation, Rocchio could be a good choice. Rocchio is quite time and memory efficient because the time and memory requirements for the classification process are proportional to the number of classes. However, the time and memory requirements for kNN are proportional to the number of training documents. Rocchio is more noise tolerant than kNN and NB, as shown by the fact that the performance of kNN and NB worsened but the performance of Rocchio improved when n was changed from 90000 to 120000, as shown in Figure 3 . Rocchio produced slightly worse MicroAccuracy results than kNN did, but can be improved to produce results approaching the performance of kNN by taking more than one representative to represent each class in [17] .", |
|
"cite_spans": [ |
|
{ |
|
"start": 748, |
|
"end": 752, |
|
"text": "[17]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 529, |
|
"end": 537, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classifiers Comparison", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this paper, we have proposed and evaluated approaches to categorizing Chinese texts, which consist of term extraction, term selection, term clustering and text classification. For term extraction, we have proposed an approach based on String B-trees. It is scalable and is capable of handling very large numbers of text collections. We use the \u03c7 2 statistic to perform term selection and use distributional clustering to perform term clustering to reduce the dimension of the vector space. Although many redundant terms are identified as significant terms during the term extraction process, the combination of term selection and term clustering somehow can compensate for this drawback by either filtering them out or clustering them into the group containing their original terms. Results of an experiment on a CNA news collection shows that the dimension could be reduced from 90000 to 1200 while approximately the same level of classification accuracy was maintained. We have also studies and compared the performance of three well known classifiers, the Rocchio linear classifier (Rocchio), naive Bayes (NB) probabilistic classifier and k-nearest neighbors (kNN) classifier, when they were applied to categorize Chinese texts. Overall, kNN achieved the best accuracy, about 78.3%, but required large amounts of computation time and memory to classify new texts. Rocchio was very time and memory efficient, and achieved accuracy of about 75.4%. In practical implementation, Rocchio may be a good choice. In addition, we have recently shown [17] that the performance of the Rocchio linear classifier can be improved to approximate that of kNN by taking multiple representative vectors to represent one class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1547, |
|
"end": 1551, |
|
"text": "[17]", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7." |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Dr. Chien, Lee-Feng and Mr. Lee, Min-Jer for kind help in gathering the CNA news articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Distributional clustering of words for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kachites", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 21th Ann Int ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR'98)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "96--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douglas Baker and Kachites McCallum. \"Distributional clustering of words for text classification.\" In Proceedings of the 21th Ann Int ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR'98), pages 96-103. 1998.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "PAT-tree-based online corpus collection and classification", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Chun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lee-Feng", |
|
"middle": [], |
|
"last": "Chien", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "The Fourth International Workshop on Information Retrieval with Asian Languages(IRAL'99)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Chun-Liang and Lee-Feng Chien. \"PAT-tree-based online corpus collection and classification.\" In The Fourth International Workshop on Information Retrieval with Asian Languages(IRAL'99), pages 78-82. 1999.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "PAT-Tree-Based keyword extraction for Chinese information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Chien", |
|
"middle": [], |
|
"last": "Lee-Feng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the 20 th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval(SIGIR'97)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chien Lee-Feng. \"PAT-Tree-Based keyword extraction for Chinese information retrieval.\" In Proceedings of the 20 th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval(SIGIR'97), pages 50-58. 1997.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Important issues on Chinese information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Chien", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-Feng", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsiao-Tieh", |
|
"middle": [], |
|
"last": "Pu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Computation Linguistics and Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "205--221", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chien Lee-Feng and Hsiao-Tieh Pu. \"Important issues on Chinese information retrieval.\" In Computation Linguistics and Chinese Language Processing, pages 205-221. 1996.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Indexing by latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Deerwester", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Furnas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Harshman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Journal of the American Society for Information Science", |
|
"volume": "41", |
|
"issue": "6", |
|
"pages": "391--407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S.C. Deerwester, S.T. Dumais, T.K. Landauer, G.W.Furnas, and R.A. Harshman. \"Indexing by latent semantic analysis.\" Journal of the American Society for Information Science, 41(6):391-407. 1990.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The String B-tree: A new data structure for string search in external memory and its application", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Grossi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Journal of ACM", |
|
"volume": "46", |
|
"issue": "2", |
|
"pages": "236--280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paolo Ferragina and Roberto Grossi. \"The String B-tree: A new data structure for string search in external memory and its application.\" Journal of ACM, 46(2):236-280. 1999.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Information Retrieval Data Structures Algorithm", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "William", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rick", |
|
"middle": [], |
|
"last": "Frakes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kazman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William B.Frakes and Rick Kazman. Information Retrieval Data Structures Algorithm. Prentice Hall, Englewood Cliffs, New Jersey 0732. 1992.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Turning yahoo into an automatic web-page classifier", |
|
"authors": [ |
|
{ |
|
"first": "Marko", |
|
"middle": [], |
|
"last": "Frobelink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dunja", |
|
"middle": [], |
|
"last": "Mladenic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 13 th European Conference on Aritficial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--474", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marko Frobelink and Dunja Mladenic. \"Turning yahoo into an automatic web-page classifier.\" In Proceedings of the 13 th European Conference on Aritficial Intelligence, pages 473-474. 1998.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Automatic classification for news written in Chinese", |
|
"authors": [ |
|
{ |
|
"first": "Yi-Ling", |
|
"middle": [], |
|
"last": "Huang Sen-Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ja-Chen", |
|
"middle": [], |
|
"last": "Chou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Computer Processing of Oriental Languages", |
|
"volume": "12", |
|
"issue": "2", |
|
"pages": "143--159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang Sen-Yuan, Yi-Ling Chou, and Ja-Chen Lin. \"Automatic classification for news written in Chinese.\" Computer Processing of Oriental Languages, 12(2):143-159. 1998.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Finding Groups in Data Analysis : An Introduction to Cluster Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Leonard", |
|
"middle": [], |
|
"last": "Kaufman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Rousseeuw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonard Kaufman and Peter J. Rousseeuw. \"Finding Groups in Data Analysis : An Introduction to Cluster Analysis.\" John Wiley and Sons,Inc., New York. 1990.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Training algorithms for linear text classifiers", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Callan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Papka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the 19 th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval (SIGIR'96)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "298--306", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David D. Lewis, Robert E. Schapire, James P. Callan, and Ron Papka. \"Training algorithms for linear text classifiers.\" In Proceedings of the 19 th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval (SIGIR'96), pages 298-306. 1996.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A way to extract unknown words without dictionary from Chinese corpus and its applications", |
|
"authors": [ |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Yih-Jeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Shing", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyh-Yang", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Jer", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Research on Computational Linguistics Conference (ROCLING XI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin Yih-Jeng, Ming-Shing Yu, Shyh-Yang Hwang, and Ming-Jer Wu. \"A way to extract unknown words without dictionary from Chinese corpus and its applications.\" In Research on Computational Linguistics Conference (ROCLING XI), pages 217-226. 1998.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Machine Learning. The McGraw", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom M. Mitchell. Machine Learning. The McGraw-Hill Companies, Inc. 1997 .", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Term Weighting Revisited", |
|
"authors": [ |
|
{ |
|
"first": "Amitabh Kumar", |
|
"middle": [], |
|
"last": "Singhal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "PHD theses", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amitabh Kumar Singhal. \"Term Weighting Revisited. \" PHD theses, Cornell University. 1997.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Term selection with distributional clustering for Chinese text categorization using n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Tsay", |
|
"middle": [], |
|
"last": "Jyh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-Jong", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing-Doo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Research on Computational Linguistics Conference XII", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "151--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsay Jyh-Jong and Jing-Doo Wang. \"Term selection with distributional clustering for Chinese text categorization using n-grams.\" In Research on Computational Linguistics Conference XII, pages 151-170. 1999.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improving automatic Chinese text categorization by error correction", |
|
"authors": [ |
|
{ |
|
"first": "Tsay", |
|
"middle": [], |
|
"last": "Jyh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-Jong", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing-Doo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "The Fifth International Workshop on Information Retrieval with Asian Languages(IRAL2000)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsay Jyh-Jong and Jing-Doo Wang. \"Improving automatic Chinese text categorization by error correction.\" In The Fifth International Workshop on Information Retrieval with Asian Languages(IRAL2000), pages 1-8. 2000.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A scalable approach for Chinese term extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jyh-Jong", |
|
"middle": [], |
|
"last": "Tsay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing-Doo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "2000 International Computer Sympoyium(ICS2000)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "246--253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jyh-Jong Tsay and Jing-Doo Wang. \"A scalable approach for Chinese term extraction.\" In 2000 International Computer Sympoyium(ICS2000), Taiwan, R.O.C, pages 246-253. 2000.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The Thesaurus of Daily Wordings", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R.C.Yang. The Thesaurus of Daily Wordings. Book-Spring Publishing Company, Taiwan. 1995.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A re-examination of text categorization methods", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yiming", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the 22th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval(SIGIR'99)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Yiming and Xin Liu. \"A re-examination of text categorization methods.\" In Proceedings of the 22th Ann Int ACM SIFIR Conference on Research and Development in Information Retrieval(SIGIR'99),pages 42-49. 1999.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A comparative study on feature selection in text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yiming", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the Fourteenth International Conference on Machine Learning(ICML'97)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "412--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Yiming and Jan O.Pedersen. \"A comparative study on feature selection in text categorization.\" In Proceedings of the Fourteenth International Conference on Machine Learning(ICML'97), pages 412-420. 1997.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A study of document auto-classification in mandarin Chinese", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yun-Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Research on Computational Linguistics Conference(ROCLING VI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--233", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Yun-Yan. \"A study of document auto-classification in mandarin Chinese.\" In Research on Computational Linguistics Conference(ROCLING VI), pages 217-233. 1993.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "for linear classifiers and was initially developed for information retrieval in the vector space model. The basic idea is to construct one prototype vector per class, using a training set of documents. Given a class, the training document collection consists of positive and negative examples. Positive examples are those documents belonging to that class, while negative examples are those documents not belonging to that class. The prototype vector of a class is the centroid of positive examples, tuned using negative examples. Let i D be a document in the training collection D, represented as a vector ,1", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "where \u03b7 is the parameter that adjusts the relative impact of positive and negative examples. We have experimented with different values for \u03b7, including 0.25, 0.5, 0.75 and 1. The best choice of \u03b7 in our experiment was found to be 0To classify a request document X, we compute the cosine similarity between X and each prototype vector i G , and assign to X the class whose prototype vector has the highest degree of cosine similarity with X. Cosine similarity is defined as", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "of occurrence of term j t in documents of class k C and |T| is the total number of distinct terms used in the domain of document representation. The formula used to predict the probability of class value k C for a given document X is )", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "In a kNN algorithm, each training document i D as well as the request document X are represented by means of vectors as )", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Three performance measures were used to evaluate the performance of each classifier: MicroAccuracy, MacroAccuracy and Design and Evaluation of Approaches to Automatic Chinese Text Categorization 51 AccuracyVariance. Let |C| be the number of predefined classes, and let | | i C be the number of testing news articles that are preclassified into the ith class, and let the average of the classification accuracy within classes.", |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Classification time.", |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "MicroAccuacy comparison(term selection). Design and Evaluation of Approaches to Automatic Chinese Text Categorization 53 MicroAccuacy comparison(term clustering).", |
|
"num": null |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Term clustering examples.", |
|
"num": null |
|
}, |
|
"FIGREF8": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Term frequencies in each class.", |
|
"num": null |
|
}, |
|
"FIGREF9": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "averaged statistics of terms were more robust estimates when the percentage of none-zero items increased from 0.12%(=106/90000) to 6.58%(=79/1200) due to term clustering.", |
|
"num": null |
|
}, |
|
"FIGREF10": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Recall(%)/precision(%) comparison(n=90000).", |
|
"num": null |
|
}, |
|
"FIGREF11": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Recall(%)/precision(%) comparison(n=1200).", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>d d</td><td>d</td><td colspan=\"3\">in</td><td colspan=\"6\">, where</td><td/><td colspan=\"12\">j d i j i tf</td><td>be the term frequency of</td></tr><tr><td colspan=\"20\">the jth term in document i D , and let defined as</td><td/><td>d</td><td>i</td><td>,</td><td>j</td><td>=</td><td>log</td><td>2</td><td>(</td><td>tf</td><td>i</td><td>,</td><td>j</td><td>+</td><td>) 1</td><td>*</td><td>log</td><td>2</td><td>(</td><td>df N</td><td>j</td><td>)</td><td>, where N</td></tr><tr><td colspan=\"17\">is the total number of documents in the training collection.</td><td/><td/><td/><td/><td/><td/></tr><tr><td>The prototype vector</td><td colspan=\"2\">G</td><td>i</td><td>=</td><td>(</td><td>g</td><td>i</td><td>1 ,</td><td>,</td><td>g</td><td>i</td><td>,</td><td>2</td><td>, \u22c5</td><td>\u22c5</td><td>, \u22c5</td><td>g</td><td>i</td><td>,</td><td>n</td><td>)</td><td/><td>of class</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"text": ", is the weight assigned to the jth term and n is the dimension of the document space. To determine , i j d , we use the TF-IDF weighting method[15], which has been shown to be effective when used in the vector space model. Let ," |
|
} |
|
} |
|
} |
|
} |