|
{ |
|
"paper_id": "I11-1042", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:31:00.902791Z" |
|
}, |
|
"title": "Quality-biased Ranking of Short Texts in Microblogging Services", |
|
"authors": [ |
|
{ |
|
"first": "Minlie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tsinghua University", |
|
"location": { |
|
"postCode": "100084", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "aihuang@tsinghua.edu.cn" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beihang University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tsinghua University", |
|
"location": { |
|
"postCode": "100084", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The abundance of user-generated content comes at a price: the quality of content may range from very high to very low. We propose a regression approach that incorporates various features to recommend short-text documents from Twitter, with a bias toward quality perspective. The approach is built on top of a linear regression model which includes a regularization factor inspired from the content conformity hypothesis-documents similar in content may have similar quality. We test the system on the Edinburgh Twitter corpus. Experimental results show that the regularization factor inspired from the hypothesis can improve the ranking performance and that using unlabeled data can make ranking performance better. Comparative results show that our method outperforms several baseline systems. We also make systematic feature analysis and find that content quality features are dominant in short-text ranking.", |
|
"pdf_parse": { |
|
"paper_id": "I11-1042", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The abundance of user-generated content comes at a price: the quality of content may range from very high to very low. We propose a regression approach that incorporates various features to recommend short-text documents from Twitter, with a bias toward quality perspective. The approach is built on top of a linear regression model which includes a regularization factor inspired from the content conformity hypothesis-documents similar in content may have similar quality. We test the system on the Edinburgh Twitter corpus. Experimental results show that the regularization factor inspired from the hypothesis can improve the ranking performance and that using unlabeled data can make ranking performance better. Comparative results show that our method outperforms several baseline systems. We also make systematic feature analysis and find that content quality features are dominant in short-text ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "More and more user-generated data are emerging on personal blogs, microblogging services (e.g. Twitter), social and e-commerce websites. However, the abundance of user-generated content comes at a price: there may be high-quality content, but also much spam content such as advertisements, selfpromotion, pointless babbles, or misleading information. Therefore, assessing the quality of information has become a challenging problem for many tasks such as information retrieval, review mining (Lu et al., 2010) , and question answering (Agichtein et al., 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 492, |
|
"end": 509, |
|
"text": "(Lu et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 559, |
|
"text": "(Agichtein et al., 2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on predicting the quality of very short texts which are obtained from Twitter. Twitter is a free social networking and microblogging service that enables its users to send and read other users' updates, known as \"Tweets\". Each tweet has up to 140 characters in length. With more than 200 million users (March 2011), Twitter has become one of the biggest mass media to broadcast and digest information for users. It has exhibited advantages over traditional news agencies in the success of reporting news more timely, for instance, in reporting the Chilean earthquake of 2010 (Mendoza et al., 2010) . A comparative study (Teevan et al., 2011) shows that queries issued to Twitter tend to seek more temporally relevant information than those to general web search engines.", |
|
"cite_spans": [ |
|
{ |
|
"start": 599, |
|
"end": 621, |
|
"text": "(Mendoza et al., 2010)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 665, |
|
"text": "(Teevan et al., 2011)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Due to the massive information broadcasted on Twitter, there are a huge amount of searches every day and Twitter has become an important source for seeking information. However, according to the Pear Analytics (2009) report on 2000 sample tweets, 40.5% of the tweets are pointless babbles, 37.5% are conversational tweets, and only 3.6% are news (which are most valuable for users who seek news information). Therefore, when a user issues a query, recommending tweets of good quality has become extremely important to satisfy the user's information need: how can we retrieve trustworthy and informative posts to users?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, we must note that Twitter is a social networking service that encourages various content such as news reports, personal updates, babbles, conversations, etc. In this sense, we can not say which content has better quality without considering the value to the writer or reader. For instance, for a reader, the tweets from his friends or who he follows may be more desirable than those from others, whatever the quality is. In this paper, we have a special focus on finding tweets on news topics when we construct the evaluation datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a method of incorporating various features for quality-biased tweet recommendation in response to a query. The major contributions of this paper are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose an approach for quality-biased ranking of short documents. Quality-biased is referred to the fact that we explore various features that may indicate quality. We also present a complete feature analysis to show which features are most important for this problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a content conformity hypothesis, and then formulate it into a regularization factor on top of a regression model. The performance of the system with such a factor is boosted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 It is feasible to plug unlabeled data into our approach and leveraging unlabeled data can enhance the performance. This characteristics is appealing for information retrieval tasks since only a few labeled data are available in such tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows: in Section 2 we survey related work. We then formulate our problem in Section 3 and present the hypothesis in Section 4. Various features are presented in Section 5. The dataset and experiment results are presented in Section 6 and Section 7, respectively. We summarize this work in Section 8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Quality prediction has been a very important problem in many tasks. In review mining, quality prediction has two lines of research: one line is to detect spam reviews (Jindal and Liu, 2008) or spam reviewers (Lim et al., 2010) , which is helpful to exclude misleading information; the other is to identify high-quality reviews, on which we will focus in this survey. Various factors and contexts have been studied to produce reliable and consistent quality prediction. Danescu-Niculescu-Mizil et al. (2009) stud-ied several factors on helpfulness voting of Amazon product reviews. Ghose and Ipeirotis (2010) studied several factors on assessing review helpfulness including reviewer characteristics, reviewer history, and review readability and subjectivity. Lu et al. (2010) proposed a linear regression model with various social contexts for review quality prediction. The authors employed author consistency, trust consistency and co-citation consistency hypothesis to predict more consistently. studied three factors, i.e., reviewer expertise, writing style, and timeliness, and proposed a non-linear regression model with radial basis functions to predict the helpfulness of movie reviews. Kim et al. (2006) used SVM regression with various features to predict review helpfulness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 189, |
|
"text": "(Jindal and Liu, 2008)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 226, |
|
"text": "(Lim et al., 2010)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 469, |
|
"end": 506, |
|
"text": "Danescu-Niculescu-Mizil et al. (2009)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 775, |
|
"text": "Lu et al. (2010)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1195, |
|
"end": 1212, |
|
"text": "Kim et al. (2006)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Prediction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Finding high-quality content and reliable users is also very important for question answering. Agichtein et al. (2008) proposed a classification framework of estimating answer quality. They studied content-based features (e.g. the answer length) and usage-based features derived from question answering communities. Jeon et al. 2006used nontextual features extracted from the Naver Q&A service to predict the quality of answers. Bian et al. (2009) proposed a mutual reinforcement learning framework to simultaneously predict content quality and user reputation. Shah and Pomerantz (2010) proposed 13 quality criteria for answer quality annotation and then found that contextual information such as a user's profile, can be critical in predicting the quality of answers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 118, |
|
"text": "Agichtein et al. (2008)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 447, |
|
"text": "Bian et al. (2009)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 587, |
|
"text": "Shah and Pomerantz (2010)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Prediction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "However, the task we address in this paper is quite different from previous problems. First, the document to deal with is very short. Each tweet has up to 140 characters. Thus, we are going to investigate those factors that influence the quality of such short texts. Second, as mentioned, high-quality information on Twitter (e.g., news) is only a very small proportion. Thus, how to distill high quality content from majority proportions of low-quality content may be more challenging.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quality Prediction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Twitter is of high value for both personal and commercial use. Users can post personal updates, keep tight contact with friends, and obtain timely information. Companies can broadcast latest news to and interact with customers, and collect business intelligence via opinion mining. Under this background, there has been a large body of novel applications on Twitter, including social networking mining (Kwark et al., 2010) , real time search 1 , sentiment analysis 2 , detecting influenza epidemics (Culotta, 2010) , and even predicting politics elections (Tumasjan et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 422, |
|
"text": "(Kwark et al., 2010)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 499, |
|
"end": 514, |
|
"text": "(Culotta, 2010)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 579, |
|
"text": "(Tumasjan et al., 2010)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Novel Applications on Twitter", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As Twitter has shown to report news more timely than traditional news agencies, detecting tweets of news topic has received much attention. Sakaki et al. (2010) proposed a real-time earthquake detection framework by treating each Twitter user as a sensor. addressed the problem of detecting new events from a stream of Twitter posts and adopted a method based on localitysensitive hashing to make event detection feasible on web-scale corpora. To facilitate fine-grained information extraction on news tweets, presented a work on semantic role labeling for such texts. Corvey et al. (2010) proposed a work for entity detection and entity class annotation on tweets that were posted during times of mass emergency. Ritter et al. (2010) proposed a topic model to detect conversational threads among tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 160, |
|
"text": "Sakaki et al. (2010)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 589, |
|
"text": "Corvey et al. (2010)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 714, |
|
"end": 734, |
|
"text": "Ritter et al. (2010)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Novel Applications on Twitter", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Since a large amount of tweets are posted every day, ranking strategies is extremely important for users to find information quickly. Current ranking strategy on Twitter considers relevance to an input query, information recency (the latest tweets are preferred), and popularity (the retweet times by other users). The recency information, which is useful for real-time web search, has also been explored by Dong et al. (2010) who used fresh URLs present in tweets to rank documents in response to recency sensitive queries. Duan et al. (2010) proposed a ranking SVM approach to rank tweets with various features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 408, |
|
"end": 426, |
|
"text": "Dong et al. (2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 543, |
|
"text": "Duan et al. (2010)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Novel Applications on Twitter", |
|
"sec_num": "2.2" |
|
}, |
|
{

"text": "Given a set of queries $Q = \\{q_1, q_2, \\cdots, q_n\\}$, for each query $q_k$ we have a set of short documents $D_k = \\{d_k^1, d_k^2, \\cdots\\}$, which are retrieved by our built-in search engine. The document set $D_k$ is partially labeled, i.e., a small portion of the documents in $D_k$ were annotated with a category set $C = \\{1, 2, 3, 4, 5\\}$, where 5 denotes the highest quality and 1 the lowest. Therefore, we write $D_k = D_k^U \\cup D_k^L$, where $D_k^U$ denotes the unlabeled documents and $D_k^L$ the labeled ones. Each document in $D_k$ is represented as a feature vector $d_i = (x_1, x_2, \\cdots, x_m)$, where $m$ is the total number of features.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Formulation and Methodology",

"sec_num": "3"

},
|
{

"text": "The learning task is to train a mapping function $f(D): D \\rightarrow C$ to predict the quality label of a document given a query $q$. We use a linear function $f(d) = w^T d$, where $w$ is the weight vector. Formally, we define the following objective function to guide the learning process: $\\Theta(w) = \\frac{1}{n} \\sum_{k=1}^{n} \\frac{1}{|D_k^L|} \\sum_{d_i \\in D_k^L} \\ell(w^T d_i, \\hat{y}_i) + \\alpha w^T w$ (1), where $\\ell(\\cdot, \\cdot)$ is the loss function that measures the difference between a predicted quality $f(d_i) = w^T d_i$ and the annotated quality $\\hat{y}_i$, $D_k^L$ is the set of labeled documents for query $q_k$, $\\hat{y}_i$ is the quality label for document $d_i$, $n$ is the total number of queries, and $\\alpha$ is a regularization parameter for $w$. The loss function used in this work is the square error loss: $\\ell(w^T d_i, \\hat{y}_i) = (w^T d_i - \\hat{y}_i)^2$ (2). It is easy to see that this problem has a closed-form solution: $w = \\arg\\min_w \\Theta(w) = (\\sum_{i=1}^{N_l} d_i d_i^T + \\alpha N_l I)^{-1} \\sum_{i=1}^{N_l} \\hat{y}_i d_i$ (3), where $I$ is an identity matrix of size $m$ (the dimension of the feature vector) and $N_l$ is the total number of labeled documents over all queries. As mentioned, a large number of documents are retrieved for each query while we only sample a small number for manual annotation; thus there are many more unlabeled documents yet to be utilized.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Formulation and Methodology",

"sec_num": "3"

},
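The closed-form estimate in Eq. 3 is a standard ridge-regression solution and is straightforward to compute. Below is a minimal NumPy sketch (not the authors' code), assuming the labeled feature vectors are stacked as rows of a matrix:

```python
import numpy as np

def fit_basic_model(X, y, alpha=1e-8):
    """Closed-form solution of Eq. 3.

    X: (N_l, m) matrix whose rows are the labeled feature vectors d_i.
    y: (N_l,) vector of annotated quality labels y_i.
    """
    n_labeled, m = X.shape
    # sum_i d_i d_i^T equals X^T X when documents are stored as rows.
    gram = X.T @ X + alpha * n_labeled * np.eye(m)
    # sum_i y_i d_i equals X^T y.
    return np.linalg.solve(gram, X.T @ y)

# Usage sketch: w = fit_basic_model(X_train, y_train); scores = X_test @ w
```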
|
{ |
|
"text": "To make quality prediction more consistent and to utilize the unlabeled data, we propose the content conformity hypothesis which assumes that the quality of documents similar in content should be close to each other. This hypothesis can be formulated as a regularization factor in the objective, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
|
{

"text": "$\\Theta_1(w) = \\Theta(w) + \\beta \\sum_{k=1}^{n} \\sum_{d_i, d_j \\in D_k \\wedge IsSim(d_i, d_j)} (w^T d_i - w^T d_j)^2$ (4), where $IsSim(d_i, d_j)$ is a predicate asserting that two documents are similar, and $\\beta$ is an empirical parameter. Note that $D_k$ usually consists of the labeled data only, but it may also include unlabeled documents for query $q_k$; in this way, we can utilize the unlabeled documents as well as the labeled ones. There are various ways to determine whether two documents of the same query are similar: one is to use TF*IDF cosine similarity with a threshold, and another is to use clustering, where two documents in the same cluster are deemed similar. We use the former in this paper and leave the latter for future work.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Content Conformity Hypothesis",

"sec_num": "4"

},
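A minimal sketch of the TF*IDF realization of the IsSim predicate; the scikit-learn vectorizer is an illustrative choice (the paper does not name an implementation), and the default threshold below anticipates the 0.6 value used in Section 7.1:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def similar_pairs(docs, threshold=0.6):
    """Index pairs (i, j), i < j, for which IsSim holds, i.e. whose
    TF*IDF cosine similarity is no less than the threshold."""
    tfidf = TfidfVectorizer().fit_transform(docs)  # rows are document vectors
    sims = cosine_similarity(tfidf)
    n = len(docs)
    return {(i, j) for i in range(n) for j in range(i + 1, n)
            if sims[i, j] >= threshold}
```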
|
{ |
|
"text": "To obtain the closed-form solution of Eq. 4, we define an auxiliary matrix A = (a ij ) where each a ij is 1 if document d i is similar to document d j for some query. Then, Eq. 4 can be re-written as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
|
{

"text": "$\\Theta_1(w) = \\Theta(w) + \\beta \\sum_{i<j} a_{ij} (w^T d_i - w^T d_j)^2$ (5). Let $D = [d_1, d_2, \\ldots, d_N]$ be an $m \\times N$ matrix in which each $d_i$ is a column feature vector for a document. Note that this matrix includes both labeled and unlabeled documents, and $N$ is the total number of documents. Then the last term in Eq. 5 can be rewritten as $\\sum_{i<j} a_{ij} (w^T d_i - w^T d_j)^2 = w^T D \\Lambda_A D^T w$ (6)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Content Conformity Hypothesis",

"sec_num": "4"

},
|
{ |
|
"text": "where \u039b A = \u2206 A \u2212 A and \u2206 A is a diagonal matrix with (\u2206 A ) ii = \u2211 j a ij . By some mathematical manipulations, the problem in Eq. 6 has the following closed-form solution (Zhu and Goldberg, 2009) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 197, |
|
"text": "(Zhu and Goldberg, 2009)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "w = ( N l \u2211 i=1 d i d i T + \u03b1N l I + \u03b2N l D\u039b A D T ) \u22121 N l \u2211 i=1\u0177 i d i", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
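Compared with Eq. 3, Eq. 7 only changes the matrix being inverted, adding the graph-Laplacian term built from the similarity predicate. A sketch under the same assumptions as the basic-model sketch above:

```python
import numpy as np

def fit_conformity_model(X_all, y_labeled, A, alpha=1e-8, beta=1e-4):
    """Closed-form solution of Eq. 7.

    X_all:     (N, m) matrix of ALL documents, with the N_l labeled ones first.
    y_labeled: (N_l,) annotated quality labels.
    A:         (N, N) binary matrix with A[i, j] = 1 iff IsSim(d_i, d_j).
    """
    n_labeled = len(y_labeled)
    X_l = X_all[:n_labeled]
    laplacian = np.diag(A.sum(axis=1)) - A  # Lambda_A = Delta_A - A
    m = X_all.shape[1]
    # The paper stores documents as columns of D, so D Lambda_A D^T
    # equals X_all^T Lambda_A X_all with documents stored as rows.
    lhs = (X_l.T @ X_l
           + alpha * n_labeled * np.eye(m)
           + beta * n_labeled * (X_all.T @ laplacian @ X_all))
    return np.linalg.solve(lhs, X_l.T @ y_labeled)
```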
|
{ |
|
"text": "5 Features", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We design several groups of features to indicate the quality of tweets from different perspectives. These features include: content quality, user profile and authority, sentiment polarity, query relevance, and Twitter specific features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Conformity Hypothesis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Documents with higher quality in content will be more desirable for users in search. We thus exploit several features to respect quality: Tweet's length: longer tweet may be more informative as each tweet has been limited to up to 140 characters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Quality", |
|
"sec_num": "5.1" |
|
}, |
|
{

"text": "Average term similarity: each tweet $d$ has a score of $\\frac{1}{|D_i|} \\sum_{d_i \\in D_i} sim(d, d_i)$, where $D_i$ is the document set for query $q_i$ and $sim(\\cdot, \\cdot)$ is the TF*IDF cosine similarity measure between two documents. Ratio of unique words: in some tweets, the same word is repeated many times and there are only a few unique words; tweets with more unique words may carry more information. The number of unique words is normalized by the total number of words.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Content Quality",

"sec_num": "5.1"

},
|
{ |
|
"text": "Ratio of POS tags: We compute the ratio of nouns, verbs, adverbs, adjectives, etc. in a tweet. Each POS tag corresponds to one dimension in the feature vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Quality", |
|
"sec_num": "5.1" |
|
}, |
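A sketch of how these content features might be extracted. The NLTK tokenizer and tagger are illustrative assumptions, not tools named by the paper:

```python
from collections import Counter
import nltk  # assumes the punkt and POS-tagger models are installed

def content_features(tweet):
    """Tweet length, ratio of unique words, and per-POS-tag ratios."""
    tokens = nltk.word_tokenize(tweet)
    n = max(len(tokens), 1)
    feats = {"length": len(tweet), "unique_ratio": len(set(tokens)) / n}
    for tag, count in Counter(t for _, t in nltk.pos_tag(tokens)).items():
        feats["pos_" + tag] = count / n  # one dimension per POS tag
    return feats
```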
|
{ |
|
"text": "A user with a real and complete profile may post tweets responsibly and accountably. Authoritative users (particularly celebrity users) are more probably to post highquality tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Profile and User Authority", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Profile integrity: we have several features for measuring this property: whether the user of a tweet has a description field, whether the description field contains a url, whether the user verifies her account via the registered email, and whether the user provides the location information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Profile and User Authority", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "User Activeness: the average number of tweets that the user posted per day and how many days a user has registered.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Profile and User Authority", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "User authority: In the spirit of (Duan et al., 2010), we utilize follower score (the number of followers) , mention score (the number of times a user is referred to in tweets), popularity score to measure the authority of a user. The popularity score is obtained with the PageRank algorithm based on retweet relationship (two users have an edge in the graph if a tweet posted by one user is retweeted by another user).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Profile and User Authority", |
|
"sec_num": "5.2" |
|
}, |
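A sketch of the popularity score as PageRank over the retweet graph, using networkx. The edge direction (retweeter endorses poster) and the damping factor are assumptions, since the paper does not specify them:

```python
import networkx as nx

def popularity_scores(retweet_pairs):
    """PageRank over the retweet graph. `retweet_pairs` is assumed to be
    (poster, retweeter) user pairs; each edge points from the retweeter
    to the poster, so endorsement flows toward the original author."""
    g = nx.DiGraph()
    g.add_edges_from((rt, poster) for poster, rt in retweet_pairs)
    return nx.pagerank(g, alpha=0.85)  # {user: popularity score}
```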
|
{ |
|
"text": "As mentioned, Twitter has become a popular site for expressing opinions and personal sentiment towards public persons or events. Thus we believe that a tweet with clear sentiment polarity will be more favorable for users. Therefore, we adopt a sentiment lexicon (SentiWordNet) and collect the top 200 frequent emoticons from our tweet corpus to identify positive and negative sentiment words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Positive sentiment: the ratio of positive sentiment words or emoticons in a tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Negative sentiment: the ratio of negative sentiment words or emoticons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Sensitive words: the number of sensitive words. We manually collect 458 offending or pornographic words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The emoticon lexicon and the sensitive word lexicon will be available to public.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Tweet has its own characteristics which may be used as features, such as whether a tweet contains a common url (such as http://www.google.com), whether the url is a tinyurl (Twitter has a service which shortens urls to very short url), the number of hashtags (topical terms leading by a '#') in a tweet, how many users are mentioned in the tweet (a user is mentioned if the tweet contains a term like @user name), and how many times the tweet has been re-posted (so-called 'retweeted').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Twitter-Specific Features", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "As our task is to supply quality-biased ranking of tweets for an input query, query-specific features will favor those tweets relevant to the input query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query-specific Features", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "Query term frequency: the frequency of the query term (exact matching) in a tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query-specific Features", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "BM25 score: the BM25 score is used to quantify the overall relevance of a tweet to the query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query-specific Features", |
|
"sec_num": "5.5" |
|
}, |
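The paper does not report its BM25 parameterization; a sketch with the conventional Okapi BM25 defaults:

```python
import math
from collections import Counter

def bm25_score(query_terms, doc_terms, doc_freq, n_docs, avg_len,
               k1=1.2, b=0.75):
    """Okapi BM25 score of one tweet for a query. `doc_freq` maps a term
    to the number of documents in the collection containing it."""
    tf = Counter(doc_terms)
    score = 0.0
    for term in query_terms:
        if tf[term] == 0:
            continue
        idf = math.log(1 + (n_docs - doc_freq[term] + 0.5) / (doc_freq[term] + 0.5))
        norm = tf[term] * (k1 + 1) / (tf[term] + k1 * (1 - b + b * len(doc_terms) / avg_len))
        score += idf * norm
    return score
```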
|
{ |
|
"text": "Recency: the time lag (in days) between the current tweet and the earliest tweet in the collection for the query. In this case, the more recent tweets may contain latest information, which will be more desirable for users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query-specific Features", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "To investigate the factors that influence the quality of short texts, we use the Edinburgh Twitter Corpus (Petrovic et al., 2010) 3 in which each tweet has up to 140 characters. The corpus contains 97 million tweets, and takes up 14 GB of disk space uncompressed. The corpus was collected through the Twitter streaming API from a period spanning November 11th 2009 until February 1st 2010. Each tweet has some meta-data: the timestamp of the tweet, an anonymized user name, the textual content, and the posting source (via web, mobile, etc.).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We collect a set of news queries using Google Trends. Intuitively, those hot queries in Google Trends will also have high possibility to be discussed on Twitter. The top 10 queries per day captured by Google Trends for the period 11th November, 2009 to 1st February, 2010 are collected. We then randomly sample 60 hot queries from these queries. And for each query, we use our own built-in search engine (based on BM25) to retrieve a set of tweets for manual annotation. To minimize the labeling cost, for each query, we sample 150-200 tweets for annotation as each query may return thousands of results, which makes the complete annotation impossible. These queries are grouped into four categories: thing (10 queries), person (15), event (30) and place (5). Table 1 shows some example queries of each type. For all these queries, there are about 9,000 unique tweets to be annotated.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 760, |
|
"end": 767, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Then, two computer science students were asked to annotate the tweets. The quality of a tweet was judged to a 5-star likert scale, according to the relevance, informativeness, readability, and politeness of the content. If the label difference of two tweets is larger than 1, the tweets were re-annotated until the quality difference is within 1. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We adopt information retrieval metrics to measure the performance since the task can be viewed as a ranking task (ranking document according to its quality). nDCG (J\u00e4rvelin and Kek\u00e4l\u00e4inen., 2000) is used to evaluate the ranking performance, as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 195, |
|
"text": "(J\u00e4rvelin and Kek\u00e4l\u00e4inen., 2000)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "nDCG(\u2126, k) = 1 |\u2126| \u2211 q\u2208\u2126 1 Z q k \u2211 i=1 2 r q i \u2212 1 log(1 + i)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "where \u2126 is the set of test queries, k indicates the top k positions in a ranked list, Z q is a normalization factor obtained from a perfect ranking (based on the labels), and r q i is the relevance score (the annotated quality label) for the i-th document in the predicted ranking list for query q. We also evaluate the system in terms of M AP 4 where the document whose quality score is larger than 3 is viewed as relevant and otherwise irrelevant. Note that the ranking task is approached as a regression problem, mean square error is thus adopted to measure the learning performance:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "M SE(D) = 1 |D| \u2211 dj \u2208D (f (d j ) \u2212\u0177 j ) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "where D is the test document collection, f (d j ) is the predicted label, and\u0177 j is the annotated label. nDCG and M SE have a significant difference in that nDCG only considers the top k documents for each query while M SE takes into account all documents in the test collection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "6.1" |
|
}, |
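A sketch of both metrics as defined above; the paper leaves the log base in nDCG unspecified, so natural log is assumed here:

```python
import math

def ndcg_at_k(labels_in_ranked_order, k):
    """nDCG@k for one query; the overall score averages this over all test
    queries, as in the formula above."""
    def dcg(labels):
        return sum((2 ** r - 1) / math.log(1 + i)
                   for i, r in enumerate(labels[:k], start=1))
    z = dcg(sorted(labels_in_ranked_order, reverse=True))  # perfect ranking
    return dcg(labels_in_ranked_order) / z if z > 0 else 0.0

def mse(predicted, annotated):
    """Mean square error over the whole test collection."""
    return sum((f - y) ** 2 for f, y in zip(predicted, annotated)) / len(predicted)
```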
|
{ |
|
"text": "In this section, we will first assess whether the proposed hypothesis holds on our labeled data. We then evaluate whether the performance of the model with the regularization factor (as defined in Eq. 4) can be enhanced. We next compare the regression model with several baselines: BM25 model, Length Model (tweets containing more words may have better quality), ReTweet Model (tweets of higher quality may be re-posted by more users), and a Learning-to-Rank model (L2R) as used in (Duan et al., 2010 )(a ranking SVM model). Finally, we investigate the influence of different feature groups on the performance. We conduct five-fold cross validation in the following experiments (3/5 partitions are for training, 1/5 are used as a validation set, and the left for test). ", |
|
"cite_spans": [ |
|
{ |
|
"start": 482, |
|
"end": 500, |
|
"text": "(Duan et al., 2010", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment and Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We will evaluate whether the content conformity hypothesis holds on our manually annotated dataset. To define the similarity predicate (IsSim in Eq. 4), we assume two documents are similar if their TF*IDF cosine similarity is no less than 0.6. We then compute the statistics of the quality difference of similar pairs and that of dissimilar pairs. We find that more than 53% similar pairs have exactly identical quality labels, out of all similar pairs. And more than 93% similar pairs have a quality difference within 1. For dissimilar pairs, only 35% pairs have identical quality labels. This shows that if two documents are similar, there is high probability that their quality labels are close to each other, and that if two documents are dissimilar, it's more likely that they have more divergent quality scores. These statistics are shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 848, |
|
"end": 856, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hypothesis Evaluation", |
|
"sec_num": "7.1" |
|
}, |
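A sketch of the statistics behind Figure 1, assuming `labels` holds the annotated quality of each document and `pairs` comes from the IsSim sketch above:

```python
def conformity_stats(labels, pairs):
    """Fractions of pairs whose labels are identical and whose labels
    differ by at most 1, over a given set of document-index pairs."""
    diffs = [abs(labels[i] - labels[j]) for i, j in pairs]
    if not diffs:
        return 0.0, 0.0
    return (sum(d == 0 for d in diffs) / len(diffs),
            sum(d <= 1 for d in diffs) / len(diffs))
```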
|
{ |
|
"text": "As shown in the figure, we can see that the hypothesis holds. Therefore, we can safely formulate the hypothesis into a regularization factor in the subsequent experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hypothesis Evaluation", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We explain here how the parameters (\u03b1, \u03b2) are chosen. In Table 2 , we can see clearly that the best performance is obtained when \u03b1 = 1e \u2212 8. In Table 3 , the model that utilizes only labeled data obtains most of the best nD-CG scores when \u03b2 = 0.001. For the MAP metric, the scores when \u03b2 = 0.001 and \u03b2 = 0.0001 are very close. Unlike MSE that considers all documents in the test collection, nDCG only considers the top ranked documents, which are more desirable for parameter choosing since most users are only interested in top ranked items. In Table 4, the model that utilizes unlabeled data obtains best performance when \u03b2 = 0.0001. These optimal parameters will be used in our subsequent experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 151, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parameter Tuning", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "In this section, we address two issues: (1) whether the regularization factor inspired by content conformity hypothesis (Eq. 4) can improve the performance; and (2) whether the performance can be improved if using unlabeled data (see D k in Eq. 4). As shown in Figure 2 , under the hypothesis the ranking performance is boosted compared to the basic model. As shown in Eq. 4, unlabeled data can be included in the regularization factor, thus we add the same number of unlabeled documents 5 for each query. We conduct experiments with and without such unlabeled data respectively. Adding unlabeled data can improve the ranking performance. This is appealing for most IR applications since \u03b1 1e-10 1e-9 1e-8 1e-7 1e-6 1e-5 0.0001 0.001 0.01 nDCG@ 1.863 1.862 1.893 1.893 1.859 1.845 1.763 1.315 0.908 Table 3 : The performance of different \u03b2 parameters with only labeled data (\u03b1=1e-8 according to Table 2 ). The bolded cells show the optimal performance. \u03b2 1e-10 1e-9 1e-8 1e-7 1e-6 1e-5 0.0001 0.001 0.01 nDCG@1 0.542 0.542 0.542 0.542 0.542 0.542 0.671 0.277 0.147 nDCG@2 0.521 0.521 0.521 0.521 0.521 0.570 0.701 0.429 0.146 nDCG@3 0.527 0.527 0.527 0.527 0.558 0.565 0.603 0.438 0.168 nDCG@4 0.518 0.518 0.486 0.518 0.522 0.550 0.610 0.437 0.208 nDCG@5 0.519 0.516 0.488 0.545 0.548 0.527 0.579 0.429 0.218 nDCG@6 0.514 0.518 0.499 0.537 0.547 0.528 0.576 0.431 0.235 nDCG@7 0.529 0.535 0.503 0.538 0.541 0.516 0.565 0.453 0.256 nDCG@8 0.520 0.525 0.503 0.528 0.530 0.532 0.558 0.454 0.286 nDCG@9 0.513 0.518 0.520 0.521 0.535 0.534 0.553 0.478 0.300 nDCG@10 0.523 0. Table 4 : The performance of different \u03b2 parameters with unlabeled data (\u03b1=1e-8 according to Table 2 ). The bolded cells show the optimal performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 799, |
|
"end": 806, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 895, |
|
"end": 902, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1570, |
|
"end": 1577, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1663, |
|
"end": 1670, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Influence of the Regularization Factor", |
|
"sec_num": "7.3" |
|
}, |
|
|
{ |
|
"text": "To demonstrate the performance of our approach, we compare our system to three unsupervised models and one supervised model. The unsupervised models are: the BM25 model, the Length model which ranks tweets by the document length in tokens, and the RTNum model which ranks tweets by the frequency of being re-posted. The supervised model is a ranking SVM model (L2R) that was used in (Duan et al., 2010) . In this experiment, the model (as indicated by \"Full\" in Fig. 3 ) is the best model presented in the preceding section. We can see that the proposed approach outperforms those unsupervised models remarkably, and it also performs better than the L2R model (Ranking SVM). No-ticeably, the Length model is strong in performance, which shows the document length is a good indicator of quality. The RTNum model takes advantage of a Twitter specific property -a document of higher quality may be posted repeatedly by other users with higher probability. This is a special property for Twitter documents. Not surprisingly, the supervised methods outperform all unsupervised methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 402, |
|
"text": "(Duan et al., 2010)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 462, |
|
"end": 468, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to Baselines", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "To further demonstrate that our approach outperforms the baselines, we list the results (in terms of nDCG@k = 1, 5, 10, and M AP ) in Table 5 which clearly shows the advantages of our proposed approach. Note that our performance shown in Table 5 is significantly better than all the baselines (p-value<0.001 by t-test). We choose the significance level of 0.01 through the paper. Table 5 : Performance comparison between systems. Our results are significantly better than the baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 141, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 245, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 387, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to Baselines", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "To investigate the influence of different features on performance, we perform a feature ablation study. As shown in Section 5, we classify features into different groups. In this experiment, we first train the basic model (as defined in Eq. 1) with all the features, and then remove one group of features each time. We also experiment with only content features to justify the effectiveness of these features. Figure 4 : nDCG@k performance with different feature groups. 'Full' means all the features. '-' means removing that feature group from the full feature set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 418, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Study", |
|
"sec_num": "7.5" |
|
}, |
|
{ |
|
"text": "We can see that when removing content features, the performance drops substantially, which indicates that content is the most important indicator of quality. When using only content features, the performance is also fairly good (but significantly worse than the Full model, p-value<0.01 by t-test), showing that content is reliable features for this task. When removing Twitter specific features, there is a significant drop in performance (p-value<0.01). This indicates that such prior knowledge on tweets is helpful for ranking such documents. However, removing user profile and authority features does not affect the system much. Similar observation can be seen when removing sentiment features and queryspecific features respectively. For query-specific features, it seems that such features play a light-weighted role. There may be two reasons for this: First, the documents are obtained from the BM25 model in our approach, thus all documents are more or less relevant to the query while our approach can be treated as a re-ranking process; Second, the document is very short, thus query-specific features may not be as important as in retrieving longer documents, more specifically, the query term frequency may not be as accurate as in longer documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Study", |
|
"sec_num": "7.5" |
|
}, |
|
{ |
|
"text": "To further investigate which specific content features are important, we conduct a further feature ablation study on content features. We find that the average term similarity (p-value<0.01), ratio of unique words (p-value=0.08), and ratio of POS tags (p-value<0.02) play more roles in performance. Not as expected, removing the length features does not lead to as a remarkable drop as removing other features (p-value=0.12). However, as shown in Figure 3, the Length model is strong in performance. This may infer that the length feature may be complemented by other content features. Figure 5 : nDCG@k performance with different content features. '-' means removing the feature group from the full content features.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 453, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 594, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Study", |
|
"sec_num": "7.5" |
|
}, |
|
{ |
|
"text": "Note that these experiments are performed with the basic model (Eq. 1). We also conduct similar feature studies with the regularization factor and similar observations are seen.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Study", |
|
"sec_num": "7.5" |
|
}, |
|
{ |
|
"text": "We presented a regression model which incorporates various features for suggesting quality-biased short-text documents. We proposed a content conformity hypothesis and formulated it into a regularization factor. The performance was boosted with such a factor. Moreover, unlabeled data can be used seamlessly in this approach, and leveraging such data leads to improvements in ranking performance. The comparative results demonstrate the effectiveness of finding high-quality tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Short-text ranking is still in its infancy. There is still much work to do. For example, it is feasible to plug other hypotheses in this approach. As an instance, celebrity users may be more likely to post responsible tweets than common users. We also note that the quality of a tweet is not only determined by the text itself, but also by the external resources it points to (via a tiny URL) or it attaches (a picture or a video). Therefore, considering these factors would also be helpful in finding high-quality posts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "http://twittertroll.com/ 2 http://twittersentiment.appspot.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Though the corpus is not available now on the original website due to licensing problems, readers are encouraged to request a copy from us. We are downloading a new dateset for further evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://en.wikipedia.org/wiki/ Information_retrieval", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Arbitrary number of documents may be added but we will evaluate this as future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Twitter Under Crisis: Can we trust what we RT", |
|
"authors": [ |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Mendoza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Poblete", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Castillo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "1st Workshop on Social Media Analytics (SO-MA'10)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcelo Mendoza, Barbara Poblete, Carlos Castillo. 2010. Twitter Under Crisis: Can we trust what we RT?. 1st Workshop on Social Media Analytics (SO- MA'10), July 25, 2010, Washington DC, USA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Finding High-Quality Content in Social Media. WSDM'08", |
|
"authors": [ |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Agichtein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Castillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debora", |
|
"middle": [], |
|
"last": "Donato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "183--193", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eugene Agichtein, Carlos Castillo, Debora Donato. 2008. Finding High-Quality Content in Social Media. WSDM'08, February 11-12, 2008, Palo Alto, Califor- nia, USA. pp 183-193.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning to Recognize Reliable Users and Content in Social Media with Coupled Mutual Reinforcement", |
|
"authors": [ |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Bian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yandong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ding", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Agichtein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyuan", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang Bian, Yandong Liu, Ding Zhou, Eugene Agichtein, Hongyuan Zha. 2009. Learning to Recognize Reliable Users and Content in Social Media with Coupled Mu- tual Reinforcement. WWW 2009, April 20-24, 2009, Madrid, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A Framework to Predict the Quality of Answers with Non-Textual Features. SIGIR'06", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Jiwoon Jeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwoon Jeon, W. Bruce Croft, Joon Ho Lee, Soyeon Park. 2006. A Framework to Predict the Quality of Answers with Non-Textual Features. SIGIR'06, August 6-11, 2006, Seattle, Washington, USA", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Evaluating and Predicting Answer Quality in Community QA. SI-GIR'10", |
|
"authors": [ |
|
{ |
|
"first": "Chirag", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jefferey", |
|
"middle": [], |
|
"last": "Pomerantz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chirag Shah, Jefferey Pomerantz. 2010. Evaluating and Predicting Answer Quality in Community QA. SI- GIR'10, July 19-23, 2010, Geneva, Switzerland.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Earthquake Shakes Twitter Users: Real-time Event Detection by Social Sensors. WWW2010", |
|
"authors": [ |
|
{ |
|
"first": "Takeshi", |
|
"middle": [], |
|
"last": "Sakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yutaka", |
|
"middle": [], |
|
"last": "Matsuo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takeshi Sakaki, Makoto Okazaki, Yutaka Matsuo. 2010. Earthquake Shakes Twitter Users: Real-time Even- t Detection by Social Sensors. WWW2010, April 26- 30, 2010, Raleigh, North Carolina.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Hosung Park, and Sue Moon", |
|
"authors": [ |
|
{ |
|
"first": "Haewoon", |
|
"middle": [], |
|
"last": "Kwak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhyun", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haewoon Kwak, Changhyun Lee, Hosung Park, and Sue Moon. 2010. What is Twitter, a Social Network or a News Media? WWW 2010, April 26-30, 2010, Raleigh, North Carolina, USA.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Unsupervised Modeling of Twitter Conversations", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Ritter, Colin Cherry, Bill Dolan. Unsupervised Modeling of Twitter Conversations. 2010. Human Language Technologies: The 2010 Annual Confer- ence of the North American Chapter of the ACL, pages 172-180.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Streaming First Story Detection with application to Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Sasa", |
|
"middle": [], |
|
"last": "Petrovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Lavrenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "The 2010 Annual Conference of the North American Chapter of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "181--189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sasa Petrovic, Miles Osborne, Victor Lavrenko. 2010. Streaming First Story Detection with application to Twitter. The 2010 Annual Conference of the North American Chapter of the ACL, pages 181-189, Los Angeles, California, June 2010.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Time is of the Essence: Improving Recency Ranking Using Twitter Data", |
|
"authors": [ |
|
{ |
|
"first": "Anlei", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiqiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranam", |
|
"middle": [], |
|
"last": "Kolari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Diaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaohui", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyuan", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anlei Dong, Ruiqiang Zhang, Pranam Kolari, Jing Bai, Fernando Diaz, Yi Chang, Zhaohui Zheng, Hon-gyuan Zha. 2010. Time is of the Essence: Improving Recency Ranking Using Twitter Data. WWW 2010, April 26- 30, 2010, Raleigh, North Carolina, USA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Towards detecting influenza epidemics by analyzing Twitter messages", |
|
"authors": [ |
|
{ |
|
"first": "Aron", |
|
"middle": [], |
|
"last": "Culotta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "1st Workshop on Social Media Analytics (SOMA'10)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aron Culotta. 2010. Towards detecting influenza epi- demics by analyzing Twitter messages. 1st Workshop on Social Media Analytics (SOMA'10), July 25, 2010, Washington, DC, USA.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Predicting Elections with Twitter: What 140 Characters Reveal about Political Sentiment. Association for the Advancement of Artificial Intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Andranik", |
|
"middle": [], |
|
"last": "Tumasjan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timm", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Sprenger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Sandner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabell", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Welpe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andranik Tumasjan, Timm O. Sprenger, Philipp G. Sand- ner, Isabell M. Welpe. 2010. Predicting Elections with Twitter: What 140 Characters Reveal about Political Sentiment. Association for the Advancement of Arti- ficial Intelligence (www.aaai.org).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "An Empirical Study on Learning to Rank of Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Yajuan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heung-Yeung", |
|
"middle": [], |
|
"last": "Shum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "295--303", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yajuan Duan, Long Jiang, Tao Qin, Ming Zhou and Heung-Yeung Shum. 2010. An Empirical Study on Learning to Rank of Tweets. Proceedings of the 23rd International Conference on Computational Linguis- tics (Coling 2010), pages 295-303, Beijing, August 2010.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Semantic Role Labeling for News Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongyang", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changning", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of 23rd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaohua Liu, Kuan Li, Bo Han, Ming Zhou, Long Jiang, Zhongyang Xiong and Changning Huang. 2010. Se- mantic Role Labeling for News Tweets. Proceedings of 23rd International Conference on Computational Linguistics (Coling 2010)", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Pear Analytics", |
|
"authors": [], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pear Analytics. 2009. Twitter Study-August 2009.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Estimating the Helpfulness and Economic Impact of Product Reviews: Mining Text and Reviewer Characteristics", |
|
"authors": [ |
|
{ |
|
"first": "Anindya", |
|
"middle": [], |
|
"last": "Ghose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panagiotis", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Ipeirotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Available at SSRN", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anindya Ghose, Panagiotis G. Ipeirotis. 2010. Estimat- ing the Helpfulness and Economic Impact of Prod- uct Reviews: Mining Text and Reviewer Character- istics. (January 24, 2010), Available at SSRN: http://ssrn.com/abstract=1261751.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Exploiting social context for review quality prediction", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panayiotis", |
|
"middle": [], |
|
"last": "Tsaparas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Ntoulas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livia", |
|
"middle": [], |
|
"last": "Polanyi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Lu, Panayiotis Tsaparas, Alexandros Ntoulas, Livia Polanyi. 2010. Exploiting social context for review quality prediction. WWW 2010, April 26-30, 2010, Raleigh, North Carolina, USA.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Modeling and Predicting the Helpfulness of Online Reviews", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangji", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aijun", |
|
"middle": [], |
|
"last": "An", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Eighth IEEE International Conference on Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "443--452", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Xiangji Huang, Aijun An, Xiaohui Yu. 2008. Modeling and Predicting the Helpfulness of Online Reviews. 2008 Eighth IEEE International Conference on Data Mining. pp. 443-452.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Automatically Assessing Review Helpfulness", |
|
"authors": [ |
|
{ |
|
"first": "Soo-Min", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Chklovski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Pennacchiotti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "423--430", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soo-Min Kim, Patrick Pantel, Tim Chklovski, Marco Pennacchiotti. 2006. Automatically Assessing Review Helpfulness. Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing (EMNLP 2006), pp. 423-430, Sydney, July 2006.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Opinion Spam and Analysis. WSDM'08", |
|
"authors": [ |
|
{ |
|
"first": "Nitin", |
|
"middle": [], |
|
"last": "Jindal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitin Jindal, Bing Liu. 2008. Opinion Spam and Anal- ysis. WSDM'08, February 11-12, 2008, Palo Alto, California, USA.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Detecting Product Review Spammers using Rating Behaviors. CIKM'10", |
|
"authors": [ |
|
{ |
|
"first": "Ee-Peng", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viet-An", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitin", |
|
"middle": [], |
|
"last": "Jindal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hady", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Lauw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ee-Peng Lim, Viet-An Nguyen, Nitin Jindal, Bing Liu, Hady W. Lauw. 2010. Detecting Product Review S- pammers using Rating Behaviors. CIKM'10, October 26-30, 2010, Toronto, Ontario, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Twitter in Mass Emergency: What NLP Techniques Can Contribute", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Corvey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Vieweg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Travis", |
|
"middle": [], |
|
"last": "Rood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Computational Linguistics in a World of Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William J. Corvey, Sarah Vieweg, Travis Rood, Martha Palmer. 2010. Twitter in Mass Emergency: What NLP Techniques Can Contribute. Proceedings of the NAACL HLT 2010 Workshop on Computational Lin- guistics in a World of Social Media, pages 23-24, Los Angeles, California, June 2010.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The Edinburgh Twitter Corpus", |
|
"authors": [ |
|
{ |
|
"first": "Sasa", |
|
"middle": [], |
|
"last": "Petrovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Lavrenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Computational Linguistics in a World of Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sasa Petrovic, Miles Osborne, Victor Lavrenko. 2010. The Edinburgh Twitter Corpus. Proceedings of the NAACL HLT 2010 Workshop on Computational Lin- guistics in a World of Social Media, pages 25-26, Los Angeles, California, June 2010", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Ir evaluation methods for retrieving highly relevant documents", |
|
"authors": [ |
|
{ |
|
"first": "Kalervo", |
|
"middle": [], |
|
"last": "J\u00e4rvelin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaana", |
|
"middle": [], |
|
"last": "Kek\u00e4l\u00e4inen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "SIGIR 2000: Proceedings of the 23th annual international ACM SIGIR conference on Research and development in information retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalervo J\u00e4rvelin and Jaana Kek\u00e4l\u00e4inen. 1998. Ir evalua- tion methods for retrieving highly relevant documents. In SIGIR 2000: Proceedings of the 23th annual inter- national ACM SIGIR conference on Research and de- velopment in information retrieval, pages 41-48, 2000.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Introduction to Semi-Supervised Learning", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronald", |
|
"middle": [], |
|
"last": "Brachman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Dietterich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Synthesis Lectures on Artificial Intelligence and Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojin Zhu, Andrew B. Goldberg, Ronald Brachman, Thomas Dietterich. 2009. Introduction to Semi- Supervised Learning. Synthesis Lectures on Artificial Intelligence and Machine Learning. Morgan & Clay- pool Publishers, 2009.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "TwitterSearch: A Comparison of Microblog Search and Web Search. WSDM11", |
|
"authors": [ |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Teevan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meredith", |
|
"middle": [ |
|
"Ringel" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaime Teevan, Daniel Ramage, Meredith Ringel Morris. 2009. #TwitterSearch: A Comparison of Microblog Search and Web Search. WSDM11, February 9C12, 2011, Hong Kong, China.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "The hypothesis holds on the annotated dataset. y-axis is the percentage of pairs and x-axis is the quality difference between two documents in a pair.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "nDCG performance (y-axis) for top k ranks. The similarity predicate (IsSim(a, b) in Eq. 4) is implemented with TF*IDF cosine similarity.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "542 0.542 0.542 0.542 0.542 0.542 0.542 0.671 0.480 nDCG@2 0.521 0.521 0.521 0.521 0.521 0.570 0.570 0.600 0.472 nDCG@3 0.527 0.527 0.480 0.480 0.527 0.596 0.549 0.541 0.501 nDCG@4 0.520 0.518 0.481 0.481 0.518 0.543 0.515 0.558 0.468 nDCG@5 0.503 0.528 0.512 0.512 0.516 0.521 0.505 0.521 0.448 nDCG@6 0.514 0.515 0.521 0.521 0.511 0.522 0.533 0.539 0.468 nDCG@7 0.524 0.523 0.524 0.524 0.529 0.517 0.517 0.547 0.461 nDCG@8 0.515 0.525 0.514 0.514 0.527 0.521 0.519 0.548 0.473 nDCG@9 0.518 0.519 0.513 0.513 0.524 0.536 0.521 0.543 0.470 nDCG@10 0.528 0.529 0.517 0.517 0.535 0.537 0.545 0.545 0.472 MAP 0.386 0.385 0.367 0.367 0.369 0.375 0.388 0.387 0.264 MSE", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "nDCG performance (y-axis) with different approaches. x-axis is the top k ranks. The Full model used unlabeled data with the regularization factor.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"text": "The performance of different \u03b1 parameters. The bolded cells show the optimal performance.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |