{ "paper_id": "N06-1029", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T14:46:13.693350Z" }, "title": "Unsupervised and Semi-supervised Learning of Tone and Pitch Accent", "authors": [ { "first": "Gina-Anne", "middle": [], "last": "Levow", "suffix": "", "affiliation": { "laboratory": "", "institution": "University of Chicago", "location": { "addrLine": "1100 E. 58th St. Chicago", "postCode": "60637", "region": "IL", "country": "USA" } }, "email": "levow@cs.uchicago.edu" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Recognition of tone and intonation is essential for speech recognition and language understanding. However, most approaches to this recognition task have relied upon extensive collections of manually tagged data obtained at substantial time and financial cost. In this paper, we explore two approaches to tone learning with substantially reductions in training data. We employ both unsupervised clustering and semi-supervised learning to recognize pitch accent in English and tones in Mandarin Chinese. In unsupervised Mandarin tone clustering experiments, we achieve 57-87% accuracy on materials ranging from broadcast news to clean lab speech. For English pitch accent in broadcast news materials, results reach 78%. In the semi-supervised framework, we achieve Mandarin tone recognition accuracies ranging from 70% for broadcast news speech to 94% for read speech, outperforming both Support Vector Machines (SVMs) trained on only the labeled data and the 25% most common class assignment level. These results indicate that the intrinsic structure of tone and pitch accent acoustics can be exploited to reduce the need for costly labeled training data for tone learning and recognition.", "pdf_parse": { "paper_id": "N06-1029", "_pdf_hash": "", "abstract": [ { "text": "Recognition of tone and intonation is essential for speech recognition and language understanding. However, most approaches to this recognition task have relied upon extensive collections of manually tagged data obtained at substantial time and financial cost. In this paper, we explore two approaches to tone learning with substantially reductions in training data. We employ both unsupervised clustering and semi-supervised learning to recognize pitch accent in English and tones in Mandarin Chinese. In unsupervised Mandarin tone clustering experiments, we achieve 57-87% accuracy on materials ranging from broadcast news to clean lab speech. For English pitch accent in broadcast news materials, results reach 78%. In the semi-supervised framework, we achieve Mandarin tone recognition accuracies ranging from 70% for broadcast news speech to 94% for read speech, outperforming both Support Vector Machines (SVMs) trained on only the labeled data and the 25% most common class assignment level. These results indicate that the intrinsic structure of tone and pitch accent acoustics can be exploited to reduce the need for costly labeled training data for tone learning and recognition.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Tone and intonation play a crucial role across many languages. However, the use and structure of tone varies widely, ranging from lexical tone which determines word identity to pitch accent signalling information status. 
Here we consider the recognition of lexical tones in Mandarin Chinese syllables and pitch accent in English.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Although intonation is an integral part of language and is requisite for understanding, recognition of tone and pitch accent remains a challenging problem. The majority of current approaches to tone recognition in Mandarin and other East Asian tone languages integrate tone identification with the general task of speech recognition within a Hidden Markov Model framework. In some cases tone recognition is done only implicitly when a word or syllable is constrained jointly by the segmental acoustics and a higher level language model and the word identity determines tone identity. Other strategies build explicit and distinct models for the syllable final region, the vowel and optionally a final nasal, for each tone.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Recent research has demonstrated the importance of contextual and coarticulatory influences on the surface realization of tones. (Xu, 1997; Shen, 1990) The overall shape of the tone or accent can be substantially modified by the local effects of adjacent tone and intonational elements. Furthermore, broad scale phenomena such as topic and phrase structure can affect pitch height, and pitch shape may be variably affected by the presence of boundary tones. These findings have led to explicit modeling of tonal context within the HMM framework. In addition to earlier approaches that employed phrase structure (Fujisaki, 1983) , several recent approaches to tone recognition in East Asian languages (Wang and Seneff, 2000; Zhou et al., 2004) have incorporated elements of local and broad range contextual influence on tone. Many of these techniques create explicit context-dependent models of the phone, tone, or accent for each context in which they appear, either using the tone sequence for left or right context or using a simplified high-low contrast, as is natural for integration in a Hidden Markov Model speech recognition framework. In pitch accent recognition, recent work by (Hasegawa-Johnson et al., 2004) has integrated pitch accent and boundary tone recognition with speech recognition using prosodically conditioned models within an HMM framework, improving both speech and prosodic recognition.", "cite_spans": [ { "start": 129, "end": 139, "text": "(Xu, 1997;", "ref_id": "BIBREF22" }, { "start": 140, "end": 151, "text": "Shen, 1990)", "ref_id": "BIBREF12" }, { "start": 611, "end": 627, "text": "(Fujisaki, 1983)", "ref_id": "BIBREF5" }, { "start": 700, "end": 723, "text": "(Wang and Seneff, 2000;", "ref_id": "BIBREF19" }, { "start": 724, "end": 742, "text": "Zhou et al., 2004)", "ref_id": "BIBREF24" }, { "start": 1187, "end": 1218, "text": "(Hasegawa-Johnson et al., 2004)", "ref_id": "BIBREF7" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Since these approaches are integrated with HMM speech recognition models, standard HMM training procedures which rely upon large labeled training sets are used for tone recognition as well. 
Other tone and pitch accent recognition approaches using other classification frameworks such as support vector machines (Thubthong and Kijsirikul, 2001) and decision trees with boosting and bagging (Sun, 2002) have relied upon large labeled training setsthousands of instances -for classifier learning. This labelled training data is costly to construct, both in terms of time and money, with estimates for some intonation annotation tasks reaching tens of times realtime. This annotation bottleneck as well as a theoretical interest in the learning of tone motivates the use of unsupervised or semi-supervised approaches to tone recognition whereby the reliance on this often scarce resource can be reduced.", "cite_spans": [ { "start": 311, "end": 343, "text": "(Thubthong and Kijsirikul, 2001)", "ref_id": "BIBREF18" }, { "start": 389, "end": 400, "text": "(Sun, 2002)", "ref_id": "BIBREF16" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "Little research has been done in the application of unsupervised and semi-supervised techniques for tone and pitch accent recognition. Some preliminary work by (Gauthier et al., 2005) employs selforganizing maps and measures of f0 velocity for tone learning. In this paper we explore the use of spectral and standard k-means clustering for unsupervised acquisition of tone, and the framework of manifold regularization for semi-supervised tone learning. We find that in clean read speech, unsupervised techniques can identify the underlying Mandarin tone categories with high accuracy, while even on noisier broadcast news speech, Mandarin tones can be recognized well above chance levels, with English pitch accent recognition at near the levels achieved with fully supervised Support Vector Machine (SVM) classifiers. Likewise in the semi-supervised framework, tone classification outperforms both most common class assignment and a comparable SVM trained on only the same small set of labeled instances, without recourse to the unlabeled instances.", "cite_spans": [ { "start": 160, "end": 183, "text": "(Gauthier et al., 2005)", "ref_id": "BIBREF6" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The remainder of paper is organized as follows. Section 2 describes the data sets on which English pitch accent and Mandarin tone learning are performed and the feature extraction process. Section 3 describes the unsupervised and semisupervised techniques employed. Sections 4 and 5 describe the experiments and results in unsupervised and semi-supervised frameworks respectively. Section 6 presents conclusions and future work.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "We consider two corpora: one in English for pitch accent recognition and two in Mandarin for tone recognition. We introduce each briefly below.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Data Sets", "sec_num": "2" }, { "text": "We employ a subset of the Boston Radio News Corpus (Ostendorf et al., 1995) , read by female speaker F2B, comprising 40 minutes of news material. The corpus includes pitch accent, phrase and boundary tone annotation in the ToBI framework (Silverman et al., 1992) aligned with manual transcription and syllabification of the materials. 
Following earlier research (Ostendorf and Ross, 1997; Sun, 2002) , we collapse the ToBI pitch accent labels to four classes: unaccented, high, low, and downstepped high for experimentation.", "cite_spans": [ { "start": 51, "end": 75, "text": "(Ostendorf et al., 1995)", "ref_id": "BIBREF10" }, { "start": 238, "end": 262, "text": "(Silverman et al., 1992)", "ref_id": "BIBREF15" }, { "start": 362, "end": 388, "text": "(Ostendorf and Ross, 1997;", "ref_id": "BIBREF9" }, { "start": 389, "end": 399, "text": "Sun, 2002)", "ref_id": "BIBREF16" } ], "ref_spans": [], "eq_spans": [], "section": "English Corpus", "sec_num": "2.1" }, { "text": "Mandarin Chinese is a language with lexical tone in which each syllable carries a tone and the meaning of the syllable is jointly determined by the tone and segmental information. Mandarin Chinese has four canonical lexical tones, typically described as follows: 1) high level, 2) mid-rising, 3) low fallingrising, and 4) high falling. 1 The canonical pitch con- We employ data from two distinct sources in the experiments reported here.", "cite_spans": [ { "start": 336, "end": 337, "text": "1", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Mandarin Chinese Tone Data", "sec_num": "2.2" }, { "text": "The first data set is very clean speech data drawn from a collection of read speech collected under laboratory conditions by (Xu, 1999) . In these materials, speakers read a set of short sentences where syllable tone and position of focus were varied to assess the effects of focus position on tone realization. Focus here corresponds to narrow focus, where speakers were asked to emphasize a particular word or syllable. Tones on focussed syllables were found to conform closely to the canonical shapes described above, and in previous supervised experiments using a linear support vector machine classifier trained on focused syllables, accuracy approached 99%. For these materials, pitch tracks were manually aligned to the syllable and automatically smoothed and timenormalized by the original researcher, resulting in 20 pitch values for each syllable.", "cite_spans": [ { "start": 125, "end": 135, "text": "(Xu, 1999)", "ref_id": "BIBREF23" } ], "ref_spans": [], "eq_spans": [], "section": "Read Speech", "sec_num": "2.2.1" }, { "text": "The second data set is drawn from the Voice of America Mandarin broadcast news, distributed by the Linguistic Data Consortium 2 , as part of the Topic Detection and Tracking (TDT-2) evaluation. Using the corresponding anchor scripts, automatically word-segmented, as gold standard transcription, audio from the news stories was force-aligned to the text transcripts. The forced alignment employed the language porting functionality of the University of speech data described below contains no such instances.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Broadcast News Speech", "sec_num": "2.2.2" }, { "text": "2 http://www.ldc.upenn.edu", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Broadcast News Speech", "sec_num": "2.2.2" }, { "text": "Colorado Sonic speech recognizer (Pellom et al., 2001) . A mapping from the transcriptions to English phone sequences supported by Sonic was created using a Chinese character-pinyin pronunciation dictionary and a manually constructed mapping from pinyin sequences to the closest corresponding English phone sequences. 
3", "cite_spans": [ { "start": 33, "end": 54, "text": "(Pellom et al., 2001)", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Broadcast News Speech", "sec_num": "2.2.2" }, { "text": "Using Praat's (Boersma, 2001 ) \"To pitch\" and \"To intensity\" functions and the alignments generated above, we extract acoustic features for the prosodic region of interest. This region corresponds to the \"final\" region of each syllable in Chinese, including the vowel and any following nasal, and to the syllable nucleus in English. 4 For all pitch and intensity features in both datasets, we compute per-speaker zscore normalized log-scaled values. We extract pitch values from points across valid pitch tracked regions in the syllable. We also compute mean pitch across the syllable. Recent phonetic research (Xu, 1997; Shih and Kochanski, 2000) has identified significant effects of carryover coarticulation from preceding adjacent syllable tones. To minimize these effects consistent with the pitch target approximation model , we compute slope features based on the second half of this final region, where this model predicts that the underlying pitch height and slope targets of the syllable will be most accurately approached. We further log-scale and normalize slope values to compensate for greater speeds of pitch fall than pitch rise (Xu and Sun, 2002) . We consider two types of contextualized features as well, to model and compensate for coarticulatory effects from neighboring syllables. The first set of features, referred to as \"extended features\", includes the maximum and mean pitch from adjacent syllables as well as the nearest pitch point or points from the preceding and following syllables. These features extend the modeled tone beyond the strict bounds of the syllable segmentation. A second set of contextual features, termed \"difference features\", captures the change in pitch maximum, mean, midpoint, and slope as well as intensity maximum be-tween the current syllable and the previous or following syllable.", "cite_spans": [ { "start": 14, "end": 28, "text": "(Boersma, 2001", "ref_id": "BIBREF2" }, { "start": 333, "end": 334, "text": "4", "ref_id": null }, { "start": 611, "end": 621, "text": "(Xu, 1997;", "ref_id": "BIBREF22" }, { "start": 622, "end": 647, "text": "Shih and Kochanski, 2000)", "ref_id": "BIBREF14" }, { "start": 1145, "end": 1163, "text": "(Xu and Sun, 2002)", "ref_id": "BIBREF20" } ], "ref_spans": [], "eq_spans": [], "section": "Acoustic Features", "sec_num": "2.3" }, { "text": "In prior supervised experiments using support vector machines (Levow, 2005) , variants of this representation achieved competitive recognition levels for both tone and pitch accent recognition. Since many of the experiments for Mandarin Chinese tone recognition deal with clean, careful lab speech, we anticipate little coarticulatory influence, and use a simple pitch-only context-free representation for our primary Mandarin tone recognition experiments. For primary experiments in pitch accent recognition, we employ a high-performing contextualized representation in (Levow, 2005) , using both \"extended\" and \"difference\" features computed only on the preceding syllable. 
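To make this representation concrete, the sketch below assembles one such feature vector, assuming the pitch contours of the syllable-final region and of the preceding syllable have already been extracted (e.g., with Praat) and that per-speaker log-pitch statistics are available. The names (build_features, znorm_log, speaker_mean, speaker_std) are illustrative rather than taken from the paper, and the intensity terms and the additional slope normalization described above are omitted for brevity.

```python
import numpy as np

def znorm_log(values, speaker_mean, speaker_std):
    """Per-speaker z-score of log-scaled values (pitch in Hz or intensity)."""
    return (np.log(np.asarray(values, dtype=float)) - speaker_mean) / speaker_std

def build_features(pitch, prev_pitch, speaker_mean, speaker_std):
    """Assemble a per-syllable vector from the final-region pitch contour and
    the preceding syllable's contour (assumes at least two tracked samples in
    the second half, consistent with the 50 ms tracked-pitch minimum)."""
    p = znorm_log(pitch, speaker_mean, speaker_std)
    q = znorm_log(prev_pitch, speaker_mean, speaker_std)
    n = len(p)

    # Pitch samples across the tracked region plus the overall mean.
    sampled = p[np.linspace(0, n - 1, 5).astype(int)]
    mean_pitch = p.mean()

    # Slope fit only over the second half of the final region, where the
    # pitch-target approximation model predicts the target is approached.
    half = p[n // 2:]
    slope = np.polyfit(np.linspace(0.0, 1.0, len(half)), half, 1)[0]

    # "Extended" context: max, mean, and nearest pitch point of the previous syllable.
    extended = [q.max(), q.mean(), q[-1]]

    # "Difference" context: change in max, mean, and midpoint versus the previous syllable.
    diffs = [p.max() - q.max(), mean_pitch - q.mean(), p[n // 2] - q[len(q) // 2]]

    return np.concatenate([sampled, [mean_pitch, slope], extended, diffs])
```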
We will also report some contrastive experimental results varying the amount of contextual information.", "cite_spans": [ { "start": 62, "end": 75, "text": "(Levow, 2005)", "ref_id": "BIBREF8" }, { "start": 571, "end": 584, "text": "(Levow, 2005)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Acoustic Features", "sec_num": "2.3" }, { "text": "The bottleneck of time and monetary cost associated with manual annotation has generated significant interest in the development of techniques for machine learning and classification that reduce the amount of annotated data required for training. Likewise, learning from unlabeled data aligns with the perspective of language acquisition, as child learners must identify these linguistic categories without explicit instruction by observation of natural language interaction. Of particular interest are techniques in unsupervised and semi-supervised learning where the structure of unlabeled examples may be exploited. Here we consider both unsupervised techniques with no labeled training data and semi-supervised approaches where unlabeled training data is used in conjunction with small amounts of labeled data. A wide variety of unsupervised clustering techniques have been proposed. In addition to classic clustering techniques such as k-means, recent work has shown good results for many forms of spectral clustering including those by (Shi and Malik, 2000; Belkin and Niyogi, 2002; Fischer and Poland, 2004) . In the unsupervised experiments reported here, we employ asymmetric k-lines clustering by (Fischer and Poland, 2004) using code available at the authors' site, as our primary unsupervised learning approach. Asymmetric clustering is distinguished from other techniques by the construction and use of context-dependent kernel radii. Rather than assuming that all clusters are uniform and spherical, this approach enhances clustering effectiveness when clusters may not be spherical and may vary in size and shape. We will see that this flexibility yields a good match to the structure of Mandarin tone data where both shape and size of clusters vary across tones. In additional contrastive experiments reported below, we also compare kmeans clustering, symmetric k-lines clustering (Fischer and Poland, 2004), and Laplacian Eigenmaps (Belkin and Niyogi, 2002) with k-lines clustering. The spectral techniques all perform spectral decomposition on some representation of the affinity or adjacency graph.", "cite_spans": [ { "start": 1042, "end": 1063, "text": "(Shi and Malik, 2000;", "ref_id": "BIBREF13" }, { "start": 1064, "end": 1088, "text": "Belkin and Niyogi, 2002;", "ref_id": "BIBREF0" }, { "start": 1089, "end": 1114, "text": "Fischer and Poland, 2004)", "ref_id": "BIBREF4" }, { "start": 1207, "end": 1233, "text": "(Fischer and Poland, 2004)", "ref_id": "BIBREF4" }, { "start": 1949, "end": 1974, "text": "(Belkin and Niyogi, 2002)", "ref_id": "BIBREF0" } ], "ref_spans": [], "eq_spans": [], "section": "Unsupervised and Semi-supervised Learning", "sec_num": "3" }, { "text": "For semi-supervised learning, we employ learners in the Manifold Regularization framework developed by (Belkin et al., 2004) . This work postulates an underlying intrinsic distribution on a low dimensional manifold for data with an observed, ambient distribution that may be in a higher dimensional space. It further aims to preserve locality in that elements that are neighbors in the ambient space should remain \"close\" in the intrinsic space. 
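For reference, the objective optimized in this framework (a sketch following the cited Belkin et al. formulation, not notation from this paper) combines a loss over the l labeled examples with an ambient-norm penalty and an intrinsic penalty computed from the graph Laplacian L built over all l labeled and u unlabeled points:

```latex
f^{*} = \arg\min_{f \in \mathcal{H}_K}\;
    \frac{1}{l}\sum_{i=1}^{l} V\bigl(x_i, y_i, f\bigr)
    \;+\; \gamma_A \,\lVert f \rVert_K^{2}
    \;+\; \frac{\gamma_I}{(l+u)^{2}}\, \mathbf{f}^{\top} L\, \mathbf{f}
```

Here V is the loss, the gammas weight the ambient and intrinsic penalties, and bold f is the vector of function values on all l+u points. With the hinge loss for V this yields the Laplacian SVM used below; setting the intrinsic weight to zero recovers a standard SVM trained on the labeled examples only, which is the comparison classifier used in Section 5.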
A semisupervised classification algorithm, termed \"Laplacian Support Vector Machines\", allows training and classification based on both labeled and unlabeled training examples.", "cite_spans": [ { "start": 103, "end": 124, "text": "(Belkin et al., 2004)", "ref_id": "BIBREF1" } ], "ref_spans": [], "eq_spans": [], "section": "Unsupervised and Semi-supervised Learning", "sec_num": "3" }, { "text": "We contrast results under both unsupervised and semi-supervised learning with most common class assignment and previous results employing fully supervised approaches, such as SVMs.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Unsupervised and Semi-supervised Learning", "sec_num": "3" }, { "text": "We executed four sets of experiments in unsupervised clustering using the (Fischer and Poland, 2004) asymmetric clustering algorithm.", "cite_spans": [ { "start": 74, "end": 100, "text": "(Fischer and Poland, 2004)", "ref_id": "BIBREF4" } ], "ref_spans": [], "eq_spans": [], "section": "Unsupervised Clustering Experiments", "sec_num": "4" }, { "text": "In these experiments, we chose increasingly difficult and natural test materials. In the first experiment with the cleanest data, we used only focused syllables from the read Mandarin speech dataset. In the second, we included both in-focus (focused) and pre-focus syllables from the read Mandarin speech dataset. 5 In the third and fourth experiments, we chose subsets of broadcast news report data, from the Voice of America (VOA) in Mandarin and Boston University Radio News corpus in English.", "cite_spans": [ { "start": 314, "end": 315, "text": "5", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Experiment Configuration", "sec_num": "4.1" }, { "text": "In all experiments on Mandarin data, we performed clustering on a balanced sampling set of tones, with 100 instances from each class 6 , yielding a baseline for assignment of a single class to all instances of 25%. We then employed a two-stage repeated clustering process, creating 2 or 3 clusters at each stage.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiment Configuration", "sec_num": "4.1" }, { "text": "For experiments on English data, we extracted a set of 1000 instances, sampling pitch accent types according to their frequency in the collection. We performed a single clustering phase with 2 to 16 clusters, reporting results at different numbers of clusters.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiment Configuration", "sec_num": "4.1" }, { "text": "For evaluation, we report accuracy based on assigning the most frequent class label in each cluster to all members of the cluster.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experiment Configuration", "sec_num": "4.1" }, { "text": "We find that in all cases, accuracy based on the asymmetric clustering is significantly better than most common class assignment and in some cases approaches labelled classification accuracy. Unsurprisingly, the best results, in absolute terms, are achieved on the clean focused syllables, reaching 87% accuracy. For combined in-focus and pre-focus syllables, this rate drops to 77%. 
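The experimental procedure just described (cluster a balanced sample in two stages, then score each cluster by its most frequent gold label) can be summarized in a short sketch. KMeans stands in here for the asymmetric k-lines learner, whose original implementation the paper uses; two_stage_clusters and majority_label_accuracy are illustrative names, not code from the paper.

```python
import numpy as np
from sklearn.cluster import KMeans

def two_stage_clusters(X, first_k=3, second_k=2):
    """Two-stage repeated clustering: split the data into first_k clusters,
    then re-cluster each resulting cluster into (up to) second_k subclusters."""
    X = np.asarray(X)
    top = KMeans(n_clusters=first_k, n_init=10).fit_predict(X)
    assignments = np.empty(len(X), dtype=int)
    next_id = 0
    for c in np.unique(top):
        idx = np.where(top == c)[0]
        k = min(second_k, len(idx))
        sub = KMeans(n_clusters=k, n_init=10).fit_predict(X[idx])
        assignments[idx] = sub + next_id
        next_id += k
    return assignments

def majority_label_accuracy(cluster_ids, gold):
    """Accuracy when every member of a cluster receives that cluster's most
    frequent gold label (the evaluation used for all clustering runs)."""
    cluster_ids, gold = np.asarray(cluster_ids), np.asarray(gold)
    correct = 0
    for c in np.unique(cluster_ids):
        _, counts = np.unique(gold[cluster_ids == c], return_counts=True)
        correct += counts.max()
    return correct / len(gold)
```

For the Mandarin runs this scoring would be applied to the balanced sample of 100 instances per tone; for English, a single clustering stage with 2 to 16 clusters replaces two_stage_clusters.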
These rates contrast with the 99% and 93% accuracies obtained in supervised classification using linear SVM classifiers with several thousand labelled training examples (Surendran et al., 2005) .", "cite_spans": [ { "start": 535, "end": 559, "text": "(Surendran et al., 2005)", "ref_id": "BIBREF17" } ], "ref_spans": [], "eq_spans": [], "section": "Experimental Results", "sec_num": "4.2" }, { "text": "On broadcast news audio, accuracy for Mandarin reaches 57%, still much better than the 25% level, though below a 72% accuracy achieved using supervised linear SVMs with 600 labeled training examples. Interestingly, for English pitch accent recognition, accuracy reaches 78.4%, approaching the 80.1% accuracy achieved with SVMs on a comparable data representation.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental Results", "sec_num": "4.2" }, { "text": "We further contrast the use of different unsupervised learners, comparing the three spectral techniques and k-means with Euclidean distance. All contrasts are presented for English pitch accent classification, ranging over different numbers of clusters, with the best parameter setting of neighborhood size. The results are illustrated in Figure 2 . K-means and the asymmetric clustering technique are presented for the clean focal Mandarin speech under the standard two stage clustering, in Table 1 .", "cite_spans": [], "ref_spans": [ { "start": 339, "end": 347, "text": "Figure 2", "ref_id": "FIGREF1" }, { "start": 492, "end": 499, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Contrastive Experiments", "sec_num": "4.3" }, { "text": "The asymmetric k-lines clustering approach consistently outperforms the corresponding symmetric clustering learner, as well as Laplacian Eigenmaps with binary weights for pitch accent classification. Somewhat surprisingly, k-means clustering outperforms all of the other approaches when producing 3-14 clusters. Accuracy for the optimal choice of clusters and parameters is comparable for asymmetric k-lines clustering and k-means, and somewhat better than all other techniques considered. The careful feature selection process for tone and pitch accent modeling may reduce the difference between the spectral and k-means approaches. In contrast, for the four tone classification task in Mandarin using two stage clustering with 2 or 3 initial clusters, the best clustering using asymmetric k-lines strongly outperforms k-means.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Contrastive Experiments", "sec_num": "4.3" }, { "text": "We also performed a contrastive experiment in pitch accent recognition in which we excluded contextual information from both types of contextual features. We find little difference for the majority of the unsupervised clustering algorithms, with results from symmetric, asymmetric and k-means clustering differing by less than 1% in absolute accuracy. It is, however, worth noting that exclusion of these features from experiments using supervised learning led to a 4% absolute reduction in accuracy. (Table 1: Clustering effectiveness for asymmetric k-lines and k-means on clear focused speech; asymmetric k-lines 87%, k-means 74.75%.)", "cite_spans": [], "ref_spans": [ { "start": 240, "end": 247, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Contrastive Experiments", "sec_num": "4.3" }, { "text": "An examination of both the clusters formed and the structure of the data provides insight into the effectiveness of this process.
Figure 3 displays 2 dimensions of the Mandarin four-tone data from the focused read speech, where normalized pitch mean is on the x-axis and slope is on the y-axis. The separation of classes and their structure is clear. One observes that rising tone (tone 2) lies above the x-axis, while high-level (tone 1) lies along the x-axis. Low (tone 3) and falling (tone 4) tones lie mostly below the x-axis as they generally have falling slope. Low tone (3) appears to the left of falling tone (4) in the figure, corresponding to differences in mean pitch.", "cite_spans": [], "ref_spans": [ { "start": 130, "end": 138, "text": "Figure 3", "ref_id": "FIGREF2" } ], "eq_spans": [], "section": "Discussion", "sec_num": "4.4" }, { "text": "In clustering experiments, an initial 2- or 3-way split separates falling from rising or level tones based on pitch slope. The second stage of clustering splits either by slope (tones 1,2, some 3) or by pitch height (tones 3,4). These clusters capture the natural structure of the data where tones are characterized by pitch height and slope targets.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Discussion", "sec_num": "4.4" }, { "text": "By exploiting a semi-supervised approach, we hope to enhance classification accuracy over that achievable by unsupervised methods alone by incorporating small amounts of labeled data while exploiting the structure of the unlabeled examples.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Semi-supervised Learning", "sec_num": "5" }, { "text": "We again conduct contrastive experiments using both the clean focused read speech and the more challenging broadcast news data. In each Mandarin case, for each class, we use only a small set (40) of labeled training instances in conjunction with an additional sixty unlabeled instances, testing on 40 instances. For English pitch accent, we restricted the task to the binary classification of syllables as accented or unaccented. For the one thousand samples we proportionally labeled 200 unaccented examples and 100 accented examples. 7 We configure the Laplacian SVM classification with binary neighborhood weights, radial basis function kernel, and cosine distance measure typically with 6 nearest neighbors. Following (C-C. Cheng and Lin, 2001 ), for N-class classification we train N(N-1)/2 binary classifiers. We then classify each test instance using all of the classifiers and assign the most frequent prediction, with ties broken randomly. We contrast these results both with conventional SVM classification with a radial basis function kernel excluding the unlabeled training examples and with most common class assignment, which gives a 25% baseline.", "cite_spans": [ { "start": 536, "end": 537, "text": "7", "ref_id": null }, { "start": 728, "end": 747, "text": "Cheng and Lin, 2001", "ref_id": null } ], "ref_spans": [], "eq_spans": [], "section": "Experiment Configuration", "sec_num": "5.1" }, { "text": "For the Mandarin focused read syllables, we achieve 94% accuracy on the four-way classification task.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental Results", "sec_num": "5.2" }, { "text": "For the noisier broadcast news data, the accuracy is 70% for the comparable task. These results all substantially outperform the 25% most common class assignment level.
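The pairwise scheme in the experiment configuration above (train one binary classifier per pair of classes, then vote, breaking ties randomly) is sketched below. The Laplacian SVM itself is not re-implemented here; scikit-learn's SVC with an RBF kernel is used as a stand-in and corresponds only to the purely supervised comparison classifier. train_one_vs_one and predict_by_vote are illustrative names.

```python
import random
from collections import Counter
from itertools import combinations

import numpy as np
from sklearn.svm import SVC

def train_one_vs_one(X, y, make_classifier=lambda: SVC(kernel="rbf")):
    """Train one binary classifier per pair of classes: N(N-1)/2 models for N classes."""
    X, y = np.asarray(X), np.asarray(y)
    models = {}
    for a, b in combinations(sorted(set(y)), 2):
        mask = (y == a) | (y == b)
        clf = make_classifier()
        clf.fit(X[mask], y[mask])
        models[(a, b)] = clf
    return models

def predict_by_vote(models, x):
    """Apply every pairwise classifier to one instance and return the most
    frequent prediction, with ties broken randomly."""
    votes = Counter(clf.predict(np.asarray(x).reshape(1, -1))[0]
                    for clf in models.values())
    best = max(votes.values())
    return random.choice([label for label, n in votes.items() if n == best])
```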
The semi-supervised classifier also reliably outperforms an SVM classifier with an RBF kernel trained on the same labeled training instances. This baseline SVM classifier with a very small training set achieves 81% accuracy on clean read speech, but only 35% on the broadcast news speech. Finally, for English pitch accent recognition in broadcast news data, the classifier achieves 81.5%, relative to 84% accuracy in the fully supervised case.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental Results", "sec_num": "5.2" }, { "text": "We have demonstrated the effectiveness of both unsupervised and semi-supervised techniques for recognition of Mandarin Chinese syllable tones and English pitch accents using acoustic features alone to capture pitch target height and slope. Although outperformed by fully supervised classification techniques using much larger samples of labelled training data, these unsupervised and semi-supervised techniques perform well above most common class assignment, in the best cases approaching 90% of supervised levels, and, where comparable, well above a good discriminative classifier trained on a comparably small set of labelled data. Unsupervised techniques achieve accuracies of 87% on the cleanest read speech, reaching 57% on data from a standard Mandarin broadcast news corpus, and over 78% on pitch accent classification for English broadcast news. Semi-supervised classification in the Mandarin four-class classification task reaches 94% accuracy on read speech, 70% on broadcast news data, improving dramatically over both the simple baseline of 25% and a standard SVM with an RBF kernel trained only on the labeled examples.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion & Future Work", "sec_num": "6" }, { "text": "Future work will consider a broader range of tone and intonation classification, including the richer tone set of Cantonese as well as Bantu family tone languages, where annotated data truly is very rare. We also hope to integrate a richer contextual representation of tone and intonation consistent with phonetic theory within this unsupervised and semisupervised learning framework. We will further explore improvements in classification accuracy based on increases in labeled and unlabeled training examples.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusion & Future Work", "sec_num": "6" }, { "text": "For the experiments in this paper, we exclude the neutral tone, which appears on unstressed syllables, because the clear", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "All tone transformations due to third tone sandhi are applied to create the label set.4 We restrict our experiments to syllables with at least 50 ms of tracked pitch in this final region.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "Post-focus syllables typically have decreased pitch height and range, resulting in particularly poor recognition accuracy. 
We chose not to concentrate on this specific tone modeling problem here.6 Sample sizes were bounded to support rapid repeated experimentation and for consistency with the relatively small VOA data set.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "The framework is transductive; the test samples are a subset of the unlabeled training examples.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [ { "text": "We would like to thank Yi Xu for granting access to the read speech data, Vikas Sindhwani, Mikhail Belkin, and Partha Niyogi for their implementation of Laplacian SVM, and Igor Fischer and J. Poland for their implementation of asymmetric clustering.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Acknowledgements", "sec_num": null } ], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Laplacian eigenmaps and spectral techniques for embedding and clustering", "authors": [ { "first": "Mikhail", "middle": [], "last": "Belkin", "suffix": "" }, { "first": "Partha", "middle": [], "last": "Niyogi", "suffix": "" } ], "year": 2002, "venue": "Proceeding of NIPS'02", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Mikhail Belkin and Partha Niyogi. 2002. Laplacian eigenmaps and spectral techniques for embedding and clustering. In Proceeding of NIPS'02.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Manifold regularization: a geometric framework for learning from examples", "authors": [ { "first": "M", "middle": [], "last": "Belkin", "suffix": "" }, { "first": "P", "middle": [], "last": "Niyogi", "suffix": "" }, { "first": "V", "middle": [], "last": "Sindhwani", "suffix": "" } ], "year": 2004, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Belkin, P. Niyogi, and V. Sindhwani. 2004. Mani- fold regularization: a geometric framework for learn- ing from examples. Technical Report TR-2004-06, University of Chicago Computer Science.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "Praat, a system for doing phonetics by computer", "authors": [ { "first": "P", "middle": [], "last": "Boersma", "suffix": "" } ], "year": 2001, "venue": "Glot International", "volume": "5", "issue": "9", "pages": "341--345", "other_ids": {}, "num": null, "urls": [], "raw_text": "P. Boersma. 2001. Praat, a system for doing phonetics by computer. Glot International, 5(9-10):341-345.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "LIBSVM:a library for support vector machines", "authors": [ { "first": "C-C", "middle": [], "last": "Cheng", "suffix": "" }, { "first": "C-J", "middle": [], "last": "Lin", "suffix": "" } ], "year": 2001, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "C-C.Cheng and C-J. Lin. 2001. LIBSVM:a library for support vector machines. Software available at: http://www.csie.ntu.edu.tw/ cjlin/libsvm.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "New methods for spectral clustering", "authors": [ { "first": "I", "middle": [], "last": "Fischer", "suffix": "" }, { "first": "J", "middle": [], "last": "Poland", "suffix": "" } ], "year": 2004, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "I. Fischer and J. Poland. 2004. New methods for spectral clustering. 
Technical Report ISDIA-12-04, IDSIA.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "Dynamic characteristics of voice fundamental frequency in speech and singing", "authors": [ { "first": "H", "middle": [], "last": "Fujisaki", "suffix": "" } ], "year": 1983, "venue": "The Production of Speech", "volume": "", "issue": "", "pages": "39--55", "other_ids": {}, "num": null, "urls": [], "raw_text": "H. Fujisaki. 1983. Dynamic characteristics of voice fun- damental frequency in speech and singing. In The Pro- duction of Speech, pages 39-55. Springer-Verlag.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Neural-network simulation of tonal categorization based on f0 velocity profiles", "authors": [ { "first": "Bruno", "middle": [], "last": "Gauthier", "suffix": "" }, { "first": "Rushen", "middle": [], "last": "Shi", "suffix": "" }, { "first": "Yi", "middle": [], "last": "Xu", "suffix": "" }, { "first": "Robert", "middle": [], "last": "Proulx", "suffix": "" } ], "year": 2005, "venue": "Journal of the Acoustical Society of America", "volume": "117", "issue": "2", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Bruno Gauthier, Rushen Shi, Yi Xu, and Robert Proulx. 2005. Neural-network simulation of tonal categoriza- tion based on f0 velocity profiles. Journal of the Acoustical Society of America, 117, Pt. 2:2430.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Speech recognition models of the interdependence among syntax, prosody, and segmental acoustics", "authors": [ { "first": "M", "middle": [], "last": "Hasegawa-Johnson", "suffix": "" }, { "first": "Jennifer", "middle": [], "last": "Cole", "suffix": "" }, { "first": "Chilin", "middle": [], "last": "Shih", "suffix": "" }, { "first": "Ken", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Aaron", "middle": [], "last": "Cohen", "suffix": "" }, { "first": "Sandra", "middle": [], "last": "Chavarria", "suffix": "" }, { "first": "Heejin", "middle": [], "last": "Kim", "suffix": "" }, { "first": "Taejin", "middle": [], "last": "Yoon", "suffix": "" }, { "first": "Sarah", "middle": [], "last": "Borys", "suffix": "" }, { "first": "Jeung-Yoon", "middle": [], "last": "Choi", "suffix": "" } ], "year": 2004, "venue": "HLT/NAACL-2004", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Hasegawa-Johnson, Jennifer Cole, Chilin Shih abd Ken Chen, Aaron Cohen, Sandra Chavarria, Heejin Kim, Taejin Yoon, Sarah Borys, and Jeung-Yoon Choi. 2004. Speech recognition models of the interdepen- dence among syntax, prosody, and segmental acous- tics. In HLT/NAACL-2004.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Context in multi-lingual tone and pitch accent prediction", "authors": [ { "first": "Gina-Anne", "middle": [], "last": "Levow", "suffix": "" } ], "year": 2005, "venue": "Proc. of Interspeech 2005", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Gina-Anne Levow. 2005. Context in multi-lingual tone and pitch accent prediction. In Proc. of Interspeech 2005 (to appear).", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "A multi-level model for recognition of intonation labels", "authors": [ { "first": "M", "middle": [], "last": "Ostendorf", "suffix": "" }, { "first": "K", "middle": [], "last": "Ross", "suffix": "" } ], "year": 1997, "venue": "Computing Prosody", "volume": "", "issue": "", "pages": "291--308", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Ostendorf and K. Ross. 1997. 
A multi-level model for recognition of intonation labels. In Y. Sagisaka, N. Campbell, and N. Higuchi, editors, Computing Prosody, pages 291-308.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "The Boston University radio news corpus", "authors": [ { "first": "M", "middle": [], "last": "Ostendorf", "suffix": "" }, { "first": "P", "middle": [ "J" ], "last": "Price", "suffix": "" }, { "first": "S", "middle": [], "last": "Shattuck-Hufnagel", "suffix": "" } ], "year": 1995, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "M. Ostendorf, P. J. Price, and S. Shattuck-Hufnagel. 1995. The Boston University radio news corpus. Technical Report ECS-95-001, Boston University.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "University of Colorado dialog systems for travel and navigation", "authors": [ { "first": "B", "middle": [], "last": "Pellom", "suffix": "" }, { "first": "W", "middle": [], "last": "Ward", "suffix": "" }, { "first": "J", "middle": [], "last": "Hansen", "suffix": "" }, { "first": "K", "middle": [], "last": "Hacioglu", "suffix": "" }, { "first": "J", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "X", "middle": [], "last": "Yu", "suffix": "" }, { "first": "S", "middle": [], "last": "Pradhan", "suffix": "" } ], "year": 2001, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "B. Pellom, W. Ward, J. Hansen, K. Hacioglu, J. Zhang, X. Yu, and S. Pradhan. 2001. University of Colorado dialog systems for travel and navigation.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Tonal co-articulation in Mandarin", "authors": [ { "first": "Xiao-Nan", "middle": [], "last": "Shen", "suffix": "" } ], "year": 1990, "venue": "Journal of Phonetics", "volume": "18", "issue": "", "pages": "281--295", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xiao-Nan Shen. 1990. Tonal co-articulation in Man- darin. Journal of Phonetics, 18:281-295.", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Normalized cuts and image segmentation", "authors": [ { "first": "Jianbo", "middle": [], "last": "Shi", "suffix": "" }, { "first": "Jitendra", "middle": [], "last": "Malik", "suffix": "" } ], "year": 2000, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "volume": "22", "issue": "8", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Jianbo Shi and Jitendra Malik. 2000. Normalized cuts and image segmentation. IEEE Transactions on Pat- tern Analysis and Machine Intelligence, 22(8).", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Chinese tone modeling with stem-ml", "authors": [ { "first": "C", "middle": [], "last": "Shih", "suffix": "" }, { "first": "G", "middle": [ "P" ], "last": "Kochanski", "suffix": "" } ], "year": 2000, "venue": "Proceedings of the International Conference on Spoken Language Processing", "volume": "2", "issue": "", "pages": "67--70", "other_ids": {}, "num": null, "urls": [], "raw_text": "C. Shih and G. P. Kochanski. 2000. Chinese tone model- ing with stem-ml. 
In Proceedings of the International Conference on Spoken Language Processing, Volume 2, pages 67-70.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "ToBI: A standard for labelling English prosody", "authors": [ { "first": "K", "middle": [], "last": "Silverman", "suffix": "" }, { "first": "M", "middle": [], "last": "Beckman", "suffix": "" }, { "first": "J", "middle": [], "last": "Pitrelli", "suffix": "" }, { "first": "M", "middle": [], "last": "Ostendorf", "suffix": "" }, { "first": "C", "middle": [], "last": "Wightman", "suffix": "" }, { "first": "P", "middle": [], "last": "Price", "suffix": "" }, { "first": "J", "middle": [], "last": "Pierrehumbert", "suffix": "" }, { "first": "J", "middle": [], "last": "Hirschberg", "suffix": "" } ], "year": 1992, "venue": "Proceedings of ICSLP", "volume": "", "issue": "", "pages": "867--870", "other_ids": {}, "num": null, "urls": [], "raw_text": "K. Silverman, M. Beckman, J. Pitrelli, M. Osten- dorf, C. Wightman, P. Price, J. Pierrehumbert, and J. Hirschberg. 1992. ToBI: A standard for labelling English prosody. In Proceedings of ICSLP, pages 867-870.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Pitch accent prediction using ensemble machine learning", "authors": [ { "first": "Xuejing", "middle": [], "last": "Sun", "suffix": "" } ], "year": 2002, "venue": "Proceedings of ICSLP-2002", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Xuejing Sun. 2002. Pitch accent prediction using ensem- ble machine learning. In Proceedings of ICSLP-2002.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Tone recognition in Mandarin using focus", "authors": [ { "first": "D", "middle": [], "last": "Surendran", "suffix": "" }, { "first": "Gina-Anne", "middle": [], "last": "Levow", "suffix": "" }, { "first": "Yi", "middle": [], "last": "Xu", "suffix": "" } ], "year": 2005, "venue": "Proc. of Interspeech 2005", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "D. Surendran, Gina-Anne Levow, and Yi Xu. 2005. Tone recognition in Mandarin using focus. In Proc. of Inter- speech 2005 (to appear).", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Support vector machines for Thai phoneme recognition", "authors": [ { "first": "Nuttakorn", "middle": [], "last": "Thubthong", "suffix": "" }, { "first": "Boonserm", "middle": [], "last": "Kijsirikul", "suffix": "" } ], "year": 2001, "venue": "International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems", "volume": "9", "issue": "6", "pages": "803--813", "other_ids": {}, "num": null, "urls": [], "raw_text": "Nuttakorn Thubthong and Boonserm Kijsirikul. 2001. Support vector machines for Thai phoneme recogni- tion. International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems, 9(6):803-813.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "Improved tone recognition by normalizing for coarticulation and intonation effects", "authors": [ { "first": "C", "middle": [], "last": "Wang", "suffix": "" }, { "first": "S", "middle": [], "last": "Seneff", "suffix": "" } ], "year": 2000, "venue": "Proceedings of 6th International Conference on Spoken Language Processing", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "C. Wang and S. Seneff. 2000. Improved tone recogni- tion by normalizing for coarticulation and intonation effects. 
In Proceedings of 6th International Confer- ence on Spoken Language Processing.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Maximum speed of pitch change and how it may relate to speech", "authors": [ { "first": "Yi", "middle": [], "last": "Xu", "suffix": "" }, { "first": "X", "middle": [], "last": "Sun", "suffix": "" } ], "year": 2002, "venue": "Journal of the Acoustical Society of America", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yi Xu and X. Sun. 2002. Maximum speed of pitch change and how it may relate to speech. Journal of the Acoustical Society of America, 111.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "A pitch target approximation model for f0 contours in Mandarin", "authors": [ { "first": "C", "middle": [ "X" ], "last": "Xu", "suffix": "" }, { "first": "Y", "middle": [], "last": "Xu", "suffix": "" }, { "first": "L.-S", "middle": [], "last": "Luo", "suffix": "" } ], "year": 1999, "venue": "Proceedings of the 14th International Congress of Phonetic Sciences", "volume": "", "issue": "", "pages": "2359--2362", "other_ids": {}, "num": null, "urls": [], "raw_text": "C.X. Xu, Y. Xu, and L.-S. Luo. 1999. A pitch tar- get approximation model for f0 contours in Mandarin. In Proceedings of the 14th International Congress of Phonetic Sciences, pages 2359-2362.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "Contextual tonal variations in Mandarin", "authors": [ { "first": "Yi", "middle": [], "last": "Xu", "suffix": "" } ], "year": 1997, "venue": "Journal of Phonetics", "volume": "25", "issue": "", "pages": "62--83", "other_ids": {}, "num": null, "urls": [], "raw_text": "Yi Xu. 1997. Contextual tonal variations in Mandarin. Journal of Phonetics, 25:62-83.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "Effects of tone and focus on the formation and alignment of f0 contours -evidence from Mandarin", "authors": [ { "first": "Y", "middle": [], "last": "Xu", "suffix": "" } ], "year": 1999, "venue": "Journal of Phonetics", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Y. Xu. 1999. Effects of tone and focus on the formation and alignment of f0 contours -evidence from Man- darin. Journal of Phonetics, 27.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "Tone articulation modeling for Mandarin spontaneous speech recognition", "authors": [ { "first": "J", "middle": [ "L" ], "last": "Zhou", "suffix": "" }, { "first": "Ye", "middle": [], "last": "Tian", "suffix": "" }, { "first": "Yu", "middle": [], "last": "Shi", "suffix": "" }, { "first": "Chao", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Eric", "middle": [], "last": "Chang", "suffix": "" } ], "year": 2004, "venue": "Proceedings of ICASSP", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "J. L. Zhou, Ye Tian, Yu Shi, Chao Huang, and Eric Chang. 2004. Tone articulation modeling for Man- darin spontaneous speech recognition. In Proceedings of ICASSP 2004.", "links": null } }, "ref_entries": { "FIGREF0": { "num": null, "uris": null, "text": "Contours for canonical Mandarin tones tours for these tones appear inFigure 1.", "type_str": "figure" }, "FIGREF1": { "num": null, "uris": null, "text": "Differences for alternative unsupervised learners across numbers of clusters. 
", "type_str": "figure" }, "FIGREF2": { "num": null, "uris": null, "text": "Scatterplot of pitch height vs pitch slope. Open Diamond: High tone (1), Filled black triangle: Rising tone (2), Filled grey square: Low tone (3), X: Falling tone (4)", "type_str": "figure" } } } }