|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:06:35.801783Z" |
|
}, |
|
"title": "The NiuTrans System for WNGT 2020 Efficiency Task", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yinqiao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yanyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chenglong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "xiaotong@mail.neu.edu" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NLP Lab", |
|
"institution": "Northeastern University", |
|
"location": { |
|
"settlement": "Shenyang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "zhujingbo@mail.neu.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the submissions of the Ni-uTrans Team to the WNGT 2020 Efficiency Shared Task. We focus on the efficient implementation of deep Transformer models (Wang et al., 2019; Li et al., 2019) using NiuTensor 1 , a flexible toolkit for NLP tasks. We explored the combination of deep encoder and shallow decoder in Transformer models via model compression and knowledge distillation. The neural machine translation decoding also benefits from FP16 inference, attention caching, dynamic batching, and batch pruning. Our systems achieve promising results in both translation quality and efficiency, e.g., our fastest system can translate more than 40,000 tokens per second with an RTX 2080 Ti while maintaining 42.9 BLEU on newstest2018.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the submissions of the Ni-uTrans Team to the WNGT 2020 Efficiency Shared Task. We focus on the efficient implementation of deep Transformer models (Wang et al., 2019; Li et al., 2019) using NiuTensor 1 , a flexible toolkit for NLP tasks. We explored the combination of deep encoder and shallow decoder in Transformer models via model compression and knowledge distillation. The neural machine translation decoding also benefits from FP16 inference, attention caching, dynamic batching, and batch pruning. Our systems achieve promising results in both translation quality and efficiency, e.g., our fastest system can translate more than 40,000 tokens per second with an RTX 2080 Ti while maintaining 42.9 BLEU on newstest2018.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, the Transformer model and its variants (Vaswani et al., 2017; Shaw et al., 2018; So et al., 2019; Wu et al., 2019; have established state-of-the-art results on machine translation (MT) tasks. However, achieving high performance requires an enormous amount of computations (Strubell et al., 2019) , limiting the deployment of these models on devices with constrained hardware resources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 78, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 97, |
|
"text": "Shaw et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 114, |
|
"text": "So et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 131, |
|
"text": "Wu et al., 2019;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 312, |
|
"text": "(Strubell et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The efficiency task aims at developing MT systems to achieve not only translation accuracy but also memory efficiency or translation speed across different devices. This competition constraints systems to translate 1 million English sentences within 2 hours. Our goal is to improve the quality of translations while maintaining enough speed. We participated in both CPUs and GPUs tracks in the shared task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our system was built with NiuTensor, an opensource tensor toolkit written in C++ and CUDA 1 https://github.com/NiuTrans/NiuTensor based on dynamic computational graphs. NiuTensor is developed for facilitating NLP research and industrial deployment. The system is lightweight, high-quality, production-ready, and incorporated with the latest research ideas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We investigated with a different number of encoder/decoder layers to make trade-offs between translation performance and speed. We first trained several strong teacher models and then compressed teachers to compact student models via knowledge distillation (Hinton et al., 2015; Kim and Rush, 2016) . We find that using a deep encoder (up to 35 layers) and a shallow decoder (1 layer) gives reasonable improvements in speed while maintaining high translation quality. We also optimized the Transformer model decoding in engineering, such as caching the decoder's attention results and using low precision data type.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 278, |
|
"text": "(Hinton et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 298, |
|
"text": "Kim and Rush, 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present teacher models and training details in Section 2, then in Section 3 we describe how to obtain lightweight student models for efficient decoding. Optimizations for the decoding across different devices are discussed in Section 4. We show the details of our submissions and the results in Section 5. Section 6 summarizes this paper and describes future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recent years have witnessed the success of transformer-based models in MT tasks. Many works (Dehghani et al., 2019; focus on designing new attention mechanisms and Transformer architectures. Shaw et al. (2018) extended the self-attention to consider the relative position representations or distances between words. Wu et al. (2019) replaced the self-attention components with lightweight and dynamic convolutions. Deep Transformer mod-els also attracted a lot of attention. proposed a multi-layer representation fusion approach to learn a better representation from the stack. analyzed the high risk of gradient vanishing or exploring in the standard Transformer, which place the layer normalization (Ba et al., 2016) after the attention and feed-forward components. They showed that a deep Transformer model can surpass the big one by proper use of layer normalization and dynamic combinations of different layers. In their method, the input of layer l + 1 is defined by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 115, |
|
"text": "(Dehghani et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 209, |
|
"text": "Shaw et al. (2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 332, |
|
"text": "Wu et al. (2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 718, |
|
"text": "(Ba et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Transformer Architectures", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x l+1 = G (y 0 , . . . , y l )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Deep Transformer Architectures", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "G (y 0 , . . . , y l ) = l k=0 W (l+1) k LN (y k ) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Transformer Architectures", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where y l is the output of the l t h layer and W is the weights of different layers. We employed the dynamic linear combination of layers Transformer architecture incorporated with relative position representations as our teacher network, call it Transformer-DLCL-RPR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Transformer Architectures", |
|
"sec_num": "2.1" |
|
}, |
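To make Eq. (1)-(2) concrete, the following is a minimal sketch of the dynamic linear combination of layers in PyTorch-style code; the class name and the identity initialization are illustrative assumptions, not the actual NiuTensor or fairseq implementation.

```python
import torch
import torch.nn as nn

class DLCL(nn.Module):
    """Sketch of Eq. (1)-(2): the input of layer l+1 is a learned, weighted
    combination of the layer-normalized outputs y_0, ..., y_l."""

    def __init__(self, num_layers: int, d_model: int):
        super().__init__()
        # W[l, k] is the weight of layer k's output when building layer l+1's input.
        # Identity initialization means each layer initially sees only LN(y_l).
        self.W = nn.Parameter(torch.eye(num_layers + 1))
        self.norms = nn.ModuleList([nn.LayerNorm(d_model) for _ in range(num_layers + 1)])

    def forward(self, outputs, l):
        # outputs = [y_0, ..., y_l], each of shape (batch, seq_len, d_model)
        x_next = 0.0
        for k in range(l + 1):
            x_next = x_next + self.W[l, k] * self.norms[k](outputs[k])
        return x_next  # x_{l+1} in Eq. (1)
```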
|
{ |
|
"text": "We followed the constrained condition of the WMT 2019 English-German news translation task and used the same data filtering method as . We also normalized punctuation and tokenized all sentences with the Moses tokenizer (Koehn et al., 2007) . The training set contains about 10M sentences pairs after processed. In our systems, the data was tokenized, and jointly byte pair encoded (Sennrich et al., 2016) with 32K merge operations using a shared vocabulary. After decoding, we removed the BPE separators and de-tokenize all tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 240, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 405, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We trained four teacher models using new-stest2018 as the development set with fairseq (Ott et al., 2019) . Table 1 shows the results of all teacher models and their ensemble, where we report SacreBLEU (Post, 2018) and the model size. The difference between teachers is the number of encoder layers and whether they contain a dynamic linear combination of layers. All teachers have 6 decoder layers, 512 hidden dimensions, and 8 attention heads. We shared the source-side and target-side embeddings with the decoder output weights. The maximum relative length was 8, and the maximum position for both source and target was 1024. We used the Adam optimizer (Kingma and Ba, 2015) with \u03b2 1 = 0.9, \u03b2 2 = 0.997 and = 10 \u22128 as well as gradient accumulation due to the high GPU memory footprint. Each model was trained on 8 RTX 2080Ti GPUs for up to 21 epochs. We batched sentence pairs by approximate length and limited input/output tokens per batch to 2048/GPU. Following the method of (Wang et al., 2019), we accumulated every two steps for a better batching. This resulted in approximately 56000 tokens per training batch. The learning rate was decayed based on the inverse square root of the update number after 16000 warm-up steps, and the maximum learning rate was 0.002. Furthermore, we averaged the last five checkpoints in the training process for all models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 105, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 214, |
|
"text": "(Post, 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 677, |
|
"text": "Ba, 2015)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "2.2" |
|
}, |
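The warm-up and inverse-square-root decay described above can be summarized by a small helper; the function below is a sketch using the constants reported in the text (16,000 warm-up steps, peak learning rate 0.002), not the exact fairseq scheduler.

```python
def learning_rate(step: int, peak_lr: float = 0.002, warmup_steps: int = 16000) -> float:
    """Linear warm-up to peak_lr, then decay with the inverse square root of the step."""
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    return peak_lr * (warmup_steps ** 0.5) / (step ** 0.5)

# The rate peaks at step 16000 and decays like 1/sqrt(step) afterwards.
assert abs(learning_rate(16000) - 0.002) < 1e-9
assert abs(learning_rate(64000) - 0.001) < 1e-9
```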
|
{ |
|
"text": "As shown in Table 1 , the best single teacher model achieves 44.5 BLEU (beam size 4) on new-stest2018. Then we obtained an improvement of 1 BLEU via a simple ensemble strategy used in .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "After the training of deep Transformer teachers, we compressed the knowledge in an ensemble into a single model through knowledge distillation (Hinton et al., 2015; Kim and Rush, 2016). Then we analyzed the decoding time of each part in the deep Transformer. We further pruned the encoder and decoder layers to improve the decoding efficiency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lightweight Student Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Knowledge distillation approaches (Hinton et al., 2015; Kim and Rush, 2016) have proven successful in reducing the size of neural networks. They learn a smaller student model to mimic the original teacher network by minimizing the loss between the student and teacher output. We applied the sequence-level knowledge distillation on the teacher ensemble described in Section 2. We used the ensemble to generate multiple translations of", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 55, |
|
"text": "(Hinton et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 75, |
|
"text": "Kim and Rush, 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Encoder Decoder Others the raw English sentences. In particular, we collected the 4-best list for each sentence against the original target to create the synthetic training data. Our base student model consists of 35 encoder layers and six decoder layers (call it 35-6) with nearly 150M parameters. It achieves 44.6 BLEU on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "35% 54% 11%", |
|
"sec_num": null |
|
}, |
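A minimal sketch of how the distillation data described above could be assembled; `ensemble_translate_nbest` is a hypothetical stand-in for decoding with the teacher ensemble and is not an actual NiuTensor or fairseq API.

```python
def build_distillation_data(source_sentences, ensemble_translate_nbest, n_best=4):
    """Create synthetic (source, translation) pairs for sequence-level
    knowledge distillation from the teacher ensemble's n-best lists."""
    pairs = []
    for src in source_sentences:
        # The teacher ensemble produces the n best translations of src.
        for hyp in ensemble_translate_nbest(src, n_best):
            pairs.append((src, hyp))
    return pairs

# The student is then trained on these pairs in the same way as on the original bitext.
```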
|
{ |
|
"text": "Although the deep model can obtain high-quality translations, its speed is not satisfactory. For example, it costs 6.7 seconds to translate 2998 sentences on a 2080Ti GPU using a 35-6 model with the greedy search. Statistics show that the most time-consuming part of the decoding process is the decoder, as presented in Figure 1 , so the most efficient optimization is to use a lightweight decoder.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 328, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fast Student Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To make a comparison, we kept the 35 encoder layers and reduced the decoder layer to 1. In practice, we copied the bottom layers' parameters from big models to small models for initialization. Then we trained the small models as usual. Similar to , the encoder has a more significant influence on the translation quality than the decoder. Reducing the number of decoder layers brings us a speedup of more than 30% with a slight loss of 0.3 BLEU.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fast Student Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We further compressed the model by shrinking the encoder. Unless otherwise stated, the following student models have only one decoder layer. We copied the bottom layer parameters from big models to initialize small models to stabilize the training. We trained two small models with an 18layer encoder and a 9-layer encoder, respectively. Table 2 : Results on newstest18. The students were trained by sequence-level knowledge distillation. The tiny setting keeps the 9-1 model's configurations except for using a model size of 256. We report the translation speed on a single 2080Ti.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 345, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fast Student Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "cutting off half of the encoder layer reduces the parameters by nearly half and gives a speedup of 20% with a decrease of 0.2 BLEU. The 9-1 model is the fastest model we run on the GPU. It can translate newstest2018 within 3 seconds on a 2080Ti GPU and obtain 42.9 BLEU. All models mentioned above can translate 1 million sentences on the GPU in 2 hours. However, using a CPU to achieve this goal is not easy, so we need smaller models. We set the 9-1 model size to 256 for the CPU version, namely 9-1-tiny, which has only half the 9-1 model parameters. This model achieves 37.2 BLEU on newstest2018 and reduces 90% parameters compared to the 35-6 model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fast Student Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "First, we discuss some device-independent optimization methods. Caching We can cache the output of the top layer of the encoder and each step of the decoder since we use an autoregressive model. More specifically, we cache the linear transformations for keys and values before the self-attention and crossattention layers. Faster Beam Search Beam search is a common approach in sequence decoding. The standard beam search strategy generates the target sequence in a self-regression manner and keeps a fixed amount of active candidates during decoding. We adopt a basic strategy to accelerate beam search: the search ends when any candidate predicts the EOS symbol, and there are no candidates with higher scores. This strategy brings us up to a 20% speedup on the WMT test set. Other threshold-based pruning strategies (Freitag and Al-Onaizan, 2017) are not appropriate due to the complex hyper-parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 819, |
|
"end": 849, |
|
"text": "(Freitag and Al-Onaizan, 2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "General Optimizations", |
|
"sec_num": "4.1" |
|
}, |
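The early-stopping rule for beam search can be sketched as a simple termination test; the function below only illustrates the stopping criterion (assuming unnormalized log-probability scores), not a full decoder.

```python
def beam_search_finished(finished_scores, active_scores):
    """Stop decoding once some hypothesis has produced EOS and no still-active
    candidate has a higher score (log-probability scores can only decrease
    as more tokens are generated)."""
    if not finished_scores:
        return False
    best_finished = max(finished_scores)
    return all(score <= best_finished for score in active_scores)
```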
|
{ |
|
"text": "Batch Pruning The length of target sequences may vary for different sentences in a batch, which makes the computation inefficient. We prune the finished hypotheses in a batch during decoding but only gain little accelerations on CPUs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "General Optimizations", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the GPU-based decoding, we mainly explored dynamic batching, FP16 inference, and profiling. Dynamic Batching Unlike the CPU version, the easiest way to reduce the translation time on GPUs is to increase the batch size within a specific range. We implemented a dynamic batching scheme that maximizes the number of sentences in the batch while limiting the number of tokens. This strategy significantly accelerates decoding compared to using a fixed batch size when the sequence length is short. FP16 Inference Since the Tesla T4 GPU supports calculations under FP16, our systems execute almost all operations in 16-bit floating-point. All model parameters are stored in FP16, which reduces the model size on disk by half. We tried to run all operations at a 16-bit floating-point. However, in our test, some particular inputs will cause numerical instability, such as large batch size or sequence length. To escape overflow, we convert the data type around some potentially problematic operations, i.e., all operations related to reduce sum.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizing for GPUs", |
|
"sec_num": "4.2" |
|
}, |
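A minimal sketch of the dynamic batching idea: pack as many sentences as possible into a batch while the padded token count stays below a budget. The token budget used here is an illustrative value, not the exact system setting.

```python
def dynamic_batches(sentences, max_tokens=20000):
    """Yield batches that hold as many sentences as possible while
    batch_size * longest_sentence stays below max_tokens."""
    # Sorting by length keeps padding (and wasted computation) small.
    batch, longest = [], 0
    for sent in sorted(sentences, key=len, reverse=True):
        candidate_longest = max(longest, len(sent))
        if batch and candidate_longest * (len(batch) + 1) > max_tokens:
            yield batch
            batch, candidate_longest = [], len(sent)
        batch.append(sent)
        longest = candidate_longest
    if batch:
        yield batch
```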
|
{ |
|
"text": "As mentioned above, the goal we set for the CPU version is to translate 1 million sentences in 2 hours. We used the same settings as the 9-1 model except that the model size is 256 and therefore sacrifice about 6 BLEU on the WMT test set. We employed two methods to speed up the decoding on CPUs. Using of MKL To make the full use of the Intel architecture and to extract the maximum performance, the NiuTensor framework is optimized using the Intel Math Kernel Library for basic operators. We can take advantage of this convenience with only minor changes to the configuration. Decoding in Parallel The target machine in this task has 96 logical processors (with hyperthreading) and 192 GB RAM so that we can run our multi-threading system. We split the input into several parts according to the number of lines and start multiple processes to translate simultaneously. Then we merge each part of translations to one file in the original order.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizing for CPUs", |
|
"sec_num": "4.3" |
|
}, |
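A sketch of the split-translate-merge scheme using Python's multiprocessing; `translate_lines` is a hypothetical stand-in for running the NiuTensor decoder on one shard of the input.

```python
from multiprocessing import Pool

def translate_in_parallel(lines, translate_lines, num_processes=24):
    """Split the input by line count, translate the shards in parallel,
    and merge the results back in the original order."""
    shard_size = (len(lines) + num_processes - 1) // num_processes
    shards = [lines[i:i + shard_size] for i in range(0, len(lines), shard_size)]
    with Pool(num_processes) as pool:
        translated_shards = pool.map(translate_lines, shards)  # order-preserving
    return [sentence for shard in translated_shards for sentence in shard]
```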
|
{ |
|
"text": "In addition to the methods above, we also tried to find the optimal settings for our system. Greedy Search In the practice of knowledge distillation, we find that our systems are insensitive to the beam size. It means that the translation quality is good enough even we use greedy search in all submissions. Better decoding configurations As mentioned earlier, our GPU versions use a large batch size, but the number on the CPU is much smaller. We use a fixed batch size (number of sentences) of 512 on the GPU and 64 on the CPU. We also set the number of processes on the CPU as 24 and use 2 MKL threads for each process. The maximum sequence length is 120 for the source and 200 for the target.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Optimizations", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "To further improve our systems' efficiency, we identified and optimized the performance bottlenecks in our implementation. There are many off-the-shelf tools for performance profiling such as the gprof 2 for C++ and the nvprof 3 for CUDA. We run our systems on the WMT test set for ten times and collect profile data for all functions. Figure 2(a) shows the profiling results for different operations on GPUs before optimizing. Before optimizing, the most timeconsuming functions on CPUs is pre-processing and post-processing. We gain 2x speedup on CPUs by using multi-threads for Moses (4 threads) and replacing the Python subword tool with the C++ implementation 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 347, |
|
"text": "Figure 2(a)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Profile-guided optimization", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For GPU-based decoding, the bottleneck is matrix multiplication and memory management. Therefore we use a memory pool to control allocation/deallocation, which dynamically allocates blocks during decoding and releases them after the translation finished. Compared with the on-the-fly mode, this strategy significantly improves the efficiency of our systems by up to 3x speedup and slightly increases the memory usage. We further remove the log sof tmax in the output layer for greedy search and other data transfers with a slight acceleration of about 10%. Figure 2 : Profiling results of all operations during inference before or after optimizing on newstest2018 using a 9-1 model on a 2080Ti. We performed decoding for ten times to get more convincing results. Before optimizing, the decoding time is 76.9 seconds. The combination of different optimizations reduces the time to 24.9 seconds. MM is matrix multiplication, and CopyBlocks is used in the tensor copy.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 557, |
|
"end": 565, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Profile-guided optimization", |
|
"sec_num": null |
|
}, |
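A toy illustration of the pooled-allocation idea: buffers are taken from a free list during decoding and returned when a translation batch finishes, instead of being allocated and freed on the fly. This is a conceptual sketch, not NiuTensor's actual memory pool.

```python
class BlockPool:
    """Reuse fixed-size buffers across decoding steps to avoid the cost of
    repeated allocation and deallocation."""

    def __init__(self, block_size: int):
        self.block_size = block_size
        self.free_blocks = []

    def allocate(self) -> bytearray:
        # Reuse a released block when possible; otherwise create a new one.
        if self.free_blocks:
            return self.free_blocks.pop()
        return bytearray(self.block_size)

    def release(self, block: bytearray) -> None:
        # Return the block to the pool once the current translation finishes.
        self.free_blocks.append(block)
```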
|
{ |
|
"text": "We submitted five systems to this shared task, one for the CPU track and four for the GPU track, summarized as Table 3 . We report file sizes, model architectures, configurations, metrics for translation, including BLEU on newstest2018 and the real translation time on a combination of test sets. The BLEU and translation time were measured by the shared-task organizers on AWS c5.metal (CPU) and g4dn.xlarge (GPU) instances.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 118, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Submissions and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For the GPU tracks, our systems were measured on a Tesla T4 GPU. GPU versions were compiled with CUDA 10.1, and the executable file is about 96 MiB. Our models differ in encoder and decoder layers. The base model (35-6) has 35 encoder layers and six decoder layers and achieves 44.6 BLEU on the newstest2018. Then we see a speedup of more than one-third and a slight decrease of only 0.2 BLEU by reducing the decoder layer to 1 (35-1). We continue to reduce the number of encoder layers for more accelerations. The 18-1 system reduces the translation time by one-third with only half of the encoder layers compared to the 35-1 model. Our fastest system consists of 9 encoder layers and one decoder layer, which has one-third parameters of the 35-6 model, achieves 40 BLEU on the WMT 2019 test set, and speeds up the baseline by 3x.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Submissions and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For the CPU track, we used the entire machine, which has 96 virtual cores. Our CPU version is compiled with MKL static library, and the executable file is 22 MiB. We used a tiny model for the CPU with 256 hidden dimensions and kept other hyper-parameters as the 9-1 model in the GPU version. Interestingly, using half of the hidden size significantly reduces the translation quality. The main reason is that the parameters of large models cannot be reused when using smaller dimensions. This also proves that reducing the number of encoder and decoder layers is a more effective compression method. The CPU system achieves 37.2 BLEU on the newstest2018 and is 1.2x faster than the fastest GPU system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Submissions and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We made fewer efforts to reduce the model size and memory footprint. Our systems use a global memory pool, and we sort the input sentences in descending order of length. Thus the memory consumption will reach a peak in the early stage of decoding and then decrease. Our base model contains 152 million parameters, and the file size is 291 MiB when stored in 16-bit floats. The docker image size ranges from 724 MiB to 930 MiB for our GPU systems, while the CPU version is 452 MiB. All systems running in docker are slightly slow down, and we plan to improve this in subsequent versions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Submissions and Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To maximize the decoding efficiency while ensuring sufficiently high translation quality, we explored different techniques, including knowledge distillation, model compression, and decoding algorithms. The deep encoder and shallow decoder networks achieve impressive performance in both translation quality and speed. We speed up the decoding by 3x with lightweight models and efficient Student-9-1-tiny \u2020 67 810.9 37.2 Table 3 : Results of all submissions. \u2020 indicates the CPU system. All student systems were running with greedy search. The time was measured by the organizers on their test set and we only report the BLEU on the newstest2018.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 420, |
|
"end": 427, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "implementations. For the GPU system, we plan to optimize the FP16 inference by reducing the type conversion and applying kernel fusion (Wang et al., 2010) for Transformer models. For the CPU system, we will further speed up the inference by restricting the output vocabulary to a subset of likely candidates given the source (Shi and Knight, 2017; Senellart et al., 2018) and using low precision data type (Bhandare et al., 2019; Kim et al., 2019; Lin et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 154, |
|
"text": "(Wang et al., 2010)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 347, |
|
"text": "(Shi and Knight, 2017;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 371, |
|
"text": "Senellart et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 429, |
|
"text": "(Bhandare et al., 2019;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 447, |
|
"text": "Kim et al., 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 465, |
|
"text": "Lin et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://ftp.gnu.org/old-gnu/Manuals/ gprof-2.9.1/html_node/gprof_toc.html 3 http://docs.nvidia.com/cuda/ profiler-users-guide/index.html 4 https://github.com/glample/fastBPE", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by the National Science Foundation of China (Nos. 61876035 and 61732005) and the National Key R&D Program of China (No.2019QY1801). The authors would like to thank anonymous reviewers for their comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Layer normalization. ArXiv", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [ |
|
"Ryan" |
|
], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. 2016. Layer normalization. ArXiv, abs/1607.06450.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Efficient 8-bit quantization of transformer neural machine language translation model", |
|
"authors": [ |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Bhandare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vamsi", |
|
"middle": [], |
|
"last": "Sripathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deepthi", |
|
"middle": [], |
|
"last": "Karkada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Menon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sun", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kushal", |
|
"middle": [], |
|
"last": "Datta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikram", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Saletore", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aishwarya Bhandare, Vamsi Sripathi, Deepthi Karkada, Vivek Menon, Sun Choi, Kushal Datta, and Vikram A. Saletore. 2019. Efficient 8-bit quan- tization of transformer neural machine language translation model. ArXiv, abs/1906.00532.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Universal transformers", |
|
"authors": [ |
|
{ |
|
"first": "Mostafa", |
|
"middle": [], |
|
"last": "Dehghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mostafa Dehghani, Stephan Gouws, Oriol Vinyals, Jakob Uszkoreit, and Lukasz Kaiser. 2019. Univer- sal transformers. ArXiv, abs/1807.03819.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Beam search strategies for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Neural Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--60", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3207" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Freitag and Yaser Al-Onaizan. 2017. Beam search strategies for neural machine translation. In Proceedings of the First Workshop on Neural Ma- chine Translation, pages 56-60, Vancouver. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Distilling the knowledge in a neural network", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey E. Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural network. ArXiv, abs/1503.02531.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Sequencelevel knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1317--1327", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1139" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim and Alexander M. Rush. 2016. Sequence- level knowledge distillation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1317-1327, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "From research to production and back: Ludicrously fast neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Young Jin", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hany", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alham", |
|
"middle": [], |
|
"last": "Fikri Aji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 3rd Workshop on Neural Generation and Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "280--288", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5632" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Young Jin Kim, Marcin Junczys-Dowmunt, Hany Has- san, Alham Fikri Aji, Kenneth Heafield, Roman Grundkiewicz, and Nikolay Bogoychev. 2019. From research to production and back: Ludicrously fast neural machine translation. In Proceedings of the 3rd Workshop on Neural Generation and Transla- tion, pages 280-288, Hong Kong. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the As- sociation for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Ses- sions, pages 177-180, Prague, Czech Republic. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The NiuTrans machine translation systems for WMT19", |
|
"authors": [ |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinqiao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiqiang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nuo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeyang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hexuan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tengbo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "257--266", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5325" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bei Li, Yinqiao Li, Chen Xu, Ye Lin, Jiqiang Liu, Hui Liu, Ziyang Wang, Yuhao Zhang, Nuo Xu, Zeyang Wang, Kai Feng, Hexuan Chen, Tengbo Liu, Yanyang Li, Qiang Wang, Tong Xiao, and Jingbo Zhu. 2019. The NiuTrans machine translation sys- tems for WMT19. In Proceedings of the Fourth Con- ference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 257-266, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Neural machine translation with joint representation", |
|
"authors": [ |
|
{ |
|
"first": "Yanyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{

"first": "T",

"middle": [],

"last": "Liu",

"suffix": ""

},

{

"first": "Jingbo",

"middle": [],

"last": "Zhu",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanyang Li, Qiang Wang, Tong Xiao, T Liu, and Jingbo Zhu. 2020. Neural machine translation with joint representation. ArXiv, abs/2002.06546.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Towards fully 8-bit integer inference for the transformer model", |
|
"authors": [ |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tengbo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongran", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ye Lin, Yanyang Li, Tengbo Liu, Tong Xiao, Tongran Liu, and Jingbo Zhu. 2020. Towards fully 8-bit inte- ger inference for the transformer model. In Proceed- ings of the Twenty-Ninth International Joint Confer- ence on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-4009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A call for clarity in reporting BLEU scores", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "186--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6319" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "OpenNMT system description for WNMT 2018: 800 words/sec on a single-core CPU", |
|
"authors": [ |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dakun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Pierre", |
|
"middle": [], |
|
"last": "Ramatchandirin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josep", |
|
"middle": [], |
|
"last": "Crego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--128", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2715" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jean Senellart, Dakun Zhang, Bo Wang, Guillaume Klein, Jean-Pierre Ramatchandirin, Josep Crego, and Alexander Rush. 2018. OpenNMT system de- scription for WNMT 2018: 800 words/sec on a single-core CPU. In Proceedings of the 2nd Work- shop on Neural Machine Translation and Genera- tion, pages 122-128, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Self-attention with relative position representations", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Shaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "464--468", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2074" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Shaw, Jakob Uszkoreit, and Ashish Vaswani. 2018. Self-attention with relative position represen- tations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 2 (Short Papers), pages 464-468, New Orleans, Louisiana. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Speeding up neural machine translation decoding by shrinking runtime vocabulary", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "574--579", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-2091" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Shi and Kevin Knight. 2017. Speeding up neu- ral machine translation decoding by shrinking run- time vocabulary. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), pages 574-579, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The evolved transformer. ArXiv, abs", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{

"first": "Quoc",

"middle": [

"V"

],

"last": "Le",

"suffix": ""

}
|
], |
|
"year": 1901, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David R. So, Chen Liang, and Quoc V. Le. 2019. The evolved transformer. ArXiv, abs/1901.11117.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Energy and policy considerations for deep learning in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananya", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3645--3650", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1355" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Ananya Ganesh, and Andrew McCal- lum. 2019. Energy and policy considerations for deep learning in NLP. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 3645-3650, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Kernel fusion: An effective method for better power efficiency on multithreaded gpu", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Yi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "2010 IEEE/ACM Int'l Conference on Green Computing and Communications Int'l Conference on Cyber, Physical and Social Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "344--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Wang, Y. Lin, and W. Yi. 2010. Kernel fusion: An effective method for better power efficiency on multithreaded gpu. In 2010 IEEE/ACM Int'l Con- ference on Green Computing and Communications Int'l Conference on Cyber, Physical and Social Com- puting, pages 344-350.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Learning deep transformer models for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Derek", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidia", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1810--1822", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1176" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qiang Wang, Bei Li, Tong Xiao, Jingbo Zhu, Changliang Li, Derek F. Wong, and Lidia S. Chao. 2019. Learning deep transformer models for ma- chine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 1810-1822, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Multi-layer representation fusion for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fuxue", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinqiao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3015--3026", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qiang Wang, Fuxue Li, Tong Xiao, Yanyang Li, Yin- qiao Li, and Jingbo Zhu. 2018. Multi-layer repre- sentation fusion for neural machine translation. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3015-3026, Santa Fe, New Mexico, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Pay less attention with lightweight and dynamic convolutions", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Wu, Angela Fan, Alexei Baevski, Yann Dauphin, and Michael Auli. 2019. Pay less attention with lightweight and dynamic convolutions. ArXiv, abs/1901.10430.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Improving deep transformer with depth-scaled initialization and merged attention", |
|
"authors": [ |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "898--909", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1083" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biao Zhang, Ivan Titov, and Rico Sennrich. 2019. Improving deep transformer with depth-scaled ini- tialization and merged attention. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 898-909, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Profiling of the throughput during inference on newstest2018 using a 35-6 model.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Figure 2(b) shows the statistics of optimized operations. The data type conversion overhead takes about 12% of the decoding time.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"text": "shows the comparison of different teachers and students. Compared with the 35-1 model,", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">Param. Speedup BLEU</td></tr><tr><td>Teacher-40-6</td><td>168M</td><td>1x</td><td>44.5</td></tr><tr><td>Student-35-6</td><td>152M</td><td>1.1x</td><td>44.6</td></tr><tr><td>Student-35-1</td><td>131M</td><td>1.6x</td><td>44.3</td></tr><tr><td>Student-18-1</td><td>77M</td><td>2.0x</td><td>43.4</td></tr><tr><td>Student-9-1</td><td>49M</td><td>2.4x</td><td>42.9</td></tr><tr><td>Student-tiny</td><td>25M</td><td>2.9x</td><td>37.2</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |