|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:28:17.699872Z" |
|
}, |
|
"title": "When an Image Tells a Story: The Role of Visual and Semantic Information for Generating Paragraph Descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Nikolai", |
|
"middle": [], |
|
"last": "Ilinykh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Centre for Linguistic Theory and Studies in Probability (CLASP)", |
|
"institution": "University of Gothenburg", |
|
"location": { |
|
"country": "Sweden" |
|
} |
|
}, |
|
"email": "nikolai.ilinykh@gu.se" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Dobnik", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Centre for Linguistic Theory and Studies in Probability (CLASP)", |
|
"institution": "University of Gothenburg", |
|
"location": { |
|
"country": "Sweden" |
|
} |
|
}, |
|
"email": "simon.dobnik@gu.se" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Generating multi-sentence image descriptions is a challenging task, which requires a good model to produce coherent and accurate paragraphs, describing salient objects in the image. We argue that multiple sources of information are beneficial when describing visual scenes with long sequences. These include (i) perceptual information and (ii) semantic (language) information about how to describe what is in the image. We also compare the effects of using two different pooling mechanisms on either a single modality or their combination. We demonstrate that the model which utilises both visual and language inputs can be used to generate accurate and diverse paragraphs when combined with a particular pooling mechanism. The results of our automatic and human evaluation show that learning to embed semantic information along with visual stimuli into the paragraph generation model is not trivial, raising a variety of proposals for future experiments.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Generating multi-sentence image descriptions is a challenging task, which requires a good model to produce coherent and accurate paragraphs, describing salient objects in the image. We argue that multiple sources of information are beneficial when describing visual scenes with long sequences. These include (i) perceptual information and (ii) semantic (language) information about how to describe what is in the image. We also compare the effects of using two different pooling mechanisms on either a single modality or their combination. We demonstrate that the model which utilises both visual and language inputs can be used to generate accurate and diverse paragraphs when combined with a particular pooling mechanism. The results of our automatic and human evaluation show that learning to embed semantic information along with visual stimuli into the paragraph generation model is not trivial, raising a variety of proposals for future experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The quality of automatically generated image captions (Bernardi et al., 2016) has been continuously improving as evaluated by a variety of metrics. These improvements include use of neural networks (Kiros et al., 2014; Vinyals et al., 2014) , attention mechanisms (Xu et al., 2015; Lu et al., 2017) and more fine-grained image features (Anderson et al., 2017) . More recently, a novel openended task of image paragraph generation has been proposed by Krause et al. (2017) . This task requires the generation of multi-sentence image descriptions, which are highly informative, thus, include descriptions of a large variety of image objects, and attributes, which makes them different from standard single sentence captions. In particular, a good paragraph generation model has to produce descrip-tive, detailed and coherent text passages, depicting salient parts in an image.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 77, |
|
"text": "(Bernardi et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 218, |
|
"text": "(Kiros et al., 2014;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 240, |
|
"text": "Vinyals et al., 2014)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 281, |
|
"text": "(Xu et al., 2015;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 298, |
|
"text": "Lu et al., 2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 359, |
|
"text": "(Anderson et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 471, |
|
"text": "Krause et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "When humans describe images, especially over longer discourses, they take take into account (at least) two sources of information that interact with each other: (i) perceptual information as expressed by visual features and (ii) cognitive reasoning that determines the communicative intent of the text and the use of language (Kelleher and Dobnik, 2019) . Perceptual information mainly determines what to refer to while the reasoning mechanisms tell us how and when to refer to it. Both mechanisms interact: that a particular object is described at a particular point of discourse and with particular words depends not only on its perceptual salience but also whether that object should be referred to at that point of the story that the text is narrating which is its discourse salience. Compare for example: \"two cows are standing in the field\", \"there are trees in the field\" and \"a few of them are close to the trees\". The selection and the order of the relevant features are described by a cognitive mechanism of attention and memory (Lavie et al., 2004; Dobnik and Kelleher, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 353, |
|
"text": "(Kelleher and Dobnik, 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1039, |
|
"end": 1059, |
|
"text": "(Lavie et al., 2004;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1060, |
|
"end": 1086, |
|
"text": "Dobnik and Kelleher, 2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we investigate the interplay between visual and textual information (reflecting background knowledge about the world and communicative intent) and their ability to generate natural linguistic discourses spanning over several sentences. Our primary research question is as follows: does using both visual and linguistic information improve accuracy and diversity of generated paragraphs? We experiment with several types of inputs to the paragraph generator: visual, language or both. We also investigate the effects of different kinds of information fusion between visual and textual information using either attention or maxpooling. We demonstrate that multimodal input paired with attention on these modalities benefits model's ability to generate more diverse and accu-rate paragraphs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We evaluate the accuracy and diversity of our paragraphs with both automatic metrics and human judgements. We also argue that, as some previous work shows (van der Lee et al., 2019) , n-grambased metrics might be unreliable for quality evaluation of generated texts. The generated paragraph can be accurate as of the image, but because it does not match the ground truth, this would score low based on the automatic evaluation. To provide a different view on paragraph evaluation, we asked humans to judge the subset of generated paragraphs across several criteria, more specifically described in Section 3.4 and Appendix A.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 181, |
|
"text": "(van der Lee et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In language and vision literature, \"diversity\" of image descriptions has been mostly defined in terms of lexical diversity, word choice and n-gram based metrics (Devlin et al., 2015; Vijayakumar et al., 2016; Lindh et al., 2018; van Miltenburg et al., 2018) . In these papers, the focus is on generating a diverse set of independent, one-sentence captions, with each describing image as a whole. Each of these captions might refer to identical objects due to the nature of the task (\"describe an image with a single sentence\"). Then, diversity is measured in terms of how different object descriptions are from one caption to another (e.g. a man can be described as a \"person\" or \"human\" in two different captions). However, as argued above, a good image paragraph model must also introduce diversity at the sentence level, describing different scene objects throughout the paragraph. Here, we define paragraph diversity with two essential conditions. First, a generative model must demonstrate the ability to use relevant words to describe objects without unnecessary repetitions (word-level diversity). Secondly, it must produce a set of sentences with relevant mentions of a variety of image objects in an appropriate order (sentence-level diversity).", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 182, |
|
"text": "(Devlin et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 208, |
|
"text": "Vijayakumar et al., 2016;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 228, |
|
"text": "Lindh et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 257, |
|
"text": "van Miltenburg et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Producing structured and ordered sets of sentences (e.g. coherent paragraphs) has been a topic of research in NLG community for a long time with both formal theories of coherence (Grosz et al., 1995; Barzilay and Lapata, 2008) and traditional rule-based model implementations (Reiter and Dale, 2000; Deemter, 2016) . The coherence of generated text depends on several NLG sub-tasks: content determination (selection), the task of deciding which parts of the source information should be included in the output description, and text structuring (micro-planning), the task of ordering selected information (Gatt and Krahmer, 2017) . We believe that the hierarchical structure of our models reflects the nature of these tasks. First, the model attends to the image objects and defines both their salience and order of mention and then it starts to realise them linguistically, first as paragraph visual-textual topics and then as individual sentences within paragraphs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "(Grosz et al., 1995;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 226, |
|
"text": "Barzilay and Lapata, 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 299, |
|
"text": "(Reiter and Dale, 2000;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 314, |
|
"text": "Deemter, 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 628, |
|
"text": "(Gatt and Krahmer, 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Overview For our experiments we implement and adapt the hierarchical image paragraph model by Krause et al. (2017) . 1 We deliberately chose to re-implement an existing model to study the effects of using different modalities (visual or language). However, through our implementation and extensions, we propose several new models based on the original model in (Krause et al., 2017) . To prepare input features, we utilise the pre-trained model for dense captioning (Johnson et al., 2016) in two ways. First, we use it to extract convolutional features of identified image regions. We also use its hidden states from the RNN layer as language features. In the original model, these states are used to generate region descriptions; therefore, these vectors represent semantic information about objects. We construct a multi-modal space, in which we learn mappings from both text and vision features. Lastly, we concatenate both modalities and attend to them to form a multi-modal vector, which is used as an input to the paragraph generator. Our paragraph generator consists of two components: discourse-level and sentence-level LSTMs (Hochreiter and Schmidhuber, 1997). First, the discourselevel LSTM learns the topic of each sentence from the multi-modal representation, capturing information flow between sentences. Second, each of the topics is used by sentence-level LSTM to generate an actual sentence. Finally, all generated sentences per image are concatenated to form a final paragraph. An overview of our model and a more detailed description is shown in Fig. 1 . Our model is different from the model by Krause et al. (2017) in the following ways: (i) we use either max-pooling or attention in our models, (ii) we do not learn to predict the end of the paragraph, but generate the same number of sentences as we find in groundtruth paragraph per each image, (iii) we use seman- Figure 1 : Multimodal paragraph generator architecture. The orange area on the left is the learned space where two modalities are attended to (vision in purple, language in green). The mapped features are concatenated together and passed to the attention mechanism, that outputs a vector which is used as an input to the discourse LSTM (in blue, marked with \u03b4 ). The attention module also uses the last hidden state of the discourse LSTM at each timestamp. The sentence LSTM (in green, marked with \u03c2 ) is given the sentence topic and word embeddings. Due to limited space, we omit the linear layer and the softmax layer which are used to predict the next word from the output of the sentence LSTM.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 114, |
|
"text": "Krause et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 118, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 382, |
|
"text": "(Krause et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 488, |
|
"text": "(Johnson et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1614, |
|
"end": 1634, |
|
"text": "Krause et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1564, |
|
"end": 1570, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1888, |
|
"end": 1896, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
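To make the two-level structure above concrete, here is a minimal PyTorch sketch of a hierarchical paragraph generator: a discourse LSTM produces one topic per sentence and a sentence LSTM unrolls each topic into words. It is our own illustration of the idea rather than the authors' implementation (which was not released, see footnote 1); all module names, sizes and the simplified state handling are assumptions.

```python
import torch
import torch.nn as nn

class HierarchicalParagraphGenerator(nn.Module):
    """Minimal sketch: a discourse LSTM emits one topic vector per sentence,
    a sentence LSTM turns each topic (plus word embeddings) into word logits."""

    def __init__(self, vocab_size, hidden=512):
        super().__init__()
        self.discourse_lstm = nn.LSTMCell(hidden, hidden)                    # delta-LSTM
        self.sentence_lstm = nn.LSTM(2 * hidden, hidden, batch_first=True)   # sigma-LSTM
        self.embed = nn.Embedding(vocab_size, hidden)                        # E, learned from scratch
        self.out = nn.Linear(hidden, vocab_size)                             # projection to the vocabulary

    def forward(self, fused, words):
        """fused: (S, hidden) one fused multimodal vector per sentence;
        words: (S, N) ground-truth word ids (teacher forcing)."""
        h = torch.zeros(1, fused.size(1))
        c = torch.zeros_like(h)
        logits = []
        for s in range(fused.size(0)):
            # one discourse step produces the topic of sentence s
            h, c = self.discourse_lstm(fused[s].unsqueeze(0), (h, c))
            topic = h.expand(words.size(1), -1)            # repeat the topic for every word position
            x = torch.cat([topic, self.embed(words[s])], dim=-1)
            y, _ = self.sentence_lstm(x.unsqueeze(0))
            logits.append(self.out(y.squeeze(0)))          # (N, vocab)
        return torch.stack(logits)                         # (S, N, vocab)

# toy usage with random inputs
model = HierarchicalParagraphGenerator(vocab_size=100, hidden=16)
fused = torch.randn(3, 16)                 # three per-sentence fused vectors
words = torch.randint(0, 100, (3, 5))
print(model(fused, words).shape)           # torch.Size([3, 5, 100])
```

In the full model the fused vector for each sentence comes from the attention or max-pooling step described below, and the sentence-LSTM states are carried over between sentences.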
|
{ |
|
"text": "tic information about objects in the visual scene. The focus of our work is not to improve on the results of Krause et al. (2017) but to investigate the effects of different multi-modal fusion on the accuracy or the diversity of paragraph descriptions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 129, |
|
"text": "Krause et al. (2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Visual Features We use DenseCap region detector (Johnson et al., 2016) 2 to identify salient image regions and extract their convolutional features. First, a resized image is passed through the VGG-16 network (Simonyan and Zisserman, 2015) to output a feature map of the image. A region proposal network is conditioned on the feature map to identify the set of salient image regions which are then mapped back onto the feature map to produce corresponding map regions. Each of these map regions is then fed to the two-layer perceptron which outputs a set of the final region features", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 70, |
|
"text": "(Johnson et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 239, |
|
"text": "(Simonyan and Zisserman, 2015)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "{v 1 , ..., v M }, where v m \u2208 R 1\u00d7D", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "with M = 50 and D = 4096. This matrix V \u2208 R M\u00d7D provides us with fine-grained image representation at the object level. We use this representation as features of visual modality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Language Features In the dense captioning task, a single layer LSTM is conditioned on region features to produce descriptions of these regions in natural language. We propose to utilise its outputs as language features, using them as additional semantic background information about detected objects. Specifically, we condition a pre-trained LSTM on region features to output a set Y = {y m , ..., y M } with y m \u2208 R 1\u00d7T \u00d7H , where T = 15 and H = 512.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We condense each vector over the second dimension T, which determines the maximum number of words in each description. We achieve this by summing all elements across this dimension and dividing the result by the actual length of the corresponding region description, which we generate from Y . The final matrix L \u2208 R M\u00d7H , contains language representations of M detected regions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
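The condensation over the word dimension T can be written as a masked average; a short sketch with the shapes from the text (M = 50, T = 15, H = 512). The masking of positions beyond each description's length and the variable names are our assumptions.

```python
import torch

M, T, H = 50, 15, 512
Y = torch.randn(M, T, H)                  # hidden states of the region-description LSTM
lengths = torch.randint(1, T + 1, (M,))   # actual length of each generated region description

# zero out positions beyond each description's length, then average over T
mask = (torch.arange(T).unsqueeze(0) < lengths.unsqueeze(1)).float()    # (M, T)
L = (Y * mask.unsqueeze(-1)).sum(dim=1) / lengths.unsqueeze(1).float()  # (M, H)
print(L.shape)                            # torch.Size([50, 512])
```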
|
{ |
|
"text": "Multimodal Features First, we learn two different mappings, using V map for vision and L map for language. These linear projections learn to embed modality-specific information into the attention space. Then, we concatenate these mappings to form the multimodal vector f , which is then combined with the mapping from the hidden state. We have experimented with fusing two attended modalities into a single vector via an additional linear layer but observed no improvement. We also tried to use modality-dependent attention (early attention) as such setting has shown to produce good joint representation for the task of multimodal machine translation (Caglayan et al., 2016 (Caglayan et al., , 2019 , which is very similar to image captioning in its nature. However, this set-up provided us with worse scores of automatic metrics. Therefore, here we use late attention: attending to the visual and textual features when they are already concatenated. As shown in Eq. 1, at each timestamp t we concatenate mapped features from both modalities to output the multimodal vector mult t , where t \u2208 {1, ..., S} and S is the maximum number of sentences to generate. We use \u03b4 to refer to the discourse LSTM and \u03c2 when referring to the sentence LSTM. Concatenation, the logistic sigmoid function and element-wise multiplication are indicated with \u2295, \u03c3 and respectively. We set S depending on the number of sentences in the ground-truth paragraph with the maximum S = 6. Then, as Eq. 2 indicates, we generate attention weights for our multimodal vector mult t . We use additive (concat) attention mechanism and concatenate multimodal representation with the previous hidden state of the discourse LSTM. Finally, as in Eq. 3, we obtain a weighted multimodal vector f \u2208 R 1\u00d7H , which encapsulates and merges salient information from attended visual and textual modalities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 652, |
|
"end": 674, |
|
"text": "(Caglayan et al., 2016", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 699, |
|
"text": "(Caglayan et al., , 2019", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "mult t = [W V m V t \u2295W L m L t ]", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 mult t = so f tmax(W L a tanh(mult t \u2295W h h \u03b4 t\u22121 ) (2) f t = [\u03b1 mult t mult t ]", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
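A minimal sketch of the late-attention fusion in Eqs. 1-3: both modalities are mapped into the attention space, concatenated per region, scored against the previous discourse hidden state with additive attention, and summed with the resulting weights. Parameter names loosely mirror the equations, but the layer shapes and the exact dimensionality of the fused vector (the paper reports a vector in R^(1xH)) are our assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

M, D, H = 50, 4096, 512
W_v = nn.Linear(D, H, bias=False)       # V_map: vision -> attention space
W_l = nn.Linear(H, H, bias=False)       # L_map: language -> attention space
W_h = nn.Linear(H, H, bias=False)       # maps the previous discourse hidden state
w_a = nn.Linear(3 * H, 1, bias=False)   # scoring layer of the additive attention

V = torch.randn(M, D)                   # region (visual) features
L = torch.randn(M, H)                   # region (language) features
h_prev = torch.randn(H)                 # previous hidden state of the discourse LSTM

# Eq. 1: concatenate the two mapped modalities per region
mult = torch.cat([W_v(V), W_l(L)], dim=-1)                              # (M, 2H)
# Eq. 2: additive ("concat") attention conditioned on the discourse state
ctx = W_h(h_prev).unsqueeze(0).expand(M, -1)                            # (M, H)
alpha = F.softmax(w_a(torch.tanh(torch.cat([mult, ctx], -1))), dim=0)   # (M, 1)
# Eq. 3: attention-weighted multimodal vector (summed over regions here)
f_t = (alpha * mult).sum(dim=0)                                         # (2H,)
print(f_t.shape)
```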
|
{ |
|
"text": "2.2 Discourse LSTM", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our discourse-level LSTM is responsible for modelling multi-modal topics of each of the individual sentences in the paragraph. At each timestamp, it is conditioned on the weighted multimodal vector f t , and its output is a set of hidden states {h 1 , ..., h S }, where each state is used as an input to the sentencelevel LSTM. In its nature, the discourse LSTM has to simultaneously complete at least two tasks: produce a topic with a relevant combination of visual and linguistic information for each sentence, while preserving some type of ordering between the topics. Such topic ordering is essential for keeping a natural transition between sentences (discourse items) in the paragraph (discourse). We expect attention on the combination of two modalities to assist the discourse LSTM in its multiple objectives since attention weights specific parts of the input as more relevant for a particular sentence. We expect that this allows discourse LSTM to learn better sentence representations and sentence order. Similar to Xu et al. (2015) , we also learn a gating scalar \u03b2 and apply it to f t :", |
|
"cite_spans": [ |
|
{ |
|
"start": 1027, |
|
"end": 1043, |
|
"text": "Xu et al. (2015)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b2 = \u03c3 (W b h \u03b4 t\u22121 ),", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where W b is a learnable model parameter. Thus, the input to discourse LSTM is computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f \u03b4 t = \u03b2 f t", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
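Eqs. 4-5 add a learned gate on the attended vector before it enters the discourse LSTM; a small sketch continuing the placeholder names from the previous block (`W_b`, `h_prev` and `f_t` are ours).

```python
import torch
import torch.nn as nn

H = 512
W_b = nn.Linear(H, 1)                 # produces the gating scalar beta
h_prev = torch.randn(H)               # previous discourse LSTM hidden state
f_t = torch.randn(2 * H)              # attended multimodal vector from Eq. 3

beta = torch.sigmoid(W_b(h_prev))     # Eq. 4: scalar in (0, 1)
f_delta_t = beta * f_t                # Eq. 5: gated input to the discourse LSTM
print(beta.item(), f_delta_t.shape)
```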
|
{ |
|
"text": "2.3 Sentence LSTM", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our sentence-level LSTM is a single-layer LSTM that generates individual sentences in the paragraph. We run the sentence LSTM S times. Each time we use a concatenation of the corresponding hidden state of the discourse LSTM with the learned embeddings of the words in the target sentence y s as its input:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x \u03c2 s = [h \u03b4 s \u2295 Ey s ]", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
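Eq. 6 concatenates the sentence topic with the embedding of each target word; a short sketch with assumed sizes (the vocabulary size K = 5000 is an arbitrary placeholder).

```python
import torch
import torch.nn as nn

H, K, N = 512, 5000, 50               # hidden size, vocabulary size, words per sentence
E = nn.Embedding(K, H)                # word embedding matrix learned from scratch
h_s = torch.randn(H)                  # topic of sentence s from the discourse LSTM
y_s = torch.randint(0, K, (N,))       # target word ids of sentence s

# Eq. 6: every word position receives the same topic vector concatenated with its embedding
x_s = torch.cat([h_s.unsqueeze(0).expand(N, -1), E(y_s)], dim=-1)  # (N, 2H)
print(x_s.shape)
```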
|
{ |
|
"text": "Our word embedding matrix E \u2208 R K\u00d7H is learned from scratch, K is the vocabulary size. This is different from (Krause et al., 2017) , who use word embeddings and LSTM weights from the pre-trained DenseCap model. We have also experimented with transferring DenseCap weights and embeddings into our model but observed no significant improvement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 131, |
|
"text": "(Krause et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "At each timestamp t, our sentence LSTM is unrolled N + 1 times, where N is the number of words to generate. At each step, its hidden state is used to predict a probability distribution over the words in the vocabulary. We set N = 50. The final set of sentences is concatenated together to form a paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We train our model end-to-end with imageparagraph pairs (x, y) from the training data. Our training loss is a simple cross-entropy loss on the sentence level:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Objective", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "loss \u03c2 (x, y) = \u2212 S \u2211 i=1 M i \u2211 j=1 log(p j,s )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Learning Objective", |
|
"sec_num": "2.4" |
|
}, |
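A sketch of the sentence-level cross-entropy of Eq. 7, computed from per-word logits; the variable names and the random toy inputs are ours.

```python
import torch
import torch.nn.functional as F

S, N, K = 6, 50, 5000                       # sentences, words per sentence, vocabulary size
logits = torch.randn(S, N, K)               # unnormalised word scores from the sentence LSTM
targets = torch.randint(0, K, (S, N))       # ground-truth word ids

# Eq. 7: negative log-likelihood summed over all words of all sentences
log_probs = F.log_softmax(logits, dim=-1)
loss = -log_probs.gather(-1, targets.unsqueeze(-1)).sum()
print(loss.item())
```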
|
{ |
|
"text": "where p j,s is the softmax probability of the j th word in the i th sentence given all previously generated words for the current sentence y 1: j\u22121,i . For the first sentence, the hidden states of both LSTMs are initialised with zeros. For every subsequent sentence, both LSTMs use the last hidden states generated for the previous sentence for each respective layer. During training, we use teacher forcing and feed ground-truth words as target words at each timestamp. We use Adam (Kingma and Ba, 2014) as an optimiser and choose the best model based on the validation loss (early stopping). For decoding we use beam search (Freitag and Al-Onaizan, 2017) with beam width B = 2 (we tested several values for the beam width B \u2208 {2, 4, 6, 8, 10}). We leave the investigation of the effects of using different decoding strategies such as nucleus sampling (Holtzman et al., 2020) or various techniques for controlling decoding (length penalty, n-gram repetition penalty (Klein et al., 2017; Paulus et al., 2017) ) for future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 504, |
|
"text": "(Kingma and Ba, 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 656, |
|
"text": "(Freitag and Al-Onaizan, 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 853, |
|
"end": 876, |
|
"text": "(Holtzman et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 967, |
|
"end": 987, |
|
"text": "(Klein et al., 2017;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 988, |
|
"end": 1008, |
|
"text": "Paulus et al., 2017)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Objective", |
|
"sec_num": "2.4" |
|
}, |
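Beam-search decoding with a small beam (the paper uses B = 2) can be sketched as follows. The `toy_step` function standing in for one step of the sentence LSTM is hypothetical; only the beam bookkeeping is illustrated, without length penalties or other decoding controls mentioned above.

```python
import math

def beam_search(step, bos, eos, beam_width=2, max_len=50):
    """step(prefix) -> dict of {token: log_prob} for the next token.
    Returns the highest-scoring sequence found with a simple beam search."""
    beams = [([bos], 0.0)]                         # (token sequence, cumulative log-prob)
    for _ in range(max_len):
        candidates = []
        for seq, score in beams:
            if seq[-1] == eos:                     # finished hypotheses are kept as-is
                candidates.append((seq, score))
                continue
            for tok, logp in step(seq).items():
                candidates.append((seq + [tok], score + logp))
        # keep the beam_width best partial hypotheses
        beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:beam_width]
        if all(seq[-1] == eos for seq, _ in beams):
            break
    return beams[0][0]

# toy model: prefers token 1, then emits the end-of-sentence token 0
def toy_step(prefix):
    return {1: math.log(0.6), 2: math.log(0.3)} if len(prefix) < 4 else {0: 0.0}

print(beam_search(toy_step, bos=3, eos=0))   # [3, 1, 1, 1, 0]
```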
|
{ |
|
"text": "We describe six configurations of our model, which we train, validate and test on the released Stanford paragraph dataset splits (14,575, 2,487, 2,489 for training, validation and testing respectively) (Krause et al., 2017) . Our models are described as follows: the IMG model is conditioned only on the mapped visual features, while the LNG model only uses the mapped semantic information to generate paragraphs. The IMG+NLG is conditioned on both mapped visual and semantic information. All models with +ATT use late attention on either uni-modal or multi-modal features. We also test another configuration of the models with max-pooling of input features across M regions, represented by mapping from either language features", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "(Krause et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x = W L m L t or visual features x = W V m V t : x \u03c2 s = max M i=1 (x)", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In the IMG+LNG model we apply max-pooling on both modalities and concatenate them into a single vector:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x \u03c2 s = [max M i=1 (W L m L t ) \u2295 max M i=1 (W V m V t )]", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Models", |
|
"sec_num": "3.1" |
|
}, |
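A sketch of the max-pooling variants of Eqs. 8-9: each mapped modality is max-pooled over the M regions, and the IMG+LNG configuration concatenates the two pooled vectors. Layer names and sizes are our placeholders.

```python
import torch
import torch.nn as nn

M, D, H = 50, 4096, 512
W_v = nn.Linear(D, H, bias=False)     # vision mapping
W_l = nn.Linear(H, H, bias=False)     # language mapping
V = torch.randn(M, D)                 # region (visual) features
L = torch.randn(M, H)                 # region (language) features

# Eq. 8: single-modality input (element-wise max over the M regions)
x_img = W_v(V).max(dim=0).values      # (H,)
x_lng = W_l(L).max(dim=0).values      # (H,)
# Eq. 9: IMG+LNG concatenates the two pooled vectors
x_img_lng = torch.cat([x_lng, x_img], dim=-1)   # (2H,)
print(x_img_lng.shape)
```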
|
{ |
|
"text": "Typically, a variety of n-gram based automatic metrics is used to measure the correctness/accuracy of image captions. We evaluate our models with the following metrics: CIDEr (Vedantam et al., 2014), METEOR (Denkowski and Lavie, 2014), BLEU-{1, 2, 3, 4} (Papineni et al., 2002) , and Word Mover's Distance (Kusner et al., 2015; Kilickaya et al., 2017) . We also measure lexical diversity of sentences within the generated paragraphs. For this we report self-BLEU (Zhu et al., 2018) which is sometimes referred to as mBLEU (Shetty et al., 2017) . Estimating lexical diversity is important for paragraph generation as their sentences should be neither too similar nor too different from each other. We calculate self-BLEU as follows: we split each generated paragraph into sentences and use one sentence as a hypothesis and the other sentences as references. A lower score indicates more diversity, e.g. fewer n-gram matches between compared sentences. We also calculate the diversity metric introduced by Wang and Chan (2019) . This metric applies Latent Semantic Analysis (Deerwester et al., 1990) to the weighted n-gram feature representations (CIDEr values between unique pairs of sentences) and identifies the number of topics among sentences. Compared to self-BLEU, which measures n-gram overlap, LSA combined with CIDErbased kernel metric measures semantic differences between sentences as well. More identified topics in paragraph sentences indicate a higher level of diversity. However, this intrinsic metric does not evaluate if the paragraph demonstrates discourse coherence in terms of how these topics are introduced and the quality of the generated sentences and their sequences (Section 1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 277, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 327, |
|
"text": "(Kusner et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 351, |
|
"text": "Kilickaya et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 481, |
|
"text": "(Zhu et al., 2018)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 543, |
|
"text": "(Shetty et al., 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 1004, |
|
"end": 1024, |
|
"text": "Wang and Chan (2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1072, |
|
"end": 1097, |
|
"text": "(Deerwester et al., 1990)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics", |
|
"sec_num": "3.2" |
|
}, |
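Self-BLEU over the sentences of one generated paragraph can be computed roughly as below, treating each sentence in turn as the hypothesis and the remaining sentences as references. The use of NLTK's sentence-level BLEU, the smoothing method and the averaging over n-gram orders 1-4 are our assumptions about the exact protocol.

```python
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def self_bleu(paragraph_sentences):
    """Average BLEU of each sentence against the remaining sentences of the
    same paragraph; lower values indicate more diverse sentences."""
    smooth = SmoothingFunction().method1
    weights = [(1, 0, 0, 0), (0.5, 0.5, 0, 0),
               (1 / 3, 1 / 3, 1 / 3, 0), (0.25, 0.25, 0.25, 0.25)]  # BLEU-1..4
    scores = []
    for i, hyp in enumerate(paragraph_sentences):
        refs = [s.split() for j, s in enumerate(paragraph_sentences) if j != i]
        for w in weights:
            scores.append(sentence_bleu(refs, hyp.split(), weights=w,
                                        smoothing_function=smooth))
    return sum(scores) / len(scores)

paragraph = ["there are several cars on the street",
             "there are trees on the street",
             "there are trees on the street"]
print(round(self_bleu(paragraph), 3))
```

On this toy paragraph the repeated sentence pushes the score up, matching the intuition that lower self-BLEU means more diverse sentences.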
|
{ |
|
"text": "As the results in Table 1 demonstrate, models which utilise both semantic and visual information (any IMG+LNG configuration) outperform their single modality variants in both attention and max-pooling settings. When using max-pooling, IMG+LNG model improves on CIDEr by 0.72 and METEOR by 0.10. Also, two-modal architecture is slightly lexically more diverse from the ground truth paragraphs, according to the WMD scores. This result comes at no decrease in other metrics, concerned with lexical accuracy. When replacing max-pooling with late attention, we observe that the IMG model reaches the highest scores in BLEU-{2, 3, 4}, while finishing second in all other metrics. However, IMG+LNG model does not seem to benefit from the attention that much, reaching lower scores in comparison to its version with max-pooling. Interestingly, semantic information is beneficial to WMD, CIDEr and ME-TEOR, which also take into account the syntactic structure of the sentences. Table 2 contains the scores of the lexical diversity metrics. The best (i.e. the lowest) mBLEU scores are achieved by models which use either a visual modality with max-pooling (IMG+MAX) or both modalities with attention (IMG+LNG+ATT). The best self-CIDEr scores are achieved by both bi-modal architectures. In addition, IMG+LNG+ATT strongly outperforms all other models in both lexical diversity metrics: Table 2 : Automatic paragraph diversity evaluation. mBLEU stands for the average score between all self-BLEU scores for n-grams (1, 2, 3, 4). Self-CIDEr stands for the average score of the LSA-based diversity metric. We also include ground-truth scores calculated from the test set (GT, coloured in blue). Best models are shown in bold. All scores are multiplied by 100 for better interpretability.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 25, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 977, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1376, |
|
"end": 1383, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "mBLEU is reduced by 3.21% indicating a smaller ngram overlap between paragraph sentences, while self-CIDEr increases by 1.93% demonstrating that attention in the model which uses multimodal features helps to generate a more diverse set of sentences in terms of topicality. We include two examples of generated texts by humans and our models. As Figure 2a demonstrates, the IMG+LNG+ATT model can generate less redundant/repetitive descriptions compared to the IMG+LNG+MAX model. Figure 2b demonstrates a case where IMG+LNG+ATT generated a paragraph which seems correct but different from the human-generated text (e.g. 'human' instead of a 'dog'). However, to a human eye it is not entirely clear whether the surfer is a human or a dog. Neither of our models was able to see a 'dog' in this image. Similar challenging examples include cases where different human describers have a different take on what the image is about or what is the focus of the image.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 354, |
|
"text": "Figure 2a", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 487, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Therefore, the intrinsic metrics might not be the best indicator for identifying clear differences in diversity and accuracy of the generated texts. In addition, such diversity metrics as mBLEU underrepresent the diversity, being unable to take into account semantic differences between sentences. Therefore, we conduct a human evaluation experiment to achieve a better understanding of which input features and which pooling mechanism assists in the generation of both accurate and diverse paragraphs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In the human evaluation task we are interested in the following properties of generated paragraphs covering both accuracy and diversity aspects: word choice, object salience, sentence structure and paragraph coherence. We randomly chose 10% of the images from our test set, resulting in 250 images. For each of these images, we gathered seven paragraphs (six from the models and one from the test set). We presented workers with the instructions shown in Appendix A. To ensure quality and variety of workers' judgements, we presented our tasks only to the Master workers (those with the high reputation and task acceptance rate) and controlled for the number of tasks a single worker is able to submit (we set it to 30). We paid 0.15$ per task to a single worker. Finally, we obtained judgements from 154 unique Master workers for 1,750 image paragraphs overall. For each judgement criteria, we took the average score across all models; the results are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 962, |
|
"end": 969, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "As shown by human evaluation, looking at the overall mean, the multi-modal information does help the generation of better paragraphs when using max-pooling. The IMG+LNG model with max-pooling might be a beneficial choice (scores first in two criteria out of four) in terms of word choice and identification of salient objects. The performance of the IMG+LNG model with maxpooling is close to the performance of the IMG model while the performance of the LNG model is (a) HUMAN: There are several cars parked along a street. There are many trees in a field in front of the street. There are small blue parking meters on the sidewalk next to the street. IMG+MAX : There are several cars parked on the road. There are cars parked on the street. There are trees behind the street. LNG+MAX : There are several cars on the street. There are trees on the street. There are trees on the street. IMG+LNG+MAX : There are several cars on the street. There are two cars on the street. There are cars parked on the sidewalk. IMG+ATT : There are several cars parked on the street. There are two cars parked on the road. There are two cars parked on the road. LNG+ATT : There are several signs on the street. There are signs on the street. The pole is white. IMG+LNG+ATT : There is a parking meter on a sidewalk. There are cars next to the street. There is a parking lot next to the street.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "(b) HUMAN: A large splash is in front of a wave in the water. There is a large white and black surf board in the water. There is a black dog that is riding on top of the surf board. IMG+MAX : A man is riding a wave. He is holding a surfboard. The man is wearing a black wet suit. LNG+MAX : A person is surfing in the water. The surfboard is black and white. The surfboard is black and white. IMG+LNG+MAX : A man is standing on a surfboard. The surfboard is black. The man is wearing black shorts. IMG+ATT : A man is standing on a surfboard. The surfboard is black and white. The man has black hair. LNG+ATT : A person is standing in the water. The person is wearing a black suit. The person is holding a black surfboard. IMG+LNG+ATT : A person is surfing in the ocean. She is wearing a black wet suit. She is holding a white surfboard. slightly lower. Overall, attention is judged as more advantageous in general than max pooling, having higher mean scores across all criteria compared to the mean scores of max-pooling models. However, here the IMG+LNG model is outperformed by both uni-modal models. The LNG model which utilises semantic information and uses attention is judged as the best configuration by humans, which is in line with some previous work that reports strong bias on the semantic information (Agrawal et al., 2017) . Note that while its performance is close to the IMG model in terms of word choice and object salience, the improvement of the LNG model is much more expressed in terms of sentence structure and paragraph coherence, categories where one would expect that semantic information matters most. Interestingly, max-pooling does not seem to have the same effect on utilisation of semantic information: the LNG+MAX model achieves the lowest scores. A possible explanation for this is that when using max-pooling, the same semantic information is chosen for every sentence topic. At the same time, attention learns to select different semantic information for a sequence of topics. This appears to affect semantic features more than visual features. Note that humans mostly judge models that incorporate linguistic information as the best ones for the word choice criterion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1312, |
|
"end": 1334, |
|
"text": "(Agrawal et al., 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "This supports the idea that utilising semantic information reduces redundancy in terms of the number of repeated words in the generated paragraph. Overall, the results indicate that both visual and semantic information are beneficial for the generated paragraphs as they affect different evaluation categories differently. The main challenge lies in information fusion of visual and semantic information in the model with attention. We believe that these results suggest the following future experiments: (i) detailed investigation of early vs. late attention (when to fuse two modalities and how), (ii) as van Miltenburg et al. (2017) argue, more control over human evaluation can provide us with better, more precise human judgements, (iii) training with other decoding strategies such as top-k sampling or nucleus sampling (Holtzman et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 826, |
|
"end": 849, |
|
"text": "(Holtzman et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Neural image paragraph captioning The task of generating image paragraphs has been introduced in (Krause et al., 2017) along with the dataset of image-paragraph pairs. The authors hierarchically construct their model: sentence RNN is conditioned on visual features to output sentence topics. Then, each of these topics is used by another RNN to generate actual sentences. Our models are based on this hierarchical model. However, we substantially change its structure and also remove the end of paragraph prediction. Liang et al. (2017) also use the hierarchical network, but with an adversarial discriminator, that forces model to generate realistic paragraphs with smooth transitions between sentences. Chatterjee and Schwing (2018) also address cross-sentence topic consistency by modelling the global coherence vector, conditioned on all sentence topics. Different from these approaches, Melas-Kyriazi et al. (2018) employ self-critical training technique (Rennie et al., 2017) to directly optimise a target evaluation metric for image paragraph generation. Lastly, use convolutional auto-encoder for topic modelling based on region-level image features. They demonstrate that extracted topics are more representative and contain information relevant to sentence generation. We also model topic representations, but we use additional semantic representations of image objects as part of the input to our topic generator. Lin et al. (2015) has proposed a non-neural approach to generate texts describing images. However, this approach depends on multi-ple components: visual scene parsing, generative grammar for learning from training descriptions, and an algorithm, which analyses scene graphs and extracts semantic trees to learn about dependencies across sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 118, |
|
"text": "(Krause et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 517, |
|
"end": 536, |
|
"text": "Liang et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1425, |
|
"end": 1442, |
|
"text": "Lin et al. (2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Language representation for image captioning Several existing models for image captioning are conditioned on both visual and background information. You et al. (2016) detect visual concepts found in the scene (objects, attributes) and extract top-down visual features. Both of these modalities are then fed to the RNN-based caption generator. Attention is applied on detected concepts to inform the generator about how relevant a particular concept is at each timestamp. Our approach does not use any attribute detectors to identify objects in the scene. Instead, we use the output of another pre-trained model for the task of dense captioning. Lu et al. (2017) emphasise that image is not always useful in generating some function words (\"of\", \"the\"). They introduce adaptive attention, which determines when to look at the image and when it is more important to use the language model to generate the next word. In their work, the attention vector is a mixture of visual features and visual sentinel, a vector obtained through the additional gate function on decoder memory state. Our model is guided by their approach: we are interested in deciding which type of information is more relevant at a particular timestamp, but we also look at how merging two modalities into a single representation performs and how it affects attention of the model. Closest to our work is the work by Liang et al. (2017) , who apply attention to region description representation and use it to assist recurrent word generation in producing sentences in a paragraph. Similar to our approach, they also supply their model with embeddings of local phrases used to describe image objects. However, they use textual phrases directly, while we are using hidden representations from the model trained to generate such phrases (Johnson et al., 2016) . Also, our approach explores a different application of semantic information encoded in language: we use phrase representations to define sentence topics to choose from (topic selection) rather than directly guide the generation of words (micro-planning).", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "You et al. (2016)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 645, |
|
"end": 661, |
|
"text": "Lu et al. (2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1385, |
|
"end": 1404, |
|
"text": "Liang et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1803, |
|
"end": 1825, |
|
"text": "(Johnson et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, we addressed the problem of generating both accurate and diverse image paragraphs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We demonstrated that utilising both visual and linguistic information might benefit the quality of generated texts depending on the pooling mechanism that is used. We showed that intrinsic evaluation metrics are insufficient for evaluation of paragraphs as they focus on lexical choice and do not capture human level of judgement: LNG+ATT is judged as the best model in human evaluation, while it is not among the leaders according to the automatic evaluation. We believe that our work is a good starting point for further investigation of the ways multiple sources of information about the world can be merged for learning generation of high-quality multi-sentence stories, describing real-world visual scenes. In our future work we also intend to test how our models can generate task-dependent paragraphs. For this task we will use the dataset of image description sequences (Ilinykh et al., 2019) which consists of paragraphs collected in a taskbased setting to train our models. In contrast, in the Stanford dataset humans were not given a specific task when describing images. We believe that generation from more context-dependent and structured descriptions can open up new perspectives for the research on image paragraphs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 878, |
|
"end": 900, |
|
"text": "(Ilinykh et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The authors have not publicly released the code of their model and hence the model implementation is based on our interpretation of their paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Available at: https://github.com/jcjohnson/densecap", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The research reported in this paper was supported by a grant from the Swedish Research Council (VR project 2014-39) for the establishment of the Centre for Linguistic Theory and Studies in Probability (CLASP) at the University of Gothenburg.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A Human Evaluation: AMT Instructions Short Summary: You are going to be shown an image and several sentences describing the image. Below you will see statements that relate to the image descriptions. Please rate each of these statements by moving the slider along the scale where 0% stands for 'I do not agree', 100% stands for 'I fully agree'.Detailed Instructions: In general, you are required to judge image descriptions based on the following:\u2022 choice of words: does the text correctly describe objects and events in the scene and with the right detail?\u2022 relevance: does the text describe relevant objects and events in the scene?\u2022 sentence structure: do the sentences have a good and grammatical structure?\u2022 coherence: does the test progresses in a natural way forming a narrative?You can enter any feedback you have for us, for example if some questions were not easy to answer, in the corresponding feedback field (right after the survey).DESCRIPTION: there are two cows standing in the field. there are trees behind them.How well do you agree with the following statements?1. The description contains words that correctly refer to the objects and events in the image 2. The description is referring to the relevant/important parts of the image.3. The sentences have a correct structure and are grammatical.4. The sentences are well-connected and form a single story.Write you feedback in the field below if you have any (not necessary).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Don't just assume; look and answer: Overcoming priors for visual question answering. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1712.00377" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aishwarya Agrawal, Dhruv Batra, Devi Parikh, and Aniruddha Kembhavi. 2017. Don't just assume; look and answer: Overcoming priors for visual question answering. arXiv, arXiv:1712.00377", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Bottom-up and top-down attention for image captioning and visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Buehler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Damien", |
|
"middle": [], |
|
"last": "Teney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Gould", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Anderson, Xiaodong He, Chris Buehler, Damien Teney, Mark Johnson, Stephen Gould, and Lei Zhang. 2017. Bottom-up and top-down attention for image captioning and visual question answering.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Modeling local coherence: An entity-based approach", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Computational Linguistics", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--34", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/coli.2008.34.1.1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay and Mirella Lapata. 2008. Modeling local coherence: An entity-based approach. Compu- tational Linguistics, 34(1):1-34.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automatic description generation from images: A survey of models, datasets, and evaluation measures", |
|
"authors": [ |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruket", |
|
"middle": [], |
|
"last": "Cakici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aykut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erkut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raffaella Bernardi, Ruket Cakici, Desmond Elliott, Aykut Erdem, Erkut Erdem, Nazli Ikizler-Cinbis, Frank Keller, Adrian Muscat, and Barbara Plank. 2016. Automatic description generation from im- ages: A survey of models, datasets, and evaluation measures.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Multimodal Attention for Neural Machine Translation. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Caglayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.03976" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozan Caglayan, Lo\u00efc Barrault, and Fethi Bougares. 2016. Multimodal Attention for Neural Machine Translation. arXiv, arXiv:1609.03976 [cs.CL].", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Probing the need for visual context in multimodal machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Caglayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranava", |
|
"middle": [], |
|
"last": "Madhyastha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/n19-1422" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozan Caglayan, Pranava Madhyastha, Lucia Specia, and Lo\u00efc Barrault. 2019. Probing the need for visual context in multimodal machine translation.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Diverse and coherent paragraph generation from images", |
|
"authors": [ |
|
{ |
|
"first": "Moitreya", |
|
"middle": [], |
|
"last": "Chatterjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Schwing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moitreya Chatterjee and Alexander G. Schwing. 2018. Diverse and coherent paragraph generation from im- ages. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Computational models of referring: a study in cognitive science", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kees Van Deemter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kees van Deemter. 2016. Computational models of re- ferring: a study in cognitive science. The MIT Press, Cambridge, Massachusetts and London, England.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Indexing by latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Deerwester", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Furnas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Harshman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Journal of the American Society of Information Science", |
|
"volume": "41", |
|
"issue": "6", |
|
"pages": "391--407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. C. Deerwester, S. T. Dumais, T. K. Landauer, G. W. Furnas, and R. A. Harshman. 1990. Indexing by la- tent semantic analysis. Journal of the American So- ciety of Information Science, 41(6):391-407.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Meteor universal: Language specific translation evaluation for any target language", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Denkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the EACL 2014 Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Denkowski and Alon Lavie. 2014. Meteor uni- versal: Language specific translation evaluation for any target language. In Proceedings of the EACL 2014 Workshop on Statistical Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Xiaodong He, Geoffrey Zweig, and Margaret Mitchell. 2015. Language Models for Image Captioning: The Quirks and What Works. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saurabh", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1505.01809" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Hao Cheng, Hao Fang, Saurabh Gupta, Li Deng, Xiaodong He, Geoffrey Zweig, and Mar- garet Mitchell. 2015. Language Models for Image Captioning: The Quirks and What Works. arXiv, arXiv:1505.01809 [cs.CL].", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A model for attention-driven judgements in Type Theory with Records", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Dobnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Kelleher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "JerSem: The 20th Workshop on the Semantics and Pragmatics of Dialogue", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "25--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Dobnik and John D. Kelleher. 2016. A model for attention-driven judgements in Type Theory with Records. In JerSem: The 20th Workshop on the Semantics and Pragmatics of Dialogue, volume 20, pages 25-34, New Brunswick, NJ USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Beam search strategies for neural machine translation. Proceedings of the First Workshop on Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/w17-3207" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Freitag and Yaser Al-Onaizan. 2017. Beam search strategies for neural machine translation. Pro- ceedings of the First Workshop on Neural Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Survey of the State of the Art in Natural Language Generation: Core tasks, applications and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal of AI Research (JAIR)", |
|
"volume": "61", |
|
"issue": "", |
|
"pages": "75--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Gatt and Emiel Krahmer. 2017. Survey of the State of the Art in Natural Language Generation: Core tasks, applications and evaluation. Journal of AI Research (JAIR), 61:75-170.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Centering: A framework for modeling the local coherence of discourse", |
|
"authors": [ |
|
{ |
|
"first": "Barbara", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Grosz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Weinstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Computational Linguistics", |
|
"volume": "21", |
|
"issue": "2", |
|
"pages": "203--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara J. Grosz, Aravind K. Joshi, and Scott Wein- stein. 1995. Centering: A framework for model- ing the local coherence of discourse. Computational Linguistics, 21(2):203-225.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Comput", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/neco.1997.9.8.1735" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Comput., 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The curious case of neural text degeneration", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Holtzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Buys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxwell", |
|
"middle": [], |
|
"last": "Forbes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Holtzman, Jan Buys, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degener- ation. International Conference on Learning Repre- sentations.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Tell me more: A dataset of visual scene description sequences", |
|
"authors": [ |
|
{ |
|
"first": "Nikolai", |
|
"middle": [], |
|
"last": "Ilinykh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Schlangen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "152--157", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-8621" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolai Ilinykh, Sina Zarrie\u00df, and David Schlangen. 2019. Tell me more: A dataset of visual scene de- scription sequences. In Proceedings of the 12th In- ternational Conference on Natural Language Gener- ation, pages 152-157, Tokyo, Japan. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Densecap: Fully convolutional localization networks for dense captioning", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Johnson, Andrej Karpathy, and Li Fei-Fei. 2016. Densecap: Fully convolutional localization networks for dense captioning. In Proceedings of the IEEE Conference on Computer Vision and Pat- tern Recognition.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Referring to the recently seen: reference and perceptual memory in situated dialogue", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Kelleher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Dobnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CLASP Papers in Computational Linguistics: Dialogue and Perception -Extended papers from DaP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D. Kelleher and Simon Dobnik. 2019. Referring to the recently seen: reference and perceptual mem- ory in situated dialogue. In CLASP Papers in Com- putational Linguistics: Dialogue and Perception - Extended papers from DaP-2018 Gothenburg, pages 41-50.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Re-evaluating automatic metrics for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Mert", |
|
"middle": [], |
|
"last": "Kilickaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aykut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Ikizler-Cinbis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erkut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "199--209", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mert Kilickaya, Aykut Erdem, Nazli Ikizler-Cinbis, and Erkut Erdem. 2017. Re-evaluating automatic metrics for image captioning. In Proceedings of the 15th Conference of the European Chapter of the As- sociation for Computational Linguistics: Volume 1, Long Papers, pages 199-209, Valencia, Spain. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multimodal neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 31st International Conference on Machine Learning", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "595--603", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Kiros, Ruslan Salakhutdinov, and Rich Zemel. 2014. Multimodal neural language models. In Pro- ceedings of the 31st International Conference on Machine Learning, volume 32 of Proceedings of Ma- chine Learning Research, pages 595-603, Bejing, China. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "OpenNMT: Opensource toolkit for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ACL 2017, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senel- lart, and Alexander Rush. 2017. OpenNMT: Open- source toolkit for neural machine translation. In Proceedings of ACL 2017, System Demonstrations, pages 67-72, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A hierarchical approach for generating descriptive image paragraphs", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Vision and Patterm Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Krause, Justin Johnson, Ranjay Krishna, and Li Fei-Fei. 2017. A hierarchical approach for gen- erating descriptive image paragraphs. In Computer Vision and Patterm Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "From word embeddings to document distances", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Kusner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Kolkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt J. Kusner, Yu Sun, Nicholas I. Kolkin, and Kil- ian Q. Weinberger. 2015. From word embeddings to document distances. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Load theory of selective attention and cognitive control", |
|
"authors": [ |
|
{ |
|
"first": "Nilli", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandra", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "De Fockert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Essi", |
|
"middle": [], |
|
"last": "Viding", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Journal of Experimental Psychology: General", |
|
"volume": "133", |
|
"issue": "3", |
|
"pages": "339--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nilli Lavie, Aleksandra Hirst, Jan W de Fockert, and Essi Viding. 2004. Load theory of selective atten- tion and cognitive control. Journal of Experimental Psychology: General, 133(3):339-354.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Best practices for the human evaluation of automatically generated text", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Van Der Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "van Miltenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sander", |
|
"middle": [], |
|
"last": "Wubben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "355--368", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-8643" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris van der Lee, Albert Gatt, Emiel van Miltenburg, Sander Wubben, and Emiel Krahmer. 2019. Best practices for the human evaluation of automatically generated text. In Proceedings of the 12th Interna- tional Conference on Natural Language Generation, pages 355-368, Tokyo, Japan. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Sanja Fidler, and Raquel Urtasun. 2015. Generating Multi-Sentence Lingual Descriptions of Indoor Scenes. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Dahua", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1503.00064" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dahua Lin, Chen Kong, Sanja Fidler, and Raquel Urtasun. 2015. Generating Multi-Sentence Lin- gual Descriptions of Indoor Scenes. arXiv, arXiv:1503.00064 [cs.CV].", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Knowing when to look: Adaptive attention via a visual sentinel for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3242--3250", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2017.345" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Lu, C. Xiong, D. Parikh, and R. Socher. 2017. Know- ing when to look: Adaptive attention via a visual sentinel for image captioning. In 2017 IEEE Confer- ence on Computer Vision and Pattern Recognition (CVPR), pages 3242-3250.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Training for diversity in image paragraph captioning", |
|
"authors": [ |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Melas-Kyriazi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "757--761", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1084" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luke Melas-Kyriazi, Alexander Rush, and George Han. 2018. Training for diversity in image paragraph cap- tioning. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 757-761, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Cross-linguistic differences and similarities in image descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "van Miltenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piek", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--30", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3503" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emiel van Miltenburg, Desmond Elliott, and Piek Vossen. 2017. Cross-linguistic differences and simi- larities in image descriptions. In Proceedings of the 10th International Conference on Natural Language Generation, pages 21-30, Santiago de Compostela, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Measuring the Diversity of Automatic Image Descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "van Miltenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piek", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1730--1741", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emiel van Miltenburg, Desmond Elliott, and Piek Vossen. 2018. Measuring the Diversity of Auto- matic Image Descriptions. Proceedings of the 27th International Conference on Computational Linguis- tics, pages 1730-1741.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A deep reinforced model for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Paulus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Romain Paulus, Caiming Xiong, and Richard Socher. 2017. A deep reinforced model for abstractive sum- marization.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Building natural language generation systems", |
|
"authors": [ |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehud Reiter and Robert Dale. 2000. Building natural language generation systems.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Self-critical sequence training for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Rennie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Marcheret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youssef", |
|
"middle": [], |
|
"last": "Mroueh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1179--1195", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven J. Rennie, E. Marcheret, Youssef Mroueh, J. Ross, and V. Goel. 2017. Self-critical sequence training for image captioning. 2017 IEEE Confer- ence on Computer Vision and Pattern Recognition (CVPR), pages 1179-1195.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Speaking the same language: Matching machine to human captions by adversarial training", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Shetty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hendricks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Fritz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Schiele", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4155--4164", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICCV.2017.445" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Shetty, M. Rohrbach, L. A. Hendricks, M. Fritz, and B. Schiele. 2017. Speaking the same language: Matching machine to human captions by adversarial training. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 4155-4164.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Very deep convolutional networks for large-scale image recognition", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Simonyan and Andrew Zisserman. 2015. Very deep convolutional networks for large-scale image recog- nition. CoRR, abs/1409.1556.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Cider: Consensus-based image description evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramakrishna Vedantam, C. Lawrence Zitnick, and Devi Parikh. 2014. Cider: Consensus-based image description evaluation.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Diverse beam search: Decoding diverse solutions from neural sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Ashwin", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Vijayakumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Cogswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Selvaraju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Crandall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashwin K. Vijayakumar, Michael Cogswell, R. R. Sel- varaju, Q. Sun, Stefan Lee, David J. Crandall, and Dhruv Batra. 2016. Diverse beam search: Decod- ing diverse solutions from neural sequence models. ArXiv, abs/1610.02424.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Show and tell: A neural image caption generator", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Toshev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2014. Show and tell: A neural im- age caption generator.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Convolutional auto-encoding of sentence topics for image paragraph generation", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingwei", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinhui", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Wang, Yingwei Pan, Ting Yao, Jinhui Tang, and Tao Mei. 2019. Convolutional auto-encoding of sen- tence topics for image paragraph generation.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Describing like humans: on diversity in image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Qingzhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoni", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Chan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingzhong Wang and Antoni B. Chan. 2019. Describ- ing like humans: on diversity in image captioning.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Show, attend and tell: Neural image caption generation with visual attention", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhutdinov, Richard Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual at- tention.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Image captioning with semantic attention", |
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4651--4659", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2016.503" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Q. You, H. Jin, Z. Wang, C. Fang, and J. Luo. 2016. Image captioning with semantic attention. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4651-4659.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Texygen: A benchmarking platform for text generation models", |
|
"authors": [ |
|
{ |
|
"first": "Yaoming", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sidi", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxian", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weinan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The 41st International ACM SIGIR Conference on Research Development in Information Retrieval, SIGIR '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1097--1100", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3209978.3210080" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaoming Zhu, Sidi Lu, Lei Zheng, Jiaxian Guo, Weinan Zhang, Jun Wang, and Yong Yu. 2018. Texy- gen: A benchmarking platform for text generation models. In The 41st International ACM SIGIR Con- ference on Research Development in Information Re- trieval, SIGIR '18, page 1097-1100, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Two example images with generated paragraphs from our models (incl. ground truth descriptions).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"text": "Automatic evaluation results. Models are separated based on the input features (one modality / multimodal) and type of the mechanism used to compactly describe content of the image (max-pooling / attention). Best scores for both +MAX and +ATT modes are shown in bold. The colour intensity indicates how good the score is compared to the other models' scores.", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">Model Input Type</td><td colspan=\"2\">mBLEU self-CIDEr</td></tr><tr><td>IMG</td><td>+MAX</td><td>50.63</td><td>76.43</td></tr><tr><td>LNG</td><td>+MAX</td><td>52.24</td><td>75.59</td></tr><tr><td>IMG+LNG</td><td>+MAX</td><td>52.09</td><td>76.46</td></tr><tr><td>IMG</td><td>+ATT</td><td>51.82</td><td>75.51</td></tr><tr><td>LNG</td><td>+ATT</td><td>50.93</td><td>76.41</td></tr><tr><td>IMG+LNG</td><td>+ATT</td><td>47.42</td><td>78.39</td></tr><tr><td>GT</td><td>-</td><td>18.84</td><td>96.51</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "Human evaluation results. WC, OS, SS, PC stand for word choice, object salience, sentence structure and paragraph coherence. Each value in the table is the average of all scores for the corresponding criterion. The mean values per each model and type of pooling mechanism are coloured in light cyan.", |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |