Columns: image (string, length 15), id (string, length 8–12), conversations (string, length 79–268)
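Each record below spans three consecutive lines: the image filename, the record id, and a JSON-encoded conversation — a list of {"from", "value"} turns in which the human question carries an <image> placeholder marking where the image is injected and the gpt turn carries the answer. A minimal parsing sketch in Python (the three-lines-per-record grouping is an assumption about this flattened preview, and parse_records is an illustrative helper, not part of the dataset):

import json

def parse_records(lines):
    # Group the flattened preview into records, assuming each record
    # spans exactly three consecutive lines as in the rows below.
    for i in range(0, len(lines) - 2, 3):
        image, record_id, raw = lines[i:i + 3]
        yield {
            "image": image,                    # e.g. "00000004412.jpg"
            "id": record_id,                   # e.g. "4412_evjvqa"
            "conversations": json.loads(raw),  # [{"from": "human", ...}, {"from": "gpt", ...}]
        }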
00000004412.jpg
4412_evjvqa
[{"from": "human", "value": "what holiday do these people go out for a walk?\n<image>"}, {"from": "gpt", "value": "the chirstmas"}]
00000004412.jpg
4412_evjvqa
[{"from": "human", "value": "what is the holiday decorated these statues?\n<image>"}, {"from": "gpt", "value": "the chirstmas"}]
00000004412.jpg
4412_evjvqa
[{"from": "human", "value": "where are these people going to visit?\n<image>"}, {"from": "gpt", "value": "at the christmas decoration area"}]
00000004414.jpg
4414_evjvqa
[{"from": "human", "value": "where is the road lead to?\n<image>"}, {"from": "gpt", "value": "the historical monument"}]
00000004414.jpg
4414_evjvqa
[{"from": "human", "value": "what do people put on both sides of the road?\n<image>"}, {"from": "gpt", "value": "planting the bushes"}]
00000004417.jpg
4417_evjvqa
[{"from": "human", "value": "where are these two rocks placed?\n<image>"}, {"from": "gpt", "value": "in front of the thatched house"}]
00000004417.jpg
4417_evjvqa
[{"from": "human", "value": "what do people put in front of the thatched house?\n<image>"}, {"from": "gpt", "value": "there are two rocks"}]
00000004419.jpg
4419_evjvqa
[{"from": "human", "value": "where is the woman going?\n<image>"}, {"from": "gpt", "value": "this woman is bringing rice to her relatives"}]
00000004419.jpg
4419_evjvqa
[{"from": "human", "value": "where is the woman going?\n<image>"}, {"from": "gpt", "value": "she is walking past two fences"}]
00000004420.jpg
4420_evjvqa
[{"from": "human", "value": "how many people are choosing clothes at the clothes shop on the left hand side?\n<image>"}, {"from": "gpt", "value": "there are two girls"}]
00000004420.jpg
4420_evjvqa
[{"from": "human", "value": "which way is the girl in the black t-shirt looking?\n<image>"}, {"from": "gpt", "value": "the clothes shop"}]
00000004421.jpg
4421_evjvqa
[{"from": "human", "value": "what do people put on the sidewalks on both sides of the road?\n<image>"}, {"from": "gpt", "value": "they plant the old trees"}]
00000004422.jpg
4422_evjvqa
[{"from": "human", "value": "what is the group of people standing?\n<image>"}, {"from": "gpt", "value": "at the fair area"}]
00000004422.jpg
4422_evjvqa
[{"from": "human", "value": "how many men are in this group of friends?\n<image>"}, {"from": "gpt", "value": "two"}]
00000004422.jpg
4422_evjvqa
[{"from": "human", "value": "how many girls are in this group of friends?\n<image>"}, {"from": "gpt", "value": "four"}]
00000004428.jpg
4428_evjvqa
[{"from": "human", "value": "where is this man taking the girl?\n<image>"}, {"from": "gpt", "value": "going to the supermarket to buy vegetable"}]
00000004428.jpg
4428_evjvqa
[{"from": "human", "value": "where does the girl go with the boy?\n<image>"}, {"from": "gpt", "value": "the supermarket"}]
00000004428.jpg
4428_evjvqa
[{"from": "human", "value": "what is the boy doing?\n<image>"}, {"from": "gpt", "value": "choosing vegetables"}]
00000004428.jpg
4428_evjvqa
[{"from": "human", "value": "what is the woman in the pink t-shirt doing?\n<image>"}, {"from": "gpt", "value": "choosing vegetable with the boy"}]
00000004428.jpg
4428_evjvqa
[{"from": "human", "value": "where are these men and women standing?\n<image>"}, {"from": "gpt", "value": "in the supermarket"}]
00000004433.jpg
4433_evjvqa
[{"from": "human", "value": "who is goinf to the supermarket with the man in blue t-shirt?\n<image>"}, {"from": "gpt", "value": "the man is wearing white t-shirt"}]
00000004433.jpg
4433_evjvqa
[{"from": "human", "value": "where is the boy in purple t-shirt with black mask standing?\n<image>"}, {"from": "gpt", "value": "at the vegetable stall in the supermarket"}]
00000004433.jpg
4433_evjvqa
[{"from": "human", "value": "what is the woman in yellow t-shirt doing?\n<image>"}, {"from": "gpt", "value": "she is choosing fruits"}]
00000004433.jpg
4433_evjvqa
[{"from": "human", "value": "what is the boy in white t-shirt going with the boy in blue t-shirt doing?\n<image>"}, {"from": "gpt", "value": "he is taking fruit and putting it in a bag"}]
00000004439.jpg
4439_evjvqa
[{"from": "human", "value": "what is the woman in yellow helmet doing?\n<image>"}, {"from": "gpt", "value": "in the restaurant"}]
00000004439.jpg
4439_evjvqa
[{"from": "human", "value": "what is the woman in white shirt doing?\n<image>"}, {"from": "gpt", "value": "in the restaurant"}]
00000004439.jpg
4439_evjvqa
[{"from": "human", "value": "who is the woman sitting at the table with?\n<image>"}, {"from": "gpt", "value": "the man is wearing white shirt"}]
00000004439.jpg
4439_evjvqa
[{"from": "human", "value": "whose car is parked in front of the restaurant?\n<image>"}, {"from": "gpt", "value": "it is of the woman in white shirt"}]
00000004439.jpg
4439_evjvqa
[{"from": "human", "value": "who is the man sitting at the table with?\n<image>"}, {"from": "gpt", "value": "the woman is wearing the white shirt"}]
00000004440.jpg
4440_evjvqa
[{"from": "human", "value": "where is the people standing?\n<image>"}, {"from": "gpt", "value": "these people are standing around the table of food"}]
00000004440.jpg
4440_evjvqa
[{"from": "human", "value": "what is on the table?\n<image>"}, {"from": "gpt", "value": "types of food"}]
00000004444.jpg
4444_evjvqa
[{"from": "human", "value": "what items the man in white shirt selling?\n<image>"}, {"from": "gpt", "value": "types of shoes"}]
00000004444.jpg
4444_evjvqa
[{"from": "human", "value": "where is the woman in ao dai visiting?\n<image>"}, {"from": "gpt", "value": "in the shopping fair"}]
00000004444.jpg
4444_evjvqa
[{"from": "human", "value": "where are the stalls sold?\n<image>"}, {"from": "gpt", "value": "in the shopping fair"}]
00000004445.jpg
4445_evjvqa
[{"from": "human", "value": "how many turns are there?\n<image>"}, {"from": "gpt", "value": "three"}]
00000004446.jpg
4446_evjvqa
[{"from": "human", "value": "what is the woman in yellow doing?\n<image>"}, {"from": "gpt", "value": "choosing the products"}]
00000004446.jpg
4446_evjvqa
[{"from": "human", "value": "where is the woman in the yellow shirt choosing goods?\n<image>"}, {"from": "gpt", "value": "in the supermarket"}]
00000004446.jpg
4446_evjvqa
[{"from": "human", "value": "which area is the woman in the yellow shirt choosing?\n<image>"}, {"from": "gpt", "value": "vegetable stall"}]
00000004446.jpg
4446_evjvqa
[{"from": "human", "value": "what is the woman in yellow intend to buy?\n<image>"}, {"from": "gpt", "value": "vegetable"}]
00000004448.jpg
4448_evjvqa
[{"from": "human", "value": "where is this view recorded?\n<image>"}, {"from": "gpt", "value": "the downwards"}]
00000004450.jpg
4450_evjvqa
[{"from": "human", "value": "what is the woman in conical hat doing?\n<image>"}, {"from": "gpt", "value": "she is rowing a canoe"}]
00000004450.jpg
4450_evjvqa
[{"from": "human", "value": "where is the woman in conical hat sitting?\n<image>"}, {"from": "gpt", "value": "on the boat"}]
00000004451.jpg
4451_evjvqa
[{"from": "human", "value": "what is this ship used to transport?\n<image>"}, {"from": "gpt", "value": "the containers"}]
00000004451.jpg
4451_evjvqa
[{"from": "human", "value": "where do people put the containers?\n<image>"}, {"from": "gpt", "value": "on the ship"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "what is the woman in blue doing?\n<image>"}, {"from": "gpt", "value": "she is instructing the girl i red shirt to buy goods"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "what is the girl in red doing?\n<image>"}, {"from": "gpt", "value": "choosing some fruits"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "what item is the girl intending to buy?\n<image>"}, {"from": "gpt", "value": "lots of fruits"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "what is the woman in the blue shirt working?\n<image>"}, {"from": "gpt", "value": "working as a fruit counter clerk in a supermarket"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "where is the girl in red standing?\n<image>"}, {"from": "gpt", "value": "the fruit stall"}]
00000004452.jpg
4452_evjvqa
[{"from": "human", "value": "where is the woman in the blue shirt pointing?\n<image>"}, {"from": "gpt", "value": "the fruit stall"}]
00000004453.jpg
4453_evjvqa
[{"from": "human", "value": "which stall is the woman in black standing at?\n<image>"}, {"from": "gpt", "value": "the stuffed animal stall"}]
00000004457.jpg
4457_evjvqa
[{"from": "human", "value": "what do the stalls in this area sell?\n<image>"}, {"from": "gpt", "value": "selling all kinds of clothes and fabrics"}]
00000004457.jpg
4457_evjvqa
[{"from": "human", "value": "what do these people come here to buy?\n<image>"}, {"from": "gpt", "value": "choosing clothes"}]
00000004460.jpg
4460_evjvqa
[{"from": "human", "value": "what is this woman holding in her hand?\n<image>"}, {"from": "gpt", "value": "the gift "}]
00000004460.jpg
4460_evjvqa
[{"from": "human", "value": "where is the girl sitting?\n<image>"}, {"from": "gpt", "value": "in the room with a christmas tree"}]
00000004460.jpg
4460_evjvqa
[{"from": "human", "value": "what color shirt are you wearing?\n<image>"}, {"from": "gpt", "value": "red"}]
00000004461.jpg
4461_evjvqa
[{"from": "human", "value": "what are these trays used for?\n<image>"}, {"from": "gpt", "value": "to store all kinds of food"}]
00000004462.jpg
4462_evjvqa
[{"from": "human", "value": "where is the people visiting?\n<image>"}, {"from": "gpt", "value": "these people are sightseeing in a tunnel area"}]
00000004462.jpg
4462_evjvqa
[{"from": "human", "value": "what is the woman in white doing?\n<image>"}, {"from": "gpt", "value": "she is going down to the underground tunnel"}]
00000004464.jpg
4464_evjvqa
[{"from": "human", "value": "where is the people standing?\n<image>"}, {"from": "gpt", "value": "in the clothes shop"}]
00000004464.jpg
4464_evjvqa
[{"from": "human", "value": "how many men are standing in the clothes shop?\n<image>"}, {"from": "gpt", "value": "two"}]
00000004464.jpg
4464_evjvqa
[{"from": "human", "value": "how many women are standing in the clothes shop?\n<image>"}, {"from": "gpt", "value": "two"}]
00000004465.jpg
4465_evjvqa
[{"from": "human", "value": "where are these people standing?\n<image>"}, {"from": "gpt", "value": "on the beach"}]
00000004465.jpg
4465_evjvqa
[{"from": "human", "value": "what are two men using yellow plastic bags doing?\n<image>"}, {"from": "gpt", "value": "they are picking up trash on the beach"}]
00000004465.jpg
4465_evjvqa
[{"from": "human", "value": "what does the man in the white shirt use to contain garbage?\n<image>"}, {"from": "gpt", "value": "the yellow plastic bag"}]
00000004465.jpg
4465_evjvqa
[{"from": "human", "value": "what is the man in white shirt doing?\n<image>"}, {"from": "gpt", "value": "to pick up trash on the beach"}]
00000004466.jpg
4466_evjvqa
[{"from": "human", "value": "where is the girl in green standing?\n<image>"}, {"from": "gpt", "value": "in the grocery store"}]
00000004469.jpg
4469_evjvqa
[{"from": "human", "value": "where is the group of young people sitting to eat?\n<image>"}, {"from": "gpt", "value": "at a sidewalk restaurant"}]
00000004471.jpg
4471_evjvqa
[{"from": "human", "value": "what is the woman doing?\n<image>"}, {"from": "gpt", "value": "choosing beef"}]
00000004471.jpg
4471_evjvqa
[{"from": "human", "value": "what is the woman in black intend to buy?\n<image>"}, {"from": "gpt", "value": "lots of beef"}]
00000004471.jpg
4471_evjvqa
[{"from": "human", "value": "what do they sell at this stall?\n<image>"}, {"from": "gpt", "value": "types of beef"}]
00000004471.jpg
4471_evjvqa
[{"from": "human", "value": "what is the woman in black holding?\n<image>"}, {"from": "gpt", "value": "the woman is holding two blisters of raw meat"}]
00000004472.jpg
4472_evjvqa
[{"from": "human", "value": "where is the group of people standing?\n<image>"}, {"from": "gpt", "value": "on top of the tunnel"}]
00000004472.jpg
4472_evjvqa
[{"from": "human", "value": "where is the group of people intend visitting?\n<image>"}, {"from": "gpt", "value": "the historical monument"}]
00000004473.jpg
4473_evjvqa
[{"from": "human", "value": "where is the girl in black standing?\n<image>"}, {"from": "gpt", "value": "she is standing at the escalator in the supermarket"}]
00000004473.jpg
4473_evjvqa
[{"from": "human", "value": "who is going shopping with the girl in black?\n<image>"}, {"from": "gpt", "value": "the man"}]
00000004474.jpg
4474_evjvqa
[{"from": "human", "value": "where is the girl standing?\n<image>"}, {"from": "gpt", "value": "this girl is sitting near the christmas tree"}]
00000004477.jpg
4477_evjvqa
[{"from": "human", "value": "how many employees are standing at the reception?\n<image>"}, {"from": "gpt", "value": "there are two female staff"}]
00000004477.jpg
4477_evjvqa
[{"from": "human", "value": "how many people are standing at the freezer?\n<image>"}, {"from": "gpt", "value": "four"}]
00000004477.jpg
4477_evjvqa
[{"from": "human", "value": "what is the woman in the blue coat doing?\n<image>"}, {"from": "gpt", "value": "she is choosing the beef in the freezer"}]
00000004482.jpg
4482_evjvqa
[{"from": "human", "value": "what is the woman doing?\n<image>"}, {"from": "gpt", "value": "she is choosing some fresh food"}]
00000004482.jpg
4482_evjvqa
[{"from": "human", "value": "what item does the woman intend to buy?\n<image>"}, {"from": "gpt", "value": "some raw meat"}]
00000004482.jpg
4482_evjvqa
[{"from": "human", "value": "what does the stall next to the stall the woman is standing on sell for?\n<image>"}, {"from": "gpt", "value": "vegetables"}]
00000004485.jpg
4485_evjvqa
[{"from": "human", "value": "what color coat does the woman selling bouquet wear?\n<image>"}, {"from": "gpt", "value": "the pink overcoat"}]
00000004485.jpg
4485_evjvqa
[{"from": "human", "value": "what is the woman in pink jacket doing?\n<image>"}, {"from": "gpt", "value": "she is handing the bouquet of flowers to the woman wearing the helmet"}]
00000004485.jpg
4485_evjvqa
[{"from": "human", "value": "what item does the woman in black jacket buy?\n<image>"}, {"from": "gpt", "value": "the woman in black coat is buying a bouquet of flowers"}]
00000004485.jpg
4485_evjvqa
[{"from": "human", "value": "where is the woman in black standing?\n<image>"}, {"from": "gpt", "value": "at the bouquet stall"}]
00000004487.jpg
4487_evjvqa
[{"from": "human", "value": "what are the two teenagers doing?\n<image>"}, {"from": "gpt", "value": "they are taking a photos"}]
00000004487.jpg
4487_evjvqa
[{"from": "human", "value": "who is photopraphed by the boy?\n<image>"}, {"from": "gpt", "value": "the girl"}]
00000004487.jpg
4487_evjvqa
[{"from": "human", "value": "who is photopraphed by the girl?\n<image>"}, {"from": "gpt", "value": "the boy"}]
00000004487.jpg
4487_evjvqa
[{"from": "human", "value": "what is the boy holding the camera doing?\n<image>"}, {"from": "gpt", "value": "to take a photo for the girl"}]
00000004489.jpg
4489_evjvqa
[{"from": "human", "value": "which are the stalls selling?\n<image>"}, {"from": "gpt", "value": "clothes"}]
00000004492.jpg
4492_evjvqa
[{"from": "human", "value": "what is the boy doing?\n<image>"}, {"from": "gpt", "value": "he is selling the balloons"}]
00000004492.jpg
4492_evjvqa
[{"from": "human", "value": "where is the boy selling the balloons?\n<image>"}, {"from": "gpt", "value": "at the foot of the bridge"}]
00000004492.jpg
4492_evjvqa
[{"from": "human", "value": "what does this boy stand by the roadside selling?\n<image>"}, {"from": "gpt", "value": "selling the ballons"}]
00000004493.jpg
4493_evjvqa
[{"from": "human", "value": "how many men are standing in the shop?\n<image>"}, {"from": "gpt", "value": "two"}]
00000004493.jpg
4493_evjvqa
[{"from": "human", "value": "what are the two men doing?\n<image>"}, {"from": "gpt", "value": "they are choosing poducts"}]
00000004493.jpg
4493_evjvqa
[{"from": "human", "value": "where are the two men standing?\n<image>"}, {"from": "gpt", "value": "at the stall in the market"}]
00000004495.jpg
4495_evjvqa
[{"from": "human", "value": "where is the woman in the conical hat sitting?\n<image>"}, {"from": "gpt", "value": "on the sidewalk"}]
00000004495.jpg
4495_evjvqa
[{"from": "human", "value": "what is the woman in the conical hat doing?\n<image>"}, {"from": "gpt", "value": "she is selling on the sidewalk"}]