<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-18T07:19:01Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:10230/32703" metadataPrefix="oai_dc">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:10230/32703</identifier><datestamp>2025-12-20T16:56:06Z</datestamp><setSpec>com_2072_6</setSpec><setSpec>col_2072_452952</setSpec></header><metadata><oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
   <dc:title>“Look, some green circles!”: learning to quantify from images</dc:title>
   <dc:creator>Boleda, Gemma</dc:creator>
   <dc:creator>Sorodoc, Ionut-Teodor</dc:creator>
   <dc:creator>Lazaridou, Angeliki</dc:creator>
   <dc:creator>Herbelot, Aurélie</dc:creator>
   <dc:creator>Pezzelle, Sandro</dc:creator>
   <dc:creator>Bernardi, Raffaella</dc:creator>
   <dc:subject>Language and vision</dc:subject>
   <dc:subject>Grounding</dc:subject>
   <dc:subject>Quantification</dc:subject>
   <dc:subject>Distributed representations</dc:subject>
   <dc:subject>Semantics</dc:subject>
   <dc:subject>Computational semantics</dc:subject>
   <dc:subject>Computational Linguistics</dc:subject>
   <dc:subject>Natural Language Processing</dc:subject>
   <dc:description>In this paper, we investigate whether a neural network model can learn the meaning of natural language quantifiers (no, some and all) from their use in visual contexts. We show that memory networks perform&#xd;
well in this task, and that explicit counting is not necessary to the system’s performance, supporting psycholinguistic evidence on the acquisition of quantifiers.</dc:description>
   <dc:description>This project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 655577 (LOVe); ERC 2011 Starting Independent Research Grant n. 283554 (COMPOSES).</dc:description>
   <dc:date>2017-08-25T17:17:10Z</dc:date>
   <dc:date>2017-08-25T17:17:10Z</dc:date>
   <dc:date>2016</dc:date>
   <dc:type>info:eu-repo/semantics/conferenceObject</dc:type>
   <dc:type>info:eu-repo/semantics/publishedVersion</dc:type>
   <dc:identifier>Sorodoc I, Lazaridou A, Boleda G, Herbelot A, Pezzelle S, Bernardi R. “Look, some green circles!”: learning to quantify from images. In: Proceedings of the 5th Workshop on Vision and Language (ACL 2016). Berlin: Association for Computational Linguistics; 2016. p. 75-79.</dc:identifier>
   <dc:identifier>http://hdl.handle.net/10230/32703</dc:identifier>
   <dc:language>eng</dc:language>
   <dc:relation>Proceedings of the 5th Workshop on Vision and Language (ACL 2016). Berlin: Association for Computational Linguistics; 2016. p. 75-79</dc:relation>
   <dc:relation>info:eu-repo/grantAgreement/EC/H2020/655577</dc:relation>
   <dc:relation>info:eu-repo/grantAgreement/EC/FP7/283554</dc:relation>
   <dc:rights>© ACL, Creative Commons Attribution 4.0 License</dc:rights>
   <dc:rights>http://creativecommons.org/licenses/by/4.0/</dc:rights>
   <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
   <dc:format>application/pdf</dc:format>
   <dc:format>application/pdf</dc:format>
   <dc:publisher>ACL (Association for Computational Linguistics)</dc:publisher>
</oai_dc:dc></metadata></record></GetRecord></OAI-PMH>