<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-13T02:59:17Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:10230/4795" metadataPrefix="didl">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:10230/4795</identifier><datestamp>2025-12-20T01:55:43Z</datestamp><setSpec>com_2072_6</setSpec><setSpec>col_2072_452953</setSpec></header><metadata><d:DIDL xmlns:d="urn:mpeg:mpeg21:2002:02-DIDL-NS" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="urn:mpeg:mpeg21:2002:02-DIDL-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd">
   <d:Item id="hdl_10230_4795">
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <dii:Identifier xmlns:dii="urn:mpeg:mpeg21:2002:01-DII-NS" xsi:schemaLocation="urn:mpeg:mpeg21:2002:01-DII-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd">urn:hdl:10230/4795</dii:Identifier>
         </d:Statement>
      </d:Descriptor>
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
               <dc:title>Decision making in uncertain and changing environments</dc:title>
               <dc:creator>Schlag, Karl</dc:creator>
               <dc:creator>Zapechelnyuk, Andriy</dc:creator>
               <dc:subject>adaptive learning</dc:subject>
               <dc:subject>experts</dc:subject>
               <dc:subject>distribution-free</dc:subject>
               <dc:subject>e-optimality</dc:subject>
               <dc:subject>hannan regret</dc:subject>
               <dc:subject>Microeconomics</dc:subject>
               <dc:subject>Statistics, Econometrics and Quantitative Methods</dc:subject>
               <dc:description>We consider an agent who has to repeatedly make choices in an uncertain
and changing environment, who has full information of the past, who discounts
future payoffs, but who has no prior. We provide a learning algorithm that
performs almost as well as the best of a given finite number of experts or
benchmark strategies and does so at any point in time, provided the agent
is sufficiently patient. The key is to find the appropriate degree of forgetting
distant past. Standard learning algorithms that treat recent and distant past
equally do not have the sequential epsilon optimality property.</dc:description>
               <dc:date>2017-07-26T10:50:34Z</dc:date>
               <dc:date>2017-07-26T10:50:34Z</dc:date>
               <dc:date>2009-06-01</dc:date>
               <dc:date>2017-07-23T02:12:36Z</dc:date>
               <dc:type>info:eu-repo/semantics/workingPaper</dc:type>
               <dc:relation>Economics and Business Working Papers Series; 1160</dc:relation>
                <dc:rights>L&apos;accés als continguts d&apos;aquest document queda condicionat a l&apos;acceptació de les condicions d&apos;ús establertes per la següent llicència Creative Commons</dc:rights>
               <dc:rights>http://creativecommons.org/licenses/by-nc-nd/3.0/es/</dc:rights>
               <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
            </oai_dc:dc>
         </d:Statement>
      </d:Descriptor>
   </d:Item>
</d:DIDL></metadata></record></GetRecord></OAI-PMH>