<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-13T01:18:10Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:10230/71932" metadataPrefix="didl">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:10230/71932</identifier><datestamp>2025-11-21T04:22:11Z</datestamp><setSpec>com_2072_6</setSpec><setSpec>col_2072_452952</setSpec></header><metadata><d:DIDL xmlns:d="urn:mpeg:mpeg21:2002:02-DIDL-NS" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="urn:mpeg:mpeg21:2002:02-DIDL-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd">
   <d:DIDLInfo>
      <dcterms:created xmlns:dcterms="http://purl.org/dc/terms/" xsi:schemaLocation="http://purl.org/dc/terms/ http://dublincore.org/schemas/xmls/qdc/dcterms.xsd">2025-11-21T04:22:11Z</dcterms:created>
   </d:DIDLInfo>
   <d:Item id="hdl_10230_71932">
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <dii:Identifier xmlns:dii="urn:mpeg:mpeg21:2002:01-DII-NS" xsi:schemaLocation="urn:mpeg:mpeg21:2002:01-DII-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd">urn:hdl:10230/71932</dii:Identifier>
         </d:Statement>
      </d:Descriptor>
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
               <dc:title>Leveraging melodic context for improved Svara representation</dc:title>
               <dc:creator>Nuttall, Thomas</dc:creator>
               <dc:creator>Vijayan, Vivek</dc:creator>
               <dc:creator>Serra, Xavier</dc:creator>
               <dc:creator>Pearson, Lara</dc:creator>
               <dc:subject>Representation learning</dc:subject>
               <dc:subject>Time series analysis</dc:subject>
               <dc:subject>Carnatic music</dc:subject>
               <dc:subject>Coarticulation</dc:subject>
               <dc:description>For the South Indian musical tradition known as Carnatic music, embeddings of svara (note) pitch time series have proven useful for tasks such as svara classification and performance analysis. In this paper, we extend an existing embedding method by incorporating findings from musicological research on the relationship between the performance of a svara and its immediate melodic context, in order to improve the learning of these embedding models. We present a context-aware GRU-based model, adapting the existing DeepGRU architecture to encode both svara and its surrounding melodic context, before combining them via a co-attention mechanism prior to classification. For a ground truth dataset of 2,077 expert svara annotations across two performances in rāga Bhairavi, we observe that the inclusion of melodic context leads to a 6.6% absolute increase in F1 score for svara label classification (from 78.3% to 84.9%), and a 7.8% absolute increase (from 59.9% to 67.7%) for classification of svara-form: sub-svara clusters that capture gamaka (ornamentation) variations in the performed svara.</dc:description>
               <dc:description>This research was carried out as part of the "IA y Música: Cátedra en Inteligencia Artificial y Música" (TSI-100929-2023-1), funded by the Secretaría de Estado de Digitalización e Inteligencia Artificial, and the European Union-Next Generation EU, under the program Cátedras ENIA 2022 para la creación de cátedras universidad-empresa en IA.</dc:description>
               <dc:date>2025-11-21T04:22:11Z</dc:date>
               <dc:date>2025-11-21T04:22:11Z</dc:date>
               <dc:date>2025-11-19T08:32:07Z</dc:date>
               <dc:date>2025-11-19T08:32:07Z</dc:date>
               <dc:date>2025</dc:date>
               <dc:type>info:eu-repo/semantics/conferenceObject</dc:type>
               <dc:type>info:eu-repo/semantics/publishedVersion</dc:type>
               <dc:identifier>http://hdl.handle.net/10230/71932</dc:identifier>
               <dc:relation>The 17th International Symposium on Computer Music Multidisciplinary Research CMMR 2025; 2025 Nov 3-7; London, UK. Marseille: Laboratory PRISM; 2025.</dc:relation>
               <dc:rights>All copyrights remain with the authors.&#xd;
Creative Commons Attribution 4.0 International</dc:rights>
               <dc:rights>http://creativecommons.org/licenses/by/4.0</dc:rights>
               <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
               <dc:publisher>Laboratory PRISM</dc:publisher>
            </oai_dc:dc>
         </d:Statement>
      </d:Descriptor>
   </d:Item>
</d:DIDL></metadata></record></GetRecord></OAI-PMH>