<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-13T16:57:04Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:10230/34505" metadataPrefix="qdc">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:10230/34505</identifier><datestamp>2025-12-22T13:44:28Z</datestamp><setSpec>com_2072_6</setSpec><setSpec>col_2072_452952</setSpec></header><metadata><qdc:qualifieddc xmlns:qdc="http://dspace.org/qualifieddc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="http://purl.org/dc/elements/1.1/ http://dublincore.org/schemas/xmls/qdc/2006/01/06/dc.xsd http://purl.org/dc/terms/ http://dublincore.org/schemas/xmls/qdc/2006/01/06/dcterms.xsd http://dspace.org/qualifieddc/ http://www.ukoln.ac.uk/metadata/dcmi/xmlschema/qualifieddc.xsd">
   <dc:title>Supporting soundscape design in virtual environments with content-based audio retrieval</dc:title>
   <dc:creator>Janer Mestres, Jordi</dc:creator>
   <dc:creator>Finney, Nathaniel</dc:creator>
   <dc:creator>Roma Trepat, Gerard</dc:creator>
   <dc:creator>Kersten, Stefan</dc:creator>
   <dc:creator>Serra, Xavier</dc:creator>
   <dc:subject>Content-based</dc:subject>
   <dc:subject>Audio retrieval</dc:subject>
   <dc:subject>Freesound</dc:subject>
   <dc:subject>Virtual worlds</dc:subject>
   <dc:subject>Soundscape</dc:subject>
   <dcterms:abstract>The computer-assisted design of soundscapes for virtual environments has&#xd;
received far less attention than the creation of graphical content. In this “think&#xd;
piece” we briefly introduce the principal characteristics of a framework under&#xd;
development that aims towards the creation of an automatic sonification of virtual&#xd;
worlds. As a starting point, the proposed system is based on an on-line collaborative&#xd;
sound repository that, together with content-based audio retrieval tools, assists the&#xd;
search of sounds to be associated with 3D models or scenes.</dcterms:abstract>
   <dcterms:issued>2018-04-27T09:11:36Z</dcterms:issued>
   <dcterms:issued>2009</dcterms:issued>
   <dc:type>info:eu-repo/semantics/article</dc:type>
   <dc:type>info:eu-repo/semantics/publishedVersion</dc:type>
   <dc:relation>Journal of Virtual Worlds Research. 2009:2(3):4-6.</dc:relation>
   <dc:rights>This work is copyrighted under the Creative Commons Attribution-No Derivative Works 3.0 United States License by the Journal of Virtual Worlds Research.</dc:rights>
   <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
   <dc:publisher>University of Texas at Austin. Department of Radio-Television-Film</dc:publisher>
</qdc:qualifieddc></metadata></record></GetRecord></OAI-PMH>