<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-13T01:03:46Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:2117/356936" metadataPrefix="qdc">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:2117/356936</identifier><datestamp>2025-07-22T16:54:32Z</datestamp><setSpec>com_2072_1033</setSpec><setSpec>col_2072_452951</setSpec></header><metadata><qdc:qualifieddc xmlns:qdc="http://dspace.org/qualifieddc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="http://purl.org/dc/elements/1.1/ http://dublincore.org/schemas/xmls/qdc/2006/01/06/dc.xsd http://purl.org/dc/terms/ http://dublincore.org/schemas/xmls/qdc/2006/01/06/dcterms.xsd http://dspace.org/qualifieddc/ http://www.ukoln.ac.uk/metadata/dcmi/xmlschema/qualifieddc.xsd">
   <dc:title>Efficient deep ensembles by averaging neural networks in parameter space</dc:title>
   <dc:creator>Norris Mitchell, Philip</dc:creator>
   <dc:subject>Àrees temàtiques de la UPC::Informàtica::Intel·ligència artificial</dc:subject>
   <dc:subject>Artificial intelligence</dc:subject>
   <dc:subject>Ensemble learning</dc:subject>
   <dc:subject>Deep ensembles</dc:subject>
   <dc:subject>Knowledge distillation</dc:subject>
   <dc:subject>Permutation learning</dc:subject>
   <dc:subject>Intel·ligència artificial</dc:subject>
   <dc:subject>Classificació AMS::68 Computer science::68T Artificial intelligence</dc:subject>
   <dcterms:abstract>Although deep ensembles provide large accuracy boosts relative to individual models, their use is not widespread in environments in which computational constraints are limited, as deep ensembles require storing M models and require M forward passes at prediction time. We propose a novel, computationally efficient alternative, which we name permAVG. Although deep ensembles cannot simply be averaged in parameter space, as all models find distinct, perhaps distant, local optima, permAVG exploits the symmetries of the loss landscape by learning permutations, such that all M models can be permuted into the same local optimum and can thereafter safely be averaged.</dcterms:abstract>
   <dcterms:issued>2021-10</dcterms:issued>
   <dc:type>Master thesis</dc:type>
   <dc:rights>Open Access</dc:rights>
   <dc:publisher>Universitat Politècnica de Catalunya</dc:publisher>
</qdc:qualifieddc></metadata></record></GetRecord></OAI-PMH>