<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-14T05:38:06Z</responseDate><request verb="GetRecord" identifier="oai:www.recercat.cat:10256/28363" metadataPrefix="didl">https://recercat.cat/oai/request</request><GetRecord><record><header><identifier>oai:recercat.cat:10256/28363</identifier><datestamp>2026-03-07T19:50:51Z</datestamp><setSpec>com_2072_452966</setSpec><setSpec>com_2072_2054</setSpec><setSpec>col_2072_452969</setSpec></header><metadata><d:DIDL xmlns:d="urn:mpeg:mpeg21:2002:02-DIDL-NS" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:doc="http://www.lyncode.com/xoai" xsi:schemaLocation="urn:mpeg:mpeg21:2002:02-DIDL-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd">
   <d:DIDLInfo>
      <dcterms:created xmlns:dcterms="http://purl.org/dc/terms/" xsi:schemaLocation="http://purl.org/dc/terms/ http://dublincore.org/schemas/xmls/qdc/dcterms.xsd">2026-03-07T19:50:51Z</dcterms:created>
   </d:DIDLInfo>
   <d:Item id="hdl_10256_28363">
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <dii:Identifier xmlns:dii="urn:mpeg:mpeg21:2002:01-DII-NS" xsi:schemaLocation="urn:mpeg:mpeg21:2002:01-DII-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd">urn:hdl:10256/28363</dii:Identifier>
         </d:Statement>
      </d:Descriptor>
      <d:Descriptor>
         <d:Statement mimeType="application/xml; charset=utf-8">
            <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
               <dc:title>Vision-Based Tracking and Following of a Moving Target Using an Unmanned Aerial Vehicle</dc:title>
               <dc:creator>Rustamani, Fatima Yousif</dc:creator>
               <dc:subject>Autonomous aerial vehicles</dc:subject>
               <dc:subject>Vehicles aeris autònoms</dc:subject>
               <dc:subject>UAV (Vehicle aeri no tripulat)</dc:subject>
               <dc:subject>Drone aircraft</dc:subject>
               <dc:subject>Object tracking (Computer vision)</dc:subject>
               <dc:subject>Pattern recognition systems</dc:subject>
               <dc:subject>Patrons, Sistemes de reconeixement de</dc:subject>
               <dc:subject>Algorithms</dc:subject>
               <dc:subject>Algorismes</dc:subject>
               <dc:subject>Seguiment d’objectes (visió per computador)</dc:subject>
                <dc:description>This work introduces an autonomous system for mobile target tracking and following using vision-based (RGB) uni-modal data, specifically designed for unmanned&#xd;
aerial vehicles (UAVs) and enhanced by multi-target information. It addresses the&#xd;
gap in current research by applying state-of-the-art multi-object tracking (MOT)&#xd;
techniques to target following scenarios, moving beyond traditional single-object&#xd;
tracking (SOT) methods. The system combines the real-time object detector&#xd;
YOLOv8 with MOT algorithms BoT-SORT and ByteTrack to extract and utilize multi-target data, improving re-identification performance and reducing ID&#xd;
switches, especially under partial or full occlusions in dynamic environments. A&#xd;
3D flight control mechanism is implemented to enable responsive target following,&#xd;
maintaining line-of-sight despite changes in target speed or direction. The system&#xd;
is validated through simulation testing, demonstrating accurate and robust tracking that effectively differentiates the intended target from surrounding bystanders.&#xd;
By tackling key challenges, this work paves the way for practical UAV applications&#xd;
in vision-based target following using multi-target information.</dc:description>
               <dc:description>9</dc:description>
               <dc:date>2026-03-07T19:50:51Z</dc:date>
               <dc:date>2026-03-07T19:50:51Z</dc:date>
               <dc:date>2025-06</dc:date>
               <dc:type>info:eu-repo/semantics/masterThesis</dc:type>
               <dc:identifier>https://hdl.handle.net/10256/28363</dc:identifier>
               <dc:rights>Attribution-NonCommercial-NoDerivatives 4.0 International</dc:rights>
               <dc:rights>http://creativecommons.org/licenses/by-nc-nd/4.0/</dc:rights>
               <dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
               <dc:publisher>Universitat de Girona. Institut de Recerca en Visió per Computador i Robòtica</dc:publisher>
               <dc:source>Erasmus Mundus Joint Master in Intelligent Field Robotic Systems (IFROS)</dc:source>
            </oai_dc:dc>
         </d:Statement>
      </d:Descriptor>
   </d:Item>
</d:DIDL></metadata></record></GetRecord></OAI-PMH>