<?xml version='1.0' encoding='UTF-8'?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">
  <responseDate>2026-03-10T21:49:35Z</responseDate>
  <request identifier="oai:kougei.repo.nii.ac.jp:02000109" verb="GetRecord" metadataPrefix="oai_dc">https://kougei.repo.nii.ac.jp/oai</request>
  <GetRecord>
    <record>
      <header>
        <identifier>oai:kougei.repo.nii.ac.jp:02000109</identifier>
        <datestamp>2024-06-24T23:56:59Z</datestamp>
        <setSpec>12:17:1719207285271:1719207449675</setSpec>
      </header>
      <metadata>
        <oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
          <dc:title>Vision Transformer を用いた動画像の手話認識に関する研究調査</dc:title>
          <dc:title>Sign language recognition in videos using Vision Transformer</dc:title>
          <dc:creator>三浦, 璃桜</dc:creator>
          <dc:creator>3941</dc:creator>
          <dc:creator>ミウラ, リオ</dc:creator>
          <dc:creator>Miura, Rio</dc:creator>
          <dc:creator>近藤, 多聞</dc:creator>
          <dc:creator>3942</dc:creator>
          <dc:creator>コンドウ, タモン</dc:creator>
          <dc:creator>Kondo, Tamon</dc:creator>
          <dc:creator>姜, 有宣</dc:creator>
          <dc:creator>3634</dc:creator>
          <dc:creator>Kang, Yousun</dc:creator>
          <dc:subject>日本語の手話認識</dc:subject>
          <dc:subject>Japanese Sign Language Recognition</dc:subject>
          <dc:description>This study aims to build a recognition system for sign language video images using Vision Transformer to support communication for the hearing impaired. In the experiments, the system learned images of signs including actions and static signs and verified the accuracy. 82% accuracy was obtained in a 10-class experiment, suggesting the influence of insufficient data sets and model settings. Future work is needed to increase the amount of data and improve discrimination accuracy, as well as to investigate methods for discriminating subtle differences between signs that are similar in form and motion.</dc:description>
          <dc:description>departmental bulletin paper</dc:description>
          <dc:publisher>東京工芸大学工学部</dc:publisher>
          <dc:date>2024-06-25</dc:date>
          <dc:format>application/pdf</dc:format>
          <dc:identifier>東京工芸大学工学部紀要</dc:identifier>
          <dc:identifier>1</dc:identifier>
          <dc:identifier>47</dc:identifier>
          <dc:identifier>21</dc:identifier>
          <dc:identifier>27</dc:identifier>
          <dc:identifier>The Academic Reports, the Faculty of Engineering, Tokyo Polytechnic University</dc:identifier>
          <dc:identifier>AN1034237X</dc:identifier>
          <dc:identifier>03876055</dc:identifier>
          <dc:identifier>https://kougei.repo.nii.ac.jp/record/2000109/files/vol47-1-03.pdf</dc:identifier>
          <dc:identifier>https://kougei.repo.nii.ac.jp/records/2000109</dc:identifier>
          <dc:language>jpn</dc:language>
          <dc:rights>open access</dc:rights>
        </oai_dc:dc>
      </metadata>
    </record>
  </GetRecord>
</OAI-PMH>
