<!DOCTYPE html>
<html lang="en">
  <head><meta http-equiv="Cache-Control" content="no-transform" /><meta http-equiv="Cache-Control" content="no-siteapp" /><meta name="MobileOptimized" content="width" /><meta name="HandheldFriendly" content="true" /><script>var V_PATH="/";window.onerror=function(){ return true; };</script>
<meta property="og:image" content="/ritmo/english/people/management/alexanje/arjensenius_2025_2_150px.png"/>
    
    <meta charset="utf-8" >
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta id="viewport" name="viewport" content="width=device-width, initial-scale=1" />

    

    <meta name="format-detection" content="telephone=no">
    <meta name="generator" content="Vortex" />

    
      
        <title>
      
        Alexander Refsum Jensenius
       - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</title>
        <meta property="og:title" content="
      
        Alexander Refsum Jensenius
       - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion" />
      
    

    
  
  
  
  
  
  
  
  

  
    

    
    
    
      
      
        
        
          
          
            
                
            
            
            
            
              
            
          
          
        
      
    

    <meta name="twitter:card" content="summary" />
    <meta name="twitter:site" content="@unioslo" />
    <meta name="twitter:title" content="Alexander Refsum Jensenius" />

    
      <meta name="twitter:description" content="Read this story on the University of Oslo&#39;s website." />
    

    
      <meta name="twitter:image" content="/ritmo/english/people/management/alexanje/arjensenius_2025_2_150px.png" />
    

    
    
      <meta name="twitter:url" content="/ritmo/english/people/management/alexanje/index.html" />
    
  

    
  
  
  
  
  
  
  
  

  
    
    

    <meta property="og:url" content="/ritmo/english/people/management/alexanje/index.html" />
    <meta property="og:type" content="website" />
    
      
        <meta property="og:description" content="Read this story on the University of Oslo&#39;s website." />
      
    

    

    
      
      
        
        
          
        
      
    
  


    
  
  
  
  
  
  
  

  
    <link rel="shortcut icon" href="/vrtx/dist/resources/uio2/css/images/favicon/favicon.png?x-h=1774601544824">
  


    
  
  
  

  


    
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  
  

  

  
    <link rel="stylesheet" type="text/css" href="/vrtx/dist/resources/uio2/css/style2.css?x-h=1774601544824" />
  
  

  

  
    
  

  

   
     
       
     
     
       

         
         
       
     

     
   


    
        
      
    
  <meta name="description" content="Read this story on the University of Oslo&#39;s website." />
</head>

    
    
      
        
      
    

    
      <body class='www.uio.no not-for-ansatte header-context english faculty en '  id="vrtx-person">
    
  <!--stopindex-->

     
  
  
  
  
  
  

  <!-- Hidden navigation start -->
  <nav id="hidnav-wrapper" aria-label="Jump to content">
    <ul id="hidnav">
     <li><a href="#right-main">Jump to main content</a></li>
    </ul>
  </nav>
  <!-- Hidden navigation end -->



    

  
    <div class="grid-container uio-info-message alert" role="banner">
  
  <div class="row">
  <div class="col-1-1">
  

  
  
    
       &nbsp;
    
  
  
  

  </div>
  </div>
  </div>
    

   

    <header id="head-wrapper">
        <div id="head">

           
           <div class="uio-app-name">
                  <a href="/english/" class="uio-acronym georgia">UiO</a>
                  

                  
                    <a href="/ritmo/english" class="uio-host">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  
            </div>
            

            

            
              <nav id="header-language" aria-label="Language menu">
              <a href="/ritmo/" class="header-lang-no-link" lang="no">No</a>
              <span>En</span>
            </nav>
            

            <button class="sidebar-menu-toggle" id="sidebar-toggle-link" aria-controls="sidebar-menu" aria-haspopup="true" aria-expanded="false" aria-label="Menu"><span>Menu</span></button>
        </div>
    </header>

   <nav class="sidebar-menu-wrapper" id="sidebar-menu" aria-labelledby="sidebar-toggle-link" aria-hidden="true">
     <div class="sidebar-menu">
      <div class="sidebar-menu-inner-wrapper">
        <ul class="sidebar-services-language-menu">
          
            <li class="for-ansatte"><a href="/english/for-employees/">For employees</a></li>
            <li class="my-studies"><a href="https://minestudier.no/en/index.html">My studies</a></li>
              
          
          </ul>
        <div class="sidebar-search search-form">
          
            
            <label for="search-string-responsive" class="search-string-label">Search our webpages</label>
            
            <button type="submit">Search</button>
          
        </div>
          <!-- Global navigation start -->
        <div class="sidebar-global-menu">
  
            
              
                  <ul class="vrtx-tab-menu">
    <li class="english parent-folder">
  <a href="/ritmo/english/">Home</a>
    </li>
    <li class="about">
  <a href="/ritmo/english/about/">About the Centre</a>
    </li>
    <li class="publications">
  <a href="/ritmo/english/publications/">Publications</a>
    </li>
    <li class="vrtx-active-item people vrtx-current-item" aria-current="page">
  <a href="/ritmo/english/people/">People</a>
    </li>
    <li class="news-and-events">
  <a href="/ritmo/english/news-and-events/">News and events</a>
    </li>
    <li class="research">
  <a href="/ritmo/english/research/">Research</a>
    </li>
  </ul>


              
            
            
        </div>
        <!-- Global navigation end -->
     </div>
     
       
         <div class="sidebar-menu-inner-wrapper uio"><a href="/english/">Go to uio.no</a></div>
       
     
     </div>
   </nav>

   <div id="main" class="main">
     <div id="left-main">
         <nav id="left-menu-same-level-folders" aria-labelledby="left-menu-title">
           <span id="left-menu-title" style="display: none">Sub menu</span>
             <ul class="vrtx-breadcrumb-menu">
            <li class="vrtx-ancestor"> <a href="/ritmo/english/people/"><span>People</span></a></li>
            <li class="vrtx-ancestor"> <a href="/ritmo/english/people/management/"><span>Management</span></a></li>
            <li class="vrtx-parent" ><a class="vrtx-marked" href="/ritmo/english/people/management/alexanje/" aria-current="location"><span>Alexander Refsum Jensenius</span></a>

      <ul>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/archive/"><span>Archive</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/cv/"><span>CV</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/Portraits/"><span>Portraits</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/research/"><span>Research</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/supervision/"><span>Supervision</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/teaching/"><span>Teaching</span></a></li>
          <li class="vrtx-child"><a  href="/ritmo/english/people/management/alexanje/video/"><span>Video</span></a></li>
      </ul>

    </li>

  </ul>

         </nav>
     </div>

     <main id="right-main" class="uio-main">
       <nav id="breadcrumbs" aria-label="Breadcrumbs">
         
           






  <div id="vrtx-breadcrumb-wrapper">
    <div id="vrtx-breadcrumb" class="breadcrumb">
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-4">
            <a href="/ritmo/english/people/">People</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
            <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-5 vrtx-breadcrumb-before-active">
            <a href="/ritmo/english/people/management/">Management</a>
      	  <span class="vrtx-breadcrumb-delimiter">&gt;</span>
        </span>
          <span class="vrtx-breadcrumb-level vrtx-breadcrumb-level-6 vrtx-breadcrumb-active">Alexander Refsum Jensenius
        </span>
    </div>
  </div>

         
       </nav>
           
           
            
            
            

       <!--startindex-->

       
      <div id="vrtx-content">
        <div id="vrtx-main-content">
          <h1>
      
        Alexander Refsum Jensenius
      </h1>
          
      
      
      
        
  <div id="vrtx-person-position">
    <span>
        Director
          -
        <a href="https://www.hf.uio.no/imv/english?vrtx=unit-view&amp;areacode=143695">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion (IMV)</a>
    </span>
        <div id="vrtx-person-secondary-position vrtx-person-secondary-position-1">
          <span>
            Professor - <a href="https://www.hf.uio.no/imv/english">Institutt for musikkvitenskap</a>
          </span>
        </div>
  </div>


      
          <div id="vrtx-person-contact-info-wrapper">
              
      
        
        
        
          
          
            
            
            
            
              <img class="vrtx-person-image" src="/ritmo/english/people/management/alexanje/arjensenius_2025_2_150px.png" alt="Alexander Refsum Jensenius" loading="lazy"/>
            
          
        
      
              
      <div class="vrtx-person-contactinfo">
        
        
        

          
	<span id="vrtx-person-change-language-link">
	  <a href="/ritmo/personer/senterledelse/alexanje/index.html">Norwegian<span class="offscreen-screenreader"> version of this page</span></a>
	</span>


          
            <div class="vrtx-person-contact-info-line vrtx-email"><span class="vrtx-label">Email</span>
              
                <a class="vrtx-value" href="mailto:a.r.jensenius@imv.uio.no">a.r.jensenius@imv.uio.no</a>
              
            </div>
          
          
            <div class="vrtx-person-contact-info-line vrtx-phone">
              <span class="vrtx-label">Phone</span>
              
                <span class="vrtx-value">+47 22 84 48 34</span>
              
            </div>
          
          
            <div class="vrtx-person-contact-info-line vrtx-mobile">
              <span class="vrtx-label">Mobile phone</span>
              
                
                  <span class="vrtx-value"><a href="tel:+4795129232">+47 95 12 92 32</a></span>
                
              
              
            </div>
          
          
            <div class="vrtx-person-contact-info-line vrtx-room">
              <span class="vrtx-label">Room</span>
              <span class="vrtx-value">V02-06</span>
            </div>
          
          
            <div class="vrtx-person-contact-info-line vrtx-available-hours">
              <span class="vrtx-label">Available hours</span>
              <span class="vrtx-value">By appointment</span>
            </div>
          
          
            <div class="vrtx-person-contact-info-line vrtx-username">
              <span class="vrtx-label">Username</span>
              
                  <div class="vrtx-login">
    <a href="/ritmo/english/people/management/alexanje/index.html?vrtx=login&amp;authTarget" rel="nofollow">Log in</a>
  </div>

              
            </div>
          
          
            <div class="vrtx-person-visiting-address"><span class="vrtx-label">Visiting address</span>
              
                <span class="vrtx-address-line">Forskningsveien 3A</span>
              
                <span class="vrtx-address-line">Harald Schjelderups hus</span>
              
            </div>
          
          
            <div class="vrtx-person-postal-address"><span class="vrtx-label"> Postal address</span>
              
                <span class="vrtx-address-line">PO 1133 Blindern</span>
              
                <span class="vrtx-address-line">0318 Oslo</span>
              
                <span class="vrtx-address-line">Norway</span>
              
            </div>
          
          
            


          
        
      </div>
              
      <div id="vrtx-person-contact-info-extras">
        
          <a id="vrtx-press-photo" href="/ritmo/english/people/management/alexanje/Portraits/arjensenius_2025_2.jpg?alt=original&amp;vrtx=view-as-webpage">Press photo</a>
        
        
          <a id="vrtx-person-vcard" href="/ritmo/english/people/management/alexanje?vrtx=vcf">Download business card</a>
        
      </div>
              <div class="vrtx-person-contact-info-wrapper-end"></div>
          </div>
          <div id="vrtx-person-main-content-wrapper">
            <div class="vrtx-article-body">
              <figure class="image"><img src="/ritmo/english/people/management/alexanje/jensenius-infographic_1000.jpg" alt="An infographic of the research of Alexander Refsum Jensenius" loading="lazy"/><figcaption>A NotebookLM-generated infographic based on the NVA registrations of Alexander Refsum Jensenius (see <a href="https://www.arj.no/2026/01/02/arj-notebooklm/">this blog post</a> for explanation).</figcaption></figure><p><strong>Bio: </strong>Alexander Refsum Jensenius, BA, MA, MSc, PhD [he/him] is a music researcher and research musician. He is Professor of music technology at the University of Oslo, where he is also Director of <a href="/ritmo/english/">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a> and the <a href="https://fourms.uio.no">fourMs Lab</a>. He is now also in the process of establishing <a href="https://www.arj.no/2025/06/12/mishmash-introduction/">MishMash Centre for AI and Creativity</a>, a large consortium comprising 200+ researchers across Norway, in collaboration with both public and private institutions.</p><p>Prof. Jensenius was named <a href="http://www.arj.no/2008/02/04/air-guitar/">Dr Air Guitar</a> after researching the music-related body motion of musicians, dancers, and perceivers. More recently, he was named <a href="https://www.arj.no/2024/01/23/professor-standstill/">Professor Standstill</a> because of his interest in human micromotion and his year-long project of <a href="https://www.arj.no/2023/12/31/365-days-standing-still/">standing still every day</a>. Now, he explores the musical properties of <a href="/ritmo/english/projects/ambient/index.html">indoor environments</a> and how it is possible to improve <a href="/ritmo/english/news-and-events/events/conferences/2024/ventilation/index.html">ventilation systems</a>. 
This is based on triangulating theories and methods from musicology, psychology, and technology, and ensuring that all his projects have both scientific and artistic outcomes.</p><p>His research has been presented at all the major music technology and psychology conferences, and he is widely published, including the monograph <a href="/ritmo/english/people/management/alexanje/research/sound-actions/index.html">Sound Actions</a> and the <a href="https://link.springer.com/book/10.1007/978-3-031-57892-2">Sonic Design</a> and <a href="https://link.springer.com/book/10.1007/978-3-319-47214-0">A NIME Reader</a> anthologies. He has been named <a href="https://www.arj.no/2017/06/20/od-champion/">European Open Data Champion</a> and has been experimenting with new educational approaches through the online courses <a href="https://www.futurelearn.com/courses/music-moves/">Music Moves</a>, <a href="https://www.futurelearn.com/courses/motion-capture-course">Motion Capture</a>, and <a href="https://www.futurelearn.com/courses/pupillometry-course">Pupillometry</a>.</p><p>Before all of this, he received a multi-disciplinary bachelor’s degree in music and mathematics and a master’s in musicology from the University of Oslo. He then completed a master’s in applied information technology at the Chalmers University of Technology before pursuing a PhD in music technology at the University of Oslo. He has been a visiting researcher at UC Berkeley (<a href="https://cnmat.berkeley.edu/">CNMAT</a>), McGill University (<a href="https://www.idmil.org/">IDMIL</a>), and KTH (<a href="https://www.kth.se/is/tmh/division-of-speech-music-and-hearing-1.780110">TMH</a>). 
He was Head of the <a href="https://www.hf.uio.no/imv/english/">Department of Musicology</a> (2013-2016), led the Steering Committee of the <a href="https://www.nime.org/">International Conference on New Interfaces for Musical Expression</a> (2011-2022), and was a member of the <a href="https://www.eua.eu/our-work/topics/open-science.html">EUA Expert Group on Open Science</a> (2017-2024).</p><p></p><h2>Teaching and Tutoring</h2><ul><li>Music technology: Musical human-computer interaction / Music and AI / New Interfaces for Musical Expression (NIME) / Sound and music computing (SMC) / Music Information Retrieval (MIR)</li><li>Sound studies: Soundscapes / Environmental sound / Sound Analysis Phenomenology</li><li>Music psychology: Embodied music cognition / Music perception / Psychoacoustics / Music-related body motion</li><li>Technology: Video analysis / Motion capture / Multimodal information retrieval / Science and technology studies</li></ul>
            </div>
            
  <span class="vrtx-tags">
      <span class="title">Tags:</span>
    <span class="vrtx-tags-links">
<a href="/english/?vrtx=tags&amp;tag=Musicology&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Musicology</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Music%20technology&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Music technology</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Music%20psychology&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Music psychology</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Music%20cognition&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Music cognition</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Motion%20capture&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Motion capture</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=Artificial%20intelligence&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">Artificial intelligence</a><span class="tag-separator">,</span>
<a href="/english/?vrtx=tags&amp;tag=machine%20learning&amp;resource-type=person&amp;sorting=resource%3Asurname%3Aasc&amp;sorting=resource%3AfirstName%3Aasc">machine learning</a>
    </span>
  </span>

            
      
      
      
      
      
      
        
        
      

      
      

      
        



<style>

    /* Typography for the publication lists below:
       chapter-in-book publishers render upright (normal), while
       container titles, book titles, and journal/article publishers
       render in italics, per common citation-style conventions. */
    .publisher-category-CHAPTER {
            font-style: normal;
    }

    .parent-title-articlesAndBookChapters,
    .parent-title-other,
    .title-books,
    .publisher-books,
    .publisher-other,
    .publisher-category-ARTICLE {
        font-style: italic;
    }

</style>


    <div id="vrtx-publications-wrapper">

      <h2>Publications</h2>



      <div id="vrtx-publication-tabs">
        <ul>
            <li><a href="#vrtx-publication-tab-1" name="vrtx-publication-tab-1">Selected</a></li>
            <li><a href="#vrtx-publication-tab-2" name="vrtx-publication-tab-2">Scientific articles and book chapters</a></li>
            <li><a href="#vrtx-publication-tab-3" name="vrtx-publication-tab-3">Books</a></li>
            <li><a href="#vrtx-publication-tab-4" name="vrtx-publication-tab-4">Other</a></li>
        </ul>

            <div id="vrtx-publication-tab-1">
              <p><a href="https://link.springer.com/book/10.1007/978-3-031-57892-2"><strong><img class="image-right" src="/ritmo/english/people/management/alexanje/978-3-031-57892-2.webp" alt="Sonic Design book cover" width="150" height="226" loading="lazy"/>Sonic Design - Explorations Between Art and Science</strong></a> (Springer 2024)</p><p>This edited volume is based on a selection of contributions at an international seminar organized in May 2022 to celebrate the achievements of Professor Godøy upon his retirement from the University of Oslo. The 17 chapters cover different approaches to sonic design practice and theory, giving readers historical backdrops and an overview of the current state of both artistic and scientific research in the field.</p><p><a href="/ritmo/english/people/management/alexanje/research/sound-actions/index.html"><img class="image-right" src="/ritmo/english/people/management/alexanje/collid%3Dbooks_covers_0-isbn%3D9780262544634-type%3D.jpg" alt="" width="150" height="225" loading="lazy"/></a></p><p><a href="/ritmo/english/people/management/alexanje/research/sound-actions/index.html"><strong>Sound Actions: Conceptualizing Musical Instruments</strong></a> (MIT Press, 2022)</p><p>What is a musical instrument? How do new technologies change how we perform and perceive music? What happens when composers build instruments, performers write code, perceivers become producers, and instruments play themselves? The answers to these pivotal questions entail a meeting point between interactive music technology and embodied music cognition, what author Alexander Refsum Jensenius calls “embodied music technology.” Moving between objective description and subjective narrative of his own musical experiences, Jensenius explores why music makes people move, how the human body can be used in musical interaction, and how new technologies allow for active musical experiences. 
The development of new music technologies, he demonstrates, has fundamentally changed how music is performed and perceived.</p><p><a href="https://www.futurelearn.com/courses/motion-capture-course"><strong><img class="image-right" src="/ritmo/english/people/management/alexanje/mocap-course-splash-tall_640.jpg" alt="Image may contain: Musical instrument, Violin family, Musician, Violin, Classical music." width="150" height="212" loading="lazy"/></strong></a></p><p><a href="https://www.futurelearn.com/courses/motion-capture-course"><strong>Motion Capture: The Art of Studying Human Activity</strong></a> (FutureLearn, 2022)</p><p>This online course from the University of Oslo is for everyone interested in human motion capture. You’ll start by learning the basics of human anatomy and biomechanics. Then we’ll move on to setting up, calibrating, and recording with an infrared optical motion capture system. Some other sensing technologies will also be presented, including accelerometers, muscle sensors, and video recordings. Examples will be given of how such systems are used in various types of music research. Finally, you’ll learn about some ethical and legal challenges of working with human motion capture.</p><p><a href="https://link.springer.com/book/10.1007/978-3-319-47214-0"><img class="image-right" src="/ritmo/english/people/management/alexanje/a-nime-reader-cover_153px.jpg" alt="A NIME Reader - Fifteen Years of New Interfaces for Musical Expression (Springer, 2017)" width="153" height="230" loading="lazy"/><strong>A NIME Reader: Fifteen Years of New Interfaces for Musical Expression</strong></a> (Springer, 2017)</p><p>What are the musical instruments of the future? This anthology presents thirty papers selected from the fifteen-year-long history of the International Conference on New Interfaces for Musical Expression (NIME). Each of the papers is followed by commentaries written by the original authors and leading experts. 
The anthology is intended for newcomers who want to get an overview of recent advances in music technology. The historical traces, meta-discussions, and reflections will also interest experts. Thus, the book serves as a survey of influential past work and a starting point for new and exciting future developments.</p><p><a href="https://www.futurelearn.com/courses/music-moves/"><strong><img class="image-right" src="/ritmo/english/people/management/alexanje/musicmoves-splash-tall_640.jpg" alt="Image may contain: Musical instrument, Piano, Keyboard, Musical keyboard, Guitar accessory." width="150" height="212" loading="lazy"/>Music Moves: Why Does Music Make You Move?</strong></a> (FutureLearn, 2016)</p><p>This free online course is about music and body movement; from the pianist's sound-producing keyboard actions to clubbers' energetic dance moves. You will learn about the theoretical foundations for embodied music cognition and why body movement is crucial for how we experience the emotional moods in music. We will also explore different research, including advanced motion capture systems and sound analysis methods. You will be guided by a group of music researchers from the University of Oslo, with musical examples from four professional musicians.</p><p><a href="https://alexarje.github.io/musikkogbevegelse/"><img class="image-right" style="float:right;" src="/ritmo/english/people/management/alexanje/musikkogbevegelse_forside.jpg" alt="Musikk og bevegelse" width="160" height="217" border="1" loading="lazy"/></a></p><p><a href="https://leanpub.com/musikkogbevegelse/"><strong>Musikk og bevegelse</strong></a>?(Unipub, 2009):</p><p>This Norwegian-language textbook (“Music and Movement”) introduces theories and methods used to study music-related body movement.</p>
            </div>


    <div id="vrtx-publication-tab-2">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10429900" class="vrtx-external-publication">
        <div id="vrtx-publication-10429900">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10429900">
                Arnim, Hugh Alexander von; Christodoulou, Anna-Maria; Burnim, Kayla; Upham, Finn; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        LightHearted—A Framework for Mapping ECG Signals to Light Parameters in Performing Arts.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Brooks, Anthony L. (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 14th EAI International Conference on ArtsIT, Interactivity and Game Creation, ArtsIT 2025.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=AD8FEF33-C155-4915-A7BF-A1BE33DDAC4D">Springer</a>.
                </span>
                <span class="vrtx-issn">ISSN 9783032269997.</span>
                            
                <span class="vrtx-pages">p. 719–738.</span>
            
            <a href="https://hdl.handle.net/11250/5505933">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents LightHearted, an open-source Python framework for mapping the heart’s electrical activity from electrocardiography (ECG) signals acquired from performers to stage lighting in a concert setting. The aim is to provide dynamic lighting coupled to the physiological processes occurring during performance. Drawing upon critical approaches towards using biosignals in interactive systems and live concert research, we outline the framework’s design and implementation and present a case study with the Aarhus Symphony Orchestra. This paper highlights both technical and conceptual challenges of integrating biosensor-based lighting into a large-scale orchestral context. Results from a post-concert audience survey (N = 324) suggest that while responses to the lighting were mixed, it was generally not perceived as distracting and would be welcomed for use in future concerts.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10426045" class="vrtx-external-publication">
        <div id="vrtx-publication-10426045">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10426045">
                Al-Ghawanmeh, Fadi; Jensenius, Alexander Refsum &amp; Smaili, Kamel
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Arab music improvisation corpus for research (AMICOR): development and machine translation experiments | Language Resources and Evaluation.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Language Resources and Evaluation.
                </span>
                <span class="vrtx-issn">ISSN 1574-020X.</span>
                            60.
            doi: <a href="https://doi.org/10.1007/s10579-026-09905-z">10.1007/s10579-026-09905-z</a>.
            <a href="https://hdl.handle.net/11250/5490948">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Under-resourced languages (and musics) pose a challenge to machine translation (MT). The challenge is greater when the content of the collected dataset is a varied sample taken from a data population that is even more diverse and dynamic. This is the challenge of Arab music vocal improvisation (mawwal). Here, we present the development of AMICOR, a parallel dataset consisting of vocal improvisatory phrases and their corresponding instrumental responses (or tarjamat in Arabic, which literally means “translations”) in the mawwal tradition. These melodic phrases are handled as “sentences” from the viewpoint of natural language. When developing the dataset, we integrated musicological insights in order to evaluate music theoretical differences between sub-datasets, primarily regarding their size, sentence length, performance quality, and shared musical identity. We then experimented with MT to generate instrumental responses to new vocal sentences, comparing several translation modeling configurations that differ (1) in translation approach (Neural MT (NMT) versus Statistical MT (SMT)), and (2) in the dataset handling approach in respect to the maqam (an Arabic musical term referring roughly to a melodic mode), comparing an individual model for each maqam versus a unified model for all maqamat. We found that merging related sub-datasets does not necessarily lead to better results, and may even favor simpler and shorter sentences with lower performance quality and less sophisticated patterns. This issue applies to both NMT and SMT; however, it is greater for NMT. A comparison of confusion matrices of individual-maqam models suggested that, in such a small dataset, the gap between SMT and NMT performance increases further if the styles, or skills, of potential users differ from those who built the dataset used in the training. 
Our discussion asserts that key factors in system design are the musical background and performance decisions of vocalists who may use such responsive generative models, as well as dataset size and performance quality.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10420743" class="vrtx-external-publication">
        <div id="vrtx-publication-10420743">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10420743">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Investigating Auditory–Visual Perception Using Multi-Modal Neural Networks with the SoundActions Dataset.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            9(1),
                <span class="vrtx-pages">p. 85–85.</span>
            doi: <a href="https://doi.org/10.5334/tismir.223">10.5334/tismir.223</a>.
            <a href="https://hdl.handle.net/11250/5486133">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Musicologists, psychologists, and computer scientists study relationships between auditory and visual stimuli from very different perspectives and using various terminologies and methodologies. This article aims to bridge the gap between phenomenological sound theory, auditory–visual theory, and audio–video processing and machine learning. We introduce the SoundActions dataset, a collection of 365 audio–video recordings of (primarily) short sound actions. Each recording has been human-labeled and annotated according to Pierre Schaeffer’s theory of reduced listening, which describes the property of the sound itself (e.g., ‘an impulsive sound’) instead of the source (e.g., ‘a bird sound’). With these reduced-type labels in the audio–video dataset, we conducted two experiments: (1) fine-tuning the latest audio–video transformer model on the reduced-type labels in the SoundActions dataset, proving that the model can recognize reduced-type labels, and observing that the modality-imbalance phenomenon is similar to the added value theory by Michel Chion and (2) proposing the Ensemble of Perception Mode Adapters method inspired by Pierre Schaeffer’s three listening modes, improving the audio–video model also on reduced-type tasks.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10311983" class="vrtx-external-publication">
        <div id="vrtx-publication-10311983">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10311983">
                Riaz, Maham; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Inverse and indirect mappings in embodied AI systems in everyday environments.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Computer Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.3389/fcomp.2025.1603769">10.3389/fcomp.2025.1603769</a>.
            <a href="https://hdl.handle.net/11250/5341046">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores how musicking technologies—interactive systems with musical properties—can enhance everyday public environments. We are particularly interested in investigating the effects of musical interactions in non-musical settings, such as offices, meeting rooms, and social work areas. Traditional music technologies (such as instruments) are built for goal-directed, conscious, and voluntary interactions. We propose a new perspective on embodied AI through systems that utilize indirect, inverse, unconscious, and, at times, involuntary interactions. Four different sound/music systems are examined and discussed with regard to their activity level: a reactive “birdbox,” a reactive painting, active self-playing guitars, and interactive music balls. All these systems are multimodal, containing sensors that detect various physical inputs to produce sound and light, and having varying levels of perceived agency. The paper explores differences between direct/indirect and regular/inverse embodied AI paradigms. This study demonstrates how minimalistic interactions have the potential to yield complex and engaging musicking experiences, challenging the norms of overly intricate AI implementations.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10299007" class="vrtx-external-publication">
        <div id="vrtx-publication-10299007">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10299007">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 447–454.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17496754">10.5281/zenodo.17496754</a>.
            <a href="https://hdl.handle.net/11250/5330619">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Programmatic music, such as Tchaikovsky’s Overture Romeo and Juliet, relies on the audience’s ability to associate musical motifs with narrative elements. This is a demanding task for less experienced listeners, particularly when cues are subtle, such as those conveyed through timbre. This paper explores how dynamic stage lighting, driven by physiological signals, can enhance narrative comprehension in orchestral performance. Using the LightHearted interactive lighting system, different characters of the Overture were mapped to distinct colored lights, whose intensities were dynamically modulated in real time by the heart rates of the conductor and selected musicians. This integration aimed to convey subtle narrative cues to the audience in real time. Audience feedback suggests that this approach not only clarifies musical narratives but also enhances the overall experience.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10284249" class="vrtx-external-publication">
        <div id="vrtx-publication-10284249">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284249">
                Riaz, Maham; Guo, Jinyue; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where to Put That Microphone? A Study of Sound Localization in Ambisonics Recordings.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 455–466.</span>
            doi: <a href="https://doi.org/10.5281/ZENODO.17497086">10.5281/ZENODO.17497086</a>.
            <a href="https://hdl.handle.net/11250/5317948">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper examines the effects of microphone placement on sound localization in first-order Ambisonics recordings. Two microphone setups were used to capture a moving audio source in a lab environment. Array A, a tetrahedral microphone, was placed in the centre of the recording space. Array B consisted of four similar tetrahedral microphones charting a rectangular perimeter surrounding the space. Motion capture data of the moving sound source shows that anglegrams calculated from the Ambisonics recordings can be effectively used for sound localization. An additional perceptual listening study with binaural renders of the audio signals showed that the centrally-placed Array A provided superior localization. However, the corner-placed Array B performed better than expected.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10284183" class="vrtx-external-publication">
        <div id="vrtx-publication-10284183">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10284183">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cross-modal Analysis of Spatial-Temporal Auditory Stimuli and Human Micromotion when Standing Still in Indoor Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 871–882.</span>
            doi: <a href="https://doi.org/10.5281/ZENODO.17502603">10.5281/ZENODO.17502603</a>.
            <a href="https://hdl.handle.net/11250/5317903">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper examines how a soundscape influences human stillness. We are particularly interested in how spatial and temporal features of a soundscape influence human micromotion and swaying patterns. The analysis is based on 345 Ambisonics audio recordings of different indoor environments and corresponding accelerometer data captured at the chest of a person standing still for ten minutes. We calculated the temporal and spatial correlation between the person&#39;s quantity of motion and the sound energy of the Ambisonic recordings. While no clear temporal correlations were found, we discovered a correlation between the spatial directionality of the micromotion and the sound direction of arrival. The results suggest a potential entrainment between the directionality of environmental sounds and human swaying patterns, which have not been thoroughly studied previously compared to the temporal or spectral features of indoor soundscapes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10281258" class="vrtx-external-publication">
        <div id="vrtx-publication-10281258">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10281258">
                Laczko, Balint; Rognes, Marie Elisabeth &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Image Sonification as Unsupervised Domain Transfer.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McArthur, Angela; Matthews, Emma-Kate &amp; Holberton, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th International Symposium on Computer Music Multidisciplinary Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=69383989-1F49-4D7C-AAE0-ED745D1F2E17">The Laboratory PRISM “Perception, Representations, Image, Sound, Music”</a>.
                </span>
                <span class="vrtx-issn">ISSN 9791097498061.</span>
                            
                <span class="vrtx-pages">p. 596–607.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.17497987">10.5281/zenodo.17497987</a>.
            <a href="https://hdl.handle.net/11250/5278313">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The process of image sonification maps visual features into perceived auditory features. Most established sonification methods rely on identifying salient visual features in the input data and then mapping their distribution to a proportional distribution of auditory features. However, this approach requires both domain expertise and manual feature engineering. Here, we propose a new method of image sonification, leveraging recent advances in representation learning and domain transfer. Our approach introduces a pair of variational auto-encoder models that learn disentangled latent representations of the images and sounds, respectively, and a separate network that maps between these representations. The resulting sonification system encodes images into the latent space and then decodes them as sounds. Both representations and their mapping are learned in an entirely unsupervised manner. When evaluating the system in an interactive real-time setting, we observed that the model successfully learned disentangled representations of image and sound factors in our synthetic datasets.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254521" class="vrtx-external-publication">
        <div id="vrtx-publication-10254521">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254521">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Pixasonics: An Image Sonification Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cardoso, F. Amílcar; Vickers, Paul; Martins, Pedro &amp; Roddy, Stephen (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 30th International Conference on Auditory Display (ICAD 2025).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=E7F15E71-C7FE-4CDA-A5F2-F71F96B5254A">Department of Informatics Engineering, University of Coimbra, Portugal</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798991456210.</span>
                            
                <span class="vrtx-pages">p. 28–35.</span>
            doi: <a href="https://hdl.handle.net/1853/79958">https://hdl.handle.net/1853/79958</a>.
            <a href="https://hdl.handle.net/11250/4102869">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Pixasonics is a new Python library for interactive image analysis and exploration through image sonification. It uses real-time audio and visualization to help uncover patterns in image data. With Pixasonics, users can launch one or more small web applications (running in a Jupyter Notebook), probe image data using various feature extraction methods, and map those feature vectors to synthesis parameters. The target users are researchers interested in exploring image and volumetric data and creative users who want an intuitive tool for experimental sound design. Pixasonics’ design aims to strike a balance between an easy-to-use web application with minimal boilerplate code necessary and a library that can be integrated into more advanced workflows. Real-time exploration is at the heart, but it can also be used to script non-real-time sonifications of large datasets. This paper presents Pixasonics, its structure, interface, and advanced features, and discusses preliminary feedback from biology researchers and music technologists.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10253409" class="vrtx-external-publication">
        <div id="vrtx-publication-10253409">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10253409">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: A Dataset for Music Question–Answering through Audio–Video Fusion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Transactions of the International Society for Music Information Retrieval.
                </span>
                            8(1),
                <span class="vrtx-pages">p. 265–282.</span>
            doi: <a href="https://doi.org/10.5334/tismir.222">10.5334/tismir.222</a>.
            <a href="https://hdl.handle.net/11250/4732806">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music question–answering (MQA) is a machine learning task where a computational system analyzes and answers questions about music-related data. Traditional methods prioritize audio, overlooking visual and embodied aspects crucial to music performance understanding. We introduce MusiQAl, a multimodal dataset of 310 music performance videos and 11,793 human-annotated question–answer pairs, spanning diverse musical traditions and styles. Grounded in musicology and music psychology, MusiQAl emphasizes multimodal reasoning, causal inference, and cross-cultural understanding of performer–music interaction. We benchmark AVST and LAVISH architectures on MusiQAl, revealing strengths and limitations, underscoring the importance of integrating multimodal learning and domain expertise to advance MQA and music information retrieval.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10251335" class="vrtx-external-publication">
        <div id="vrtx-publication-10251335">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251335">
                D’Amario, Sara &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cardiac Coherence among Musicians and Audiences During Orchestra Performances.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            8,
                <span class="vrtx-pages">p. 1–15.</span>
            doi: <a href="https://doi.org/10.1177/20592043251370977">10.1177/20592043251370977</a>.
            <a href="https://hdl.handle.net/11250/3494562">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Recent empirical investigations suggest that music performance and perception can evoke a collective cardiac response in performers and audience members, and interpersonal cardiac coupling can be related to musical features. However, the relationship between musicians’ and audiences’ cardiac responses is poorly understood. This study investigates the interpersonal cardiac coherence of selected audience members and performers from the Stavanger Symphony Orchestra and the Norwegian Radio Orchestra during multiple performances of Harald Sæverud’s Kjempeviseslåtten. The cardiac coherence index (CCI) was computed by applying the intrinsic synchrosqueezing transform to the cardiac interbeat interval of each signal, combining noise-assisted multivariate empirical mode decomposition and short-time Fourier transforms. The results show that the CCI values among the audience members were stronger than those of the musicians. Sound pressure level measurements predicted the musicians’ CCI values, whilst musical form structure predicted the audiences’ CCI values. These results advance our understanding of how cardiac rhythms support interpersonal interactions and contribute to research on live orchestra performances.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391317" class="vrtx-external-publication">
        <div id="vrtx-publication-2391317">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391317">
                Riaz, Maham; Guo, Jinyue; Göksülük, Bilge Serdar &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Where is That Bird? The Impact of Artificial Birdsong in Public
Indoor Environments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Seiça, Mariana &amp; Wirfs-Brock, Jordan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">p. 344–351.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771629">10.1145/3771594.3771629</a>.
            <a href="https://hdl.handle.net/11250/4977325">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores the effects of nature sounds, specifically bird sounds, on human experience and behavior in indoor public environments. We report on an intervention study where we introduced an interactive sound device to alter the soundscape. Phenomenological observations and a survey showed that participants noticed and engaged with the bird sounds primarily through causal listening; that is, they attempted to identify the sound source. Participants generally responded positively to the bird sounds, appreciating the calmness and surprise it brought to the environment. The analyses revealed that relative loudness was a key factor influencing the experience. A too-high sound level may feel unpleasant, while a too-low sound level makes it unnoticeable due to background noise. These findings highlight the importance of automatic level adjustments and considering acoustic conditions in soundscape interventions. Our study contributes to a broader discourse on sound perception, human interaction with sonic spaces, and the potential of auditory design in public indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391292" class="vrtx-external-publication">
        <div id="vrtx-publication-2391292">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391292">
                Sveen, Henrik Haraldsen; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cyclic Patterns and Spatial Orientations in Artificial Impulsive Autonomous Sensory Meridian Response (ASMR) Sounds.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Seiça, Mariana &amp; Wirfs-Brock, Jordan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISSN 9798400720659.</span>
                            
                <span class="vrtx-pages">p. 124–131.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771651">10.1145/3771594.3771651</a>.
            <a href="https://hdl.handle.net/11250/4286194">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Autonomous Sensory Meridian Response (ASMR) is a tingling sensation in the neck and spine often triggered by specific sounds. This paper reports a study on the impact of different cyclic patterns and spatial orientations—defined here as the perceived directionality and motion of sound sources in a three-dimensional auditory space—on inducing ASMR experiences. The results demonstrate that both the type of cyclic pattern and the spatial orientation significantly influence the intensity and nature of ASMR experiences. Furthermore, the research explores synthesizing ASMR-inducing sounds while preserving key audio characteristics from acoustically recorded ASMR content. Through survey data analysis and regression modeling, distinct patterns emerge regarding the relationship between personality traits and ASMR experience. The findings contribute to a deeper understanding of ASMR as a sensory phenomenon and provide insights into the potential applications of artificially generated ASMR stimuli. Additionally, the research sheds light on the role of spatiality in ASMR experiences and the synthesis of ASMR-inducing sounds for future studies and practical applications.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391321" class="vrtx-external-publication">
        <div id="vrtx-publication-2391321">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391321">
                Riaz, Maham; Theodoridis, Ioannis; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        VentHackz: Exploring the Musicality of Ventilation Systems.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cavdir, Doga &amp; Berthaut, Florent (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.15698831">10.5281/zenodo.15698831</a>.
            <a href="https://hdl.handle.net/10852/119716">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Ventilation systems can be seen as huge examples of interfaces for musical expression, with the potential of merging sound, space, and human interaction. This paper explores conceptual similarities between ventilation systems and wind instruments and explores approaches to “hacking” ventilation systems with components that produce and modify sound. These systems enable the creation of unique sonic and visual experiences by manipulating airflow and making mechanical adjustments. Users can treat ventilation systems as musical interfaces by altering shape, material, and texture or augmenting vents. We call for heightened attention to the sound-making properties of ventilation systems and call for action (#VentHackz) to playfully improve the soundscapes of our indoor environments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2349747" class="vrtx-external-publication">
        <div id="vrtx-publication-2349747">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2349747">
                Riaz, Maham; Guo, Jinyue &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Spatial Audio Recordings from Commercially Available 360-degree Video Cameras.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Brooks, Anthony L.; Banakou, Domna &amp; Ceperkovic, Slavica (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 13th EAI International Conference on ArtsIT, Interactivity and Game Creation, ArtsIT 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=AD8FEF33-C155-4915-A7BF-A1BE33DDAC4D">Springer</a>.
                </span>
                <span class="vrtx-issn">ISBN 9783031972546.</span>
                            
                <span class="vrtx-pages">p. 160–172.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-97254-6_12">10.1007/978-3-031-97254-6_12</a>.
            <a href="https://hdl.handle.net/11250/3259963">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the spatial audio recording capabilities of various commercially available 360-degree cameras (GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S). A dedicated ambisonics audio recorder (Zoom H3VR) was used for comparison. Six action sequences were performed around the recording setup, including impulsive and continuous vocal and non-vocal stimuli. The audio streams were extracted from the videos and compared using spectrograms and anglegrams. The anglegrams show adequate localization in ambisonic recordings from the GoPro MAX and Zoom H3VR. All cameras feature undocumented noise reduction and audio enhancement algorithms, use different types of audio compression, and have limited audio export options. This makes it challenging to use the spatial audio data reliably for research purposes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391453" class="vrtx-external-publication">
        <div id="vrtx-publication-2391453">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391453">
                Arnim, Hugh Alexander von; Erdem, Cagri; Côté-Allard, Ulysse Teller Masao &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Sensor is not a Sensor: Diffracting the Preservation of Sonic Microinteraction with the SiFiBand.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Seiça, Mariana &amp; Wirfs-Brock, Jordan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;25: Proceedings of the 20th International Audio Mostly Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798400720659.</span>
                            
                <span class="vrtx-pages">p. 318–325.</span>
            doi: <a href="https://doi.org/10.1145/3771594.3771626">10.1145/3771594.3771626</a>.
            <a href="https://hdl.handle.net/11250/3717285">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper documents our exploratory work to preserve the interactive music system Stillness Under Tension—developed to explore inverse sonic microinteraction—by porting it from the original and discontinued Myo sensor armband to SiFiBand, a new prototype armband with motion (IMU) and muscle (EMG) sensors. We approach this by merging the Multilevel Dynamic Preservation model with a “diffraction-in-action” method grounded in a theoretical entanglement perspective. Rather than focusing on the Myo version’s artefactual remains, we explore the difference in data representations offered by the two devices as our point of departure. The paper describes the sensor devices, evaluating their data representations given their technical specifications, and describing how these differences propagate throughout our attempt to preserve the system, enacting necessary changes. We discuss the implications of merging these methods in view of the long-term preservation of interactive music systems. Our version 2.0 of Stillness Under Tension finds itself experientially in a position between familiarity and newness.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2390341" class="vrtx-external-publication">
        <div id="vrtx-publication-2390341">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2390341">
                Høffding, Simon; Bergström, Rebecca Josefine Five; Bishop, Laura; Bravo, Pedro Pablo Lucas; Burnim, Kayla &amp; Cancino-Chacón, Carlos Eduardo
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2390341/contributors', 'vrtx-publication-contributors-2390341')">
                    [Show all&nbsp;28&nbsp;contributors for this article]</a>
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Introducing the MusicLab Copenhagen Dataset.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            8.
            doi: <a href="https://doi.org/10.1177/20592043241303288">10.1177/20592043241303288</a>.
            <a href="https://hdl.handle.net/11250/4734447">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">MusicLab Copenhagen was a unique research concert featuring the world-renowned Danish String Quartet in a naturalistic setting. The audience was split between one group physically located in the hall, another group listening to a radio broadcast, and a third group watching a live stream. Qualitative and quantitative data were captured from both musicians and audiences, resulting in a comprehensive dataset that can be used to address many research questions. This document introduces the dataset, explains its structure, and reflects on the related data collection, storing, publishing, and archiving processes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2377129" class="vrtx-external-publication">
        <div id="vrtx-publication-2377129">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2377129">
                Grosz, Patrick Georg; Solberg, Ragnhild Torvanger; Katz, Jonah; Vu, Mai Ha; Jensenius, Alexander Refsum &amp; Patel-Grosz, Pritty
            </span>(2025).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        An outline of the narrative grammar of electronic dance music.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Musicae Scientiae.
                </span>
                <span class="vrtx-issn">ISSN 1029-8649.</span>
                            29(4),
                <span class="vrtx-pages">p. 556–575.</span>
            doi: <a href="https://doi.org/10.1177/10298649251321709">10.1177/10298649251321709</a>.
            <a href="https://hdl.handle.net/11250/4079640">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We argue that electronic dance music (EDM) exhibits a parallel structural organization to that which has been proposed for cartoons (comics) after the model of hierarchical structure proposed in theoretical linguistics. According to this parallel, both systems are governed by general cognitive mechanisms for the narrative organization of tension and release, which are not modality-specific. We show that notions from visual narrative analysis, such as an Establisher–Initial–Peak–Release template, can be applied directly to EDM tracks as an Intro/Breakdown–Buildup–Core–Outro/Cut template. In doing so, we focus on how to formally define and operationalize relevant notions such as Breakdown, Buildup, and Core. As part of our analysis, we show that the scene-setting Establisher segments of visual narratives map onto two distinct categories in EDM: they correspond to intro sections at the beginning of a track and to breakdown sections in the middle of a track; we strengthen the analogy to visual narrative analysis by introducing refinements such as a pre-drop break that often occurs at the end of a buildup segment. To adjudicate between competing hypotheses on the hierarchical structure of a given EDM track, we demonstrate that analytical tests from linguistics and visual narrative analysis can be successfully applied. By introducing these analytical tools, this article sets the stage for further explorations in the linguistically informed analysis of the structure and meaning of EDM.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307431" class="vrtx-external-publication">
        <div id="vrtx-publication-2307431">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307431">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Musicking Technologies: Inspired by Professor Marc Leman.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Maes, Pieter-Jan (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Liber Amicorum Marc Leman: A life in music, science, and technology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Ghent University.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4968714">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In his seminal book “Embodied Music Cognition and Mediation Technology”, Marc Leman Leman (2008a) drew up a theoretical framework that has influenced a whole new generation of researchers, myself included. Building on a long tradition of systematic musicology, combined with ecological psychology
and modern technology, he convincingly set the direction for a fresh approach to scientific studies of musical experiences. In the following, I will reflect on some concepts that he raised in his discussion, and I will point out some that he left for others, like me, to explore in more detail.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2330477" class="vrtx-external-publication">
        <div id="vrtx-publication-2330477">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2330477">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikkopplevelser, kulturbruk og politikk.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Solberg, Ragnhild Torvanger (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Tverrfaglege perspektiv på kulturbruk, publikum og deltaking.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Kulturrådet.
                </span>
                <span class="vrtx-issn">ISBN 9788270812165.</span>
                            
                <span class="vrtx-pages">p. 87–97.</span>
            
            <a href="https://hdl.handle.net/11250/4514708">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvordan brukes kultur i Norge i dag? Som musikkteknolog har jeg mest kompetanse til å reflektere over musikk, men jeg har også noe erfaring med interaktiv dans og installasjonskunst. I tillegg har forskningen min nå dreid seg mot akustikk, lydkunst, visuell estetikk og arkitektur. Sammen dekker dette mange sider av kunst- og kulturområdet, selv om min inngang
i denne artikkelen primært vil være fra de to disiplinene musikkpsykologi og musikkteknologi. Innenfor musikkpsykologien forsøker vi å forstå hvordan musikk påvirker mennesker, i kropp og sinn. Musikkteknologien dreier
seg derimot om å forstå hvordan verktøy og systemer brukes i musikalske sammenhenger.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2356535" class="vrtx-external-publication">
        <div id="vrtx-publication-2356535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2356535">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier Serge Gabriel; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/118901">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper addresses improving performance analysis by automating the recognition of expressive performance styles. We propose a multimodal fusion approach integrating audio, video, and motion data. We demonstrate the effectiveness of our approach by utilizing convolutional neural network (CNN) models. Training is done on a classical piano dataset of 211 excerpts containing audio, video, MIDI, and motion capture data. The results highlight the robustness of the CNN models; they achieve high accuracy even when trained on a limited dataset. Our study contributes to advancing the field of performance analysis by applying deep learning techniques to multimodal data.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2292179" class="vrtx-external-publication">
        <div id="vrtx-publication-2292179">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2292179">
                Guo, Jinyue; Riaz, Maham &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing Four 360-Degree Cameras for Spatial Video Recording and Analysis,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2024.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789893520758.</span>
                            
            
            <a href="https://hdl.handle.net/10852/113954">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper reports on a desktop investigation and a lab experiment comparing the video recording capabilities of four commercially available 360-degree cameras: GoPro MAX, Insta360 X3, Garmin VIRB 360, and Ricoh Theta S. The four cameras all use different recording formats and settings and have varying video quality and software support. This makes it difficult to conduct analyses and compare between devices. We have implemented new functions in the Musical Gestures Toolbox (MGT) for reading and merging files from the different platforms. Using the capabilities of FFmpeg, we have also made a new function for converting between different 360-degree video projections and formats. This allows (music) researchers to exploit 360-degree video recordings using regular video-based analysis pipelines.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307227" class="vrtx-external-publication">
        <div id="vrtx-publication-2307227">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307227">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Synth Maps: Mapping The Non-Proportional Relationships Between Synthesizer Parameters and Synthesized Sound.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ziemer, Tim; Kantan, Prithvi Ravi; Chabot, Samuel &amp; Braasch, Jonas (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 29th International Conference on Auditory Display (ICAD 2024).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/nvakanal?pid=19E8936D-AD3D-4A69-B382-47B80A63520A">The International Community for Auditory Display</a>.
                </span>
                <span class="vrtx-issn">ISBN 9798991456203.</span>
                            
                <span class="vrtx-pages">p. 181–184.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.11237788">10.5281/zenodo.11237788</a>.
            <a href="https://hdl.handle.net/10852/114155">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Parameter Mapping (PM) is probably the most used design approach in sonification. However, the relationship between a synthesizer’s input parameters and the perceptual distribution of its output sounds might not be proportional, limiting its ability to convey relationships within the source data in the sound. This study evaluates a basic Frequency Modulation (FM) synthesis module with perceptually motivated descriptors, measures of spectral energy distribution, and latent embeddings of pre-trained audio representation models. We demonstrate how these metrics do not indicate straightforward relationships between synthesis parameters and perceived sound. This is done using interactive audiovisual scatter plots—Synth Maps—that can be used to explore the sound distribution of the synthesizer and qualitatively evaluate how well
the different representations align with human perception. Link to the code and the interactive Synth Maps are available.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333493" class="vrtx-external-publication">
        <div id="vrtx-publication-2333493">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333493">
                Høffding, Simon; Hansen, Niels Christian &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music Research “in the Wild” – Introducing the MusicLab Copenhagen Special Collection.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            7.
            doi: <a href="https://doi.org/10.1177/20592043241294161">10.1177/20592043241294161</a>.
            <a href="https://hdl.handle.net/10852/115237">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This special collection of Music &amp; Science contains 111 articles. They thoroughly describe a particular instantiation of a research concert, namely the innovative and complex event MusicLab Copenhagen. This took place over 14&nbsp;hours on October 26, 2021, in Copenhagen, Denmark. Working with The Danish String Quartet (DSQ), one of the world&#39;s best chamber ensembles, a research team from RITMO, complemented with researchers from several other European institutions, ran experiments and studied how mind and body are engaged during a concert. This was a unique opportunity to capture concurrent qualitative, behavioral, and physiological measurements in a concert hall, delicately balancing the scientific ideals of reliability and ecological validity.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333492" class="vrtx-external-publication">
        <div id="vrtx-publication-2333492">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333492">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied intelligence for drumming; a reinforcement learning approach to drumming robots.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Robotics and AI.
                </span>
                            11.
            doi: <a href="https://doi.org/10.3389/frobt.2024.1450097">10.3389/frobt.2024.1450097</a>.
            <a href="https://hdl.handle.net/11250/4972151">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper investigates the potential of the intrinsically motivated reinforcement learning (IMRL) approach for robotic drumming. For this purpose, we implemented an IMRL-based algorithm for a drumming robot called ZRob, an underactuated two-DoF robotic arm with flexible grippers. Two ZRob robots were instructed to play rhythmic patterns derived from MIDI files. The RL algorithm is based on the deep deterministic policy gradient (DDPG) method, but instead of relying solely on extrinsic rewards, the robots are trained using a combination of both extrinsic and intrinsic reward signals. The results of the training experiments show that the utilization of intrinsic reward can lead to meaningful novel rhythmic patterns, while using only extrinsic reward would lead to predictable patterns identical to the MIDI inputs. Additionally, the observed drumming patterns are influenced not only by the learning algorithm but also by the robots’ physical dynamics and the drum’s constraints. This work suggests new insights into the potential of embodied intelligence for musical</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333450" class="vrtx-external-publication">
        <div id="vrtx-publication-2333450">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333450">
                Campbell, Edward; Souza, Jonathan De &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Gestures, Actions and Play In Bjørn Heile&#39;s 3 × 10 Musical Actions For Three Socially Distanced Performers.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Tempo.
                </span>
                <span class="vrtx-issn">ISSN 0040-2982.</span>
                            78(310),
                <span class="vrtx-pages">p. 51–61.</span>
            
            <a href="https://hdl.handle.net/11250/4511735">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Bjørn Heile’s 3 × 10 Musical Actions for Three Socially Distanced Performers features frequent changes in musical material, playing style and instrumental combinations. Throughout a series of short sections, the performers play, sing, speak, conduct and move around, following instructions that appear on tablets. This article reflects on audiences’ experiences of the work and on musical actions more generally. We consider musical actions as short, coherent motion chunks and distinguish between several types of action that appear in the piece: gestures (communicative actions, with or without sound), reactions (where a player responds to another) and interactions (where players mutually coordinate). The musicians’ individual and collective actions create a sense of play: on the one hand, they seem free and depart from standard concert conventions; on the other hand, they seem to be following a set of rules, even if these rules are not explained to the audience. As such, we approach the piece via theories of play and relate it to earlier modernist musical games. Ultimately, 3 × 10 Musical Actions emphasises several aspects of musical actions, as social, functional, expressive, playful and embodied.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2333449" class="vrtx-external-publication">
        <div id="vrtx-publication-2333449">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2333449">
                Docherty, Claire; Iddon, Martin; Jensenius, Alexander Refsum; MacDonald, Raymond &amp; Stanley, Jane
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        ‘Are You Still There?’ Experiencing Sonic Bothy’s Verbaaaaatim.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Tempo.
                </span>
                <span class="vrtx-issn">ISSN 0040-2982.</span>
                            78(310).
            
            <a href="https://hdl.handle.net/11250/5038409">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Sonic Bothy is an inclusive experimental and new-music organisation with an ensemble of musicians with and without disabilities and neurodiversities. This article considers their audiovisual piece Verbaaaaatim (2020–21), its form marked by the context of its development and composition during the COVID-19 pandemic, using a set of interlayered perspectives that mirror the formal layers of the piece. Recorded in a single take, it comprises instrumental sounds, spoken words, written words, static and dynamic graphics and videos of the performers, aligned so that the piece seems consistently to flow onwards, although it is not always clear which element impels its forward motion. The article considers, in particular, Verbaaaaatim’s presentation of modes of embodied conviviality between its performers, the ways these find resonance in wider histories of experimental music and the ways in which its elements can be understood in an ecological framework as ‘sound actions’.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2295084" class="vrtx-external-publication">
        <div id="vrtx-publication-2295084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2295084">
                Christodoulou, Anna-Maria; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Multimodal music datasets? Challenges and future goals in music processing.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        International Journal of Multimedia Information Retrieval.
                </span>
                <span class="vrtx-issn">ISSN 2192-6611.</span>
                            13(3).
            doi: <a href="https://doi.org/10.1007/s13735-024-00344-6">10.1007/s13735-024-00344-6</a>.
            <a href="https://hdl.handle.net/10852/118423">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The term “multimodal music dataset” is often used to describe music-related datasets that represent music as a multimedia art form and multimodal experience. However, the term “multimodality” is often used differently in disciplines such as musicology, music psychology, and music technology. This paper proposes a definition of multimodality that works across different music disciplines. Many challenges are related to constructing, evaluating, and using multimodal music datasets. We provide a task-based categorization of multimodal datasets and suggest guidelines for their development. Diverse data pre-processing methods are illuminated, highlighting their contributions to transparent and reproducible music analysis. Additionally, evaluation metrics, methods, and benchmarks tailored for multimodal music processing tasks are scrutinized, empowering researchers to make informed decisions and facilitating cross-study comparisons.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2269992" class="vrtx-external-publication">
        <div id="vrtx-publication-2269992">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2269992">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied music learning.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Schilhab, Theresa &amp; Groth, Camilla (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Embodied Learning and Teaching using the 4E Cognition Approach: Exploring Perspectives in Teaching Practices.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=FAE3940D-29AB-45F5-9190-6242B3BB7596">Routledge</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781003341604.</span>
                            
            doi: <a href="https://doi.org/10.4324/9781003341604-21">10.4324/9781003341604-21</a>.
            <a href="https://hdl.handle.net/10852/111222">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This chapter presents a pedagogical approach based on the author’s experience teaching interactive music technology design from an embodied music cognition perspective. The “musicking quadrant” is introduced as a framework to understand the experiences of those who make music in real-time (performers) and non-real-time (instrument makers, composers, producers), and those who experience music in real-time (perceivers) and non-real-time (analysts). New technologies challenge these roles and allow for new types of musical engagement that align with the 4E cognition perspectives.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2156528" class="vrtx-external-publication">
        <div id="vrtx-publication-2156528">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2156528">
                Høffding, Simon; Yi, Wenbo; Lippert, Eigil; Sanchez, Victor Evaristo Gonzalez; Bishop, Laura &amp; Laeng, Bruno
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2156528/contributors', 'vrtx-publication-contributors-2156528')">
                    [Show all&nbsp;9&nbsp;contributors for this article]</a>
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Into the Hive-Mind: Shared Absorption and Cardiac Interrelations in Expert and Student String Quartets.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            6.
            doi: <a href="https://doi.org/10.1177/20592043231168597">10.1177/20592043231168597</a>.
            <a href="https://hdl.handle.net/10852/107606">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Expert musicians portray awe-inspiring precision, timing, and phrasing and may be thought to partake in a “hive-mind.” Such a shared musical absorption is characterized by a heightened empathic relation, mutual trust, and a sense that the music “takes over,” thus uniting the performers’ musical intentions. Previous studies have found correlations between empathic concern or shared experience and cardiac synchrony (CS). We aimed to investigate shared musical absorption in terms of CS by analyzing CS in two quartets: a student quartet, the Borealis String Quartet (BSQ), and an expert quartet, the Danish String Quartet (DSQ), world-renowned for their interpretations and cohesion. These two quartets performed the same Haydn excerpt in seven conditions, some of which were designed to disrupt their absorption. Using multidimensional recurrence quantification analysis (MdRQA), we found that: (1) performing resulted in significantly increased CS in both quartets compared with resting; (2) across all conditions, the DSQ had a significantly higher CS than the BSQ; (3) the BSQ&#39;s CS was inversely correlated with the degree of disruption; 4) for the DSQ, the CS remained constant across all levels of disruption, besides one added extreme disruption—a sight-reading condition. These findings tentatively support the claim that a sense of shared musical absorption, as well as group expertise, is correlated with CS.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2167720" class="vrtx-external-publication">
        <div id="vrtx-publication-2167720">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2167720">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Standing still together: Reflections on a one-year-long exploration of human micromotion.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Wöllner, Clemens &amp; London, Justin (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Performing Time. Synchrony and Temporal Flow in Music and Dance.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=239F1C9D-8585-4961-B96A-05B4CEBCAF6B">Oxford University Press</a>.
                </span>
                <span class="vrtx-issn">ISBN 9780192896254.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3862493">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">How does it feel to stand still in silence with others for 10 minutes at a time? The current chapter reports on a study of a small group of musicians and dancers that used standstill as their regular &quot;warm-up&quot; activity over a year of working together. Motion capture data of the sessions reveal that each individual&#39;s quantity of motion remained similar over time, but self-reports indicate that the subjective experience of time and space changed radically. The participants developed a high spatiotemporal sensitivity and reported an increase in well-being after a year of standstill. They also reported that a 10-minute standstill helped with the mental preparation before both micromotion and regular performances.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2161978" class="vrtx-external-publication">
        <div id="vrtx-publication-2161978">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2161978">
                Kwak, Dongho; Combriat, Thomas Michel Daniel; Jensenius, Alexander Refsum &amp; Olsen, Petter Angell
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Characterization of Mechanical and Cellular Effects of Rhythmic Vertical Vibrations on Adherent Cell Cultures.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Bioengineering.
                </span>
                            10(7),
                <span class="vrtx-pages">p. 1–19.</span>
            doi: <a href="https://doi.org/10.3390/bioengineering10070811">10.3390/bioengineering10070811</a>.
            <a href="https://hdl.handle.net/11250/3455743">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper presents an innovative experimental setup that employs the principles of audio technology to subject adherent cells to rhythmic vertical vibrations. We employ a novel approach that combines three-axis acceleration measurements and particle tracking velocimetry to evaluate the setup’s performance. This allows us to estimate crucial parameters such as root mean square acceleration, fluid flow patterns, and shear stress generated within the cell culture wells when subjected to various vibration types. The experimental conditions consisted of four vibrational modes: No Vibration, Continuous Vibration, Regular Pulse, and Variable Pulse. To evaluate the effects on cells, we utilized fluorescence microscopy and a customized feature extraction algorithm to analyze the F-actin filament structures. Our findings indicate a consistent trend across all vibrated cell cultures, revealing a reduction in size and altered orientation (2D angle) of the filaments. Furthermore, we observed cell accumulations in the G1 cell cycle phase in cells treated with Continuous Vibration and Regular Pulse. Our results demonstrate a negative correlation between the magnitude of mechanical stimuli and the size of F-actin filaments, as well as a positive correlation with the accumulations of cells in the G1 phase of the cell cycle. By unraveling these analyses, this study paves the way for future investigations and provides a compelling framework for comprehending the intricate cellular responses to rhythmic mechanical stimulation.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2192971" class="vrtx-external-publication">
        <div id="vrtx-publication-2192971">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192971">
                Szorkovszky, Alexander; Veenstra, Frank; Lartillot, Olivier Serge Gabriel; Jensenius, Alexander Refsum &amp; Glette, Kyrre
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied Tempo Tracking with a Virtual Quadruped,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.10060970">10.5281/zenodo.10060970</a>.
            <a href="https://hdl.handle.net/11250/5089382">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Dynamic attending theory posits that we entrain to time-structured events in a similar way to synchronizing oscillators. Hence, a tempo tracker based on oscillators may replicate humans&#39; ability to rapidly and robustly identify musical tempi. We demonstrate this idea using virtual quadrupeds, whose gaits are controlled by oscillatory neural circuits known as central pattern generators (CPGs). The quadruped CPGs were first optimized for flexible gait frequency and direction, and then an additional recurrent layer was optimized for entrainment to isochronous pulses. Using excerpts of musical pieces, we find that the motion of these agents can rapidly entrain to simple rhythms. Performance was found to be partially predicted by pulse entropy, a measure of the sample&#39;s rhythmic complexity. Notably, in addition to having wide tempo ranges, the best performing agents can also entrain to rhythms that are periodic but not quantized on a grid. Our approach offers an embodied alternative to other dynamical systems-based approaches to entrainment, such as gradient-frequency arrays. Such agents could find use as participants in virtual musicking environments, or as real-world musical robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2197226" class="vrtx-external-publication">
        <div id="vrtx-publication-2197226">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2197226">
                Erdem, Cagri; Wallace, Benedikte; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tool or Actor? Expert Improvisers’ Evaluation of a Musical AI “Toddler”.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Computer Music Journal.
                </span>
                <span class="vrtx-issn">ISSN 0148-9267.</span>
                            46(4),
                <span class="vrtx-pages">p. 26–42.</span>
            doi: <a href="https://doi.org/10.1162/comj_a_00657">10.1162/comj_a_00657</a>.
            <a href="https://hdl.handle.net/10852/110981">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abstract In this article, we introduce the coadaptive audiovisual instrument, CAVI. This instrument uses deep learning to generate control signals based on muscle and motion data of a performer&#39;s actions. The generated signals control time-based live sound-processing modules. How does a performer perceive such an instrument? Does it feel like a machine learning–based musical tool? Or is it an actor with the potential to become a musical partner? We report on an evaluation of CAVI after it had been used in two public performances. The evaluation is based on interviews with the performers, audience questionnaires, and the creator&#39;s self-analysis. Our findings suggest that the perception of CAVI as a tool or actor correlates with the performer&#39;s sense of agency. The perceived agency changes throughout a performance based on several factors, including perceived musical coordination, the balance between surprise and familiarity, a “common sense,” and the physical characteristics of the performance setting.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200042" class="vrtx-external-publication">
        <div id="vrtx-publication-2200042">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200042">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion,
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference 2023.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9789152773727.</span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.8316051">10.5281/zenodo.8316051</a>.
            <a href="https://hdl.handle.net/10852/106232">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198422" class="vrtx-external-publication">
        <div id="vrtx-publication-2198422">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198422">
                Masu, Raul; Morreale, Fabio &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The O in NIME: Reflecting on the Importance of Reusing and Repurposing Old Musical Instruments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ortiz, Miguel &amp; Marquez-Borbon, Adnan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidad Autónoma Metropolitana.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4615889">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we reflect on the focus of “newness” in NIME research and practice and argue that there is a missing O (for “Old”) in framing our academic discourse. A systematic review of the last year’s conference proceedings reveals that most papers do, indeed, present new instruments, interfaces, or pieces of technology. Comparably few papers focus on the prolongation of existing NIMEs. Our meta-analysis identifies four main categories from these papers: (1) reuse, (2) update, (3) complement, and (4) long-term engagement. We discuss how focusing more on these four types of NIME development and engagement can be seen as an approach to increase sustainability.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198420" class="vrtx-external-publication">
        <div id="vrtx-publication-2198420">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198420">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Emerging Drumming Patterns in a Chaotic Dynamical System using ZRob.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Ortiz, Miguel &amp; Marquez-Borbon, Adnan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidad Autónoma Metropolitana.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/106203">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">ZRob is a robotic system designed for playing a snare drum. The robot is constructed with a passive flexible spring-based joint inspired by the human hand. This paper describes a study exploring rhythmic patterns by exploiting the chaotic dynamics of two ZRobs. In the experiment, we explored the control configurations of each arm by trying to create un- predictable patterns. Over 200 samples have been recorded and analyzed. We show how the chaotic dynamics of ZRob can be used for creating new drumming patterns.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2157675" class="vrtx-external-publication">
        <div id="vrtx-publication-2157675">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2157675">
                Herrebrøden, Henrik; Jensenius, Alexander Refsum; Espeseth, Thomas; Bishop, Laura &amp; Vuoskoski, Jonna Katariina
            </span>(2023).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cognitive load causes kinematic changes in both elite and non-elite rowers.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Human Movement Science.
                </span>
                <span class="vrtx-issn">ISSN 0167-9457.</span>
                            90.
            doi: <a href="https://doi.org/10.1016/j.humov.2023.103113">10.1016/j.humov.2023.103113</a>.
            <a href="https://hdl.handle.net/11250/4565532">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The current motor literature suggests that extraneous cognitive load may affect performance and kinematics in a primary motor task. A common response to increased cognitive demand, as observed in past studies, might be to reduce movement complexity and revert to previously learned movement patterns, in line with the progression-regression hypothesis. However, according to several accounts of automaticity, motor experts should be able to cope with dual task demands without detriment to their performance and kinematics. To test this, we conducted an experiment asking elite and non-elite rowers to use a rowing ergometer under conditions of varying task load. We employed single-task conditions with low cognitive load (i.e., rowing only) and dual-task conditions with high cognitive load (i.e., rowing and solving arithmetic problems). The results of the cognitive load manipulations were mostly in line with our hypotheses. Overall, participants reduced movement complexity, for example by reverting towards tighter coupling of kinematic events, in their dual-task performance as compared to single-task performance. The between-group kinematic differences were less clear. In contradiction to our hypotheses, we found no significant interaction between skill level and cognitive load, suggesting that the rowers&#39; kinematics were affected by cognitive load irrespective of skill level. Overall, our findings contradict several past findings and automaticity theories, and suggest that attentional resources are required for optimal sports performance.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2043086" class="vrtx-external-publication">
        <div id="vrtx-publication-2043086">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2043086">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Robotic Drummer with a Flexible Joint: the Effect of Passive Impedance on Drumming.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Michon, Romain; Pottier, Laurent &amp; Orlarey, Yann (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 19th Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9782958412609.</span>
                            
                <span class="vrtx-pages">p. 232–237.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.6797833">10.5281/zenodo.6797833</a>.
            <a href="https://hdl.handle.net/11250/4022031">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Intelligent robots aimed for performing music and playing musical instruments have been developed in recent years. With the advancements in artificial intelligence and robotic systems, new capabilities have been explored in this field. One major aspect of musical robots that can lead to the emergence of creative results is the ability to learn skills autonomously. To make it feasible, it is important to make the robot utilize its maximum potential and mechanical capabilities to play a musical instrument. Furthermore, the robot needs to find the musical possibilities based on the physical properties of the instrument to provide satisfying results. In this work, we introduce a drum robot with certain mechanical specifications and analyze the capabilities of the robot according to the drumming sound results of the robot. The robot has two degrees of freedom, actuated by one quasi direct-drive servo motor. The gripper of the robot features a flexible joint with passive springs which adds complexity to the movements of the drumstick. In a basic experiment, we have looked at the drum roll performance by the robot while changing a few control variables such as frequency and amplitude of the motion. Both single-stroke and double-stroke drum rolls can be performed by the robot by changing the control variables. The effect of the flexible gripper on the drumming results of the robot is the main focus of this study. Additionally, we have divided the control space according to the type of drum rolls. The results of this experiment lay the groundwork for developing an intelligent algorithm for the robot to learn musical patterns by interacting with the drum.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2096107" class="vrtx-external-publication">
        <div id="vrtx-publication-2096107">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2096107">
                Patel-Grosz, Pritty; Grosz, Patrick Georg; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Steps towards a semantics of dance.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of Semantics.
                </span>
                <span class="vrtx-issn">ISSN 0167-5133.</span>
                            39(4),
                <span class="vrtx-pages">p. 693–748.</span>
            doi: <a href="https://doi.org/10.1093/jos/ffac009">10.1093/jos/ffac009</a>.
            <a href="https://hdl.handle.net/11250/3350514">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abstract As formal theoretical linguistic methodology has matured, recent years have seen the advent of applying it to objects of study that transcend language, e.g., to the syntax and semantics of music (Lerdahl &amp; Jackendoff 1983, Schlenker 2017a; see also Rebuschat et al. 2011). One of the aims of such extensions is to shed new light on how meaning is construed in a range of communicative systems. In this paper, we approach this goal by looking at narrative dance in the form of Bharatanatyam. We argue that a semantic approach to dance can be modeled closely after the formal semantics of visual narrative proposed by Abusch (2013, 2014, 2021). A central conclusion is that dance not only shares properties of other fundamentally human means of expression, such as visual narrative and music, but that it also exhibits similarities to sign languages and the gestures of non-signers (see, e.g., Schlenker 2020) in that it uses space to track individuals in a narrative and performatively portray the actions of those individuals. From the perspective of general human cognition, these conclusions corroborate the idea that linguistic investigations beyond language (see Patel-Grosz et al. forthcoming) can yield insights into the very nature of the human mind and of the communicative devices that it avails.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2097465" class="vrtx-external-publication">
        <div id="vrtx-publication-2097465">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2097465">
                Patel-Grosz, Pritty; Katz, Jonah; Grosz, Patrick Georg; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        From music to dance: The inheritance of semantic inferences.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Empirical Issues in Syntax and Semantics.
                </span>
                            14,
                <span class="vrtx-pages">p. 219–238.</span>
            
            <a href="https://hdl.handle.net/11250/4543007">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2043955" class="vrtx-external-publication">
        <div id="vrtx-publication-2043955">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2043955">
                Swarbrick, Dana; Upham, Finn; Erdem, Cagri; Jensenius, Alexander Refsum &amp; Vuoskoski, Jonna Katariina
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Measuring Virtual Audiences with The MusicLab App: Proof of Concept.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Michon, Romain; Pottier, Laurent &amp; Orlarey, Yann (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 19th Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9782958412609.</span>
                            
                <span class="vrtx-pages">p. 532–539.</span>
            doi: <a href="https://doi.org/10.5281/zenodo.6798290">10.5281/zenodo.6798290</a>.
            <a href="https://hdl.handle.net/10852/95539">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We present a proof of concept by using the mobile application
MusicLab to measure motion during a livestreamed
concert and examining its relation to musical features.
With the MusicLab App, participants’ own smartphones’
inertial measurement unit (IMU) sensors can be leveraged
to record their motion and their subjective experiences collected
through survey responses. The MusicLab Lockdown
Rave was an Algorave (live-coded dance music)
livestreamed concert featuring prolific performers Renick
Bell and Khoparzi. They livestreamed for an international
audience who wore their smartphones with the MusicLab
App while they listened/danced to the performances. From
their acceleration, we computed quantity of motion and
compared it to musical features that have previously been
associated with music-related motion, namely pulse clarity
and low and high spectral flux. By encountering challenges
and implementing improvements, the MusicLab
App has become a useful tool for researching music-related
motion.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2007749" class="vrtx-external-publication">
        <div id="vrtx-publication-2007749">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2007749">
                Kwak, Dongho; Combriat, Thomas Michel Daniel; Wang, Chencheng; Scholz, Hanne; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music for Cells? A Systematic Review of Studies Investigating the Effects of Audible Sound Played Through Speaker-Based Systems on Cell Cultures.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            5.
            doi: <a href="https://doi.org/10.1177/20592043221080965">10.1177/20592043221080965</a>.
            <a href="https://hdl.handle.net/10852/94879">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">There have been several studies investigating whether musical sound can be used as cell stimuli in recent years. We systematically searched publications to get an overview of studies that have used audible sound played through speaker-based systems to induce mechanical perturbation in cell cultures. A total of 12 studies were identified. We focused on the experimental setups, the sounds that were used as stimuli, and relevant biological outcomes. The studies are categorized into simple and complex sounds depending on the type of sound employed. Some of the promising effects reported were enhanced cell migration, proliferation, colony formation, and differentiation ability. However, there are significant differences in methodologies and cell type-specific outcomes, which made it difficult to find a systematic pattern in the results. We suggest that future experiments should consider using: (1) a more controlled acoustic environment, (2) standardized sound and noise measurement methods, and (3) a more comprehensive range of controlled sound parameters as cellular stimuli.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2040103" class="vrtx-external-publication">
        <div id="vrtx-publication-2040103">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2040103">
                Kwak, Dongho; Olsen, Petter Angell; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A trio of biological rhythms and their relevance in rhythmic mechanical stimulation of cell cultures.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Psychology.
                </span>
                            13.
            doi: <a href="https://doi.org/10.3389/fpsyg.2022.867191">10.3389/fpsyg.2022.867191</a>.
            <a href="https://hdl.handle.net/10852/94878">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The primary aim of this article is to provide a biological rhythm model based on previous theoretical and experimental findings to promote more comprehensive studies of rhythmic mechanical stimulation of cell cultures, which relates to tissue engineering and regenerative medicine fields. Through an interdisciplinary approach where different standpoints from biology and musicology are combined, we explore some of the core rhythmic features of biological and cellular rhythmic processes and present them as a trio model that aims to afford a basic but fundamental understanding of the connections between various biological rhythms. It is vital to highlight such links since rhythmic mechanical stimulation and its effect on cell cultures are vastly underexplored even though the cellular response to mechanical stimuli (mechanotransduction) has been studied widely and relevant experimental evidence suggests mechanotransduction processes are rhythmic.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2061616" class="vrtx-external-publication">
        <div id="vrtx-publication-2061616">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2061616">
                Lesteberg, Mari &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        MICRO and MACRO - Developing New Accessible Musicking Technologies.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Iber, Michael &amp; Enge, Kajetan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Audio Mostly 2022: What you hear is what you see? Perspectives on modalities in sound and music interaction.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        ACM Publications.
                </span>
                <span class="vrtx-issn">ISBN 9781450397018.</span>
                            
                <span class="vrtx-pages">p. 147–150.</span>
            doi: <a href="https://doi.org/10.1145/3561212.3561231">10.1145/3561212.3561231</a>.
            <a href="https://hdl.handle.net/11250/3486287">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the development of two musical instrument prototypes developed to explore how non-haptic music technologies can be accessed from a web browser and how they can offer accessibility for people with low fine motor skills. Two approaches to browser-based motion capture were developed and tested during an iterative design process. This was followed by observational studies of two user groups: one with low fine motor skills and one with normal motor skills. Contrary to our expectations, we found that avoiding the use of buttons and mice did not make the apps more accessible for the participants with low fine motor skills. Furthermore, motion speed was considered more important for people with low motor skills than the size of the control action. The most important finding is that browser-based musical instruments using sensor-based and video-based motion tracking are not only feasible but allow for reaching much larger groups of people than previously possible. This may ultimately lead to both more personalized and accessible musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2062246" class="vrtx-external-publication">
        <div id="vrtx-publication-2062246">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2062246">
                Kwak, Dongho; Krzyzaniak, Michael Joseph; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A mini acoustic chamber for small-scale sound experiments.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Iber, Michael &amp; Enge, Kajetan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Audio Mostly 2022: What you hear is what you see? Perspectives on modalities in sound and music interaction.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        ACM Publications.
                </span>
                <span class="vrtx-issn">ISBN 9781450397018.</span>
                            
                <span class="vrtx-pages">p. 143–146.</span>
            doi: <a href="https://doi.org/10.1145/3561212.3561223">10.1145/3561212.3561223</a>.
            <a href="https://hdl.handle.net/10852/111371">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the design and construction of a mini acoustic chamber using low-cost materials. The primary purpose is to provide an acoustically treated environment for small-scale sound measurements and experiments using ≤ 10-inch speakers. Testing with different types of speakers showed frequency responses of &lt;±10 dB peak-to-peak (except the “boxiness” range below 900 Hz), and the acoustic insulation (soundproofing) of the chamber is highly efficient (approximately 20 dB SPL in reduction). Therefore, it provides a significant advantage in conducting experiments requiring a small room with consistent frequency response and preventing unwanted noise and hearing damage. Additionally, using a cost-effective and compact acoustic chamber gives flexibility when characterizing a small-scale setup and sound stimuli used in experiments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1945470" class="vrtx-external-publication">
        <div id="vrtx-publication-1945470">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1945470">
                Jensenius, Alexander Refsum &amp; Erdem, Cagri
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Gestures in ensemble performance.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Timmers, Renee; Bailes, Freya &amp; Daffern, Helena (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Together in Music: Coordination, expression, participation.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=239F1C9D-8585-4961-B96A-05B4CEBCAF6B">Oxford University Press</a>.
                </span>
                <span class="vrtx-issn">ISBN 9780198860761.</span>
                            
            doi: <a href="https://doi.org/10.1093/oso/9780198860761.003.0014">10.1093/oso/9780198860761.003.0014</a>.
            <a href="https://hdl.handle.net/11250/4344879">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The topic of gesture has received growing attention among music researchers over recent decades. Some of this research has been summarized in anthologies on &quot;musical gestures&quot;, such as those by Gritten and King (2006), Godøy and Leman (2010), and Gritten and King (2011). There have also been a couple of articles reviewing how the term gesture has been used in various music-related disciplines (and beyond), including those by Cadoz and Wanderley (2000) and Jensenius et al. (2010). Much empirical work has been performed since these reviews were written, aided by better motion capture technologies, new machine learning techniques, and a heightened awareness of the topic. Still there are a number of open questions as to the role of gestures in music performance in general, and in ensemble performance in particular. This chapter aims to clarify some of the basic terminology of music-related body motion, and draw up some perspectives of how one can think about gestures in ensemble performance. This is, obviously, only one way of looking at the very multifaceted concept of gesture, but it may lead to further interest in this exciting and complex research domain.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094145" class="vrtx-external-publication">
        <div id="vrtx-publication-2094145">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094145">
                Remache-Vinueza, Byron; Trujillo-León, Andrés; Clim, Maria-Alena; Sarmiento-Ortiz, Fabián; Topon-Visarrea, Liliana &amp; Jensenius, Alexander Refsum
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2094145/contributors', 'vrtx-publication-contributors-2094145')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mapping Monophonic MIDI Tracks to Vibrotactile Stimuli Using Tactile Illusions.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Saitis, Charalampos; Farkhatdinov, Ildar &amp; Papetti, Stefano (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Haptic and Audio Interaction Design.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-issn">ISBN 9783031150197.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3331117">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this project, we propose an algorithm to convert musical features and structures extracted from monophonic MIDI files to tactile illusions. Mapping music to vibrotactile stimuli is a challenging process since the perceptible frequency range of the skin is lower than that of the auditory system, which may cause the loss of some musical features. Moreover, current proposed models do not warrant the correspondence between the emotional response to music and the vibrotactile version of it. We propose to use tactile illusions as an additional resource to convey more meaningful vibrotactile stimuli. Tactile illusions enable us to add dynamics to vibrotactile stimuli in the form of movement, changes of direction, and localization. The suggested algorithm converts monophonic MIDI files into arrangements of two tactile illusions: “phantom motion” and “funneling”. The validation of the rendered material consisted of presenting the audio rendered from MIDI files to participants and then adding the vibrotactile component to it. The arrangement of tactile illusions was also evaluated alone. Results suggest that the arrangement of tactile illusions evokes more positive emotions than negative ones. This arrangement was also perceived as more agreeable and stimulating than the original audio. Although musical features such as rhythm, tempo, and melody were mostly recognized in the arrangement of tactile illusions, it provoked a different emotional response from that of the original audio.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2268059" class="vrtx-external-publication">
        <div id="vrtx-publication-2268059">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2268059">
                Erdem, Cagri; Wallace, Benedikte &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        CAVI: A Coadaptive Audiovisual Instrument–Composition.
                </span>
                    <span class="vrtx-parent-contributors">
                            In McPherson, Andrew &amp; Frid, Emma (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.21428/92fbeb44.803c24dd">10.21428/92fbeb44.803c24dd</a>.
            <a href="https://hdl.handle.net/11250/4771760">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2072509" class="vrtx-external-publication">
        <div id="vrtx-publication-2072509">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072509">
                Herrebråden, Henrik; Gonzalez, Victor; Vuoskoski, Jonna Katariina &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Pre-recorded sound file versus human coach: Investigating auditory guidance effects on elite rowers.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Andreopoulou, Areti; Walker, Bruce; McMullen, Kyla &amp; Rönnberg, Niklas (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 27th International Conference on Auditory Display (ICAD2022).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        The International Community for Auditory Display.
                </span>
                <span class="vrtx-issn">ISBN 9780967090481.</span>
                            
                <span class="vrtx-pages">p. 25–30.</span>
            doi: <a href="https://doi.org/10.21785/icad2022.012">10.21785/icad2022.012</a>.
            <a href="https://hdl.handle.net/11250/4739523">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We report on an experiment in which nine Norwegian national team rowers (one female) were tested on a rowing ergometer in a motion capture lab. After the warm-up, all participants rowed in a neutral condition for three minutes, without any instructions. Then they rowed in two conditions (three minutes each), with a counterbalanced order: (1) a coaching condition, during which they received oral instructions from a national team coach, and (2) a sound condition, during which they listened to a pre-recorded sound file that was produced to promote good rowing technique. Performance was measured in terms of distance traveled, and subjective responses were measured via a questionnaire inquiring participants about how useful the two interventions were for rowing efficiency. The results showed no significant difference between the two conditions of main interest—the pre-recorded sound file and traditional coaching—on any measure. Our study indicates that auditory guidance can be a cost-efficient supplement to athletes’ training, even at higher levels.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1904652" class="vrtx-external-publication">
        <div id="vrtx-publication-1904652">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1904652">
                Karbasi, Seyed Mojtaba; Godøy, Rolf Inge; Jensenius, Alexander Refsum &amp; Tørresen, Jim
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Learning Method for Stiffness Control of a Drum Robot for Rebounding Double Strokes.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Zhang, Dan (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    2021 7th International Conference on Mechatronics and Robotics Engineering (ICMRE).
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=11615D7E-8C0C-4748-9F26-784E436F80A3">IEEE (Institute of Electrical and Electronics Engineers)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9780738132051.</span>
                            
                <span class="vrtx-pages">p. 54–58.</span>
            doi: <a href="https://doi.org/10.1109/ICMRE51691.2021.9384843">10.1109/ICMRE51691.2021.9384843</a>.
            <a href="https://hdl.handle.net/10852/85902">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In robot drumming, performing double stroke rolls is a key ability. Human drummers learn to play double strokes by just trying it several times. For performing it, a model needs to be learned to provide anticipatory commands during drumming. Joint stiffness plays a key role in rebounding double stroke task and should be considered in the model. We have introduced an interactive learning method for a drum robot to learn joint stiffness for rebounding double stroke task. The model is simulated for a 2-DoF robotic arm. The algorithm is simulated with 3 different drum kits to show the robustness of the learning approach. The simulation results also show significant compatibility with human performance results. In addition, the refined learning algorithm adjusts the stroke timing which is important for producing proper rhythms.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1919217" class="vrtx-external-publication">
        <div id="vrtx-publication-1919217">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1919217">
                Bishop, Laura; Jensenius, Alexander Refsum &amp; Laeng, Bruno
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musical and Bodily Predictors of Mental Effort in String Quartet Music: An Ecological Pupillometry Study of Performers and Listeners.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Psychology.
                </span>
                            
            doi: <a href="https://doi.org/10.3389/fpsyg.2021.653021">10.3389/fpsyg.2021.653021</a>.
            <a href="https://hdl.handle.net/10852/86670">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music performance can be cognitively and physically demanding. These demands vary across the course of a performance as the content of the music changes. More demanding passages require performers to focus their attention more intensely, or expend greater “mental effort.” To date, it remains unclear what effect different cognitive-motor demands have on performers&#39; mental effort. It is likewise unclear how fluctuations in mental effort compare between performers and perceivers of the same music. We used pupillometry to examine the effects of different cognitive-motor demands on the mental effort used by performers and perceivers of classical string quartet music. We collected pupillometry, motion capture, and audio-video recordings of a string quartet as they performed a rehearsal and concert (for live audience) in our lab. We then collected pupillometry data from a remote sample of musically-trained listeners, who heard the audio recordings (without video) that we captured during the concert. We used a modelling approach to assess the effects of performers&#39; bodily effort (head and arm motion; sound level; performers&#39; ratings of technical difficulty), musical complexity (performers&#39; ratings of harmonic complexity; a score-based measure of harmonic tension), and expressive difficulty (performers&#39; ratings of expressive difficulty) on performers&#39; and listeners&#39; pupil diameters. Our results show stimulating effects of bodily effort and expressive difficulty on performers&#39; pupil diameters, and stimulating effects of expressive difficulty on listeners&#39; pupil diameters. We also observed negative effects of musical complexity on both performers and listeners, and negative effects of performers&#39; bodily effort on listeners, which we suggest may reflect the complex relationships that these features share with other aspects of musical structure. 
Looking across the concert, we found that both of the quartet violinists (who exchanged places halfway through the concert) showed more dilated pupils during their turns as 1st violinist than when playing as 2nd violinist, suggesting that they experienced greater arousal when “leading” the quartet in the 1st violin role. This study shows how eye tracking and motion capture technologies can be used in combination in an ecological setting to investigate cognitive processing in music performance.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1914431" class="vrtx-external-publication">
        <div id="vrtx-publication-1914431">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1914431">
                Masu, Raul; Melbye, Adam Pultz; Sullivan, John &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        NIME and the Environment: Toward a More Sustainable NIME Practice.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Dannenberg, Roger &amp; Xiao, Xiao (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=031C5553-12A0-453E-B4FA-DC2B19B95BD2">The International Conference on New Interfaces for Musical Expression</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/86529">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper addresses environmental issues around NIME research and practice. We discuss the formulation of an environmental statement for the conference as well as the initiation of a NIME Eco Wiki containing information on environmental concerns related to the creation of new musical instruments. We outline a number of these concerns and, by systematically reviewing the proceedings of all previous NIME conferences, identify a general lack of reflection on the environmental impact of the research undertaken. Finally, we propose a framework for addressing the making, testing, using, and disposal of NIMEs in the hope that sustainability may become a central concern to researchers. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1967503" class="vrtx-external-publication">
        <div id="vrtx-publication-1967503">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1967503">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Best versus Good Enough Practices for Open Music Research.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Empirical Musicology Review.
                </span>
                            16(1).
            doi: <a href="https://doi.org/10.18061/emr.v16i1.7646">10.18061/emr.v16i1.7646</a>.
            <a href="https://hdl.handle.net/11250/4488029">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music researchers work with increasingly large and complex data sets. There are few established data handling practices in the field and several conceptual, technological, and practical challenges. Furthermore, many music researchers are not equipped for (or interested in) the craft of data storage, curation, and archiving. This paper discusses some of the particular challenges that empirical music researchers face when working towards Open Research practices: handling (1) (multi)media files, (2) privacy, and (3) copyright issues. These are exemplified through MusicLab, an event series focused on fostering openness in music research. It is argued that the &quot;best practice&quot; suggested by the FAIR principles is too demanding in many cases, but &quot;good enough practice&quot; may be within reach for many. A four-layer data handling &quot;recipe&quot; is suggested as concrete advice for achieving &quot;good enough practice&quot; in empirical music research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1936905" class="vrtx-external-publication">
        <div id="vrtx-publication-1936905">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1936905">
                Bishop, Laura; Sanchez, Victor Evaristo Gonzalez; Laeng, Bruno; Jensenius, Alexander Refsum &amp; Høffding, Simon
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Move like everyone is watching: Social context affects head motion and gaze in string quartet performance.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of New Music Research.
                </span>
                <span class="vrtx-issn">ISSN 0929-8215.</span>
                            
            doi: <a href="https://doi.org/10.1080/09298215.2021.1977338">10.1080/09298215.2021.1977338</a>.
            <a href="https://hdl.handle.net/11250/5148220">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Ensemble musicians engage with each other visually through glances and body motion. We conducted a case study to test how string quartet musicians would respond to playing conditions that were meant to discourage or promote visually communicative behaviour. A quartet performed in different seating configurations under rehearsal and concert conditions. Quantity of head motion was reduced when musicians’ gaze was constrained. Differences in gaze and body motion between musicians reflected their musical roles in the ensemble. Overall, our findings suggest that gaze and motion dynamics vary within and between performances in response to changing musical, situational and social factors.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1954360" class="vrtx-external-publication">
        <div id="vrtx-publication-1954360">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954360">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Kantan, Prithvi Ravi; Paisa, Razvan &amp; Willemsen, Silvin (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Nordic Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=2B38F065-B3E6-4061-9F0C-0BA1287EEAFF">Aalborg Universitetsforlag</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/89331">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1937937" class="vrtx-external-publication">
        <div id="vrtx-publication-1937937">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1937937">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikkteknologiforskning.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Nytt Norsk Tidsskrift (NNT).
                </span>
                <span class="vrtx-issn">ISSN 0800-336X.</span>
                            38(3),
                <span class="vrtx-pages">p. 260–263.</span>
            
            <a href="https://hdl.handle.net/11250/4023541">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Når jeg presenterer meg som musikkteknolog, får jeg ofte undrende spørsmål. Noen er opptatt av det teknologiske: «Så du driver med synther og sånn?» Andre lurer på det musikalske: «Er det sånn pling-plong-musikk?» Likevel bruker mange en form for musikkteknologi hver dag, om det så bare er radioen som spiller musikk om morgenen. I denne artikkelen vil jeg gi et innblikk i musikkteknologi som forskningsfelt. Men først vil jeg introdusere begrepet «musikkering».</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1829207" class="vrtx-external-publication">
        <div id="vrtx-publication-1829207">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1829207">
                Erdem, Cagri; Lan, Qichao; Fuhrer, Julian; Martin, Charles Patrick; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards Playing in the &#39;Air&#39;: Modeling Motion-Sound Energy Relationships in Electric Guitar Performance Using Deep Neural Networks.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Spagnol, Simone &amp; Valle, Andrea (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Axea sas/SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9788894541502.</span>
                            
                <span class="vrtx-pages">p. 177–184.</span>
            
            <a href="https://hdl.handle.net/10852/79392">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In acoustic instruments, sound production relies on the interaction between physical objects. Digital musical instruments, on the other hand, are based on arbitrarily designed action--sound mappings. This paper describes the ongoing exploration of an empirically-based approach for simulating guitar playing technique when designing the mappings of &#39;air instrument&#39; designs. We present results from an experiment in which 33 electric guitarists performed a set of basic sound-producing actions: impulsive, sustained, and iterative. The dataset consists of bioelectric muscle signals, motion capture, video, and audio recordings. This multimodal dataset was used to train a long short-term memory network (LSTM) with a few hidden layers and relatively short training duration. We show that the network is able to predict audio energy features of free improvisations on the guitar, relying on a dataset of three distinct motion types.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1831780" class="vrtx-external-publication">
        <div id="vrtx-publication-1831780">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1831780">
                Zelechowska, Agata; Sanchez, Victor Evaristo Gonzalez &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Standstill to the ‘beat’: Differences in involuntary movement
responses to simple and complex rhythms.
                </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    AM &#39;20: Proceedings of the 15th International Conference on Audio Mostly.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781450375634.</span>
                            
                <span class="vrtx-pages">p. 107–113.</span>
            doi: <a href="https://doi.org/10.1145/3411109.3411139">10.1145/3411109.3411139</a>.
            <a href="https://hdl.handle.net/11250/4914318">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Previous studies have shown that movement-inducing properties of music largely depend on the rhythmic complexity of the stimuli. However, little is known about how simple isochronous beat patterns differ from more complex rhythmic structures in their effect on body movement. In this paper we study spontaneous movement of 98 participants instructed to stand as still as possible for 7 minutes while listening to silence and randomised sound excerpts: isochronous drumbeats and complex drum patterns, each at three different tempi (90, 120, 140 BPM). The participants’ head movement was recorded with an optical motion capture system.We found that on average participants moved more during the sound stimuli than in silence, which confirms the results from our previous studies. Moreover, the stimulus with complex drum patterns elicited more movement when compared to the isochronous drum beats. Across different tempi, the participants moved most at 120 BPM for the average of both types of stimuli. For the isochronous drumbeats, however, their movement was highest at 140 BPM. These results can contribute to our understanding of the interplay between rhythmic complexity, tempo and music-induced movement.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1817757" class="vrtx-external-publication">
        <div id="vrtx-publication-1817757">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1817757">
                Zelechowska, Agata; Gonzalez-Sanchez, Victor E.; Laeng, Bruno &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Headphones or Speakers? An Exploratory Study of Their Effects on Spontaneous Body Movement to Rhythmic Music.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Psychology.
                </span>
                            11(698).
            doi: <a href="https://doi.org/10.3389/fpsyg.2020.00698">10.3389/fpsyg.2020.00698</a>.
            <a href="https://hdl.handle.net/10852/78454">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Previous studies have shown that music may lead to spontaneous body movement, even when people try to stand still. But are spontaneous movement responses to music similar if the stimuli are presented using headphones or speakers? This article presents results from an exploratory study in which 35 participants listened to rhythmic stimuli while standing in a neutral position. The six different stimuli were 45 s each and ranged from a simple pulse to excerpts from electronic dance music (EDM). Each participant listened to all the stimuli using both headphones and speakers. An optical motion capture system was used to calculate their quantity of motion, and a set of questionnaires collected data about music preferences, listening habits, and the experimental sessions. The results show that the participants on average moved more when listening through headphones. The headphones condition was also reported as being more tiresome by the participants. Correlations between participants’ demographics, listening habits, and self-reported body motion were observed in both listening conditions. We conclude that the playback method impacts the level of body motion observed when people are listening to music. This should be taken into account when designing embodied music cognition studies.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1877438" class="vrtx-external-publication">
        <div id="vrtx-publication-1877438">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1877438">
                Zelechowska, Agata; Sanchez, Victor Evaristo Gonzalez; Laeng, Bruno; Vuoskoski, Jonna Katariina &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Who Moves to Music? Empathic Concern Predicts Spontaneous Movement Responses to Rhythm and Music.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Music &amp; Science.
                </span>
                            3.
            doi: <a href="https://doi.org/10.1177/2059204320974216">10.1177/2059204320974216</a>.
            <a href="https://hdl.handle.net/11250/3616910">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Moving to music is a universal human phenomenon, and previous studies have shown that people move to music even when they try to stand still. However, are there individual differences when it comes to how much people spontaneously respond to music with body movement? This article reports on a motion capture study in which 34 participants were asked to stand in a neutral position while listening to short excerpts of rhythmic stimuli and electronic dance music. We explore whether personality and empathy measures, as well as different aspects of music-related behaviour and preferences, can predict the amount of spontaneous movement of the participants. Individual differences were measured using a set of questionnaires: Big Five Inventory, Interpersonal Reactivity Index, and Barcelona Music Reward Questionnaire. Liking ratings for the stimuli were also collected. The regression analyses show that Empathic Concern is a significant predictor of the observed spontaneous movement. We also found a relationship between empathy and the participants’ self-reported tendency to move to music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1819315" class="vrtx-external-publication">
        <div id="vrtx-publication-1819315">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1819315">
                Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reliability of two infrared motion capture systems in a music performance setting.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Spagnol, Simone &amp; Valle, Andrea (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 17th Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Axea sas/SMC Network.
                </span>
                <span class="vrtx-issn">ISBN 9788894541502.</span>
                            
            
            <a href="https://hdl.handle.net/11250/3371834">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes a comparative analysis of tracking quality in two infrared marker-based motion capture systems: one older but high-end (Qualisys, purchased in 2009) and the other newer and mid-range (OptiTrack, purchased in 2019). We recorded performances by a string quartet with both systems simultaneously, using the same frame rate. Our recording set-up included a combination of moving markers (affixed to musicians’ bodies) and stationary markers (affixed to music stands). Higher noise levels were observed in Qualisys recordings of stationary markers than in OptiTrack recordings, as well as a greater spatial range, though OptiTrack recordings had a higher rate of outliers (“spikes” in the signal). In moving markers, increased quantity of motion was associated with increased betweensystem error rates. Both systems showed minimal withintrial drift but a reduction in recording accuracy and precision over the duration of the experiment. Overall, our results show that the older/high-end system (Qualisys) produced slightly lower-quality recordings than the newer/midrange system (OptiTrack). We discuss how our findings may inform researchers’ interpretations of motion capture data, particularly when capturing the types of motion that are important for performing music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1854446" class="vrtx-external-publication">
        <div id="vrtx-publication-1854446">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1854446">
                Erdem, Cagri; Lan, Qichao &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring relationships between effort, motion, and sound in new musical instruments.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Human Technology.
                </span>
                            16(3),
                <span class="vrtx-pages">p. 310–347.</span>
            doi: <a href="https://doi.org/10.17011/ht/urn.202011256767">10.17011/ht/urn.202011256767</a>.
            <a href="https://hdl.handle.net/10852/84959">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We investigated how the action–sound relationships found in electric guitar performance can be used in the design of new instruments. Thirty-one trained guitarists performed a set of basic sound-producing actions (impulsive, sustained, and iterative) and free improvisations on an electric guitar. We performed a statistical analysis of the muscle activation data (EMG) and audio recordings from the experiment. Then we trained a long short-term memory network with nine different configurations to map EMG signal to sound. We found that the preliminary models were able to predict audio energy features of free improvisations on the guitar, based on the dataset of raw EMG from the basic sound-producing actions. The results provide evidence of similarities between body motion and sound in music performance, compatible with embodied music cognition theories. They also show the potential of using machine learning on recorded performance data in the design of new musical instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1889788" class="vrtx-external-publication">
        <div id="vrtx-publication-1889788">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1889788">
                Erdem, Cagri; Jensenius, Alexander Refsum; Glette, Kyrre; Krzyzaniak, Michael Joseph &amp; Veenstra, Frank
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Air-Guitar Control of Interactive Rhythmic Robots.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Proceedings of the International Conference on Live Interfaces (Proceedings of ICLI).
                </span>
                            
                <span class="vrtx-pages">p. 208–210.</span>
            
            <a href="https://hdl.handle.net/11250/4450253">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes an interactive art installation shown at ICLI in Trondheim in March 2020. The installation comprised three musical robots (Dr. Squiggles) that play rhythms by tapping. Visitors were invited to wear muscle-sensor armbands, through which they could control the robots by performing ‘air-guitar’-like gestures.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1859785" class="vrtx-external-publication">
        <div id="vrtx-publication-1859785">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1859785">
                Xambó, Anna; Støckert, Robin; Jensenius, Alexander Refsum &amp; Saue, Sigurd
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Learning to Code Through Web Audio: A Team-Based Learning Approach.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of The Audio Engineering Society.
                </span>
                <span class="vrtx-issn">ISSN 1549-4950.</span>
                            68(10),
                <span class="vrtx-pages">p. 727–737.</span>
            doi: <a href="https://doi.org/10.17743/jaes.2020.0019">10.17743/jaes.2020.0019</a>.
            <a href="https://hdl.handle.net/10852/81723">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this article, we discuss the challenges and opportunities provided by teaching programming using web audio technologies and adopting a team-based learning (TBL) approach among a mix of colocated and remote students, mostly novices in programming. The course has been designed for cross-campus teaching and teamwork, in alignment with the two-city master&#39;s program in which it has been delivered. We present the results and findings from (1) students&#39; feedback; (2) software complexity metrics; (3) students&#39; blog posts; and (4) teacher&#39;s reflections. We found that the nature of web audio as a browser-based environment, coupled with the collaborative nature of the course, was suitable for improving the students&#39; level of confidence about their abilities in programming. This approach promoted the creation of group course projects of a certain level of complexity, based on the students&#39; interests and programming levels. We discuss the challenges of this approach, such as supporting smooth cross-campus interactions and assuring students&#39; preknowledge in web technologies (HTML, CSS, and JavaScript) for an optimal experience. We conclude by envisioning the scalability of this course to other distributed and remote learning scenarios in academic and professional settings. This is in line with the foreseen future scenario of cross-site interaction mediated through code.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1829246" class="vrtx-external-publication">
        <div id="vrtx-publication-1829246">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1829246">
                Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        RAW: Exploring Control Structures for Muscle-based Interaction in Collective Improvisation.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Michon, Romain &amp; Schroeder, Franziska (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Birmingham City University.
                </span>
                <span class="vrtx-issn">ISBN 9781949373998.</span>
                            
                <span class="vrtx-pages">p. 477–482.</span>
            
            <a href="https://hdl.handle.net/11250/3236993">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the ongoing process of developing RAW, a collaborative body–machine instrument that relies on `sculpting&#39; the sonification of raw EMG signals. The instrument is built around two Myo armbands located on the forearms of the performer. These are used to investigate muscle contraction, which is again used as the basis for the sonic interaction design. Using a practice-based approach, the aim is to explore the musical aesthetics of naturally occurring bioelectric signals. We are particularly interested in exploring the differences between processing at audio rate versus control rate, and how the level of detail in the signal––and the complexity of the mappings––influence the experience of control in the instrument. This is exemplified through reflections on four concerts in which RAW has been used in different types of collective improvisation. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1855188" class="vrtx-external-publication">
        <div id="vrtx-publication-1855188">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1855188">
                Yánez, Jorge Poveda &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computer-based synaesthesia and the design of complex methods to approach multimodal realities of dance and music through technology. An interview with Alexander R. Jensenius, Deputy-Director of the RITMO Centre of Excellence of the University of Oslo.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Revista de Humanidades Digitales.
                </span>
                            
            doi: <a href="https://doi.org/10.5944/rhd.vol.5.2020.27029">10.5944/rhd.vol.5.2020.27029</a>.
            <a href="https://hdl.handle.net/11250/3528000">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">After producing ground-breaking computer-based tools to advance the study of human movement, such as the video-visualization techniques contained in the Musical-Gestures Toolbox, Alexander Refsum Jensenius has continued to find more creative and analytical possibilities to intersect our understandings of music and dance. In the current context of technology-assisted misappropriation of traditional songs and dances, I interviewed the Deputy Director of the RITMO Centre on how we might revert the link between new technologies and intangible cultural heritage for the benefit of legitimate bearers.

Furthermore, in this interview, Alexander outlines the embodied and interdisciplinary approach towards music that has grounded the course of his career but even more interestingly, he offers insights about the future of experiencing dance through technology and the possibility of dancing robots.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1811356" class="vrtx-external-publication">
        <div id="vrtx-publication-1811356">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1811356">
                Støckert, Robin; Bergsland, Andreas; Fasciani, Stefano &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Student active learning in a two campus organisation.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        eLearning and Software for Education.
                </span>
                <span class="vrtx-issn">ISSN 2066-026X.</span>
                            1,
                <span class="vrtx-pages">p. 612–620.</span>
            doi: <a href="https://doi.org/10.12753/2066-026X-20-080">10.12753/2066-026X-20-080</a>.
            <a href="https://hdl.handle.net/11250/2677983">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Higher education is facing disruptive changes in many fields. Students want to have the option of learning anywhere, anytime and in any format. Universities need to develop and deliver to future students a complete learning ecosystem. At the same time universities are facing challenges such as growing costs and the pressure to give the students the knowledge, competence, skills and ability to continuously adapt to future job environments. As a consequence, many universities are investigating new ways of collaboration and sharing resources to cater to the demands of students, industry and society. An example of this collaboration is a new joint master between the two largest Universities in Norway: University of Oslo (UiO) and Norwegian University of Science and Technology (NTNU). In this paper, we present the lessons learned from almost two years of teaching and learning in the new joint master&#39;s programme, &quot;Music, Communication and Technology&quot; (MCT), between NTNU and UiO. This programme is run in a two-campus learning space built as a two-way, audio-visual, high-quality, low-latency communication channel between the two campuses, called &quot;The Portal&quot;. Moreover, MCT is the subject of research for the SALTO (Student Active Learning in a Two campus Organisation) project, where novel techniques in teaching and learning are explored, such as team-based learning (TBL), flipped classroom, and other forms of student active learning. Educational elements in this master provide the student with 21st century skills and deliver knowledge within humanities, entrepreneurship and technology. We elaborate on the technical, pedagogical and learning space-related challenges toward delivering teaching and learning in these cross-university settings. The paper concludes with a set of strategies that can be used to improve student active learning in different scenarios.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1817354" class="vrtx-external-publication">
        <div id="vrtx-publication-1817354">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1817354">
                Støckert, Robin; Jensenius, Alexander Refsum; Xambó, Anna &amp; Brandtsegg, Øyvind
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        A case study in learning spaces for physical-virtual two-campus interaction.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        European Journal of Higher Education IT – EJHEIT.
                </span>
                <span class="vrtx-issn">ISSN 2519-1764.</span>
                            1.
            
            <a href="https://hdl.handle.net/11250/2677545">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper we present results from our ongoing project Student Active Learning in a Two campus Organization (SALTO). This is funded as part of the Norwegian University of Science and Technology’s (NTNU) Teaching Excellence scheme. The initiative consists of a portfolio of development measures, with the purpose of developing innovative approaches to learning, teaching and assessment. The aim of SALTO is to develop pedagogical strategies for the two-campus master’s program Music, Communication and Technology (MCT). This is a joint program between NTNU and the University of Oslo, with the students being split between the cities of Trondheim and Oslo 500 km apart. The program is built around a shared physical-virtual space – the Portal – with a range of high-quality audiovisual technologies. The SALTO project focuses on how the Portal can be used for all activities in the program, with an emphasis on human-computer interaction, resource sharing and collaboration. This is done by students and teachers exploring educational, methodological, and technological solutions together. As such, the SALTO project uses the Portal as a &quot;living lab&quot;, which is constantly evolving and being optimized for student-active learning scenarios. In this paper, we present and discuss three cases from the first year of the project: (1) The MCT Opening Ceremony, (2) A &quot;Christmas concert&quot; between two upper secondary schools in Trondheim and Oslo, (3) An intensive workshop-based course with a mix of preparations, lectures and hands-on exercises. The three cases do in various ways present some of the challenges and possibilities of two-campus teaching.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1702145" class="vrtx-external-publication">
        <div id="vrtx-publication-1702145">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1702145">
                Lan, Qichao; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        RaveForce: A Deep Reinforcement Learning Environment for Music Generation.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Barbancho, Isabel; Tardón, Lorenzo J.; Peinado, Alberto &amp; Barbancho, Ana M. (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    SMC 2019 Proceedings of the 16th Sound &amp; Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Society for Sound and Music Computing.
                </span>
                <span class="vrtx-issn">ISBN 9788409085187.</span>
                            
                <span class="vrtx-pages">p. 217–222.</span>
            
            <a href="https://hdl.handle.net/11250/3570742">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1811555" class="vrtx-external-publication">
        <div id="vrtx-publication-1811555">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1811555">
                Diaz, Ximena Alarcón &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        “Ellos no están entendiendo nada” [“They are not understanding anything”]: embodied remembering as complex narrative in a Telematic Sonic Improvisation.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Søndergaard, Morten &amp; Beloff, Laura (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of RE:SOUND 2019.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=5CC42998-8DFE-4EE7-AEBA-B77E556AD9D3">British Computer Society (BCS)</a>.
                </span>
                            
            doi: <a href="https://doi.org/10.14236/ewic/resound19.32">10.14236/ewic/resound19.32</a>.
            <a href="https://hdl.handle.net/11250/3835162">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">INTIMAL is a physical-virtual system for relational listening, exploring the role of the body as interface that keeps memory of place in migratory contexts. The system is developed to integrate the body movements of performers (and their voices) with an oral archive. The system has been informed and tested by nine Colombian migrant women in Europe in a telematic performance between the cities of Oslo, Barcelona and London. In the performance a “complex narrative” emerged, for both the improvisers and the audiences. In this paper, we describe the conditions of the narrative environment, and the embodied expressions that emerged. We reflect on how this distributed embodied expression—through technological mediated sound and movement interactions—might further aid processes of collective remembering and catharsis, in a context of conflict and gendered migration. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1779604" class="vrtx-external-publication">
        <div id="vrtx-publication-1779604">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1779604">
                Lan, Qichao &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        QuaverSeries: A Live Coding Environment for Music Performance Using Web Technologies.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sedo, Anna Xambo; Martin, Sara R. &amp; Roma, Gerard (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Web Audio Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        NTNU.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/79544">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">QuaverSeries consists of a domain-specific language and a single-page web application for collaborative live coding in music performances. Its domain-specific language borrows principles from both programming and digital interface design in its syntax rules, and hence adopts the paradigm of functional programming. The collaborative environment features the concept of &#39;virtual rooms&#39;, in which performers can collaborate from different locations, and the audience can watch the collaboration at the same time. Not only is the code synchronised among all the performers and online audience connected to the server, but the code executing command is also broadcast. This communication strategy, achieved by the integration of the language design and the environment design, provides a new form of interaction for web-based live coding performances.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1779590" class="vrtx-external-publication">
        <div id="vrtx-publication-1779590">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1779590">
                Sedo, Anna Xambo; Støckert, Robin; Jensenius, Alexander Refsum &amp; Saue, Sigurd
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Facilitating Team-Based Programming Learning with Web Audio.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sedo, Anna Xambo; Martin, Sara R. &amp; Roma, Gerard (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Web Audio Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        NTNU.
                </span>
                            
                <span class="vrtx-pages">p. 2–7.</span>
            
            <a href="https://hdl.handle.net/11250/2647714">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we present a course of audio programming using web audio technologies addressed to an interdisciplinary group of master students who are mostly beginners in programming. This course is held in two connected university campuses through a portal space and the students are expected to work in cross-campus teams. The workshop promotes both individual and group work and is based on ideas from science, technology, engineering, arts and mathematics (STEAM), team-based learning and project-based learning. We show the outcomes of this course, discuss the students’ feedback and reflect on the results. We found that it is important to provide individual vs. group work, to use the same code editor for consistent follow-up and to be able to share the screen to solve individual questions. Other aspects inherent to the master (intensity of the courses, coding in a research-oriented program) and to prior knowledge (web technologies) should be reconsidered. We conclude with a wider reflection on the challenges and potentials of using web audio as a programming environment for beginners in STEAM and distance-learning courses.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1482429" class="vrtx-external-publication">
        <div id="vrtx-publication-1482429">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1482429">
                Solberg, Ragnhild Torvanger &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Group behaviour and interpersonal synchronization to electronic dance music.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Musicae Scientiae.
                </span>
                <span class="vrtx-issn">ISSN 1029-8649.</span>
                            23(1),
                <span class="vrtx-pages">p. 111–134.</span>
            doi: <a href="https://doi.org/10.1177/1029864917712345">10.1177/1029864917712345</a>.
            <a href="https://hdl.handle.net/10852/59846">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The present study investigates how people move and relate to each other – and to the dance music – in a club-like setting created within a motion capture laboratory. Three groups of participants (29 in total) each danced to a 10-minute-long DJ mix consisting of four tracks of electronic dance music (EDM). Two of the EDM tracks had little structural development, while the two others included a typical “break routine” in the middle of the track, consisting of three distinct passages: (a) “breakdown”, (b) “build-up” and (c) “drop”. The motion capture data show similar bodily responses for all three groups in the break routines: a sudden decrease and increase in the general quantity of motion. More specifically, the participants demonstrated an improved level of interpersonal synchronization after the drop, particularly in their vertical movements. Furthermore, the participants’ activity increased and became more pronounced after the drop. This may suggest that the temporal removal and reintroduction of a clear rhythmic framework, as well as the use of intensifying sound features, have a profound effect on a group’s beat synchronization. Our results further suggest that the musical passages of EDM efficiently lead to the entrainment of a whole group, and that a break routine effectively “re-energizes” the dancing.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1746855" class="vrtx-external-publication">
        <div id="vrtx-publication-1746855">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1746855">
                Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Analysis of the Movement-Inducing Effects of Music through the Fractality of Head Sway during Standstill.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Journal of Motor Behavior.
                </span>
                <span class="vrtx-issn">ISSN 0022-2895.</span>
                            
            doi: <a href="https://doi.org/10.1080/00222895.2019.1689909">10.1080/00222895.2019.1689909</a>.
            <a href="https://hdl.handle.net/11250/3782296">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The links between music and human movement have been shown to provide insight into crucial aspects of human’s perception, cognition, and sensorimotor systems. In this study, we examined the influence of music on movement during standstill, aiming at further characterizing the correspondences between movement, music, and perception, by analyzing head sway fractality. Eighty-seven participants were asked to stand as still as possible for 500 seconds while being presented with alternating silence and audio stimuli. The audio stimuli were all rhythmic in nature, ranging from a metronome track to complex electronic dance music. The head position of each participant was captured with an optical motion capture system. Long-range correlations of head movement were estimated by detrended fluctuation analysis (DFA). Results agree with previous work on the movement-inducing effect of music, showing significantly greater head sway and lower head sway fractality during the music stimuli. In addition, patterns across stimuli suggest a two-way adaptation process to the effects of music, with musical stimuli influencing head sway while at the same time fractality modulated movement responses. Results indicate that fluctuations in head movement in both conditions exhibit long-range correlations, suggesting that the effects of music on head movement depended not only on the value of the most recent measured intervals, but also on the values of those intervals at distant times.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1719298" class="vrtx-external-publication">
        <div id="vrtx-publication-1719298">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1719298">
                Sedo, Anna Xambo; Saue, Sigurd; Jensenius, Alexander Refsum; Støckert, Robin &amp; Brandtsegg, Øyvind
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        NIME Prototyping in Teams: A Participatory Approach to Teaching Physical Computing.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Queiroz, Marcelo &amp; Sedo, Anna Xambo (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidade Federal do Rio Grande do Sul.
                </span>
                            
                <span class="vrtx-pages">p. 216–221.</span>
            
            <a href="https://hdl.handle.net/10852/74082">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we present a workshop of physical computing applied to NIME design based on science, technology, engineering, arts, and mathematics (STEAM) education. The workshop is designed for master students with multidisciplinary backgrounds. They are encouraged to work in
teams from two university campuses remotely connected through a portal space. The components of the workshop are prototyping, music improvisation and reflective practice. We report the results of this course, which show a positive impact on the students’ confidence in prototyping and intention to continue in STEM fields. We also present the challenges and lessons learned on how to improve the teaching of hybrid technologies and programming skills in an interdisciplinary context across two locations, with the aim of satisfying both beginners and experts. We conclude with a broader discussion on how these new pedagogical perspectives can improve NIME-related courses.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1746871" class="vrtx-external-publication">
        <div id="vrtx-publication-1746871">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1746871">
                Becker, Artur; Herrebråden, Henrik; Sanchez, Victor Evaristo Gonzalez; Nymoen, Kristian; Freitas, Carla Maria Dal Sasso &amp; Tørresen, Jim
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1746871/contributors', 'vrtx-publication-contributors-1746871')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Functional Data Analysis of Rowing Technique Using Motion Capture Data.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Coleman, Grisha (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 6th International Conference on Movement and Computing.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        ACM Publications.
                </span>
                <span class="vrtx-issn">ISBN 9781450376549.</span>
                            
            doi: <a href="https://doi.org/10.1145/3347122.3347135">10.1145/3347122.3347135</a>.
            <a href="https://hdl.handle.net/11250/5035739">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We present an approach to analyzing the motion capture data of rowers using bivariate functional principal component analysis (bfPCA). The method has been applied on data from six elite rowers rowing on an ergometer. The analyses of the upper and lower body coordination during the rowing cycle revealed significant differ- ences between the rowers, even though the data was normalized to account for differences in body dimensions. We make an argument for the use of bfPCA and other functional data analysis methods for the quantitative evaluation and description of technique in sports.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1703868" class="vrtx-external-publication">
        <div id="vrtx-publication-1703868">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1703868">
                Erdem, Cagri; Schia, Katja Henriksen &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vrengt: A Shared Body–Machine Instrument for Music–Dance Performance.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Visi, Federico (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Music Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidade Federal do Rio Grande do Sul.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4027580">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What if a musician could step outside the familiar instrumental paradigm and adopt a new embodied language for moving through sound with a dancer in true partnership? And what if a dancer’s body could coalesce with a musician’s skills and intuitively render movements into instrumental actions for active sound-making?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1703859" class="vrtx-external-publication">
        <div id="vrtx-publication-1703859">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1703859">
                Erdem, Cagri; Schia, Katja Henriksen &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vrengt: A Shared Body–Machine Instrument for Music–Dance Performance.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Queiroz, Marcelo &amp; Sedo, Anna Xambo (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Universidade Federal do Rio Grande do Sul.
                </span>
                            
                <span class="vrtx-pages">p. 477–482.</span>
            
            <a href="https://hdl.handle.net/10852/68514">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the process of developing a shared instrument for music–dance performance, with a particular focus on exploring the boundaries between standstill vs motion, and silence vs sound. The piece Vrengt grew from the idea of enabling a true partnership between a musician and a dancer, developing an instrument that would allow for active co-performance. Using a participatory design approach,  we worked with sonification as a tool for systematically exploring the dancer’s bodily expressions. The exploration used a “spatiotemporal matrix,” with a particular focus on sonic microinteraction. In the final performance, two Myo armbands were used for capturing muscle activity of the arm and leg of the dancer, together with a wireless headset microphone capturing the sound of breathing. In the paper we reflect on multi-user instrument paradigms, discuss our approach to creating a shared instrument using sonification as a tool for the sound design, and reflect on the performers’ subjective evaluation of the instrument.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1602427" class="vrtx-external-publication">
        <div id="vrtx-publication-1602427">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1602427">
                Patel-Grosz, Pritty; Grosz, Patrick Georg; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Coreference and disjoint reference in the semantics of narrative dance.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Sauerland, Uli &amp; Solt, Stephanie (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of Sinn und Bedeutung 22, vol. 2, ZASPiL 61.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Leibniz-Zentrum Allgemeine Sprachwissenschaft (ZAS).
                </span>
                            
                <span class="vrtx-pages">p. 199–216.</span>
            
            <a href="https://hdl.handle.net/11250/4482602">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1625573" class="vrtx-external-publication">
        <div id="vrtx-publication-1625573">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1625573">
                Kelkar, Tejaswinee; Roy, Udit &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Evaluating a collection of Sound-Tracing Data of Melodic Phrases.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Gómez, Emilia; Hu, Xiao; Humphrey, Eric &amp; Benetos, Emmanouil (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 19th International Society for Music Information Retrieval Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=52AA3F19-8D28-4B2B-A62E-F799D66D4D84">Institut de Recherche et Coordination Acoustique/Musique</a>.
                </span>
                <span class="vrtx-issn">ISBN 9782954035123.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5204864">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Melodic contour, the ‘shape’ of a melody, is a common way to visualize and remember a musical piece. The purpose of this paper is to explore the building blocks of a future ‘gesture-based’ melody retrieval system. We present a dataset containing 16 melodic phrases from four musical styles and with a large range of contour variability. This is accompanied by full-body motion capture data of 26 participants performing sound-tracing to the melodies. The dataset is analyzed using canonical correlation analysis (CCA), and its neural network variant (Deep CCA), to understand how melodic contours and sound tracings relate to each other. The analyses reveal non-linear relationships between sound and motion. The link between pitch and verticality does not appear strong enough for complex melodies. We also find that descending melodic contours have the least correlation with tracing.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1600557" class="vrtx-external-publication">
        <div id="vrtx-publication-1600557">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1600557">
                Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Correspondences Between Music and Involuntary Human Micromotion During Standstill.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Frontiers in Psychology.
                </span>
                            9.
            doi: <a href="https://doi.org/10.3389/fpsyg.2018.01382">10.3389/fpsyg.2018.01382</a>.
            <a href="https://hdl.handle.net/11250/4817202">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The relationships between human body motion and music have been the focus of several studies characterizing the correspondence between voluntary motion and various sound features. The study of involuntary movement to music, however, is still scarce. Insight into crucial aspects of music cognition, as well as characterization of the vestibular and sensorimotor systems could be largely improved through a description of the underlying links between music and involuntary movement. This study presents an analysis aimed at quantifying involuntary body motion of a small magnitude (micromotion) during standstill, as well as assessing the correspondences between such micromotion and different sound features of the musical stimuli: pulse clarity, amplitude, and spectral centroid. A total of 71 participants were asked to stand as still as possible for 6 min while being presented with alternating silence and music stimuli: Electronic Dance Music (EDM), Classical Indian music, and Norwegian fiddle music (Telespringar). The motion of each participant&#39;s head was captured with a marker-based, infrared optical system. Differences in instantaneous position data were computed for each participant and the resulting time series were analyzed through cross-correlation to evaluate the delay between motion and musical features. The mean quantity of motion (QoM) was found to be highest across participants during the EDM condition. This musical genre is based on a clear pulse and rhythmic pattern, and it was also shown that pulse clarity was the metric that had the most significant effect in induced vertical motion across conditions. Correspondences were also found between motion and both brightness and loudness, providing some evidence of anticipation and reaction to the music. 
Overall, the proposed analysis techniques provide quantitative data and metrics on the correspondences between micromotion and music, with the EDM stimulus producing the clearest music-induced motion patterns. The analysis and results from this study are compatible with embodied music cognition and sensorimotor synchronization theories, and provide further evidence of the movement inducing effects of groove-related music features and human response to sound stimuli. Further work with larger data sets, and a wider range of stimuli, is necessary to produce conclusive findings on the subject.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1546901" class="vrtx-external-publication">
        <div id="vrtx-publication-1546901">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1546901">
                Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Analyzing Free-Hand Sound-Tracings of Melodic Phrases.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Applied Sciences.
                </span>
                            8(1),
                <span class="vrtx-pages">p. 1–21.</span>
            doi: <a href="https://doi.org/10.3390/app8010135">10.3390/app8010135</a>.
            <a href="https://hdl.handle.net/10852/61494">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we report on a free-hand motion capture study in which 32 participants ‘traced’ 16 melodic vocal phrases with their hands in the air in two experimental conditions. Melodic contours are often thought of as correlated with vertical movement (up and down) in time, and this was also our initial expectation. We did find an arch shape for most of the tracings, although this did not correspond directly to the melodic contours. Furthermore, representation of pitch in the vertical dimension was but one of a diverse range of movement strategies used to trace the melodies. Six different mapping strategies were observed, and these strategies have been quantified and statistically tested. The conclusion is that metaphorical representation is much more common than a ‘graph-like’ rendering for such a melodic sound-tracing task. Other findings include a clear gender difference for some of the tracing strategies and an unexpected representation of melodies in terms of a small object for some of the Hindustani music examples. The data also show a tendency of participants moving within a shared ‘social box’.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1597339" class="vrtx-external-publication">
        <div id="vrtx-publication-1597339">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1597339">
                Serafin, Stefania; Dahl, Sofia; Bresin, Roberto; Jensenius, Alexander Refsum; Unnthorsson, Runar &amp; Välimäki, Vesa
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        NordicSMC: A Nordic University Hub on Sound and Music Computing.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Georgaki, Anastasia (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Sound and Music Computing Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=C6CA5F3C-4AC9-402A-97D2-F4558F441352">Cyprus University of Technology</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3847215">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Sound and music computing (SMC) is still an emerging field in many institutions, and the challenge is often to gain critical mass for developing study programs and undertake more ambitious research projects. We report on how a long-term collaboration between small and medium-sized SMC groups have led to an ambitious undertaking in the form of the Nordic Sound and Music Computing Network (NordicSMC), funded by the Nordic Research Council and institutions from all of the five Nordic countries (Denmark, Finland, Iceland, Norway, and Sweden). The constellation is unique in that it covers the field of sound and music from the “soft” to the “hard,” including the arts and humanities, the social and natural sciences, and engineering. This paper describes the goals, activities, and expected results of the network, with the aim of inspiring the creation of other joint efforts within the SMC community.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1614667" class="vrtx-external-publication">
        <div id="vrtx-publication-1614667">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1614667">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Musical Gestures Toolbox for Matlab.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Gómez, Emilia; Hu, Xiao; Humphrey, Eric &amp; Benetos, Emmanouil (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 19th International Society for Music Information Retrieval Conference.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=52AA3F19-8D28-4B2B-A62E-F799D66D4D84">Institut de Recherche et Coordination Acoustique/Musique</a>.
                </span>
                <span class="vrtx-issn">ISBN 9782954035123.</span>
                            
            
            <a href="https://hdl.handle.net/11250/4359939">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The Musical Gestures Toolbox for Matlab (MGT) aims at
assisting music researchers with importing, preprocessing,
analyzing, and visualizing video, audio, and motion capture data in a coherent manner within Matlab.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1656101" class="vrtx-external-publication">
        <div id="vrtx-publication-1656101">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1656101">
                Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Muscle activity response of the audience during an experimental music performance.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Cunningham, Stuart (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the Audio Mostly 2018 on Sound in Immersion and Emotion.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=517D4F8F-AF83-4062-82FA-254E8A87D7D8">Association for Computing Machinery (ACM)</a>.
                </span>
                <span class="vrtx-issn">ISBN 9781450353731.</span>
                            
            doi: <a href="https://doi.org/10.1145/3243274.3243278">10.1145/3243274.3243278</a>.
            <a href="https://hdl.handle.net/10852/85796">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This exploratory study investigates muscular activity characteristics of a group of audience members during an experimental music performance. The study was designed to be as ecologically valid as possible, collecting data in a concert venue and making use of low- invasive measurement techniques. Muscle activity (EMG) from the forearms of 8 participants revealed that sitting in a group could be an indication of a level of group engagement, while comparatively greater muscular activity from a participant sitting at close distance to the stage suggests performance-induced bodily responses. The self-reported measures rendered little evidence supporting the links between muscular activity and live music exposure, although a larger sample size and a wider range of music styles need to be included in future studies to provide conclusive results.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1511538" class="vrtx-external-publication">
        <div id="vrtx-publication-1511538">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1511538">
                Adde, Lars; Yang, Hong; Sæther, Rannei; Jensenius, Alexander Refsum; Ihlen, Espen Alexander F. &amp; Cao, Jia-yan
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1511538/contributors', 'vrtx-publication-contributors-1511538')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Characteristics of general movements in preterm infants assessed by computer-based video analysis.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-ARTICLE">
                        Physiotherapy Theory and Practice.
                </span>
                <span class="vrtx-issn">ISSN 0959-3985.</span>
                            34(4),
                <span class="vrtx-pages">p. 286–292.</span>
            doi: <a href="https://doi.org/10.1080/09593985.2017.1391908">10.1080/09593985.2017.1391908</a>.
            <a href="https://hdl.handle.net/11250/2480721">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Background: Previous evidence suggests that the variability of the spatial center of infant movements, calculated by computer-based video analysis software, can identify fidgety general movements (GMs) and predict cerebral palsy. Aim: To evaluate whether computer-based video analysis quantifies specific characteristics of normal fidgety movements as opposed to writhing general movements. Methods: A longitudinal study design was applied. Twenty-seven low-to moderate-risk preterm infants (20 boys, 7 girls; mean gestational age 32 [SD 2.7, range 27–36] weeks, mean birth weight 1790 grams [SD 430g, range 1185–2700g]) were videotaped at the ages of 3–5 weeks (period of writhing GMs) and 10–15 weeks (period of fidgety GMs) post term. GMs were classified according to Prechtl’s general movement assessment method (GMA) and by computer-based video analysis. The variability of the centroid of motion (CSD), derived from differences between subsequent video frames, was calculated by means of computer-based video analysis software; group mean differences between GM periods were reported. Results: The mean variability of the centroid of motion (CSD) determined by computer-based video analysis was 7.5% lower during the period of fidgety GMs than during the period of writhing GMs (p = 0.004). Conclusion: Our findings support that the variability of the centroid of motion reflects small and variable movements evenly distributed across the body, and hence shows that computer-based video analysis qualifies for assessment of direction and amplitude of FMs in young infants.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593443" class="vrtx-external-publication">
        <div id="vrtx-publication-1593443">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593443">
                Martin, Charles Patrick; Jensenius, Alexander Refsum &amp; Tørresen, Jim
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Composing an ensemble standstill work for Myo and Bela.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Dahl, Luke; Bowman, Doug &amp; Martin, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference On New Interfaces For Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Virginia Tech.
                </span>
                            
                <span class="vrtx-pages">p. 196–197.</span>
            
            <a href="https://hdl.handle.net/10852/65557">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the process of developing a standstill performance work using the Myo gesture control armband and the Bela embedded computing platform. The combination of Myo and Bela allows a portable and extensible version of the standstill performance concept while introducing muscle tension as an additional control parameter. We describe the technical details of our setup and introduce Myo-to-Bela and Myo-to-OSC software bridges that assist with prototyping compositions using the Myo controller.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593440" class="vrtx-external-publication">
        <div id="vrtx-publication-1593440">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593440">
                Sanchez, Victor Evaristo Gonzalez; Martin, Charles Patrick; Zelechowska, Agata; Bjerkestrand, Kari Anne Vadstensvik; Johnson, Victoria Kristine Å &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bela-based augmented acoustic guitars for sonic microinteraction.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Dahl, Luke; Bowman, Doug &amp; Martin, Tom (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference On New Interfaces For Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Virginia Tech.
                </span>
                            
                <span class="vrtx-pages">p. 324–327.</span>
            
            <a href="https://hdl.handle.net/10852/65556">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This article describes the design and construction of a collection of digitally-controlled augmented acoustic guitars, and the use of these guitars in the installation <em>Sverm-Resonans</em>. The installation was built around the idea of exploring ‘inverse’ sonic microinteraction, that is, controlling sounds by the micromotion observed when attempting to stand still. It consisted of six acoustic guitars, each equipped with a Bela embedded computer for sound processing (in Pure Data), an infrared distance sensor to detect the presence of users, and an actuator attached to the guitar body to produce sound. With an attached battery pack, the result was a set of completely autonomous instruments that were easy to hang in a gallery space. The installation encouraged explorations on the boundary between the tactile and the kinesthetic, the body and the mind, and between motion and sound. The use of guitars, albeit with an untraditional ‘performance’ technique, made the experience both familiar and unfamiliar at the same time. Many users reported heightened sensations of stillness, sound, and vibration, and that the ‘inverse’ control of the instrument was both challenging and pleasant.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1575330" class="vrtx-external-publication">
        <div id="vrtx-publication-1575330">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1575330">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Methods for Studying Music-Related Body Motion.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Bader, Rolf (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Springer Handbook of Systematic Musicology.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-issn">ISSN 9783662550021.</span>
                            
                <span class="vrtx-pages">p. 805–818.</span>
            doi: <a href="https://doi.org/10.1007/978-3-662-55004-5_38">10.1007/978-3-662-55004-5_38</a>.
            <a href="https://hdl.handle.net/11250/5172438">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This chapter presents an overview of some methodological approaches and technologies that can be used in the study of music-related body motion. The aim is not to cover all possible approaches, but rather to highlight some of the ones that are more relevant from a musicological point of view. This includes methods for video-based and sensor-based motion analyses, both qualitative and quantitative. It also includes discussions of the strengths and weaknesses of the different methods, and reflections on how the methods can be used in connection to other data in question, such as physiological or neurological data, symbolic notation, sound recordings and contextual data.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1632364" class="vrtx-external-publication">
        <div id="vrtx-publication-1632364">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1632364">
                Lartillot, Olivier; Thedens, Hans-Hinrich &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational model of pitch detection, perceptive foundations, and application to Norwegian fiddle music.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Parncutt, Richard &amp; Sattmann, Sabrina (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of ICMPC15/ESCOM10.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Centre for Systematic Musicology, University of Graz.
                </span>
                <span class="vrtx-issn">ISSN 9783200057715.</span>
                            
                <span class="vrtx-pages">p. 252–255.</span>
            
            <a href="https://hdl.handle.net/10852/71867">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Background
Automated detection of pitch in polyphonic music remains a difficult challenge (Benetos et al., 2013). Robust solutions can be found for simple cases such as monodies. Implementation of perceptive/cognitive models have been so far less successful than engineering methods, and in particular machine learning models. One reference model (Klapuri, 2006) preselects pitch candidates based on harmonic summation and searches for multiple pitches through cancellation.
Aims
The aim was to conceive a model for pitch detection in polyphonic music able to transcribe in detail traditional Norwegian music played on Hardanger fiddle, where more than two strings are played at the same time. The new model should be applicable to other types of music as well. Perceptive and cognitive models should guide the improvement of the state of the art.
Main Contribution
The model is neither based on a machine-learning training on a given set of samples, nor explicitly relying on stylistic rules. Instead, the methodology consists in conceiving a set of rules as simple and general as possible while offering satisfying results for the chosen corpus of music. We follow some general principles of the model by (Klapuri 2006) while introducing new heuristics. We present a new method for harmonic summation that penalises harmonic series that are sparse, in particular when odd partials are absent, as it would indicate that the actual harmonic series is a multiple of the given pitch candidate. Besides, a multiple of a fundamental can be selected as pitch in addition to the fundamental itself if its attack phase is sufficiently distinctive. For that purpose, we introduce a concept of pitch percept that persists over the whole extent of the note, and that serves as a reference for the detection of higher pitches at harmonic intervals.
Results
The proposed method enables us to obtain transcriptions of relatively good quality, with a low ratio of false positives and false negatives. The construction of the model is under refinement. We are applying this method to the analysis of recordings of Norwegian folk music, containing a large part of Hardanger fiddle pieces and a cappella singing.
Implications
Automated transcription is of high interest for musicology and music information retrieval. This enables for instance to build large corpora of scores for music analysis and opens new perspectives for computational musicology. By attempting to design computer models based on general rules as simple as possible rather than on machine learning, while resulting in a behaviour in terms of pitch detection that comes closer to human capabilities, we hypothesise that the underlying mechanisms thus modelled might suggest general computational capabilities that could be found in cognitive models as well. At the same time, an improvement of the model based on expertise in music perception and cognition is desired.
References
Benetos et al. (2013). Automatic music transcription: challenges and future directions. Journal of Intelligent Information Systems, 41, 407-434
Klapuri, Multiple Fundamental Frequency Estimation by Summing Harmonic Amplitudes. ISMIR 2006 Keywords: pitch, computational model, harmonic summation, Norwegian folk music, Hardanger fiddle.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1473450" class="vrtx-external-publication">
        <div id="vrtx-publication-1473450">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1473450">
                Jensenius, Alexander Refsum; Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Bjerkestrand, Kari Anne Vadstensvik
            </span>(2017).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring the Myo controller for sonic microinteraction.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Erkut, Cumhur (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the International Conference on New Interfaces for Musical Expression.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Aalborg University Copenhagen.
                </span>
                            
                <span class="vrtx-pages">p. 442–445.</span>
            
            <a href="https://hdl.handle.net/10852/55676">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores sonic microinteraction using muscle sensing through the Myo armband. The first part presents results from a small series of experiments aimed at finding the baseline micromotion and muscle activation data of people
being at rest or performing short/small actions. The second part presents the prototype instrument MicroMyo, built around the concept of making sound with little motion. The instrument plays with the convention that inputting more energy into an instrument results in more sound. MicroMyo, on the other hand, is built so that the less you move, the more it sounds. Our user study shows that while such an &quot;inverse instrument&quot; may seem puzzling at first, it also opens a space for interesting musical interactions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1487108" class="vrtx-external-publication">
        <div id="vrtx-publication-1487108">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1487108">
                Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Representation Strategies in Two-handed Melodic Sound-Tracing.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Gillies, Marco (Eds.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 4th International Conference on Movement Computing.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        ACM Publications.
                </span>
                <span class="vrtx-issn">ISSN 9781450352093.</span>
                            
            doi: <a href="https://doi.org/10.1145/3077981.3078050">10.1145/3077981.3078050</a>.
            <a href="https://hdl.handle.net/11250/5120175">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes an experiment in which the subjects performed a sound-tracing task to vocal melodies. They could move freely in the air with two hands, and their motion was captured using an infrared, marker-based system. We present a typology of distinct strategies used by the recruited participants to represent their perception of the melodies. These strategies appear as ways to represent time and space through the finite motion possibilities of two hands moving freely in space. We observe these strategies and present their typology through qualitative analysis. Then we numerically verify the consistency of these strategies by conducting tests of significance between labeled and random samples.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1487103" class="vrtx-external-publication">
        <div id="vrtx-publication-1487103">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1487103">
                Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-articlesAndBookChapters">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring melody and motion features in “sound-tracings”.
                </span>
                    <span class="vrtx-parent-contributors">
                            In Lokki, Tapio; Pätynen, Jukka &amp; Välimäki, Vesa (Ed.),
                    </span>
                <span class="vrtx-parent-title parent-title-articlesAndBookChapters">
                    Proceedings of the 14th Sound and Music Computing Conference 2017.
                </span>
                <span class="vrtx-publisher publisher-articlesAndBookChapters publisher-category-CHAPTERACADEMIC">
                        Aalto University.
                </span>
                <span class="vrtx-issn">ISSN 9789526037295.</span>
                            
                <span class="vrtx-pages">p. 98–103.</span>
            
            <a href="https://hdl.handle.net/11250/4570583">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Pitch and spatial height are often associated when describing music. In this paper we present results from a sound tracing study in which we investigate such sound–motion relationships. The subjects were asked to move as if they were creating the melodies they heard, and their motion was captured with an infra-red, marker-based camera system. The analysis is focused on calculating feature vectors typically used for melodic contour analysis. We use these features to compare melodic contour typologies with motion contour typologies. This is based on using proposed feature sets that were made for melodic contour similarity measurement. We apply these features to both the melodies and the motion contours to establish whether there is a correspondence between the two, and find the features that match the most. We find a relationship between vertical motion and pitch contour when evaluated through features rather than simply comparing contours.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1328">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-3">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-2269536" class="vrtx-external-publication">
        <div id="vrtx-publication-2269536">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2269536">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sonic Design: Explorations Between Art and Science.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-ANTHOLOGYACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=DC752087-7122-4D3A-9E4F-382AA2F39D2C">Springer Nature</a>.
                </span>
                <span class="vrtx-isbn">ISBN 9783031578922.</span>
            
                <span class="vrtx-pages">347 p.</span>
            doi: <a href="https://doi.org/10.1007/978-3-031-57892-2">10.1007/978-3-031-57892-2</a>.
            <a href="https://hdl.handle.net/11250/4377830">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This edited volume is based on a selection of contributions at an international seminar organized in May 2022 to celebrate the achievements of Professor Godøy upon his retirement from the University of Oslo. The 17 chapters cover different approaches to sonic design practice and theory, giving readers historical backdrops and an overview of the current state of both artistic and scientific research in the field.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2046464" class="vrtx-external-publication">
        <div id="vrtx-publication-2046464">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2046464">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-books">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: Conceptualizing Musical Instruments.
                </span>
                <span class="vrtx-publisher publisher-books publisher-category-MONOGRAPHACA">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=787501B7-4C33-4FC8-8689-95E5449219EC">MIT Press</a>.
                </span>
                <span class="vrtx-isbn">ISBN 9780262544634.</span>
            
                <span class="vrtx-pages">304 p.</span>
            doi: <a href="https://doi.org/10.7551/mitpress/14220.001.0001">10.7551/mitpress/14220.001.0001</a>.
            <a href="https://hdl.handle.net/10852/98282">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">

A techno-cognitive look at how new technologies are shaping the future of musicking.

“Musicking” encapsulates both the making of and perception of music, so it includes both active and passive forms of musical engagement. But at its core, it is a relationship between actions and sounds, between human bodies and musical instruments. Viewing musicking through this lens and drawing on music cognition and music technology, Sound Actions proposes a model for understanding differences between traditional acoustic “sound makers” and new electro-acoustic “music makers.”

What is a musical instrument? How do new technologies change how we perform and perceive music? What happens when composers build instruments, performers write code, perceivers become producers, and instruments play themselves? The answers to these pivotal questions entail a meeting point between interactive music technology and embodied music cognition, what author Alexander Refsum Jensenius calls “embodied music technology.” Moving between objective description and subjective narrative of his own musical experiences, Jensenius explores why music makes people move, how the human body can be used in musical interaction, and how new technologies allow for active musical experiences. The development of new music technologies, he demonstrates, has fundamentally changed how music is performed and perceived.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1328">View all works in NVA</a></p>
    </div>

    <div id="vrtx-publication-tab-4">
  <ul class="vrtx-external-publications">

      <li id="vrtx-external-publication-10430006" class="vrtx-external-publication">
        <div id="vrtx-publication-10430006">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10430006">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Creativity Between Art and Science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5506021">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">First, I will present MishMash Centre for AI and Creativity, a new Norwegian research centre advancing AI to explore and shape human-machine creativity. Funded by the Research Council of Norway in 2025, the centre brings together artists, engineers, and scholars across disciplines to create, explore, and reflect on AI systems that augment creative practice while foregrounding agency, inclusion, and sustainability. Organized into seven work packages, the centre addresses technical challenges such as real-time multi-agent systems and hybrid symbolic learning methods, societal concerns including bias, copyright, and equitable value distribution, and applied domains spanning health, education, cultural heritage, and the creative industries. Second, I will present some of my own ongoing explorations into embodied AI, including inverse control, small language models, and distributed computing. This approach challenges current monolithic, generative models, suggesting an alternative future for creative AI.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10429576" class="vrtx-external-publication">
        <div id="vrtx-publication-10429576">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10429576">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstig kreativitet?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5505592">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Trenger vi egentlig musikere, forfattere og filmprodusenter nå som KI kan gjøre jobben? Vi tar diskusjonen om hvordan KI påvirker kreativ næring og kunstneriske prosesser.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10429282" class="vrtx-external-publication">
        <div id="vrtx-publication-10429282">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10429282">
                Brundell, Cathrine Th. &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Eneste KI-senter som kobler teknologi og kreativitet offisielt ?pnet.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5505289">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Mye optimisme og litt bekymring preget åpningen av MishMash – senter for kunstig intelligens og kreativitet.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428593" class="vrtx-external-publication">
        <div id="vrtx-publication-10428593">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428593">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Velkommen til festival!                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504614">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Det hjelper ikke med «sånn gjør vi det her»-argumentasjon hvis vi skal få til reelt samarbeid på tvers av institusjoner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428095" class="vrtx-external-publication">
        <div id="vrtx-publication-10428095">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428095">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        &quot;Soft&quot; and &quot;Hard&quot; research? Experiences from running a radically interdisciplinary research centre.
                </span>
                            
            <a href="https://www.uc.pt/ceis20/conferencias/alexander-jensenius-eixos-do-conhecimento-interdisciplinar/">https://www.uc.pt/ceis20/conferencias/alexander-jensenius-eixos-do-conhecimento-interdisciplinar/</a>.
            <a href="https://hdl.handle.net/11250/5504143">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this presentation, Alexander Refsum Jensenius explores the practical and theoretical dimensions of running radically interdisciplinary research centres, focusing on his experiences with RITMO and the newly established MISHMASH Centre for AI and Creativity. He introduces the &quot;coffee machine&quot; philosophy, which advocates for physical colocation and social meeting points as essential tools to overcome institutional silos and bridge the diverse research motivations of fields like musicology, informatics, and psychology. Jensenius highlights the innovative potential of this approach through projects that translate artistic research into medical applications, such as using dance analysis software to screen infants for cerebral palsy and investigating the impact of musical stimuli on biological cells. He further details the MusicLab initiative, which scales data collection to full symphony orchestras to study embodied music cognition and human behaviour in real-life concert settings. The talk concludes by introducing MISHMASH, a national consortium dedicated to fostering human-centric AI that integrates artistic practice with technological development while navigating ethical challenges such as copyright and the preservation of cultural heritage.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428085" class="vrtx-external-publication">
        <div id="vrtx-publication-10428085">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428085">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva er &quot;flytsonen&quot;?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504131">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">&quot;Kan kunst skape seg selv?&quot;, sp?r Abels T?rnpanel fra Kunsth?gskolen i Oslo (KHIO).
Send inn dine sp?rsm?l til v?re eksperter!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428084" class="vrtx-external-publication">
        <div id="vrtx-publication-10428084">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428084">
                Jensenius, Alexander Refsum &amp; Sørum, Tuva Marie
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nesten alle lever i sin egen stille boble.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504130">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi s?ker stillhet som aldri f?r, og hele 95 prosent av hodetelefonene som selges er st?yreduserende.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10427622" class="vrtx-external-publication">
        <div id="vrtx-publication-10427622">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10427622">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nesten alle hodetelefoner er støyreduserende.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5503753">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Innslag p? NRK nyhetsmorgen</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10417820" class="vrtx-external-publication">
        <div id="vrtx-publication-10417820">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10417820">
                Urke, Erling Zahl; Vrasdonk, Atilla Juliana &amp; Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Flamenco og videoer i RITMO sin årsrapport
       - LINK – Senter for læring og utdanning.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        UiO.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5483410">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvert ?r produserer LINK videoer som presenterer forskning gjort ved RITMO - Senter for tverrfaglig forskning p? rytme, tid og bevegelse. Fem nye videoer er ? finne i den ferske ?rsrapporten deres.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10416566" class="vrtx-external-publication">
        <div id="vrtx-publication-10416566">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10416566">
                Jensenius, Alexander Refsum &amp; Lindahl, Nikoline Riis
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ekspert om støydemping-trenden: – Vi lever i parallelle verdener.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Aftenposten.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5482266">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">St?ydemping er blitt normalen. Sp?rsm?let er hva som skjer n?r fellesskapets lyd forsvinner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10324090" class="vrtx-external-publication">
        <div id="vrtx-publication-10324090">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10324090">
                Jensenius, Alexander Refsum
            </span>(2026).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tverrfaglighet gjør litt vondt.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-READEROPINION">
                        Forskerforum.
                </span>
                <span class="vrtx-issn">ISSN 0800-1715.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5350155">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvordan f?r man til tverrfaglig forskning i praksis? Mitt tips: Heng ved kaffemaskinen.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428198" class="vrtx-external-publication">
        <div id="vrtx-publication-10428198">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428198">
                Ho?en, Leif Henrik &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Eierskap til KI.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        ArtScene Trondheim.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504213">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Et stort, nytt forskningssenter er bevilget 173 millioner. Det skal unders?ke skj?ringspunktet mellom kunstig intelligens (KI) og kreativitet, blant annet ved ? se p? bruk av KI i kunstneriske prosesser.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10428086" class="vrtx-external-publication">
        <div id="vrtx-publication-10428086">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10428086">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Permeating Art &amp; Science Collaboration.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5504132">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Panel discussion with Robertina ?ebjani?, Anetta Mona Chi?a, Alexander Refsum Jensenius. Moderated by Benedetta D&#39;Ettorre.

This panel examines the boundary zones where artistic and scientific approaches intersect, entangle, and permeate into one another. Bringing together practitioners working across more-than-human ecologies, technological imaginaries, and embodied research, the conversation will explore how meaningful collaboration can emerge from inter- and trans-disciplinary exchange.

Rather than framing art and science as opposites, we ask how their methods can become mutually generative; how artistic mindsets can expand scientific inquiry, and how scientific perspectives can deepen artistic experimentation. The session aims to discuss questions related to collaborative ethics, shared vocabularies, and the value of embracing “noise” as a catalyst for new forms of knowledge-making.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10346958" class="vrtx-external-publication">
        <div id="vrtx-publication-10346958">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10346958">
                Blekkerud, Martin; Jensenius, Alexander Refsum &amp; Schau, Kristopher
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kristopher Schau møter forskere: Dette gjør rytme med kroppen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5369677">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er det som egentlig skjer med oss n?r vi h?rer p? musikk? Hvordan oppfatter kroppen rytme og stillhet? Musikk kan hjelpe oss ? forst? hvordan vi oppfatter tid og rom, if?lge norsk forsker.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10346953" class="vrtx-external-publication">
        <div id="vrtx-publication-10346953">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10346953">
                Jære, Lisbet &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Verdens største musikkeksperiment viste at publikum holdt pusten samtidig.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Forskning.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5369674">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Publikum holder pusten samtidig, og n?r musikken blir emosjonell sitter alle helt stille sammen. Det avsl?rer de f?rste funnene fra verdens st?rste musikkeksperiment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10341413" class="vrtx-external-publication">
        <div id="vrtx-publication-10341413">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10341413">
                Jensenius, Alexander Refsum &amp; Høffding, Simon
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mærk musikken.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        DR P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5364463">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10341411" class="vrtx-external-publication">
        <div id="vrtx-publication-10341411">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10341411">
                Jensenius, Alexander Refsum &amp; Høffding, Simon
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Preliminary results from symphonic concerts.
                </span>
                            
            <a href="https://www.aarhussymfoni.dk/koncert/maerk-musikken/">https://www.aarhussymfoni.dk/koncert/maerk-musikken/</a>.
            <a href="https://hdl.handle.net/11250/5364461">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10298999" class="vrtx-external-publication">
        <div id="vrtx-publication-10298999">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298999">
                Christodoulou, Anna-Maria; Arnim, Hugh Alexander von &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Supporting Narrative Comprehension in Programmatic Music through Music and Light.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5330610">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10298805" class="vrtx-external-publication">
        <div id="vrtx-publication-10298805">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298805">
                Jensenius, Alexander Refsum &amp; Haugen, Ingrid Romarheim
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash og Nasjonalbiblioteket.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5330416">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10298804" class="vrtx-external-publication">
        <div id="vrtx-publication-10298804">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298804">
                Snaprud, Per; Jensenius, Alexander Refsum; Endestad, Tor &amp; Wøien, Randi
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conscientia.
                </span>
                            
            <a href="https://happeningnext.com/event/samtale-om-bevissthet-og-det-%C3%A5-skape-eid3a0d68lba3">https://happeningnext.com/event/samtale-om-bevissthet-og-det-%C3%A5-skape-eid3a0d68lba3</a>.
            <a href="https://hdl.handle.net/11250/5330415">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I forbindelse med utstillingen Conscientia p? Gamle Munch arrangeres en samtale om bevissthet og det ? skape.
Utgangspunktet for tema til utstillingen er bevissthet og forhold knyttet til det ? skape.
En del av utstillingen tar utgangspunkt i en serie med selvportretter av Randi W?ien basert p? MR bilder tatt av hennes eget hode. Bildene representerer et sett av intrikate m?nstre i ulike st?rrelser og sammensetninger og kan refereres til ulike organer, andre vesener og ren natur. Samtidig er de en representasjon av kunstneren slik hun er satt sammen i sitt eget hode. Keramiker Jorid Krosse lager objekter med form og m?nster som tar inspirasjon fra naturen og kan relateres til organiske strukturer, hoder og andre vesener. I samspill med maleriene vil de keramiske objektene settes i en relasjon til det kroppslige.
Samtalen om bevissthet og det ? skape bruker utstillingen som et utgangspunkt til ? f? belyst hva den skapende prosessen kan bety for v?r egen utvikling og hvordan hjernen fungerer og responderer p? skapende prosesser. Tema for samtalen vil v?re forholdet mellom kunst og bevissthet, om relasjonen mellom maleri og objekt og om hvordan det ? skape kunst kan p?virke v?r forst?else av oss selv og omgivelsene.
For tiden forskes det mye p? hva som faktisk skjer i hjernen n?r man skaper noe. Vi har f?tt med oss to av de fremste forskerne p? temaet fra universitetet i Oslo.
Alexander Refsum Jensenius er professor i musikkteknologi ved Universitetet i Oslo, hvor han ogs? leder RITMO Senter for tverrfaglige studier av rytme, tid og bevegelse og MishMash Senter for KI og kreativitet. Han forsker p? hvordan lyd og musikk p?virker kropp og sinn, bevisst og ubevisst.
Tor Endestad er f?rsteamanuensis i kognitiv- og nevropsykologi p? univeristetet i Oslo og er tilknyttet Ritmo. Han leder FRONT neurolab og forsker p? kognitiv psykologi og kognitiv nevrovitenskap med fokus p? hjerneavbildningsmetodikk. P?g?ende forskningsprosjekter omfatter studier av basale mekanismer i oppfattelse av rytme og tid, oppmerksomhet og hukommelse.
Til ? moderere samtalen har vi f?tt med oss Per Snaprud. Han er vitenskapsjournalist og f?r det hjerneforsker. Han arbeider i det Stockholm baserte magasinet 篮球即时比分_nba比分直播-彩客网重点推荐 og Framsteg og har tidligere v?rt virksom ved Dagens Nyheters og Sveriges Radios vitenskapsredaksjoner. Han er ogs? forfatter av boken ?Medvetandets ?terkomst, om hj?rnan, kroppen och universum?.
Victoria Johnson er fiolinist, underviser ved Institutt for musikkvitenskap og deltar i ulike forskningsprosjekter ved UiO. Hun har hatt solokonserter blant annet under Festspillene i Bergen, Ultima, Borealisfestivalen og Soundwaves i London. Hennes lidenskap for samtidsmusikk har resultert i flere bestillingsverk og plateinnspillinger. I denne sammenhengen vil hun spille musikk som er direkte komponert til bildene og objektene i utstillingen. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10298803" class="vrtx-external-publication">
        <div id="vrtx-publication-10298803">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10298803">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan musikk skape fred?                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ungdomsavisa.
                </span>
                            
            <a href="https://ungdomsavisa.com/index.php?artID=763&amp;navB=1">https://ungdomsavisa.com/index.php?artID=763&amp;navB=1</a>.
            <a href="https://hdl.handle.net/11250/5330413">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I kveld var en rekke personligheter fra musikkmilj?et samlet til debatt ved Universitetet i Oslo. Temaet var ?Kan musikk skape fred??. Blant deltakerne var Birgitte Grimstad og Lars Klevstrand , som har underholdt med musikk i flere ti?r. Debatten ble ledet av blant annet professor i musikkvitenskap ved UiO, Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10296666" class="vrtx-external-publication">
        <div id="vrtx-publication-10296666">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10296666">
                Jensenius, Alexander Refsum; Sandvik, Kristin Bergtora; Grimstad, Birgitte; Røysum, Andreas Hoem &amp; Klevstrand, Lars
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan musikk skape fred?                </span>
                            
            <a href="https://www.oslopeacedays.no/program/2025/fred-og-musikk">https://www.oslopeacedays.no/program/2025/fred-og-musikk</a>.
            <a href="https://hdl.handle.net/11250/5328554">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">P? konsert f?ler vi samhold med fremmede, viser forskning. I et kort ?yeblikk samler musikken oss. Hvordan kan musikk ogs? samle oss i urolige tider? Flere artister jobber for fred, p? ulike m?ter. M?t noen av dem p? Scene Domus Bibliotheca! Hva er det med akkurat musikk som forener oss? Bli med p? musikksnakk med artistene Birgitte Grimstad, Lars Klevstrand og Andreas R?ysum. Du m?ter ogs?  fredsforsker Kristin Bergtora Sandvik. Her vil musikkprofessor Alexander Refsum Jensenius lede samtalen med ulike sp?rsm?l knyttet til tematikken – kanskje svarer de p? ditt sp?rsm?l ogs?? Samtalen er beregnet for et publikum uten faglig bakgrunn i temaet.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10292562" class="vrtx-external-publication">
        <div id="vrtx-publication-10292562">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292562">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nytt KI-senter ved UiO: MishMash.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5325017">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10292560" class="vrtx-external-publication">
        <div id="vrtx-publication-10292560">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292560">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Openness in the age of AI.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.17652120">10.5281/zenodo.17652120</a>.
            <a href="https://hdl.handle.net/11250/5325015">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10292557" class="vrtx-external-publication">
        <div id="vrtx-publication-10292557">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292557">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk, opphavsrett og KI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5325014">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10292555" class="vrtx-external-publication">
        <div id="vrtx-publication-10292555">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292555">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nytt KI-senter ved HF: MishMash.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5325013">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10292554" class="vrtx-external-publication">
        <div id="vrtx-publication-10292554">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292554">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Are we still needed?                </span>
                            
            <a href="https://filmskolen.no/artikler/2025/ki-i-filmbransjen-2-0">https://filmskolen.no/artikler/2025/ki-i-filmbransjen-2-0</a>.
            <a href="https://hdl.handle.net/11250/5325011">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Den kunstige intelligensens frammarsj fortsetter ? v?re den st?rste kulturelle og samfunnsmessige omveltningen siden den industrielle revolusjonen. Siden fjor?rets konferanse har det skjedd vanvittig mye, derfor inviterer vi igjen til en dag fylt med internasjonale n?kkelpersoner, banebrytende prosjekter og nye perspektiver p? hvordan KI endrer m?ten vi utvikler, produserer og opplever film.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10292551" class="vrtx-external-publication">
        <div id="vrtx-publication-10292551">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10292551">
                Jensenius, Alexander Refsum; Bjerkestrand, Kari Anne Vadstensvik; Johnson, Victoria Christine ?rang &amp; Rao, Shabari
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness – Silence.
                </span>
                            
            <a href="https://www.hf.uio.no/imv/english/research/news-and-events/events/Research-Forum/2025/research-forum-stillness-and-silence.html">https://www.hf.uio.no/imv/english/research/news-and-events/events/Research-Forum/2025/research-forum-stillness-and-silence.html</a>.
            <a href="https://hdl.handle.net/11250/5325010">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What can be learned by standing still in silence? Dancers typically move, and musicians move to produce sound. In this research forum, we explore the opposite: musicians and dancers who stand still in silence.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10291146" class="vrtx-external-publication">
        <div id="vrtx-publication-10291146">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10291146">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Er gamle fioliner bedre enn nye?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2.
                </span>
                            
            doi: <a href="https://www.arj.no/no/2025/11/26/gamle-fioliner/">https://www.arj.no/no/2025/11/26/gamle-fioliner/</a>.
            <a href="https://hdl.handle.net/11250/5323823">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10289987" class="vrtx-external-publication">
        <div id="vrtx-publication-10289987">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10289987">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        TV2.
                </span>
                            
            doi: <a href="https://play.tv2.no/nyheter/god-morgen-norge-y05mec7j?play=true">https://play.tv2.no/nyheter/god-morgen-norge-y05mec7j?play=true</a>.
            <a href="https://hdl.handle.net/11250/5322859">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10289261" class="vrtx-external-publication">
        <div id="vrtx-publication-10289261">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10289261">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What is the role of AI in creative activities?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5322244">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10286883" class="vrtx-external-publication">
        <div id="vrtx-publication-10286883">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10286883">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash and potential for games.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5320145">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10285832" class="vrtx-external-publication">
        <div id="vrtx-publication-10285832">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10285832">
                Guo, Jinyue; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cross-modal Analysis of Spatial-Temporal Auditory Stimuli and Human Micromotion when Standing Still in Indoor Environments (poster).
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.17502603">10.5281/zenodo.17502603</a>.
            <a href="https://hdl.handle.net/11250/5319225">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283759" class="vrtx-external-publication">
        <div id="vrtx-publication-10283759">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283759">
                Sareen, Siddharth; Danbolt, Bjørn Kristian; Aamaas, Borgar; Olsen, Cecilie Sachs; Bratland-Sanda, Solfrid &amp; Holmen, Heidi
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/10283759/contributors', 'vrtx-publication-contributors-10283759')">
                    [Show all&nbsp;14&nbsp;contributors for this article]</a>
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        AYFs Jubileumsbok - De første ti årene.
                </span>
                            
            doi: <a href="https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf">https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf</a>.
            <a href="https://hdl.handle.net/11250/5317571">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283758" class="vrtx-external-publication">
        <div id="vrtx-publication-10283758">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283758">
                Duch, Michael Francis; Furunes, Alexander Eriksson; Jensenius, Alexander Refsum &amp; Olsen, Cecilie Sachs
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstnerisk forskning for en kompleks verden.
                </span>
                            
            doi: <a href="https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf">https://akademietforyngreforskere.no/wp-content/uploads/2025/11/Jubileumsbok-digital-3.pdf</a>.
            <a href="https://hdl.handle.net/11250/5317572">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstfagene spiller en viktig rolle i å utvide måten vi jobber med og forstår komplekse samfunnsproblemer. Likevel blir kunstfagene stadig oversett og nedprioritert i forskningspolitikken. Vi spør derfor: hva er, bør og kan kunstens rolle være i det norske forskningslandskapet?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10283712" class="vrtx-external-publication">
        <div id="vrtx-publication-10283712">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283712">
                Arnim, Hugh Alexander von; Christodoulou, Anna-Maria; Burnim, Kayla; Upham, Finn; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        LightHearted—A Framework for Mapping ECG Signals to Light Parameters in Performing Arts.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5317546">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283018" class="vrtx-external-publication">
        <div id="vrtx-publication-10283018">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283018">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Who Owns Our Knowledge? Open Research in MishMash Centre for AI &amp; Creativity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5316932">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283016" class="vrtx-external-publication">
        <div id="vrtx-publication-10283016">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283016">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash og Kunstnerisk utviklingsarbeid.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5316928">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283014" class="vrtx-external-publication">
        <div id="vrtx-publication-10283014">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283014">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash Sentre for KI og kreativitet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5316927">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10283012" class="vrtx-external-publication">
        <div id="vrtx-publication-10283012">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10283012">
                Jahr, Ida; Jensenius, Alexander Refsum; Lacroix, Mathieu &amp; Graver, Fredrik
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunst i intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5316926">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">En paneldebatt om kunst og kunstig intelligens.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10281288" class="vrtx-external-publication">
        <div id="vrtx-publication-10281288">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10281288">
                Laczko, Balint; Rognes, Marie Elisabeth &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Poster for &quot;Image Sonification as Unsupervised Domain Transfer&quot;.
                </span>
                            
            doi: <a href="https://doi.org/10.5281/zenodo.17513165">10.5281/zenodo.17513165</a>.
            <a href="https://hdl.handle.net/11250/5278360">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The process of image sonification maps visual features into perceived auditory features. Most established sonification methods rely on identifying salient visual features in the input data and then mapping their distribution to a proportional distribution of auditory features. However, this approach requires both domain expertise and manual feature engineering. Here, we propose a new method of image sonification, leveraging recent advances in representation learning and domain transfer. Our approach introduces a pair of variational auto-encoder models that learn disentangled latent representations of the images and sounds, respectively, and a separate network that maps between these representations. The resulting sonification system encodes images into the latent space and then decodes them as sounds. Both representations and their mapping are learned in an entirely unsupervised manner. When evaluating the system in an interactive real-time setting, we observed that the model successfully learned disentangled representations of image and sound factors in our synthetic datasets.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10280356" class="vrtx-external-publication">
        <div id="vrtx-publication-10280356">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10280356">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash - exploring AI and creativity between art and science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5277612">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A presentation of MishMash, a large Norwegian consortium dedicated to exploring the intersection of AI and creativity. Our primary objective is to create, explore, and reflect on AI for, through, and in creative practices. We will investigate AI’s impact on creative processes, develop innovative CoCreative AI systems, and address AI’s ethical, cultural, and societal implications in creative domains.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10280353" class="vrtx-external-publication">
        <div id="vrtx-publication-10280353">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10280353">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash Centre for AI &amp; Creativity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5277610">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A presentation of MishMash, a large Norwegian consortium dedicated to exploring the intersection of AI and creativity. Our primary objective is to create, explore, and reflect on AI for, through, and in creative practices. We will investigate AI’s impact on creative processes, develop innovative CoCreative AI systems, and address AI’s ethical, cultural, and societal implications in creative domains.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10280352" class="vrtx-external-publication">
        <div id="vrtx-publication-10280352">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10280352">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og KI - Utfordringer og muligheter.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5277609">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstig intelligens påvirker det meste om dagen også musikklivet og musikkbransjen. Men hva er egentlig KI og hva er utfordringer og muligheter innenfor kunst og kultur? Presentasjonen diskuterer ulike pedagogiske tilnærminger og gir eksempler på hvordan det nye KI-senteret MishMash skal angripe problemstillingene.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255085" class="vrtx-external-publication">
        <div id="vrtx-publication-10255085">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255085">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI og det nye MishMash-senteret.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4669517">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255077" class="vrtx-external-publication">
        <div id="vrtx-publication-10255077">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255077">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Muligheter for deltagelse i MishMash Senter for KI og kreativitet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3697314">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10255055" class="vrtx-external-publication">
        <div id="vrtx-publication-10255055">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255055">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Technologies supporting research on music-related body motion.
                </span>
                            
            doi: <a href="https://www.liser.lu/events/EXPAR2025-09-18">https://www.liser.lu/events/EXPAR2025-09-18</a>.
            <a href="https://hdl.handle.net/11250/4403674">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">As researchers, we are increasingly using emerging technologies, such as multiple mobile eye tracking, virtual reality, and physiological indicators (e.g., heart rate and respiration) to study professionals’ individual and collaborative work practices. In this workshop, we will demonstrate how these technologies can be provided to professionals in various fields (e.g., education, healthcare, business, engineering, the arts) as a resource for self-reflection, enabling them to study and improve their own practices.

The goal of this workshop is to introduce and facilitate participants to experience novel approaches that use these emerging technologies and tools to help practitioners study their own skills and understand their learning processes. We will also show how focus groups and stimulated recall interviews can encourage and guide professionals to discover ways to incorporate these new technologies into their practice as resources for reflection and growth.

The workshop’s theme is educational practice and research, with a focus on showing how we can offer teachers theoretically driven and empirically validated methodologies for witnessing the micro-processes of collaborative mathematics learning. We will show and discuss how multiple mobile eye-tracking and virtual reality can be used in educational practice and for teacher training and professional development.

This approach and these emerging technologies are applicable not only in education, but also in all other fields of research that aim to study individual and collective practices, as well as professional learning, during the process of acquiring new skills or improving existing ones.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10255047" class="vrtx-external-publication">
        <div id="vrtx-publication-10255047">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10255047">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization - Learn to use MG Toolbox.
                </span>
                            
            doi: <a href="https://www.liser.lu/events/EXPAR2025-09-19">https://www.liser.lu/events/EXPAR2025-09-19</a>.
            <a href="https://hdl.handle.net/11250/4159930">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video files. This includes visualization techniques such as motion videos, motion history images, and motiongrams; techniques that, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes some basic computer vision analysis, such as extracting quantity and centroid of motion, and using such features in analysis. MG Toolbox for Python is a collection of high-level modules that generate all of the above-mentioned visualizations.The toolbox is relevant for everyone working with video recordings of humans, such as in linguistics, psychology, medicine, human-computer interaction, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254906" class="vrtx-external-publication">
        <div id="vrtx-publication-10254906">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254906">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO, MishMash and the fourMs Lab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4600335">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A presentation for the Ukrainian Research Council.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254869" class="vrtx-external-publication">
        <div id="vrtx-publication-10254869">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254869">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kreativitet og KI.
                </span>
                            
            doi: <a href="https://www.arj.no/no/2025/08/28/ki-filmbransjen/">https://www.arj.no/no/2025/08/28/ki-filmbransjen/</a>.
            <a href="https://hdl.handle.net/11250/4445332">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Jeg vil presentere MishMash Senter for KI og kreativitet samt gi en mini-introduksjon til KI generelt.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254832" class="vrtx-external-publication">
        <div id="vrtx-publication-10254832">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254832">
                Jensenius, Alexander Refsum; Jahr, Ida &amp; Nordg?rd, Daniel
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash Senter for KI og kreativitet.
                </span>
                            
            doi: <a href="https://www.forskningsradet.no/arrangementer/2025/presentasjon-ki-sentra/">https://www.forskningsradet.no/arrangementer/2025/presentasjon-ki-sentra/</a>.
            <a href="https://hdl.handle.net/11250/4427213">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A presentation of the planned activities of MishMash Centre for AI &amp; Creativity</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254828" class="vrtx-external-publication">
        <div id="vrtx-publication-10254828">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254828">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Eksternt blikk på NMHs forskningsaktivitet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4211013">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10254770" class="vrtx-external-publication">
        <div id="vrtx-publication-10254770">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254770">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Noen resultater fra tre år med forskningssamarbeid med tre orkestre.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3847365">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Denne presentasjonen oppsummerer resultater fra forskningsstudier på og med tre skandinaviske symfoniorkestre. I alle tilfeller har både kvalitative og kvantitative data blitt samlet inn på prøver og konserter i konsertsaler. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-10254572" class="vrtx-external-publication">
        <div id="vrtx-publication-10254572">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10254572">
                Christodoulou, Anna-Maria; Glette, Kyrre; Lartillot, Olivier &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusiQAl: Music Question Answering through Audio-Video fusion.
                </span>
                            
            doi: <a href="https://ismir2025.ismir.net/program-detailed-schedule">https://ismir2025.ismir.net/program-detailed-schedule</a>.
            <a href="https://hdl.handle.net/11250/5061636">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10251419" class="vrtx-external-publication">
        <div id="vrtx-publication-10251419">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251419">
                D&#39;Amario, Sara &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Workshop on Women Musicians Wellbeing.
                </span>
                            
            doi: <a href="https://www.uio.no/ritmo/english/news-and-events/events/workshops/2025/women-wellbeing/">https://www.uio.no/ritmo/english/news-and-events/events/workshops/2025/women-wellbeing/</a>.
            <a href="https://hdl.handle.net/11250/4508060">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10251401" class="vrtx-external-publication">
        <div id="vrtx-publication-10251401">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10251401">
                D&#39;Amario, Sara; Løve, Andreas; Foldal, Maja Dyhre &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Functional Near Infrared Spectroscopy  (fNIRS) Responses of Professional  Violinists during Orchestra Performances.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4070242">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-10241704" class="vrtx-external-publication">
        <div id="vrtx-publication-10241704">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-10241704">
                Arnim, Hugh Alexander von; Erdem, Cagri; Côté-Allard, Ulysse Teller Masao &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Sensor is not a Sensor: Diffracting the Preservation of Sonic Microinteraction with the SiFiBand.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4344042">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper documents our exploratory work to preserve the interactive music system Stillness Under Tension—developed to explore inverse sonic microinteraction—by porting it from the original and discontinued Myo sensor armband to SiFiBand, a new prototype armband with motion (IMU) and muscle (EMG) sensors. We approach this by merging the Multilevel Dynamic Preservation model with a “diffraction-in-action” method grounded in a theoretical entanglement perspective. Rather than focusing on the Myo version’s artefactual remains, we explore the difference in data representations offered by the two devices as our point of departure. The paper describes the sensor devices, evaluating their data representations given their technical specifications, and describing how these differences propagate throughout our attempt to preserve the system, enacting necessary changes. We discuss the implications of merging these methods in view of the long-term preservation of interactive music systems. Our version 2.0 of Stillness Under Tension finds itself experientially in a position between familiarity and newness.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2397612" class="vrtx-external-publication">
        <div id="vrtx-publication-2397612">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2397612">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MishMash, musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3814178">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2359464" class="vrtx-external-publication">
        <div id="vrtx-publication-2359464">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2359464">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What happens in the body when you stand still?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4848468">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Professor Alexander Refsum Jensenius will talk about his decade-long exploration of human micromotion. Motion data from the 365 standstill sessions he carried out during 2023 reveals lots of biomechanical noise, but also some interesting signals.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392289" class="vrtx-external-publication">
        <div id="vrtx-publication-2392289">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392289">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        CoARA principles in practice. Insights from a crossdisciplinary Centre of Excellence.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4045881">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The presentation discusses the practical application of the NOR-CAM framework and CoARA principles at RITMO, a cross-disciplinary research centre. It highlights the challenges and strategies involved in promoting comprehensive and transparent research assessment, especially in interdisciplinary settings where values and evaluation criteria differ across fields. The author emphasizes the importance of redefining openness in research, professionalizing hiring committees, implementing structured career development programs, and fostering a culture of sharing and caring. These efforts aim to create a more equitable, supportive, and effective academic environment that values diverse contributions and supports researchers&#39; well-being.
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2381891" class="vrtx-external-publication">
        <div id="vrtx-publication-2381891">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2381891">
                Olsen, Cecilie Sachs; Jensenius, Alexander Refsum &amp; Duch, Michael Francis
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstnerisk forskning for en kompleks verden.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3469764">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2355958" class="vrtx-external-publication">
        <div id="vrtx-publication-2355958">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2355958">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskningsfronten - Mensblod mot Alzheimers.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2 Abels tårn.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3599940">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kan personlighetstrekket avgjøre om du liker å danse?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391281" class="vrtx-external-publication">
        <div id="vrtx-publication-2391281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391281">
                Sveen, Henrik; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cyclic Patterns and Spatial Orientations in Artificial
Impulsive Autonomous Sensory Meridian Response (ASMR) Sounds.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5094288">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Autonomous Sensory Meridian Response (ASMR) is a tingling sensation in the neck and spine often triggered by specific sounds. This paper reports a study on the impact of different cyclic patterns and spatial orientations—defined here as the perceived directionality and motion of sound sources in a three-dimensional auditory space—on inducing ASMR experiences. The results demonstrate that both the type of cyclic pattern and the spatial orientation significantly influence the intensity and nature of ASMR experiences. Furthermore, the research explores synthesizing ASMR-inducing sounds while preserving key audio characteristics from acoustically recorded ASMR content. Through survey data analysis and regression modeling, distinct patterns emerge regarding the relationship between personality traits and ASMR experience. The findings contribute to a deeper understanding of ASMR as a sensory phenomenon and provide insights into the potential applications of artificially generated ASMR stimuli. Additionally, the research sheds light on the role of spatiality in ASMR experiences and the synthesis of ASMR-inducing sounds for future studies and practical applications</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2395952" class="vrtx-external-publication">
        <div id="vrtx-publication-2395952">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2395952">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What happens in the body when you stand still?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4282149">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Professor Alexander Refsum Jensenius will talk about his decade-long exploration of human micromotion. Motion data from the 365 standstill sessions he carried out during 2023 reveals lots of biomechanical noise, but also some interesting signals.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2383681" class="vrtx-external-publication">
        <div id="vrtx-publication-2383681">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383681">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI og musikkens fremtid.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3930887">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius leder RITMO, Senter for tverrfaglig forskning på rytme, tid og bevegelse, med 60 ansatte. Han leter for tiden systematisk etter mulighetene og perspektivene KI kommer med. KI er en disruptiv teknologi som griper inn i etablerte næringsmodeller. Hvor er vi på vei? Mange av perspektivene kan virke overveldende. Det er imidlertid viktig å huske på at selv om maskiner nå er med på å utvikle seg selv, er det primært mennesker som vil utvikle også morgendagens teknologier. Han mener det er sentralt at vi i Norge er med på denne utviklingen. Her mener kunst- og kulturfeltet har en unik mulighet til å bidra gjennom eksperimentell utforskning og kritisk refleksjon. Han mener vi vil se flere systemer som fokuserer på kontinuerlig samhandling mellom mennesker og maskiner, slik som når musikere improviserer. Men at man ikke kommer videre med KI uten at de får en kropp som kan sanse og handle. Og at KI-systemer vil kunne bli mer empatiske, noe som vil forbedre menneske-maskin-kommunikasjon, men som også reiser mange etiske problemstillinger.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2383680" class="vrtx-external-publication">
        <div id="vrtx-publication-2383680">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383680">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Improvisasjon for muskelarmbånd.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3247304">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2383683" class="vrtx-external-publication">
        <div id="vrtx-publication-2383683">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383683">
                Jemterud, Torkild; Jensenius, Alexander Refsum; Undheim, Vegard &amp; Røislien, Jo
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Wonderful Nachspiel med Torkild Jemterud.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4299979">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2383682" class="vrtx-external-publication">
        <div id="vrtx-publication-2383682">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2383682">
                Lerdahl, Erik; Buene, Eivind; Berg, Anna &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk, stillhet og kreativitet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3470643">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius har stått stille 10 min hver dag i ett år. Han kalles Professor Stillstand. Han leder et senter med 60 ansatte som forsker på rytme, tid og bevegelse, og vil gjerne forstå mer og dypere om hvordan lydene og inntrykkene av det vi omgir oss med påvirker oss. Hans første erkjennelse er at han tror verden ville kunne bli et bedre sted om alle stod stille 10 minutter hver dag. Hva gjør musikk og stillhet med oss selv, vårt velvære og vår kreativitet?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391058" class="vrtx-external-publication">
        <div id="vrtx-publication-2391058">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391058">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Regjeringa satser over én milliard på KI.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4759465">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Seks nye forskningssentre skal blant annet forske på ansvarlig, pålitelig og bærekraftig kunstig intelligens.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370699" class="vrtx-external-publication">
        <div id="vrtx-publication-2370699">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370699">
                Jensenius, Alexander Refsum; Watne, Åshild; Maasø, Arnt &amp; Agledahl, Vetle
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikksnakk: Allsang.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4683439">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi har aldri konsumert mer musikk enn nå, viser forskning. Likevel er det færre av oss som tar en aktiv del i musikkskapingen, enn før. Hva mister vi når vi ikke tar del i, eller vet hvordan man lager, musikk? Vi skal også teste hvordan det er å synge sammen. Hva skjer med oss da?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2391056" class="vrtx-external-publication">
        <div id="vrtx-publication-2391056">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391056">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        KI-milliard.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        Dagsnytt atten.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4632889">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2370698" class="vrtx-external-publication">
        <div id="vrtx-publication-2370698">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370698">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvor er vi på vei? Fremtidens høyere utdanning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5072523">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2391062" class="vrtx-external-publication">
        <div id="vrtx-publication-2391062">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2391062">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskningens rolle i å forme fremtiden.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4465350">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2357828" class="vrtx-external-publication">
        <div id="vrtx-publication-2357828">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2357828">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskningsfronten. Hva sier den nyeste forskningen oss?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2 Studio 2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4264493">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2392394" class="vrtx-external-publication">
        <div id="vrtx-publication-2392394">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392394">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM og RITMO. Erfaringer fra et interdisiplin?rt senter.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5225146">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">## Sammendrag

Innlegget oppsummerer erfaringer med å implementere CoARA- og NOR-CAM-prinsippene ved RITMO. Forfatteren deler refleksjoner rundt åpne forskningspraksiser, utfordringer og muligheter ved tverrfaglig forskning, samt konkrete tiltak for rekruttering, karriereutvikling og et støttende akademisk miljø. Fokus ligger på å profesjonalisere ansettelsesprosesser, utvikle karriereprogrammer og bygge en kultur for deling og omsorg i forskningsmiljøet.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392393" class="vrtx-external-publication">
        <div id="vrtx-publication-2392393">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392393">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tale under Konkurransen Unge Forskere 2025.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4942561">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I denne forelesningen forteller jeg om hvordan en barndomsfascinasjon for papirsylindres styrke førte til en fysikkoppgave på videregående, som igjen førte til finaleplass i konkurransen og deltakelse på Nobelarrangementer i Stockholm. Opplevelsen ga innsikt i forskningsverdenen og motivasjon til videre studier og forskning.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2366498" class="vrtx-external-publication">
        <div id="vrtx-publication-2366498">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2366498">
                Ormstad, Heidi &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Helseforskning: Åpenhetens balansegang.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3997969">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Åpen forskning skal være så åpen som mulig og så lukket som nødvendig. Skribentene skiller mellom hva forskere skal og bør gjøre.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2366536" class="vrtx-external-publication">
        <div id="vrtx-publication-2366536">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2366536">
                Schau, Kristopher &amp; Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nysgjerrige på: rytmens hemmeligheter.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Nysgjerrige Norge.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5209031">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I denne episoden besøker Kristopher forskningssenteret RITMO ved Universitetet i Oslo. Der forsker de på alt fra trommeroboter og mikromusikalske problemstillinger til hvordan vi påvirkes av ventilasjonslyd. Han møter senterleder Alexander Refsum Jensenius som forteller om forskning i skjæringspunktet mellom musikk, bevegelse, psykologi og robotikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2397690" class="vrtx-external-publication">
        <div id="vrtx-publication-2397690">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2397690">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Her er Norges rykende ferske KI forskningssentre!                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3230768">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hele 1,3 milliarder kroner skal tildeles seks nasjonale forskningssentre på kunstig intelligens. Disse er nå offentliggjort - kom og vær en av de første til å bli kjent med de nye forskningssentrene og lær om hva de skal forske på!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392257" class="vrtx-external-publication">
        <div id="vrtx-publication-2392257">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392257">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization - Learn to use MG Toolbox.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4548877">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is designed for students and researchers who work with video recordings. You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video files. This includes visualization techniques such as motion videos, motion history images, and motiongrams, which allow for viewing video recordings from different temporal and spatial perspectives in various ways. It also includes some fundamental computer vision analysis, such as extracting the quantity and centroid of motion, and using such features in analysis. MG Toolbox for Python is a collection of high-level modules that generate all of the visualizations mentioned above. The toolbox is relevant for everyone working with video recordings of humans, including linguists, psychologists, medical professionals, human-computer interaction specialists, and educators in the educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392258" class="vrtx-external-publication">
        <div id="vrtx-publication-2392258">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392258">
                Jensenius, Alexander Refsum
            </span>(2025).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music, RITMO and AI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4028668">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">An introduction to RITMO and ongoing research on the topic of music and AI for a workshop between researchers from the University of Oslo, Queen Mary University of London, and KTH Royal Institute of Technology.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2329598" class="vrtx-external-publication">
        <div id="vrtx-publication-2329598">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2329598">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From Sound to Science: Open Science Practices at the RITMO Centre.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Pathos.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4461398">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A PathOS interview about how Open Science (Open Access to publications, Open/FAIR data and software, collaborations with citizens) has made a positive or negative impact.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2224205" class="vrtx-external-publication">
        <div id="vrtx-publication-2224205">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2224205">
                Jensenius, Alexander Refsum &amp; Laczko, Balint
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4759772">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings. You will learn to use MG Toolbox, a Python package with numerous tools for visualizing and analyzing video recordings. This includes visualization techniques such as motion videos, motion history images, and motiongrams; techniques that, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes some basic computer vision analysis, such as extracting quantity and centroid of motion, and using such features in analysis.MG Toolbox for Python is a collection of high-level modules for generating all of the above-mentioned visualizations and analyses. This toolbox was initially developed to analyze music-related body motion but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, psychology, medicine, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2315588" class="vrtx-external-publication">
        <div id="vrtx-publication-2315588">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2315588">
                Jensenius, Alexander Refsum; Wendt, Kaja Kathrine; Ski-Berg, Veronica &amp; Slette, Aslaug Louise
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        S1E3 Forskerkarrierer - i tall og matriser.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        Podcast.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3883480">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I tredje episode av NIFUs podkastserie Kunnskapsfloker snakker vi om forskerkarrierer. Hva er egentlig en «forskerkarriere», og hvor i samfunnet finner vi forskere? Å utvikle gode forskerkarrierer står høyt på dagsorden både i Norge og i Europa. Det utvikles for tiden nye rammeverk for karriereutvikling samt statistiske indikatorer som dokumenterer hvordan forskerkarrierer utvikler seg over tid. Men hvordan kan forskningssystemet tilrettelegge for mangfoldige forskerkarrierer? Gjester i episoden er Kaja Kathrine Wendt fra SSB/NIFU og Alexander Refsum Jensenius fra UiO. Programledere er Veronica Ski-Berg og Aslaug Louise Slette. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307415" class="vrtx-external-publication">
        <div id="vrtx-publication-2307415">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307415">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Labprat #3: NM i stillstand.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3886952">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Klarer du å stå stille til favorittlåta di? Prøv selv og vinn 1000kr!

Folk sier ofte at det er umulig å ikke bevege seg til musikk, men stemmer det?

Onsdag 3. april kan du teste deg selv når professor Alexander Refsum Jensenius – også kjent som Professor stillstand – inviterer til «NM i stillstand» her på Popsenteret.

Vinneren kåres samme kveld på LAB.prat #3 med nettopp Alexander! Her får du også vite mer om hva som faktisk skjer i kroppen når vi hører på musikk. 

Som vanlig ledes kvelden av fasilitator og «MC» Dr. Kjell Andreas Oddekalv, også kjent som «Dr. Kjell» (eller hele Norges Kjelledegge som han selv liker å si) fra Hiphop orkesteret Sinsenfist. Sammen med Alexander inviterer han til en uformell samtale og Q&amp;A om kroppsrytmer og hvordan de påvirkes av omgivelsene våre. 

I tidsrommet mellom stillstandkonkurransen og LAB.prat er Popsenteret åpent og du er velkommen til å besøke utstillingen vår og alt den har å by på!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307451" class="vrtx-external-publication">
        <div id="vrtx-publication-2307451">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307451">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        20 years of concert research at the University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3393298">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In my talk I will give an overview of the concert research conducted in the fourMs Lab at the University of Oslo from the early 2000s to today. Over the years, we have explored and refined numerous data captures methods, from qualitative observation studies, interviews, and diaries to motion capture and physiological sensing. At the core has always been the attempt to shed light on the complexity of music performance. This includes understanding more about the subtleties of performer&#39;s sound-producing actions, sound-facilitating motion, and communicative and expressive gestures. It also includes the intricacies of inter-personal synchronization. Over the years, we have been able to expand from studying duos, trios, and quartets to full orchestras. Today, we have lots of data, some answers, and even more questions than when we started. An excellent starting point for future research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307437" class="vrtx-external-publication">
        <div id="vrtx-publication-2307437">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307437">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization and Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4468971">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this workshop, I will introduce video visualization as a method for understanding more about music-related body motion. Examples will be given of various methods implemented in the standalone application VideoAnalysis and the Musical Gestures Toolbox for Python.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307448" class="vrtx-external-publication">
        <div id="vrtx-publication-2307448">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307448">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Blikksporing av musikere og maler på scenen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4774308">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvor ser en kunstner som maler på scenen under en konsert? Forskere fra UiO forsøker å finne ut av dette ved hjelp av avansert blikksporingsteknologi.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307423" class="vrtx-external-publication">
        <div id="vrtx-publication-2307423">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307423">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Ambient project at RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3493696">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The AMBIENT project aims to study how such elements influence people&#39;s bodily behaviors and how they feel about the rhythms in an environment. This will be done by studying how different auditory and visual stimuli combine to create rhythms in various settings.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307385" class="vrtx-external-publication">
        <div id="vrtx-publication-2307385">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307385">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From air guitar to self-playing guitars.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4270258">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What can air guitar performance tell about people&#39;s musical experience and how does it relate to real guitar performance? Alexander Refsum Jensenius will tell about his decade-long research into music-related body motion of both performers and perceivers. He will also tell about how this has informed new performance paradigms, including the self-playing guitars that will be showcased at the festival.

?

Alexander Refsum Jensenius is a professor of music technology at the University of Oslo and Director of RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion. He studies how and why people move to music and uses this knowledge to create new music with untraditional instruments. He is widely published, including the books Sound Actions and A NIME Reader.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307419" class="vrtx-external-publication">
        <div id="vrtx-publication-2307419">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307419">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Embodied music-related design.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4697973">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Abrahamson et al. (2022) recently called for a merging of Embodied Design-Based Research and Learning Analytics to establish a coherent and integrated focus on Multimodal Learning Analytics of Embodied Design. In Spring 2022, members of EDRL and selected international collaborators of the lab participated in “Rhythm Rising,” a workshop week hosted at University of Oslo’s RITMO Centre for Interdisciplinary Studies in Rhythm, Time, and Motion. The workshop featured activities for graduate students to learn the scientific research methodologies of gathering physical, physiological, and neurobiological data from study participants engaged in interactive learning of STEM content. The activities combined the respective expertise of Abrahamson (learning sciences) and Jensenius (embodied music cognition and technology) to investigate sensorimotor micro-processes hypothesized to form the cognitive basis of conceptual understandings, such as hand- and eye actions leading to the emergence of mathematical insight. Whereas the Oslo workshop spurred great enthusiasm among the graduate students, its duration only allowed time for initial data collection. Therefore, we would like to regather in Spring 2024 to continue our collaborative work and to share insights about data analysis, visualization, and interpretation. Concurrently, we’ll develop ideas for future joint research projects.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2307446" class="vrtx-external-publication">
        <div id="vrtx-publication-2307446">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307446">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab som et åpent forskningsprosjekt mellom RITMO og UB.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4144602">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">MusicLab er et samarbeid mellom RITMO og Universitetsbiblioteket. Målet er å utforske nye måter å samle inn og formidle musikkrelaterte forskningsdata på.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2323047" class="vrtx-external-publication">
        <div id="vrtx-publication-2323047">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2323047">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Åpen forskning muliggjør forskningsnær utdanning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4787234">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Selv om vi liker å si at vi driver med forskningsbasert utdanning, er organiseringen av forskning og utdanning gjerne plassert i ulike siloer, skriver Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2263563" class="vrtx-external-publication">
        <div id="vrtx-publication-2263563">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2263563">
                Jensenius, Alexander Refsum &amp; Danielsen, Anne
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tverrfaglighet: 40-grupper til besvær.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4350617">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi er positive til fler- og tverrfaglige studieløp og synes 40-grupper er en god idé. Strukturen er på plass, men implementeringen er mangelfull. Til tider er det vanskelig å skjønne at vi jobber ved samme institusjon.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2242399" class="vrtx-external-publication">
        <div id="vrtx-publication-2242399">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2242399">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Instruments, sounds, music and interaction.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4043860">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2280908" class="vrtx-external-publication">
        <div id="vrtx-publication-2280908">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2280908">
                Jensenius, Alexander Refsum; Danielsen, Anne; Kvammen, Daniel &amp; Tollefsbøl, Sofie
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikksnakk: Musikk i urolige tider.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5188871">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">På konsert føler vi samhold med fremmede, viser forskning. I et kort øyeblikk samler musikken oss. Hvordan kan musikk også samle oss i urolige tider? Hva er det med akkurat musikk som forener oss? Bli med på musikksnakk med artistene Daniel Kvammen og vokalist i FIEH, Sofie Tollefsbøl, og musikkforsker Anne Danielsen. Her vil musikkprofessor Alexander Refsum Jensenius lede samtalen med ulike spørsmål knyttet til tematikken – kanskje svarer de på ditt spørsmål også? Samtalen er beregnet for et publikum uten faglig bakgrunn i temaet.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2280627" class="vrtx-external-publication">
        <div id="vrtx-publication-2280627">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2280627">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The assessment of researchers is changing – how will it impact your career?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3834870">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Changes are happening in the world of research assessment, for example by recognizing several competencies as merits and a better balance between quantitative and qualitative goals. In Norway, for example, Universities Norway presented the NOR-CAM report in 2021 which sparked a movement for reform. As an early career researcher, it&#39;s crucial to understand how these changes may impact your research career. In this talk, Jensenius will discuss the evolving landscape of research assessment and what it means for you.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2305556" class="vrtx-external-publication">
        <div id="vrtx-publication-2305556">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305556">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Muskelmusikk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4432418">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva skjer i musklene når vi forsøker å stå stille? Hvordan kan man lage musikk fra kroppen? I pausen på Forsker Grand Prix vil jeg underholde med et sceneshow hvor jeg utforsker interaktive muskelarmbånd og en musikkhanske.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2257455" class="vrtx-external-publication">
        <div id="vrtx-publication-2257455">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2257455">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvorfor trenger vi lisenser på data?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4040662">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">QualiFAIR huben inviterer til en presentasjon og en diskusjon om rettighetene til data og behov for lisenser for data og annet forskningsmateriale.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2234856" class="vrtx-external-publication">
        <div id="vrtx-publication-2234856">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2234856">
                Jensenius, Alexander Refsum &amp; Lilleeng, Sverre
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Professor stillstand.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4263051">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2243455" class="vrtx-external-publication">
        <div id="vrtx-publication-2243455">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2243455">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stod stille hver dag i 10 minutter.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK Helgemorgen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3384163">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2323996" class="vrtx-external-publication">
        <div id="vrtx-publication-2323996">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2323996">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan kan åpen forskning lede til åpen utdanning? Og omvendt?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4860919">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Åpenhet og akademisk frihet er hjørnesteiner i et velfungerende forskningssystem. Dette må vi bevare når vi skal bygge et helhetlig nasjonalt forskningssystem som også inkluderer skjermet og gradert forskning. Hvordan skal vi klare å bygge et forskningssystem som er så åpent som mulig og så lukket som nødvendig? Åpen forskning innebærer at forskningen gjøres tilgjengelig og deles av forskere, institusjoner, sektorer og over landegrenser. Det har vært lite fokus på de positive sidene åpen forskning kan ha innen utdanning. Hvordan kan vi motivere til mer forskningsnær utdanning, men også øke kvaliteten på forskningen? </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2319249" class="vrtx-external-publication">
        <div id="vrtx-publication-2319249">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2319249">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Can doing nothing tell us everything?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5146610">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Can doing nothing tell us everything? Meet Professor Alexander Refsum Jensenius, a music researcher exploring the deep connections between sound, space, and the human body. Through his fascinating studies on stillness and motion, Alexander has discovered surprising insights into how we interact with our environment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2319254" class="vrtx-external-publication">
        <div id="vrtx-publication-2319254">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2319254">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The assessment of researchers is changing – how will it impact your career?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5100853">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Changes are happening in the world of research assessment, for example by recognizing several competencies as merits and a better balance between quantitative and qualitative goals. In Norway, for example, Universities Norway presented the NOR-CAM report in 2021 which sparked a movement for reform. As an early career researcher, it&#39;s crucial to understand how these changes may impact your research career. In this talk, Jensenius will discuss the evolving landscape of research assessment and what it means for you.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2319253" class="vrtx-external-publication">
        <div id="vrtx-publication-2319253">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2319253">
                Jensenius, Alexander Refsum; Edwards, Peter; Klungnes, Kristina Mariell Dulsrud; Berg, Anna &amp; Jenssen, Kjell Runar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikksnakk: Filmmusikk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4128799">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikk skaper stemning i filmer. Men hvordan klarer filmmusikken ? bevege oss s? mye? Og hva er starten p? fortellingen om hvorfor man bruker musikk for ? skape en viss stemning?

Se for deg en hai komme sv?mmende mot en uviten bader – i stillhet. Hva med Frodo og Sam som karrer seg opp Mount Doom til lyden av... ingenting? Eller Katniss Everdeen som kj?rer i en vogn i flammer gjennom Capitol, uten trommer som dr?nner og majestetiske horn? Litt kjedelig, ikkesant?

Musikk er viktig i film for ? lage en viss stemning. Men hvordan ble det s?nn? Er det bare for ? f? oss til ? f?le, eller ligger det en historie bak filmmusikken?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2274681" class="vrtx-external-publication">
        <div id="vrtx-publication-2274681">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2274681">
                Jensenius, Alexander Refsum &amp; Jerve, Karoline Ruderaas
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Verdens største musikkeksperiment.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4276767">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I kveld m?tes NRKs popul?rvitenskapelige radioprogram Abels t?rn, KORK og forskningsprosjektet MusicLab for ? m?le hva som skjer mellom musikere og publikum n?r de utsettes for musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2319252" class="vrtx-external-publication">
        <div id="vrtx-publication-2319252">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2319252">
                Jensenius, Alexander Refsum; Rønning, Anne-Birgitte; Haug, Dag Trygve Truslew &amp; Sæther, Steinar Andreas
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Frokostmøte: Humaniora og infrastruktur.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4043803">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Heller ikke en humanistisk forsker klarer seg helt p? egenh?nd. Men hvilken infrastruktur trenger vi for humanistisk forskning? Infrastrukturer kommer i mange st?rrelser og former, og vi snakker stadig mer om dem - s?rlig n?r samtalen dreier seg om det digitale skiftet. Derfor sp?r vi: hva er humanioras infrastrukturer? Hvilke forskjeller og likheter er det mellom de forskjellige fagene p? HF? Hvordan kan vi best s?rge for at n?dvendig infrastruktur er p? plass? For ? gi seg i kast med disse sp?rsm?lene har vi samlet et panel med erfarne forskere og undervisere fra ulike HF-fag som alle i tillegg har erfaring fra lederroller og verv med betydning for hvordan HF og UiO forholder seg til infrastruktur.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392100" class="vrtx-external-publication">
        <div id="vrtx-publication-2392100">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392100">
                Jemterud, Torkild; Jensenius, Alexander Refsum; Løseth, Guro Engvig &amp; Holthe, Kolbjørn
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        ABELS KORK - Verdens største(?) musikkeksperiment.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5031209">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvordan p?virker musikk oss? Hva skjer i hjernen v?r n?r vi h?rer en melodi vi liker – eller misliker? Hvorfor reagerer vi forskjellig p? ulike typer musikk? Og hvordan klarer et helt orkester ? spille plettfritt sammen? Og forresten: trenger de egentlig ? ha en dirigent? Hver fredag svarer panelet i Abels t?rn p? alle slags vitenskapelige sp?rsm?l, store og sm?, fra lytterne. Noen vil langt ut i verdensrommet, og andre er mer opptatt av hva som skjer p? kj?kkenbenken. Men musikk er noe vi alle har et forhold til. Den er rundt oss hele tiden, og det er mye ? undre seg over n?r det gjelder musikk og hvordan den taler til oss p? dype personlige plan. Derfor har Abels t?rn og KORK g?tt sammen med RITMO og Universitetsbiblioteket for ? lage en musikalsk utgave av det popul?re vitenskapsprogrammet. Vi introduserer: Abels KORK!</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370702" class="vrtx-external-publication">
        <div id="vrtx-publication-2370702">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370702">
                Jensenius, Alexander Refsum; Riaz, Maham; Oldfield, Thomas L &amp; Juarez, Karenina
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO-studenter presenterer nye installasjoner.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4494725">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Studenter tilknyttet RITMO stiller ut prosjektene sine p? Popsenteret: en interaktiv symaskin fra 1911, et lyttende og snakkende speil, og et interaktivt maleri. Hvordan kan slike objekter gi musikalske opplevelser?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2370700" class="vrtx-external-publication">
        <div id="vrtx-publication-2370700">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2370700">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Challenges and Possibilities of Open Music Data.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4184456">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The Sempre Autumn conference was an online student study day, held on Friday 8th November 2024, with a combination of student presentations, research speed dating, and a special session on open research featuring Professor Iain Brennan (University of Hull), Professor Tuomas Eerola (Durham University), and Professor Alexander Refsum Jensenius (University of Oslo). The event was open to doctoral students at any stage of their research and those thinking of applying for doctoral study. We invited proposals for short presentations (10 minutes + 5 for Q&amp;A) from doctoral students, on any aspect of music psychology or music education.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2272907" class="vrtx-external-publication">
        <div id="vrtx-publication-2272907">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2272907">
                Oddekalv, Kjell Andreas &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        LAB.prat #3 og &quot;NM i stillstand&quot;: Kan man stå stille til musikk?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4347033">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2315680" class="vrtx-external-publication">
        <div id="vrtx-publication-2315680">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2315680">
                Bochynska, Agata; Bergstrøm, Rebecca Josefine Five &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Why do we need licenses on data?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4755481">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2260882" class="vrtx-external-publication">
        <div id="vrtx-publication-2260882">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2260882">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mock PhD Interview.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3511605">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The objective of the interview mockup is to provide an example of what a PhD interview looks like. We want to provide a safe space to ask questions to an experient interviewer and to understand how to better prepare for the interview if you&#39;re applying to PhD positions in other countries.

LatAm BISH Bash is a series of meetings and networking events that connect engineers, researchers, students, and companies working on speech, acoustics, and audio processing.

This time, we will have a PhD mockup interview conducted by Alexander Jensenius, who is a professor of music technology and Director of RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2397693" class="vrtx-external-publication">
        <div id="vrtx-publication-2397693">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2397693">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Some Challenges in Musical Artificial Intelligence.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3831506">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this presentation, I highlight RITMO&#39;s interdisciplinary approach, combining musicology, psychology, and informatics to study rhythm as a fundamental human property. I emphasise the intersection of humans and machines in AI, advocating for a balanced approach that incorporates both rule-based and learning-based systems, especially in music. I also address critical aspects like code sharing, data accessibility (FAIR principles), privacy, copyright, and ethical considerations within the AI landscape. Finally, I call for the development of AI for creative use, considering its impact on knowledge, ethics, and human experience, while also examining policy and societal rights.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2306125" class="vrtx-external-publication">
        <div id="vrtx-publication-2306125">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2306125">
                Solbakk, Anne-Kristin &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Research Ethics and Legal Perspectives.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5130693">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2255063" class="vrtx-external-publication">
        <div id="vrtx-publication-2255063">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2255063">
                Jensenius, Alexander Refsum &amp; Jemterud, Torkild
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva er verdens mest klissete substans?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2 - Abels tårn.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4750013">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Har menneske rytme? Hvorfor l?ter to toner med ulik grunntone forskjellig? Hvorfor grynter pianister?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2306986" class="vrtx-external-publication">
        <div id="vrtx-publication-2306986">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2306986">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Poster for &quot;Synth Maps: Mapping The Non-Proportional Relationships Between Synthesizer Parameters and Synthesized Sound&quot;.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4710729">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Parameter Mapping (PM) is probably the most used design approach in sonification. However, the relationship between a synthesizer’s input parameters and the perceptual distribution of its output sounds might not be proportional, limiting its ability to convey relationships within the source data in the sound. This study evaluates a basic Frequency Modulation (FM) synthesis module with perceptually motivated descriptors, measures of spectral energy distribution, and latent embeddings of pre-trained audio representation models. We demonstrate how these metrics do not indicate straightforward relationships between synthesis parameters and perceived sound. This is done using interactive audiovisual scatter plots—Synth Maps—that can be used to explore the sound distribution of the synthesizer and qualitatively evaluate how well
the different representations align with human perception. Link to the code and the interactive Synth Maps are available.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2284893" class="vrtx-external-publication">
        <div id="vrtx-publication-2284893">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2284893">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5183966">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">El martes 6 de agosto, a las 14:30 horas, se realizará la charla &quot;Sound Actions: Conceptualizing Musical Instruments&quot; que impartirá Alexander Refsum Jensenius, profesor de Tecnología Musical y Director del Centro RITMO para Estudios Interdisciplinarios en Ritmo, Tiempo y Movimiento en la Universidad de Oslo.

Alexander Refsum Jensenius es profesor de Tecnología Musical y Director del Centro RITMO para Estudios Interdisciplinarios en Ritmo, Tiempo y Movimiento en la Universidad de Oslo.
En la charla, que se realizará en idioma inglés, el académico presentará algunos aspectos destacados de su libro &quot;Sound Actions: Conceptualizing Musical Instruments&quot;. Esto incluye una discusión sobre las diferencias entre los instrumentos acústicos y electroacústicos y cómo los instrumentos de hoy en día no son solo &quot;creadores de sonido&quot;, sino que cada vez más son &quot;creadores de música&quot;. Ejemplificará este cambio con varios de sus propios nuevos instrumentos para la expresión musical (NIME).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2305608" class="vrtx-external-publication">
        <div id="vrtx-publication-2305608">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2305608">
                G&ouml;ksülük, Bilge Serdar; Tidemann, Aleksander &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Telematic Testing: One Performance in Three Locations.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4686167">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2307459" class="vrtx-external-publication">
        <div id="vrtx-publication-2307459">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307459">
                Jensenius, Alexander Refsum; Vo, Synne; Kelkar, Tejaswinee &amp; Kjus, Yngvar
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikksnakk: Musikk på Spotify - hvordan funker algoritmene?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4821486">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor er det slik at plateselskaper ?nsker at artister skal lage TikTok?er for ? promotere musikken sin? Hva bestemmer hvilke musikkanbefalinger du f?r i Spotify? Og hvordan bruker plateselskapene dataene dine til ? generere klikk og lytt? Bli med p? en samtale om algoritmer p? apper som TikTok og Spotify - og hvordan de p?virker musikksmaken din!

Til ? diskutere dette kommer:

- Synne Vo. Hun er en artist som slo igjennom p? TikTok, og bruker plattformen aktivt for ? promotere musikken sin. Hun kommer til panelet for ? dele sine erfaringer med bransjen og appene.

- Yngvar Kjus. Han er professor i musikk og medier p? UiO, og har forsket mye p? popul?rmusikk, musikkproduksjon og musikkbransjen.

- Tejaswinee Kelkar. Hun er er en sanger og forsker innen musikk og bevegelse. Hun har tidligere jobbet som dataanalytiker i Universal Music Norway og ved RITMO Center of Excellence ved Universitetet i Oslo.

Samtalen ledes av Alexander Refsum Jensenius. Han er professor i musikk ved Universitetet i Oslo, og leder av RITMO - Senter for tverrfaglig forskning p? rytme, tid og bevegelse. Han pr?ver hele tiden ? forst? mer om hvordan og hvorfor mennesker beveger seg til musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392254" class="vrtx-external-publication">
        <div id="vrtx-publication-2392254">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392254">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interdisiplinæritet - et musikkperspektiv.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3909868">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Skal snakke om skj?ringspunktet mellom psykologi, informatikk og musikk og arbeidet som foreg?r p? instituttet han leder: RITMO, Senter for Interdisiplin?re studier i rytme, tid og bevegelse ved Universitetet i Oslo. Alexander er b?de forsker og musiker. Han har en sammensatt bakgrunn best?ende av musikk, informatikk, fysikk og matematikk og hans praktisk rettede forskning har bredt nedslagsfelt. Digitale verkt?y som har blitt utviklet ved RITMO blir n? ogs? brukt innen medisinsk forskning p? ADHD og Cerebral Parese.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2347651" class="vrtx-external-publication">
        <div id="vrtx-publication-2347651">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2347651">
                Karbasi, Seyed Mojtaba; Pileberg, Silje &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A newly developed robot can play the drums, listen, and learn.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Science Norway.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4773370">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2307457" class="vrtx-external-publication">
        <div id="vrtx-publication-2307457">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2307457">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        20 Years of Piano Research at the University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4045963">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this lecture-recital, I will present piano-related research from the Department of Musicology over the last twenty years. I will also reflect on my role in this history, both as an artist and scientist. Finally, I will scrutinize the department&#39;s new Disklavier while performing various exploratory etudes.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2317366" class="vrtx-external-publication">
        <div id="vrtx-publication-2317366">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2317366">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM as an enabler for flexible academic career paths in and out of Norway.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4023429">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Dette webinaret er det fjerde i rekka og vil handla om forslaget om attraktive karrierar i akademia. Europakommisjonen sitt utgangspunkt er at internasjonalt utdannings篮球即时比分_nba比分直播-彩客网重点推荐 og kvalitetsutvikling i h?gare utdanning ikkje blir st?tta og verdsett i s? stor grad som naudsynt i dei akademiske karrierane, og at dette er eit hinder for utvikling av europeisk h?gare utdanning.

Kva inneber dette forslaget, og korleis ser det ut fr? perspektivet til europeiske og norske universitet? Korleis heng den europeiske prosessen for utvikling av akademiske karrierar saman med det som skjer i Noreg?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2392256" class="vrtx-external-publication">
        <div id="vrtx-publication-2392256">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2392256">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4186863">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2257651" class="vrtx-external-publication">
        <div id="vrtx-publication-2257651">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2257651">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mock PhD Interview.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3250681">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The objective of the interview mockup is to provide an example of what a PhD interview looks like. We want to provide a safe space to ask questions to an experient interviewer and to understand how to better prepare for the interview if you&#39;re applying to PhD positions in other countries.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281183" class="vrtx-external-publication">
        <div id="vrtx-publication-2281183">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281183">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tverr faglighet? Muligheter og utfordringer med fler- og tverrfaglighet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4301637">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Tverrfaglighet nevnes gjerne i festtaler og søknadstekster, men hvordan er virkeligheten? I denne presentasjonen vil professor Alexander Refsum Jensenius diskutere egne erfaringer med fler- og tverrfaglige forskningsprosjekter.
Han vil også presentere hvordan RITMO jobber med å utvikle en forskningskultur og prosjektsøknader på kryss og tvers av gjeldende fagdisipliner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281197" class="vrtx-external-publication">
        <div id="vrtx-publication-2281197">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281197">
                Jensenius, Alexander Refsum &amp; Bochynska, Agata
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Opphavsrettslige utfordringer ved overgangen til FAIR forskningsdata ved UiO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5040460">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281190" class="vrtx-external-publication">
        <div id="vrtx-publication-2281190">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281190">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hjernen i sentrum: Kunst.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4115688">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor er noen musikalske og andre ikke? Hvordan har det seg at kunst kan treffe oss så voldsomt - og så ulikt! Ulike kunstneriske uttrykk som musikk, malerkunst, litteratur, dans og teater kommer uten fasit og tolkes vidt forskjellig fra person til person. Er det hjernen som styrer dette? Det er åpenbart at hjernen vår er aktiv og ikke passiv når vi opplever kunst. Hvorfor er det sånn? Gir kunstneriske opplevelser god hjernetrim? Er kunst viktig for hjernehelsen?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281186" class="vrtx-external-publication">
        <div id="vrtx-publication-2281186">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281186">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vurderinger i akademiske karriereløp.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3710239">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">UHRs arbeidsgruppe for åpen vurdering utarbeidet i 2021 en veileder for vurdering i akademiske
karriereløp – NOR-CAM. Det finnes også andre initiativer for vurdering av akademiske karrierer,
deriblant det europeiske Coalition for Advancing Research Assessment (CoARA). Men hva er verdien
av disse vurderingsveilederne? Hvem er de for og hva er de ment å få til? Og hvilke vurderingsveiledere
er det som blir viktig i tiden som kommer?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281194" class="vrtx-external-publication">
        <div id="vrtx-publication-2281194">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281194">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab as an Open Science innovation project between a research centre and the University library.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5162893">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281193" class="vrtx-external-publication">
        <div id="vrtx-publication-2281193">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281193">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fostering the emergence of new research data careers.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4335744">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Equipping future graduates, researchers, and society at large with the skills needed to support the digital transition is becoming a priority on European, national, and institutional agendas. Research data management (RDM) and FAIR data are part of this skillset, and research data careers are increasingly in demand in both the public and private sectors. At organisational level, the availability of staff with data competencies is crucial to support the implementation of FAIR RDM practices and, ultimately, to foster the transition towards Open Science. Data collected by the European University Association show for example how universities are creating dedicated research data support services and hiring specific support staff, but significant disparities exist between countries and institutions. RDM responsibilities still fall to existing members of staff. In many cases, technical skills are only partially available and new dedicated staff is required. Universities who have hired specific research data support roles may still have problems meeting the growing demand for research data expertise. Within this context, a major challenge is represented by the absence of a shared recognition and definition of research management professional profiles, despite recent progress being made at European level through ERA Action 17 on research management. This session will address needs, challenges and opportunities related to the emergence of new research data careers, including the identification of key skills, clear career paths and their integration into research assessment systems. It will do so by showcasing best practices and reflecting on ways forward with a panel of experts representing different actors, i.e. university leaders, research data practitioners and policymakers.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281185" class="vrtx-external-publication">
        <div id="vrtx-publication-2281185">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281185">
                Jensenius, Alexander Refsum; Sørbø, Solveig &amp; Bergstrøm, Rebecca Josefine Five
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Åndsverk, åpenhet, kunstnerisk forskning og forskningsbasert kunst: Erfaringer med MusicLab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4584572">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281187" class="vrtx-external-publication">
        <div id="vrtx-publication-2281187">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281187">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interdisciplinarity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4898221">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2281188" class="vrtx-external-publication">
        <div id="vrtx-publication-2281188">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281188">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk, Data og KI.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5002588">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikk er en av de mest komplekse menneskelige kommunikasjonsformene som finnes og egner seg derfor godt for å utforske kunstig intelligens. Presentasjonen beskriver hvordan musikkforskere, psykologer og informatikere jobber sammen ved RITMO for å forstå mer om rytme, tid og bevegelse hos mennesker og maskiner.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2281192" class="vrtx-external-publication">
        <div id="vrtx-publication-2281192">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2281192">
                Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4336219">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296279" class="vrtx-external-publication">
        <div id="vrtx-publication-2296279">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296279">
                Christodoulou, Anna-Maria; Dutta, Sagar; Lartillot, Olivier; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Convolutional Neural Network Models for Multimodal Classification of Expressive Piano Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4642212">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2296281" class="vrtx-external-publication">
        <div id="vrtx-publication-2296281">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2296281">
                Christodoulou, Anna-Maria &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Navigating Challenges in Multimodal Music Data Management for AI Systems.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5024521">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The responsible management of multimodal music datasets plays a crucial role in the development and evaluation of music processing systems. However, navigating the landscape of legal and ethical considerations can be a complex and challenging task due to the magnitude and diversity of such. This paper clarifies these divergent legal and ethical considerations and highlights the challenges associated with multimodality and AI systems. Focusing on the most crucial stages of multimodal music data management, we provide recommendations for tackling legal and ethical challenges. We emphasize the importance of establishing an inclusive and accessible music data environment, encouraging researchers and data users to adopt responsible approaches towards managing multimodal music data collections.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140991" class="vrtx-external-publication">
        <div id="vrtx-publication-2140991">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140991">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Wishful thinking about CVs: Perspectives from a researcher.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3980287">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2140990" class="vrtx-external-publication">
        <div id="vrtx-publication-2140990">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140990">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: An Embodied approach to a Digital Organology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5084371">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2146554" class="vrtx-external-publication">
        <div id="vrtx-publication-2146554">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2146554">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4157847">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my recent book &quot;Sound Actions&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2146548" class="vrtx-external-publication">
        <div id="vrtx-publication-2146548">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2146548">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Career Assessment Matrix in Norway and how to adapt your CV to OS practices.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5007301">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The Norwegian Career Assessment Matrix (NOR-CAM) is a toolbox for recognition and rewards in academic careers.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140993" class="vrtx-external-publication">
        <div id="vrtx-publication-2140993">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140993">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Explorations of human micromotion through standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3551789">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2140994" class="vrtx-external-publication">
        <div id="vrtx-publication-2140994">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2140994">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4483927">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202439" class="vrtx-external-publication">
        <div id="vrtx-publication-2202439">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202439">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions: Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4981052">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">How do new technologies change how we perform and perceive music? What happens when composers build instruments, performers write code, perceivers become producers, and instruments play themselves? These are questions addressed in the new book by Professor Alexander Refsum Jensenius: Sound Actions: Conceptualizing Musical Instruments published by the MIT Press.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202432" class="vrtx-external-publication">
        <div id="vrtx-publication-2202432">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202432">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Explorations of human micromotion through standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4025813">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202435" class="vrtx-external-publication">
        <div id="vrtx-publication-2202435">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202435">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound Actions - Conceptualizing Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5065826">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2133295" class="vrtx-external-publication">
        <div id="vrtx-publication-2133295">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2133295">
                Jensenius, Alexander Refsum &amp; Hagen, Knut-Øyvind
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stoler du på stemmer? Det må du slutte med. Nå kan kunstig intelligens få hvem som helst til å si hva som helst.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P1 - Ukeslutt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3244683">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2187796" class="vrtx-external-publication">
        <div id="vrtx-publication-2187796">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2187796">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskarperspektivet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3658390">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Denne hausten har Utkast til strategi for norsk vitenskapelig publisering etter 2024 vore ute til høyring. Strategien skildrar tilrådingar til både forskarar, forskingsutførande institusjonar, forskingsfinansiørar og myndigheiter. I dette seminaret inviterer vi ein av dei som har utarbeidd strategien, Vidar Røeggen frå Universitets- og Høgskolerådet, til å fortelje om arbeidet med rapporten, innspel som har komme inn og korleis han ser for seg det framtidige publiseringslandskapet. Deretter går ordet til Alexander Jensenius (UiO, NOR-CAM), Johanne Raade (UiT) og Marte Qvenild (NFR), til å diskutere korleis dei ser framtida for open publisering etter 2024, frå perspektivet til ein forskar, institusjon og finansiør, høvesvis. Ser dei andre utfordringar enn dei som er forsøkt møtt i den nye strategien?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202429" class="vrtx-external-publication">
        <div id="vrtx-publication-2202429">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202429">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Human Micromotion Through Standing Still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3725477">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Moving slowly likely puts us into a special state of mind. Subjective reports from various practices including dance, Tai Chi and walking meditation suggest that slow movements can bring participants into a special state involving increased relaxation and awareness. Interestingly, relatively little research has been performed specifically to understand the underlying mechanisms and the possible applications of human slow movement. One reason might be that slow movements are not common in day-to-day life: when we want to move – for example to pick up our cup of coffee - we usually want to do it now. Some evidence suggests that humans tend to avoid moving slowly in different tasks, for example, when improvising movements together. The goal of this meeting is to bring together scholars and practitioners interested in slow movement, and to foster interdisciplinary research on this somewhat neglected topic. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2202443" class="vrtx-external-publication">
        <div id="vrtx-publication-2202443">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202443">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tverrfaglig forskning på rytme, tid og bevegelse.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3512336">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">RITMO er et unikt SFF på grunn av sin radikalt tverrfaglige oppbygning. Hvordan fungerer det i praksis?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2188583" class="vrtx-external-publication">
        <div id="vrtx-publication-2188583">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2188583">
                Jensenius, Alexander Refsum &amp; Tytko, James
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Emerging tech creates music from dance movements.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        The Naked Scientists.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3407299">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Listen to the melodies composed with the help of motion capture body suits...</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2183304" class="vrtx-external-publication">
        <div id="vrtx-publication-2183304">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2183304">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Still Standing: The effects of sound and music on people standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4893857">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I have been standing still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for the conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2183301" class="vrtx-external-publication">
        <div id="vrtx-publication-2183301">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2183301">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Still Standing: The effects of sound and music on people standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4375585">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I have been standing still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for the conscious and unconscious control of musical sounds.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2106038" class="vrtx-external-publication">
        <div id="vrtx-publication-2106038">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2106038">
                Jensenius, Alexander Refsum &amp; Poutaraud, Joachim
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4733516">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is targeted at students and researchers working with video recordings. Even though the workshop will be based on quantitative tools, the aim is to provide solutions for qualitative research. This includes visualization techniques such as motion videos, motion history images, and motiongrams, which, in different ways, allow for looking at video recordings from different temporal and spatial perspectives. It also includes basic computer vision analysis modules, such as extracting quantity and centroid of motion, and using such features in analysis.

The participants will learn to use the Musical Gestures Toolbox for Python, a collection of high-level modules for easily generating all of the above-mentioned visualizations and analyses. This toolbox was initially developed for analyzing music-related body motion but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, psychology, medicine, and educational sciences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2280626" class="vrtx-external-publication">
        <div id="vrtx-publication-2280626">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2280626">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3720525">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2172754" class="vrtx-external-publication">
        <div id="vrtx-publication-2172754">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2172754">
                Olaisen, Sofie Retterstøl; Jensenius, Alexander Refsum &amp; Vuoskoski, Jonna Katariina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Una må danse når ho høyrer musikk: – Eit urgamalt instinkt.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3704477">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Urgamle instinkt blir sett i sving når hjernen din oppfattar musikk. No kan forskarane også sjå danselysta i augo dine.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2135506" class="vrtx-external-publication">
        <div id="vrtx-publication-2135506">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2135506">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO as incubator/driver for Open Science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5205201">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2135514" class="vrtx-external-publication">
        <div id="vrtx-publication-2135514">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2135514">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        CV-modul som grunnlag for NOR-CAM.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4012362">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vurdering av forskning er på dagsorden som aldri før. NIFU inviterer derfor til åpent seminar om helhetlig vurdering av forskere og forskning. Bakteppet er den nye europeiske avtalen om evaluering av forskning og den nye norske veilederen for karrierevurdering av forskere. Seminaret arrangeres i samarbeid mellom NIFU (R-Quest), UHR og Det nasjonale publiseringsutvalget.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2135507" class="vrtx-external-publication">
        <div id="vrtx-publication-2135507">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2135507">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM and European agreement on reforming research assessment.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4162588">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2180726" class="vrtx-external-publication">
        <div id="vrtx-publication-2180726">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2180726">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ny teknologi vil alltid endre musikken.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-READEROPINION">
                        Aftenposten (morgenutg. : trykt utg.).
                </span>
                <span class="vrtx-issn">ISSN 0804-3116.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5070106">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikerne forsvant ikke med grammofonen, det gjør de ikke nå heller.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198032" class="vrtx-external-publication">
        <div id="vrtx-publication-2198032">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198032">
                Swarbrick, Dana; Danielsen, Anne; Jensenius, Alexander Refsum &amp; Vuoskoski, Jonna Katariina
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Effects of “Feeling Moved” and “Groove” On Standstill.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4743375">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192960" class="vrtx-external-publication">
        <div id="vrtx-publication-2192960">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192960">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        How Findable, Accessible, Interoperable and Reusable data enables research-led education.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4209609">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">FAIR data is an essential component of the open research ecosystem. In this article, Alexander Refsum Jensenius argues that &quot;FAIRification&quot; can also benefit research-based and research-led education, providing opportunities to bring together different university missions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2169506" class="vrtx-external-publication">
        <div id="vrtx-publication-2169506">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2169506">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vurdering av forskere.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4544532">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2182707" class="vrtx-external-publication">
        <div id="vrtx-publication-2182707">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2182707">
                Jensenius, Alexander Refsum; Danielsen, Anne &amp; Søndergaard, Pia
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvor blir det av UiOs alumni-satsing?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5223091">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Det snakkes i festlige lag om at våre alumni er en ressurs. Dessverre viser praksis at man ikke bare ignorerer tidligere ansatte, men aktivt forsøker å fjerne alle spor av at de har forsket ved institusjonen.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2169508" class="vrtx-external-publication">
        <div id="vrtx-publication-2169508">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2169508">
                Vogt, Yngve; Krauss, Stefan Johannes Karl; Mossige, Joachim; Dysthe, Dag Kristian; Angheluta, Luiza &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bereder grunnen for kunstige organer.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Apollon.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3491297">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2181261" class="vrtx-external-publication">
        <div id="vrtx-publication-2181261">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2181261">
                Jensenius, Alexander Refsum &amp; Tidemann, Grethe
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Cristin forsvinner. Uklart hva som blir bedre i det nye systemet.                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3232429">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Cristin, det nasjonale systemet for forskningsdokumentasjon, skal erstattes av Nasjonalt vitenarkiv. Men hva som blir bedre i det nye systemet kan verken IT-direktøren eller forskningsdirektøren ved UiO svare på.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2141706" class="vrtx-external-publication">
        <div id="vrtx-publication-2141706">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2141706">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Experiences and advice from several humanities/life science collaborations.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3289413">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2141705" class="vrtx-external-publication">
        <div id="vrtx-publication-2141705">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2141705">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Oppsummering av arbeidet med opphavsrett og lisenser i QualiFAIR.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4604481">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Forskere ofte stiller spørsmål på hvordan de skal håndtere opphavsrett når det samler inn data. Hvem eier data? Hvem har rettigheter og hvilke rettigheter har man som prosjektleder eller prosjektdeltaker? Hvilke lisenser skal man velge når man vil dele ulikt materiale slikt som artikler, datasett, kildekode, bilder, lyd- og videoopptak? Hvordan kan man bruke andres materiale som ikke har spesifikke lisenser? Hvordan kan UiO legge bedre til rette for at studenter og ansatte får et bevisst forhold til opphavsrett?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2135888" class="vrtx-external-publication">
        <div id="vrtx-publication-2135888">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2135888">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva vil det si å følge prinsippene i NOR-CAM og CoARA i praksis? Erfaringer fra RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4399103">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">NARMA vårkonferanse 2023 – Forskningsstøtte i uforutsigbare tider – NARMA 10 år.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200050" class="vrtx-external-publication">
        <div id="vrtx-publication-2200050">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200050">
                Riaz, Maham; Upham, Finn; Burnim, Kayla; Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comparing inertial motion sensors for capturing human micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5122145">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents a study of the noise level of accelerometer data from a mobile phone compared to three commercially available IMU-based devices (AX3, Equivital, and Movesense) and a marker-based infrared motion capture system (Qualisys). The sensors are compared in static positions and for measuring human micromotion, with larger motion sequences as reference. The measurements show that all but one of the IMU-based devices capture motion with an accuracy and precision that is far below human micromotion. However, their data and representations differ, so care should be taken when comparing data between devices.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2131190" class="vrtx-external-publication">
        <div id="vrtx-publication-2131190">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2131190">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring large datasets of human, music-related standstill.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4436546">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I will stand still for ten minutes around noon every day, in a different room each day. The aim is to collect data about my micromotion and compare it to the qualities of the environment. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. In the talk, I will present results from the annual Norwegian Championships of Standstill, where we have studied the influence of music on people&#39;s micromotion. I will also talk about how micromotion can be used in interactive music systems, allowing for conscious and unconscious control of musical sounds. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2200580" class="vrtx-external-publication">
        <div id="vrtx-publication-2200580">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2200580">
                Lartillot, Olivier; Thedens, Hans-Hinrich; Mjelva, Olav Luksengård; Elovsson, Anders; Monstad, Lars Løberg &amp; Johansson, Mats Sigvard
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2200580/contributors', 'vrtx-publication-contributors-2200580')">
                    [Show all&nbsp;8&nbsp;contributors for this article]</a>
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Norwegian Folk Music &amp; Computational Analysis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3480827">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">As a prélude for Norway&#39;s Constitution Day, this special event celebrated the Norwegian folk music tradition, showcasing our new online archive and demonstrating the richness of Hardanger fiddle music, with live performance. One aim of the project is to conceive new technologies allowing to better access, understand and appreciate Norwegian folk music.

In this event, we introduced a new online version of the Norwegian Folk Music Archive and discuss underlying theoretical and technical challenges. A live concert/workshop, with the participation of Olav Luksengård Mjelva, offered a lively introduction to Hardanger fiddle music and its elaborate rhythm. The interests and challenges of automated transcription and analysis were discussed, with the public release of our new software Annotemus.

The symposium was organised in the context of the MIRAGE project (RITMO, in collaboration with the National Library of Norway&#39;s Digital Humanities Laboratory).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2191630" class="vrtx-external-publication">
        <div id="vrtx-publication-2191630">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191630">
                Jensenius, Alexander Refsum &amp; Sørnes, Astrid Johanne
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ny Beatles-musikk.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK Nyhetsmorgen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3326038">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The Beatles gir ut ny musikk</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2191629" class="vrtx-external-publication">
        <div id="vrtx-publication-2191629">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2191629">
                Jensenius, Alexander Refsum &amp; Sørnes, Astrid Johanne
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Beatles med ny l?t.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3595593">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">«Now And Then» er ferdigstilt av Paul McCartney og Ringo Starr – med litt hjelp frå kunstig intelligens.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127653" class="vrtx-external-publication">
        <div id="vrtx-publication-2127653">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127653">
                Jensenius, Alexander Refsum &amp; Rosenberg, Ingvild
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Unik forskningskonsert.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P1.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4857891">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2202456" class="vrtx-external-publication">
        <div id="vrtx-publication-2202456">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2202456">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Introducing MusicLab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3773555">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In 2021, one of the world’s finest string quartets, The Danish String Quartet (DSQ), and a large team of international researchers based at RITMO, co-hosted MusicLab Copenhagen – a groundbreaking event where DSQ performed their best repertoire while researchers experimented with, measured, and analyzed the experiences and behavior of musicians and audience. Some of the questions we tried to answer were: Do we become one grand “we” when absorbed in music together? How do we synchronize our bodily rhythms with the music during a concert? As an innovative musical and scientific format, the concert has been widely reported and won “Event of the Year” by the Danish National Broadcasting Corporation (DR P2). Now, the researchers have completed their analyses, and we are excited to share findings in a hybrid launch event.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127652" class="vrtx-external-publication">
        <div id="vrtx-publication-2127652">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127652">
                Haaland, Tonette N. &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        SSO deltar i forskning: – Skal finne ut hvordan musikk påvirker oss.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Rogalands avis.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4789083">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Stavanger Symfoniorkester (SSO) inviterer elever på 5.-10. trinn på konsert, for å gjennomføre forskning.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2127649" class="vrtx-external-publication">
        <div id="vrtx-publication-2127649">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2127649">
                Jensenius, Alexander Refsum &amp; Burnim, Kayla
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskere inntok Konserthuset.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Stavanger Aftenblad.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3741506">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hundrevis av elever kom for å høre på Stavanger symfoniorkester. Mens orkesteret spilte, var musikerne, dirigenten og publikum del av et unikt forskningsprosjekt.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2139082" class="vrtx-external-publication">
        <div id="vrtx-publication-2139082">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2139082">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Rhythmic Data Science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3408980">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Rhythm is everywhere, from how we walk, talk, dance and play to telling stories about our past and even predicting the future. Rhythm is key to how we interact with our world. Our heartbeat, nervous system, and other bodily cycles work through rhythm. As such, rhythm is a crucial aspect of human action and perception, and it is in complex interaction with the world&#39;s cultural, biological and mechanical rhythms. At RITMO, they research rhythmic phenomena and their complex relationships with the rhythms of human bodies and brains. In the talk, Alexander will present examples of how they record, synchronize, and analyze data of complex, rhythmic human behavior, such as real-world concerts.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198424" class="vrtx-external-publication">
        <div id="vrtx-publication-2198424">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198424">
                Bukvic, Ivica Ico; Jensenius, Alexander Refsum; Wittman, Hollis &amp; Masu, Raul
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Implementing the new template for NIME music proceedings with the community.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4600332">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We will analyze a new possible template for NIME submissions which would simplify the integration of NIME music performances in the COMPEL, a database which facilitates navigation across different categories (pieces, persons, instruments). The template emerges from a workshop run last year at NIME about the structure of COMPEL and the process of entering all performances presented last year. From this workshop we expect to improve the template and validate it with a community.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198418" class="vrtx-external-publication">
        <div id="vrtx-publication-2198418">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198418">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring Emerging Drumming Patterns in a Chaotic Dynamical System using ZRob.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3524390">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">ZRob is a robotic system designed for playing a snare drum. The robot is constructed with a passive flexible spring-based joint inspired by the human hand. This paper describes a study exploring rhythmic patterns by exploiting the chaotic dynamics of two ZRobs. In the experiment, we explored the control configurations of each arm by trying to create un- predictable patterns. Over 200 samples have been recorded and analyzed. We show how the chaotic dynamics of ZRob can be used for creating new drumming patterns.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198336" class="vrtx-external-publication">
        <div id="vrtx-publication-2198336">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198336">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The assessment of researchers is changing – how will it impact your career?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5237382">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Changes are happening in the world of research assessment, for example by recognizing several competencies as merits and a better balance between quantitative and qualitative goals. In Norway, for example, Universities Norway presented the NOR-CAM report in 2021 which sparked a movement for reform.  As an early career researcher, it&#39;s crucial to understand how these changes may impact your research career. In this talk, Jensenius will discuss the evolving landscape of research assessment and what it means for you. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198337" class="vrtx-external-publication">
        <div id="vrtx-publication-2198337">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198337">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Innovasjon og åpen forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4308136">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2198421" class="vrtx-external-publication">
        <div id="vrtx-publication-2198421">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198421">
                Masu, Raul; Morreale, Fabio &amp; Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The O in NIME: Reflecting on the Importance of Reusing and Repurposing Old Musical Instruments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4521771">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we reflect on the focus of “newness” in NIME research and practice and argue that there is a missing O (for “Old”) in framing our academic discourse. A systematic review of the last year’s conference proceedings reveals that most papers do, indeed, present new instruments, interfaces, or pieces of technology. Comparably few papers focus on the prolongation of existing NIMEs. Our meta-analysis identifies four main categories from these papers: (1) reuse, (2) update, (3) complement, and (4) long-term engagement. We discuss how focusing more on these four types of NIME development and engagement can be seen as an approach to increase sustainability.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2182244" class="vrtx-external-publication">
        <div id="vrtx-publication-2182244">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2182244">
                Jensenius, Alexander Refsum &amp; Zürn, Christof
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Standing still with Alexander Refsum Jensenius.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        The Power of Music Thinking.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4050287">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is the use of standing still for 10 minutes? I was asking myself when I saw a post on social media. It was a double picture of a man with a mobile phone around his neck displaying some data, and another picture showed the view he saw at that moment. I learned that he stood there for 10 minutes without any movement, listening to the sound that was already there. There were many pictures like this, and I decided to get in contact.

So, today, we are in Oslo. We speak with Alexander Refsum Jensenius, a professor of music technology at the University of Oslo, a book author, a music researcher and researching musician working in the fields of embodied music cognition and new interfaces for musical expression. 

Alexander shares with us his experiences while performing and testing with artistic methods of embodied listening and how people experience music and sound. This goes from experiments with and without the conductor of a Symphony Orchestra to the sounds of our kitchen appliances.

We talk about his motion capture lab, where a person’s exact location and micro-movements can be detected while they hear different kinds of music, and how the researchers can understand what moves them. 

Alexander shares insights about the Norwegian Championship of Stand Still, where until now, 1000s of people have participated, and the winner is the person with the lowest average velocity on standing the stillest over some time. 

Alexander explains the interplay of body and mind and reveals some secrets on how to move people, for example, on the dance floor or to calm them down. It all has to do with our bpm, the average heartbeat of about 60 beats a minute. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198335" class="vrtx-external-publication">
        <div id="vrtx-publication-2198335">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198335">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Observing spaces while standing still.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4024536">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Throughout 2023, I stand still for ten minutes around noon every day, in a different room each day. This project follows a decade-long exploration of human micromotion from both artistic and scientific perspectives. Previously, I have been interested in the impact of music. Now, I am listening to ventilation systems, elevators, and people walking and talking and reflecting on how they influence my body and
mind. The aim is to understand more about the rhythms of the environment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2198334" class="vrtx-external-publication">
        <div id="vrtx-publication-2198334">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2198334">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3875060">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">RITMO er et tverrfaglig senter som ønsker å avdekke de kognitive mekanismene som ligger til grunn for menneskelig rytme, i musikk, bevegelse og audiovisuelle medier.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2192098" class="vrtx-external-publication">
        <div id="vrtx-publication-2192098">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192098">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan en videreutvikling av vurderingssystemet endrer hvordan man jobber faglig.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4901820">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2192099" class="vrtx-external-publication">
        <div id="vrtx-publication-2192099">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2192099">
                Jensenius, Alexander Refsum
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk og kunstig intelligens.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4190551">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kunstig intelligens kan allerede skrive noter og mikse musikk. I tiden fremover vil vi se mange eksempler på hvordan maskinlæring tas i bruk i musikkutøving og -produksjon og til å skape nye lytteopplevelser. Men hva er egentlig musikalsk kunstig intelligens? Hva vil det si å trene en maskinlæringsmodell? Vil maskinene gjøre musikere og komponister overflødige? Denne forelesningen vil gi deg en del svar, men også flere spørsmål.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094857" class="vrtx-external-publication">
        <div id="vrtx-publication-2094857">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094857">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring music performance and perception through motion capture.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4051979">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This talk will present different approaches to capturing human bodily activity. Motion capture can be performed with sensor-based and camera-based systems, each of which has benefits and limitations. Sensor-based systems are flexible and scalable and can easily be used outside laboratory environments. They are good at tracking relative motion and rotation information but less suitable for tracking position. Camera-based systems come in many flavors and can be used with and without markers. They excel at tracking positions but are prone to reflections and environmental noise. As a consequence, camera-based motion capture systems are better suited for laboratory settings. I will discuss my twenty-year-long experience using different motion capture systems to study music-related body motion. This includes research on musicians, including rehearsal techniques and performance strategies. Such studies push the limits of the technology when it comes to precision and accuracy. It is particularly challenging when using motion capture equipment in real-world concert settings. At the University of Oslo, we have successfully captured the motion of both solo and ensemble performances and are currently trying to scale up to a full orchestra. We are also carrying out motion capture of perceivers, audience members in concerts, dancers, and other people moving to music. Through the Norwegian Championship of Standstill, we have delved into human micromotion, the tiniest actions we can perform and perceive. At this level, motion capture can detect physiological signals, such as breathing and heart rate. Data from such studies are interesting scientifically and have also been used in artistic practice. Finally, I will give examples of how real-time motion capture can be used in various creative applications, including &quot;inverse&quot; sonic interaction.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2043055" class="vrtx-external-publication">
        <div id="vrtx-publication-2043055">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2043055">
                Karbasi, Seyed Mojtaba; Jensenius, Alexander Refsum; Godøy, Rolf Inge &amp; Tørresen, Jim
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Robotic Drummer with a Flexible Joint: the Effect of Passive Impedance on Drumming.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4041776">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Intelligent robots aimed for performing music and playing musical instruments have been developed in recent years. With the advancements in artificial intelligence and robotic systems, new capabilities have been explored in this field. One major aspect of musical robots that can lead to the emergence of creative results is the ability to learn skills autonomously. To make it feasible, it is important to make the robot utilize its maximum potential and mechanical capabilities to play a musical instrument. Furthermore, the robot needs to find the musical possibilities based on the physical properties of the instrument to provide satisfying results. In this work, we introduce a drum robot with certain mechanical specifications and analyze the capabilities of the robot according to the drumming sound results of the robot. The robot has two degrees of freedom, actuated by one quasi direct-drive servo motor. The gripper of the robot features a flexible joint with passive springs which adds complexity to the movements of the drumstick. In a basic experiment, we have looked at the drum roll performance by the robot while changing a few control variables such as frequency and amplitude of the motion. Both single-stroke and double-stroke drum rolls can be performed by the robot by changing the control variables. The effect of the flexible gripper on the drumming results of the robot is the main focus of this study. Additionally, we have divided the control space according to the type of drum rolls. The results of this experiment lay the groundwork for developing an intelligent algorithm for the robot to learn musical patterns by interacting with the drum.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2068538" class="vrtx-external-publication">
        <div id="vrtx-publication-2068538">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2068538">
                Upham, Finn; Memis, Ahmet Emin; Hansen, Niels Chr.; Rosas, Fernando E.; Clim, Maria-Alena &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Participatory applause: Interactions of audience members clapping at the end of a classical music concert.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5016621">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Participatory applause: Interactions of audience members clapping at the end of a classical music concert

According to musicological studies of audience culture, applause is the most overt form of participation allowed to the collections of individuals attending classical music concerts (Brandl-Risi, 2011; Small, 1998; Tröndle, 2020). The final round of applause can exhibit many interesting dynamics related to their collective enthusiasm for the performance (Lupyan &amp; Rifkin, 2003), the local applause culture, and what is on stage during the clapping. 

Quantitative empirical study of group clapping behaviours has principally depended on participants clapping on request in laboratory settings or A/V recordings from concerts (Neda, 2000) and presentations (Mann et al., 2013). To study the coordination involved in this collective behaviour, we need accurate measurements of individuals clapping voluntarily in a real concert setting. 

To describe how the appreciative audience members adjust their clapping to each other and the action on stage during the final round of applause, demonstrating their participation at a concert’s end.
*
After the Danish String Quartet (DSQ) performed their last piece at the Music Lab Copenhagen Concert, the audience clapped continuously for nearly two minutes. During that time, the musicians stood and bowed, had scientific instruments removed from their bodies, left the stage, returned to bow again, and finally left the stage for good. The clapping action of individual participants in this concert experiment was captured by a mobile phone on their chests, and these recordings show how individuals’ clapping contributed to the collective effect shared with the musicians. 

Through the final applause interval, 70 devices captured clear clap sequences, representing over half of the audience at this chamber performance. In some ways, their applause followed expected patterns for a concert audience. They began to applaud over a very short time interval (Mann et al., 2013), more than half starting within less than a second of each other. After 20 s of independent clapping at rates from under 120 BPM to over 200 BPM, the participants shifted to clapping together on a shared beat, a practice that is common for Danish audiences. This group maintained synchrony for over a minute while steadily accelerating from around 158 BPM to 176 BPM, an expected consequence of mutual adaptation during group clapping (Thomson et al., 2018). The coordinated action was strongest while the musicians were on the stage but a subset of independent clapping broke out while the audience waited for the performers to return for their final round of bows. 

Participants’ claps were evaluated from two perspectives: the alignment of claps, reflecting the dominant shift from independence to coordination, and the distribution of participants’ clapping rates over time. Despite some measurement challenges, the shift from independent to coordinate clapping emerges strongly from participants’ movements, with the median rate of clapping slowing until a dominant beat takes hold. Individuals’ clap sequences confirm that the independent clapping at the start of the applause is a result of individual participants clapping isochronously at their own rate, separate in rate and phase from their neighbours in the hall. When the audience claps together, they are voluntarily adjusting to the dominant rate and phase of the people in the hall, with little change in the quality of their isochronous clapping action. Drift in the synchronised clapping rate reflects mutual attentiveness while variation in the number of participants contributing to the coordinated claps suggests differences in applause strategy. Many participants opted to coordinate with their peers while some seemed to prioritise reacting to the musicians. 
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2043954" class="vrtx-external-publication">
        <div id="vrtx-publication-2043954">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2043954">
                Swarbrick, Dana; Upham, Finn; Erdem, Cagri; Jensenius, Alexander Refsum &amp; Vuoskoski, Jonna Katariina
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Measuring Virtual Audiences with The MusicLab App: Proof of Concept.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4412226">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We present a proof of concept by using the mobile application MusicLab to measure motion during a livestreamed concert  and  examining its relation to  musical features. With the MusicLab App, participants’ own smartphones’ inertial measurement unit (IMU) sensors can be leveraged to record their motion and their subjective experiences collected  through  survey  responses.  The  MusicLab  Lock-down  Rave  was  an  Algorave  (live-coded  dance  music) livestreamed concert featuring prolific performers Renick Bell and Khoparzi. They livestreamed for an international audience who wore their smartphones with the MusicLab App while they listened/danced to the performances. From their acceleration, we  computed quantity of motion and compared it to musical features that have previously been associated with music-related motion, namely pulse clarity and  low  and  high  spectral  flux. By  encountering  challenges  and  implementing  improvements,  the  MusicLab App  has  become a  useful  tool  for  researching  music-related motion.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072485" class="vrtx-external-publication">
        <div id="vrtx-publication-2072485">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072485">
                Herrebråden, Henrik; Gonzalez, Victor; Vuoskoski, Jonna Katariina &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Pre-recorded sound file versus human coach: Investigating auditory guidance effects on elite rowers.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4268921">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2280625" class="vrtx-external-publication">
        <div id="vrtx-publication-2280625">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2280625">
                Jensenius, Alexander Refsum; Bukvic, Ivica Ico; Wittman, Hollis &amp; Ogier, Andi
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Workshop on NIME Archiving.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4389974">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We propose a workshop to continue discussions in the community about how to best
preserve information from the NIME conferences, the NIME community, and the
computer music community at large. The workshop will follow up threads from the
NIME publication ecosystem workshop (NIME 2020, Birmingham) and the NIMEhub
workshop (NIME 2016, Brisbane). The main task is to find a solution for an open,
future-oriented, and institutionally recognized archiving solution for the activities of
the NIME community. Currently, only NIME publications are archived according to the
FAIR principles. No solutions exist for archiving information about
instruments/interfaces and other hardware/software-based artifacts produced in the
community. Neither do we have a system for describing and preserving
compositions/pieces, installations, performances, and workshops.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2076619" class="vrtx-external-publication">
        <div id="vrtx-publication-2076619">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2076619">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Diversity in academia: Challenges and opportunities.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4975677">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2090748" class="vrtx-external-publication">
        <div id="vrtx-publication-2090748">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2090748">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Responsible Research and Innovation in Sound and Music Computing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5173416">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">My presentation will focus on how the ongoing shift to Open Research within the field of sound and music computing (SMC) promotes Responsible Research and Innovation (RRI).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2075629" class="vrtx-external-publication">
        <div id="vrtx-publication-2075629">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2075629">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Data? Åpen forskningspraksis for ikke-datadrevne fagfelt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4061986">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er åpen forskning og hva skal til for å etablere en kultur for åpen forskning i humanistiske fag og samfunnsfag? Hvordan vi skal komme til en ny normal der det vi i dag omtaler som «åpen forskning» kun kalles forskning?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2061615" class="vrtx-external-publication">
        <div id="vrtx-publication-2061615">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2061615">
                Lesteberg, Mari &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MICRO and MACRO - Developing New Accessible Musicking Technologies.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4397733">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the development of two musical instrument prototypes developed to explore how non-haptic music technologies can be accessed from a web browser and how they can offer accessibility for people with low fine motor skills. Two approaches to browser-based motion capture were developed and tested during an iterative design process. This was followed by observational studies of two user groups: one with low fine motor skills and one with normal motor skills. Contrary to our expectations, we found that avoiding the use of buttons and mice did not make the apps more accessible for the participants with low fine motor skills. Furthermore, motion speed was considered more important for people with low motor skills than the size of the control action. The most important finding is that browser-based musical instruments using sensor-based and video-based motion tracking are not only feasible but allow for reaching much larger groups of people than previously possible. This may ultimately lead to both more personalized and accessible musical experiences.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2062247" class="vrtx-external-publication">
        <div id="vrtx-publication-2062247">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2062247">
                Kwak, Dongho; Krzyzaniak, Michael Joseph; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A mini acoustic chamber for small-scale sound experiments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4726991">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the design and construction of a mini acoustic chamber using low-cost materials. The primary purpose is to provide an acoustically treated environment for small-scale sound measurements and experiments using ≤  10-inch speakers. Testing with different types of speakers showed frequency responses of &lt;±10 dB peak-to-peak (except the ”boxiness” range below 900 Hz), and the acoustic insulation (soundproofing) of the chamber is highly efficient (approximately 20 dB SPL in reduction). Therefore, it provides a significant advantage in conducting experiments requiring a small room with consistent frequency response and preventing unwanted noise and hearing damage. Additionally, using a cost-effective and compact acoustic chamber gives flexibility when characterizing a small-scale setup and sound stimuli used in experiments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1999735" class="vrtx-external-publication">
        <div id="vrtx-publication-1999735">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1999735">
                Jensenius, Alexander Refsum &amp; Platou, Jeanette
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan kunstig intelligens være kreativ?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2 Arena.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4042876">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er AI, eller kunstig intelligens, som vi kaller det på norsk. I Arena i dag ser vi på hvor kunstig intelligens blir brukt, og hva det funker i. Kan vi få en data til å skrive poesi, og hva med musikken og kunsten?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2090667" class="vrtx-external-publication">
        <div id="vrtx-publication-2090667">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2090667">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Erfaringer med å lage 3xMOOC.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4604036">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I denne presentasjonen vil jeg presentere hvordan vi gjennom årene har utviklet tre komplette nettkurs ved Universitetet i Oslo: Music Moves (2016), Motion Capture (2022) og Pupillometry (2023). Fokuset vil ligge på muligheter og utfordringer i video i utdanningssammenheng.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1998324" class="vrtx-external-publication">
        <div id="vrtx-publication-1998324">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1998324">
                H&oslash;ffding, Simon &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab Copenhagen - en forskningskonsert.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4110714">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2018262" class="vrtx-external-publication">
        <div id="vrtx-publication-2018262">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2018262">
                Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Presentation of RITMO Research.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4054894">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1990623" class="vrtx-external-publication">
        <div id="vrtx-publication-1990623">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1990623">
                Jensenius, Alexander Refsum &amp; Ashley, Kevin
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        FAIR in Higher Education.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3322438">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Between January 25th and 27th, FAIRsFAIR partners and stakeholders will meet for a series of concluding meetings to deep-dive into the results of FAIRsFAIR. We’ll analyse the impact that we managed to have on the European Research Community. We&#39;ll go once more through the tools, guidelines and best practices that we have produced and delivered to researchers, data stewards, decision makers and funders towards a better, more structured approach towards FAIR data management. We’ll take the recommendations we produced and the lessons we learnt and leave them as a legacy for future activities to come.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094129" class="vrtx-external-publication">
        <div id="vrtx-publication-2094129">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094129">
                Remache-Vinueza, Byron &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Groundbreaking New Technology Allows People To Listen to Music Through Touch.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        SciTechDaily.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3428633">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">An audio-tactile algorithm created by University of Malaga scientists conveys melodic information through vibration.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2076779" class="vrtx-external-publication">
        <div id="vrtx-publication-2076779">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2076779">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstfag og åpen forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5101092">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvilke dilemmaer oppstår når forskningsdata og resultater skal deles og gjenbrukes? Og hvilke muligheter medfører mer åpenhet og økt deling av data for fag som eksempelvis musikk, visuell kunst, film, scenekunst og design?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2076796" class="vrtx-external-publication">
        <div id="vrtx-publication-2076796">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2076796">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO and Interdisciplinarity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4940705">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this presentation I will discuss how we have been developing an interdisciplinary research centre, in which researchers from the arts and humanities and the social and natural sciences collaborate.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2094136" class="vrtx-external-publication">
        <div id="vrtx-publication-2094136">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2094136">
                Remache-Vinueza, Byron; Trujillo-León, Andrés; Clim, Maria-Alena; Sarmiento-Ortiz, Fabián; Topon-Visarrea, Liliana &amp; Jensenius, Alexander Refsum
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2094136/contributors', 'vrtx-publication-contributors-2094136')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mapping Monophonic MIDI Tracks to Vibrotactile Stimuli Using Tactile Illusions.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4368703">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this project, we propose an algorithm to convert musical features and structures extracted from monophonic MIDI files to tactile illusions. Mapping music to vibrotactile stimuli is a challenging process since the perceptible frequency range of the skin is lower than that of the auditory system, which may cause the loss of some musical features. Moreover, current proposed models do not warrant the correspondence between the emotional response to music and the vibrotactile version of it. We propose to use tactile illusions as an additional resource to convey more meaningful vibrotactile stimuli. Tactile illusions enable us to add dynamics to vibrotactile stimuli in the form of movement, changes of direction, and localization. The suggested algorithm converts monophonic MIDI files into arrangements of two tactile illusions: “phantom motion” and “funneling”. The validation of the rendered material consisted of presenting the audio rendered from MIDI files to participants and then adding the vibrotactile component to it. The arrangement of tactile illusions was also evaluated alone. Results suggest that the arrangement of tactile illusions evokes more positive emotions than negative ones. This arrangement was also perceived as more agreeable and stimulating than the original audio. Although musical features such as rhythm, tempo, and melody were mostly recognized in the arrangement of tactile illusions, it provoked a different emotional response from that of the original audio.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072111" class="vrtx-external-publication">
        <div id="vrtx-publication-2072111">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072111">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Open music research between art and science.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4837237">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Many music researchers are turning towards studying music performance and perception in real-world settings. Collecting data in a concert situation is non-trivial, and FAIRifying the data is even more challenging. In this talk, I will discuss some challenges with handling privacy and copyright matters in music research. I will also discuss some benefits of working towards more open music research. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072113" class="vrtx-external-publication">
        <div id="vrtx-publication-2072113">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072113">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Publish or Perish? Researcher assessment is about to change.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3455971">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In July 2022, the European Commission launched an Agreement On Reforming Research Assessment. After years of talking, there is significant momentum for changing how researchers are assessed. In this talk, I will present some work leading up to the new agreement and how Universities Norway took a lead when developing the Norwegian Career Assessment Matrix (NOR-CAM). The core idea is that academics need to get recognition for a broader range of activities. This is important for transitioning to more open research practices and diverse career paths within and outside academia.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2007982" class="vrtx-external-publication">
        <div id="vrtx-publication-2007982">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2007982">
                Outa, Amani al; Knævelsrud, Helene; Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Winner of RRI-inspired transdisciplinary side quest call.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Centre for Digital Life Norway.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4689931">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Centre for Digital Life Norway (DLN) is excited to congratulate the team behind the project “The autophagic symphony – Unveiling the final rhythm” as winner of DLN’s RRI-inspired transdisciplinary side quest call.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072114" class="vrtx-external-publication">
        <div id="vrtx-publication-2072114">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072114">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Alternatives to journal-based metrics in research assessment.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3497408">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Science Europe invites institutional leaders, researchers at all stages of their careers, and experts from the field to join its 18 and 19 October 2022 conference on Open Science to discuss two key questions: (1) Is Open Science ready to become the norm in research? (2) How do we ensure this becomes an equitable transition? To find answers to these questions, the conference will provide a comprehensive overview of practical and policy initiatives, research assessment reforms, and financial measures that support the transition to Open Science. We will also look forward to new and emerging trends.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072116" class="vrtx-external-publication">
        <div id="vrtx-publication-2072116">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072116">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Experiencing the world through sound actions.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5184233">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This talk will reflect on my year-long project recording a daily &quot;sound action&quot;. These are multimodal entities consisting of body motion and its resultant sound. When we only see a sound action, we can imagine its sound. If we only hear a sound action, we can imagine the body motion and objects involved in the interaction. Sound actions are ubiquitous in everyday life yet rarely discussed and reflected upon. My attempts at analyzing sound actions show some of the complexity involved in making sense of actions, reactions, and interactions with the world. This complexity can also inspire creative usage. I will present examples of meaningless and cognitively conflicting sound actions in the talk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2072020" class="vrtx-external-publication">
        <div id="vrtx-publication-2072020">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072020">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From ideas to reality: interdisciplinary collaborations.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4883666">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2072031" class="vrtx-external-publication">
        <div id="vrtx-publication-2072031">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2072031">
                Jensenius, Alexander Refsum
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM - en introduksjon.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4995292">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A working group appointed by Universities Norway (UHR) was mandated to recommend guiding principles for the assessment and evaluation of research(ers) in light of the transition to Open Science. This working group proposed a more flexible and holistic framework for recognition and rewards in academic research assessment. The ambition has been to develop a guide that adopts three core principles for assessment: more transparency, greater breadth, and comprehensive assessments as opposed to the one-sided use of indicators.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2095428" class="vrtx-external-publication">
        <div id="vrtx-publication-2095428">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2095428">
                Jensenius, Alexander Refsum &amp; Lome, Ragnhild
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mer mangfold innenfor humaniora?                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Forskningspolitikk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4351887">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">En ny kunnskapspolitisk prosess er i gang, om hvordan akademiske karrierer skal vurderes. Hvordan påvirker det humaniora i Norge?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1939357" class="vrtx-external-publication">
        <div id="vrtx-publication-1939357">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939357">
                Swarbrick, Dana; Upham, Finn; Erdem, Cagri; Burnim, Kayla &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The MusicLab App – Exploring the usage of mobile accelerometry to measure audience movement and respiration.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4730577">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1947696" class="vrtx-external-publication">
        <div id="vrtx-publication-1947696">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1947696">
                Arvola, Jakob &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstig intelligens komponerer Beethoven.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4218731">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Før Ludwig van Beethoven døde i 1827, begynte han å arbeide på en symfoni nummer 10. Den ble aldri ferdig, men nå, nesten 200 år etter, har musikken blitt fullført ved hjelp av kunstig intelligens. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1982967" class="vrtx-external-publication">
        <div id="vrtx-publication-1982967">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1982967">
                Patel-Grosz, Pritty; Katz, Jonah; Grosz, Patrick Georg; Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From music to dance: the inheritance of semantic inferences.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4729208">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2005759" class="vrtx-external-publication">
        <div id="vrtx-publication-2005759">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2005759">
                Jensenius, Alexander Refsum &amp; Fasciani, Stefano
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        University of Oslo - RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-EDITORIAL">
                        Proceedings of the SMC Conferences.
                </span>
                            2021-,
                <span class="vrtx-pages">p. xxix–xxix.</span>
            
            <a href="https://hdl.handle.net/11250/4068626">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1939149" class="vrtx-external-publication">
        <div id="vrtx-publication-1939149">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1939149">
                Swarbrick, Dana; Upham, Finn; Erdem, Cagri; Burnim, Kayla &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab Algorave – An exploratory study examining the usage of mobile accelerometry to measure movements of a virtual concert audience.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4038709">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2005298" class="vrtx-external-publication">
        <div id="vrtx-publication-2005298">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2005298">
                Funderud, Ingrid; Danielsen, Anne; Endestad, Tor; Jensenius, Alexander Refsum; Leske, Sabine Liliana &amp; Solbakk, Anne-Kristin
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Improving working memory in patents with epilepsy by rhythmic sounds.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5184033">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1954918" class="vrtx-external-publication">
        <div id="vrtx-publication-1954918">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954918">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Open Research as Communication Strategy.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5124887">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1945970" class="vrtx-external-publication">
        <div id="vrtx-publication-1945970">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1945970">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvorfor leder åpenhet til bedre forskning?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5242605">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1933284" class="vrtx-external-publication">
        <div id="vrtx-publication-1933284">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1933284">
                Carlsen, Toril; Einarsson, Anna Elisabet; Jensenius, Alexander Refsum &amp; Norderval, Kristin
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Contemporary Vocal Arts Training: Renewing the opera form through new pedagogical practices that expand the role of the singer.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/2776442">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Avdeling Operahøgskolen ønsker å arrangere en tredagers konferanse på KHiO 27.-29. august 2021, med fokus på operapedagogikk og moderne stemmebruk. Målet med konferansen er å undersøke den pedagogiske praksisen som trengs for å trene operasangere mot dagens krav til nye operaproduksjoner og utvide den kreative rolle til operasangere. Improvisasjon, samarbeid og bruk av teknologi for å fornye opera som kunstform vil være sentrale emner. UiO/ RITMO og foreningen VOXLAB står som samarbeidspartnere.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1936460" class="vrtx-external-publication">
        <div id="vrtx-publication-1936460">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1936460">
                Jenssen, Ariadne Loinsworth; Monstad, Lars &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunstig intelligens lager musikk for ARY.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Forskningsdagene.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3864564">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Mange holder fast ved at kunstneriske uttrykk aldri kan bli erstattet av datamaskiner og algoritmer. Men stemmer det? Hvor god er egentlig kunstig intelligens til å lage musikk? </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1927485" class="vrtx-external-publication">
        <div id="vrtx-publication-1927485">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1927485">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Visibility for researchers on university web pages.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4565370">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Academics need to be visible online. If you don’t publish and disseminate your research, it won’t have an impact. So it is in our own interest to have up-to-date personal pages with information about what we do. I would argue that it is also in the interest of universities that their employee’s personal pages are up-to-date and look good. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1936462" class="vrtx-external-publication">
        <div id="vrtx-publication-1936462">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1936462">
                Jenssen, Ariadne Loinsworth; Monstad, Lars; Munoz, Sofia Gonzalez; Fasciani, Stefano &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan kunstig intelligens erstatte en artist?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4797832">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Årets tema for Forskningsdagene handler om fred og konflikt, og en pågående kamp er menneskene mot maskinen. Jobber har jo allerede blitt erstattet av roboter, betyr det at våre favorittartister nå står for tur?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1914426" class="vrtx-external-publication">
        <div id="vrtx-publication-1914426">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1914426">
                Masu, Raul; Melbye, Adam Pultz; Sullivan, John &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NIME and the Environment: Toward a More Sustainable NIME Practice.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4323620">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper addresses environmental issues around NIME research and practice. We discuss the formulation of an environmental statement for the conference as well as the initiation of a NIME Eco Wiki containing information on environmental concerns related to the creation of new musical instruments. We outline a number of these concerns and, by systematically reviewing the proceedings of all previous NIME conferences, identify a general lack of reflection on the environmental impact of the research undertaken. Finally, we propose a framework for addressing the making, testing, using, and disposal of NIMEs in the hope that sustainability may become a central concern to researchers. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1888738" class="vrtx-external-publication">
        <div id="vrtx-publication-1888738">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1888738">
                Jensenius, Alexander Refsum &amp; Holm, Hege
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva får oss ut på dansegulvet?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P1.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3430753">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva får oss ut på dansegulvet? Det er forsket på hva som skal til for at vi beveger oss til musikk. Det er helt spesielle rytmer som får kroppen vår til å bevege seg, enten vi vil eller ei. Professor med bakgrunn både fra musikk, fysikk og matematikk forklarer.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1929494" class="vrtx-external-publication">
        <div id="vrtx-publication-1929494">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1929494">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Technology on the opera stage.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4175687">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1936899" class="vrtx-external-publication">
        <div id="vrtx-publication-1936899">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1936899">
                Monstad, Lars &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan kunstig intelligens erstatte artisten ARY?                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Kreativt forum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3859823">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kan kunstig intelligens lage låter for artistene våre? Forskningsrådet ville se hvor bra musikk kunstig intelligens kan lage, og fikk Ary med på et eksperiment.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1957536" class="vrtx-external-publication">
        <div id="vrtx-publication-1957536">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1957536">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva er egentlig et musikkinstrument?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4864772">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Et piano er et instrument. Og en fiolin. Men hva med stemmen? Eller en gaffel? Eller en mobiltelefon? Forelesningen vil diskutere gamle og nye musikkinstrumenter og hvordan ny teknologi er med på å endre måten vi lager og opplever musikk. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1957528" class="vrtx-external-publication">
        <div id="vrtx-publication-1957528">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1957528">
                Fasciani, Stefano &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound and Music Computing at the University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3547035">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A presentation of labs and studios at the Department of Musicology and RITMO Centre for Interdisciplinary Studies in Rhythm, Time, and Motion at the University of Oslo.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1957535" class="vrtx-external-publication">
        <div id="vrtx-publication-1957535">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1957535">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Åpen forskningspraksis – i praksis?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3268692">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi er på vei inn i et nytt paradigme der åpenhet vil være en naturlig del av hvordan vi forsker. Åpenhet forventes i økende grad gjennom hele forskningsprosessen fra hvordan forskningen planlegges og gjennomføres, til hvordan data og resultater lagres og deles og til hvordan vi samarbeider, forskere imellom og med resten av samfunnet. Åpenhet påvirker forskere, forskningsinstitusjonene, finansiører og de som bruker forskningen. 

Målet med konferansen var å stoppe opp og diskutere hvordan de ulike prosessene i arbeidet mot åpen forskning virker (eller ikke virker) sammen. Hvordan ser dette ut fra forskernes ståsted og hvordan henger politikken sammen med praksis?  Er de ulike delene av sektoren rigget for endringene som er på vei og hvordan er den overordnede politikken, nasjonalt og internasjonalt koblet til endringer i forskernes hverdag? </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1875463" class="vrtx-external-publication">
        <div id="vrtx-publication-1875463">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1875463">
                Jensenius, Alexander Refsum &amp; Sørbø, Solveig Isis
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Lyd, stillhet, bevegelse og stillstand.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        Radiorakel.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4829362">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Solveigs Speisa Musikk</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1875451" class="vrtx-external-publication">
        <div id="vrtx-publication-1875451">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1875451">
                Jensenius, Alexander Refsum &amp; Finnset, Knut Anders
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Uenighet om UiOs nettsider: - Advarer kolleger mot å bruke dem.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3656091">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Nettsidene til UiO har blitt for store i omfang og må ryddes opp i. Forskere frykter at aktiviteter skal forsvinne for alltid, men UiO lover å ikke slette innhold uten godkjenning fra instituttene.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1925445" class="vrtx-external-publication">
        <div id="vrtx-publication-1925445">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1925445">
                Upham, Finn; Zelechowska, Agata; Gonzalez, Victor &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Quiet Breathing to Heard Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3692340">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1925446" class="vrtx-external-publication">
        <div id="vrtx-publication-1925446">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1925446">
                Zelechowska, Agata; Gonzalez, Victor &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Championship of Standstill: A paradigm to study involuntary responses to music with the use of motion capture.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3289591">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2005100" class="vrtx-external-publication">
        <div id="vrtx-publication-2005100">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2005100">
                Lan, Qichao &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Glicol: A Graph-oriented Live Coding Language Developed with Rust, WebAssembly and AudioWorklet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4951045">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2005103" class="vrtx-external-publication">
        <div id="vrtx-publication-2005103">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2005103">
                Lan, Qichao &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Collaborative Live Coding with Glicol Music Programming Language.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4258222">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2005205" class="vrtx-external-publication">
        <div id="vrtx-publication-2005205">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2005205">
                Kwak, Dongho; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music for cells: The human body as a rhythm machine.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4833753">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1916339" class="vrtx-external-publication">
        <div id="vrtx-publication-1916339">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1916339">
                Masu, Raul; Melbye, Adam Pultz; Sullivan, John &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NIME Eco Wiki: A Crash Course.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3406318">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this workshop, hosted by the three NIME environmental officers, participants will be introduced to the NIME Eco Wiki, a repository for addressing environmental and sustainability issues within the NIME community. During the workshop, the participants will discuss how practices on the communal as well as the individual level may become more sustainable and they will create new additions and ideas for the Wiki. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1910750" class="vrtx-external-publication">
        <div id="vrtx-publication-1910750">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1910750">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The experience of time and space in human standstill.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5235330">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1914956" class="vrtx-external-publication">
        <div id="vrtx-publication-1914956">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1914956">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM - A framework for recognition and rewards in academic careers.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4766522">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A working group appointed by Universities Norway (UHR) was mandated to recommend guiding principles for the assessment and evaluation of research(ers) in light of the transition to Open Science. This working group proposes a more flexible and holistic framework for recognition and rewards in academic research assessment. The ambition has been to develop a guide that adopts three core principles for assessment: more transparency, greater breadth, and comprehensive assessments as opposed to one-sided use of indicators.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1914957" class="vrtx-external-publication">
        <div id="vrtx-publication-1914957">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1914957">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NOR-CAM - En veileder for vurdering i akademiske karriereløp.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4732608">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">UHRs arbeidsgruppe for åpen evaluering har siden 2019 jobbet med en veileder for vurdering i akademiske karriereløp. Veilederen er nå ferdig, og arbeidsgruppen foreslår et mer fleksibelt og helhetlig rammeverk for arbeidet med vurdering i akademiske karriereløp. Ambisjonen har vært å utvikle en veileder der mer åpenhet, større bredde i vurderingene, og med helhetlige vurderinger som motsats til ensidig bruk av indikatorer står sentralt. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1892532" class="vrtx-external-publication">
        <div id="vrtx-publication-1892532">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1892532">
                Karbasi, Seyed Mojtaba; Godøy, Rolf Inge; Jensenius, Alexander Refsum &amp; Tørresen, Jim
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A Learning Method for Stiffness Control of a Drum Robot for Rebounding Double Strokes.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5134982">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In robot drumming, performing double stroke rolls is a key ability. Human drummers learn to play double strokes by just trying it several times. For performing it, a model needs to be learned to provide anticipatory commands during drumming. Joint stiffness plays a key role in rebounding double stroke task and should be considered in the model. We have introduced an interactive learning method for a drum robot to learn joint stiffness for rebounding double stroke task. The model is simulated for a 2-DoF robotic arm. The algorithm is simulated with 3 different drum kits to show the robustness of the learning approach. The simulation results also show significant compatibility with human performance results. In addition, the refined learning algorithm adjusts the stroke timing which is important for producing proper rhythms.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1910751" class="vrtx-external-publication">
        <div id="vrtx-publication-1910751">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1910751">
                Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan bygge og videreutvikle en internasjonal, interdisiplinær forskningsgruppe?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4433346">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2004277" class="vrtx-external-publication">
        <div id="vrtx-publication-2004277">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2004277">
                Bishop, Laura; Sanchez, Victor Evaristo Gonzalez; Laeng, Bruno; Jensenius, Alexander Refsum &amp; Høffding, Simon
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Social context affects head motion and gaze in string quartet rehearsal and concert performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3629835">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1954357" class="vrtx-external-publication">
        <div id="vrtx-publication-1954357">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1954357">
                Laczko, Balint &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections on the Development of the Musical Gestures Toolbox for Python.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4814723">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The paper presents the Musical Gestures Toolbox (MGT) for Python, a collection of modules targeted at researchers working with video recordings. The toolbox includes video visualization techniques such as creating motion videos, motion history images, and motiongrams. These visualizations allow for studying video recordings from different temporal and spatial perspectives. The toolbox also includes basic computer vision methods, and it is designed to integrate well with audio analysis toolboxes. The MGT was initially developed to analyze music-related body motion (of musicians, dancers, and perceivers) but is equally helpful for other disciplines working with video recordings of humans, such as linguistics, pedagogy, psychology, and medicine.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1947505" class="vrtx-external-publication">
        <div id="vrtx-publication-1947505">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1947505">
                Hanger, Mari Rian &amp; Jensenius, Alexander Refsum
            </span>(2021).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tror unge forskere kan fristes til å velge tidsskrift i gråsonen.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Universitetsavisa.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3553558">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Kravet om å publisere mange artikler kan skape grobunn for røvertidsskriftene. – Det hadde vært bedre med færre og bedre publikasjoner, mener Pål Romundstad. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1827950" class="vrtx-external-publication">
        <div id="vrtx-publication-1827950">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1827950">
                Jensenius, Alexander Refsum; Danielsen, Anne &amp; Edwards, Peter
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vi trenger kort og godt mer automagi i Cristin.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3228740">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Forskerne bruker utallige arbeidstimer på å legge inn informasjonen. Det er på tide å få den ut igjen! skriver UiO-forskerne Alexander Refsum Jensenius, Anne Danielsen og Peter Edwards.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1828658" class="vrtx-external-publication">
        <div id="vrtx-publication-1828658">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1828658">
                Zelechowska, Agata; Sanchez, Victor Evaristo Gonzalez &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Standstill to the ‘beat’: Differences in involuntary movement
responses to simple and complex rhythms.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4694088">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Previous studies have shown that movement-inducing properties of music largely depend on the rhythmic complexity of the stimuli. However, little is known about how simple isochronous beat patterns differ from more complex rhythmic structures in their effect on body movement. In this paper we study spontaneous movement of 98 participants instructed to stand as still as possible for 7 minutes while listening to silence and randomised sound excerpts: isochronous drumbeats and complex drum patterns, each at three different tempi (90, 120, 140 BPM). The participants’ head movement was recorded with an optical motion capture system.We found that on average participants moved more during the sound stimuli than in silence, which confirms the results from our previous studies. Moreover, the stimulus with complex drum patterns elicited more movement when compared to the isochronous drum beats. Across different tempi, the participants moved most at 120 BPM for the average of both types of stimuli. For the isochronous drumbeats, however, their movement was highest at 140 BPM. These results can contribute to our understanding of the interplay between rhythmic complexity, tempo and music-induced movement.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1849628" class="vrtx-external-publication">
        <div id="vrtx-publication-1849628">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1849628">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan en datamaskin lage den neste megahiten?                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3738914">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1806422" class="vrtx-external-publication">
        <div id="vrtx-publication-1806422">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1806422">
                Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan bruke video som ressurs i digital undervisning?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4527017">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Bli med og snakk om hvordan vi legger om vår undervisning i disse dager, denne gang med fokus på video. LINK og undervisere ved UiO vil bidra med faglige innlegg, etterfulgt av åpen erfaringsdeling og spørsmål &amp; svar.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1849634" class="vrtx-external-publication">
        <div id="vrtx-publication-1849634">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1849634">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk laget av kunstig intelligens.
                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK Dagsrevyen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3370547">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1849675" class="vrtx-external-publication">
        <div id="vrtx-publication-1849675">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1849675">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikk laget av algoritmer utfordrer musikkbransjen: – Det er klart vi er bekymret.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3298885">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Frank Sinatra har kommet med en ny sang 22 ?r etter sin d?d. Eller, har han egentlig det?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1820764" class="vrtx-external-publication">
        <div id="vrtx-publication-1820764">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820764">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nesten umulig å stå i ro til musikk.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4986127">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1820769" class="vrtx-external-publication">
        <div id="vrtx-publication-1820769">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820769">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvor vanskelig er det egentlig å la være å bevege seg til fengende musikk?                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK Dagsrevyen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4967970">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1820767" class="vrtx-external-publication">
        <div id="vrtx-publication-1820767">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820767">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Klarer du å stå stille til favorittlåta di?                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4332996">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1820766" class="vrtx-external-publication">
        <div id="vrtx-publication-1820766">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820766">
                Jensenius, Alexander Refsum &amp; Ingebrethsen, Christian
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        篮球即时比分_nba比分直播-彩客网重点推荐 om danselyst.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5200755">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1820771" class="vrtx-external-publication">
        <div id="vrtx-publication-1820771">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820771">
                Jensenius, Alexander Refsum &amp; Lilleslåtten, Mari
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        You just can’t stand still.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        HF-aktuelt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3739122">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Not moving to dance music is near impossible, according to new research.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1820770" class="vrtx-external-publication">
        <div id="vrtx-publication-1820770">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1820770">
                Jensenius, Alexander Refsum &amp; Lilleslåtten, Mari
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Du klarer faktisk ikke å stå stille.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        HF-aktuelt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4439050">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Å ikke bevege seg til dansemusikk, er så godt som umulig, viser ny forskning.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1819818" class="vrtx-external-publication">
        <div id="vrtx-publication-1819818">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1819818">
                Jensenius, Alexander Refsum &amp; Svendsen, Njord Vegard
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Meiner forskarar må ha rett til å «klippe og lime» frå publiserte artiklar.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3845087">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">- Vi må komme bort frå tanken om at ein publisert forskingsartikkel er ein digital versjon av ein papirartikkel, meiner musikkforskar Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1823065" class="vrtx-external-publication">
        <div id="vrtx-publication-1823065">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1823065">
                Jensenius, Alexander Refsum &amp; Andersson, Bård
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Umulig å stå stille til musikk.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        KulturPlot.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4992806">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Nå er det bevist: Det finnes en indre danseløve i oss alle. Ny forskning viser dessuten at den musikken som ikke får det til å rykke i dansefoten er norsk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1860328" class="vrtx-external-publication">
        <div id="vrtx-publication-1860328">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1860328">
                Jensenius, Alexander Refsum &amp; Danielsen, Anne
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        UiOs nettsider er en viktig forskningsinfrastruktur.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4916472">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Det er underlig at UiO så aktivt går inn for å slette tilgjengelig informasjon om vår egen kultur og historie. Vi er enig i at det er behov for å rydde opp i nettsidene, men mener at fokuset bør ligge på rydding og kvalitetssikring fremfor sletting, skriver Alexander Refsum Jensenius og Anne Danielsen i RITMO. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1815747" class="vrtx-external-publication">
        <div id="vrtx-publication-1815747">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1815747">
                Bishop, Laura &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reliability of two infrared motion capture systems in a music performance setting.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4626282">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes a comparative analysis of tracking quality in two infrared marker-based motion capture systems: one older but high-end (Qualisys, purchased in 2009) and the other newer and mid-range (OptiTrack, purchased in 2019). We recorded performances by a string quartet with both systems simultaneously, using the same frame rate. Our recording set-up included a combination of moving markers (affixed to musicians’ bodies) and stationary markers (affixed to music stands). Higher noise levels were observed in Qualisys recordings of stationary markers than in OptiTrack recordings, as well as a greater spatial range, though OptiTrack recordings had a higher rate of outliers (“spikes” in the signal). In moving markers, increased quantity of motion was associated with increased between-system error rates. Both systems showed minimal within-trial drift but a reduction in recording accuracy and precision over the duration of the experiment. Overall, our results show that the older/high-end system (Qualisys) produced slightly lower-quality recordings than the newer/mid-range system (OptiTrack). We discuss how our findings may inform researchers’ interpretations of motion capture data, particularly when capturing the types of motion that are important for performing music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1826444" class="vrtx-external-publication">
        <div id="vrtx-publication-1826444">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1826444">
                Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvorfor er åpen forskning bedre forskning?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3639605">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1780478" class="vrtx-external-publication">
        <div id="vrtx-publication-1780478">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1780478">
                Fasciani, Stefano; Jensenius, Alexander Refsum; Støckert, Robin &amp; Xambó, Anna
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The MCT Portal: an infrastructure, a laboratory and a pedagogical tool.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4066664">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1858109" class="vrtx-external-publication">
        <div id="vrtx-publication-1858109">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1858109">
                Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Some Challenges of Citizen Science for Universities.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4791592">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The potential of Citizen Science is high on the agenda in the discussion on the future of academic research. The European Commission’s Communication “A new ERA for Research and Innovation”, published in September 2020, states that “[...] the engagement of citizens, local communities and civil society will [help] achieve greater social impact and increased trust in science.” Citizens can contribute in diverse ways, ranging from data collection over data analysis to co-designing projects, and thereby bring academic research and its outcomes closer to society.

However, Citizen Science also accentuates ethical and legal questions about ownership of the research process and outcomes, and poses challenges in terms of safeguarding research quality. Addressing these challenges and using the opportunities of Citizen Science will require universities to take the lead and consider the place of Citizen Science within their institutional strategies, as well as the support they offer to research staff.

Engaging in inclusive and transparent science, Citizen Science and Open Science are becoming increasingly intertwined. Currently, Citizen Science is described by the European Commission as “both an aim and enabler of Open Science”.

This joint workshop will discuss themes around institutional support for Citizen Science and offers an opportunity to transfer and share knowledge. The aim is to exchange experiences, lessons learnt, and explore common challenges. To support Citizen Science, the online workshop will discuss tools, guidelines and good practices from Open Science experiences as well. Participating universities will have the opportunity to share expertise, coordinate efforts and exchange advice on services, tools and legal and ethical issues.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1806078" class="vrtx-external-publication">
        <div id="vrtx-publication-1806078">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1806078">
                Jensenius, Alexander Refsum &amp; Svendsen, Njord Vegard
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Spelelystene.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3653388">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">For somme er ein arbeidsdag utan musikk utenkeleg.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1806778" class="vrtx-external-publication">
        <div id="vrtx-publication-1806778">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1806778">
                Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Video Visualization Strategies at RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4747259">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1829316" class="vrtx-external-publication">
        <div id="vrtx-publication-1829316">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1829316">
                Jensenius, Alexander Refsum; Danielsen, Anne &amp; Edwards, Peter
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vi trenger kort og godt mer automagi i Cristin.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3528949">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Forskerne bruker utallige arbeidstimer på å legge inn informasjonen. Det er på tide å få den ut igjen.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1811355" class="vrtx-external-publication">
        <div id="vrtx-publication-1811355">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1811355">
                Støckert, Robin; Bergsland, Andreas; Fasciani, Stefano &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Student active learning in a two campus organisation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4033903">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Higher education is facing disruptive changes in many fields. Students wants to have the option of learning anywhere, anytime and in any format. Universities need to develop and deliver to future students a complete learning ecosystem. At the same time universities are facing challenges such as growing costs and the pressure to give the students the knowledge, competence, skills and ability to continuously adapt to future job environments. As a consequence, many universities are investigating new ways of collaboration and sharing resources to cater to the demands of students, industry and society. An example of this collaboration is a new joint master between the two largest Universities in Norway: University of Oslo (UiO) and Norwegian University of Science and Technology (NTNU). In this paper, we present the lessons learned from almost two years of teaching and learning in the new joint master&#39;s programme, &quot;Music, Communication and Technology&quot; (MCT), between NTNU and UiO. This programme is a run in a two-campus learning space built as a two-way, audio-visual, high-quality, low-latency communication channel between the two campuses, called &quot;The Portal&quot;. Moreover, MCT is the subject of research for the SALTO (Student Active Learning in a Two campus Organisation) project, where novel techniques in teaching and learning are explored, such as team-based learning (TBL), flipped classroom, and other forms of student active learning. Educational elements in this master, provides the student with 21st century skills and deliver knowledge within humanities, entrepreneurship and technology. We elaborate on the technical, pedagogical and learning space-related challenges toward delivering teaching and learning in these cross-university settings. The paper concludes with a set of strategies that can be used to improve student active learning in different scenarios.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1749986" class="vrtx-external-publication">
        <div id="vrtx-publication-1749986">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1749986">
                Jensenius, Alexander Refsum &amp; Jørgensen, Paul Arvid
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kulturstripa: Musikken som beveger.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK P2.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5085941">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none"> Er du av typen som berre må danse når du høyrer ein viss type musikk? Paul Arvid Jørgensen har møtt ein forskar som ser på om vi menneske er fødd med dansefot, og om ein type musikk fører til meir rørsle enn en annan. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1740715" class="vrtx-external-publication">
        <div id="vrtx-publication-1740715">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1740715">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound actions: An embodied approach to a digital organology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4302845">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1740716" class="vrtx-external-publication">
        <div id="vrtx-publication-1740716">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1740716">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Lecture-demo: Music-Related Micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4465108">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This presentation will summarize findings from my research into music-related micromotion. This includes the smallest human motion that we can perform and perceive, typically measured at at a scale of millimeters. We have carried out a series of studies of such micromotion, in which people have been asked to try to stand still on the floor, both in silence and with (musical) sound. By measuring their bodily responses with different types of motion tracking and physiological devices we find a number of similarities between people&#39;s quantity and quality of motion. This has been the starting point for exploring the use of micromotion in musical practice, what I call &#39;sonic microinteraction&#39;. This includes standstill performances with interactive sound and light. It also includes several installations with our ensemble of self-playing guitars. These are hybrid instruments, using digital sound-production through acoustically resonating guitars. They are controlled through inverse microinteraction, meaning that you need to focus on standing still to produce any sound. This challenges our traditional understanding of the affordance of musical instruments, and opens for both artistically and scientifically interesting perspectives.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1694969" class="vrtx-external-publication">
        <div id="vrtx-publication-1694969">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1694969">
                Jensenius, Alexander Refsum &amp; Seres, Silvija
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ekspært: Alexander Refsum Jensenius.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        LØRN Podcast.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4573597">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva kan vi finne når vi analyserer folks dansebevegelser?  Og hva er selvspillende gitarer? I denne episoden av #LØRN snakker Silvija med førsteamanuensis ved Institutt for musikkvitenskap ved Universitetet i Oslo, Alexander Refsum Jensenius, om NM i stillstand og kunst og teknologi.

— Vi tror at det er noen dypt forankrede systemer som gjør at bevegelse og musikk er koblet sammen i hjernen vår. Så lyd og bevegelse har ikke skiller, forteller han.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1740713" class="vrtx-external-publication">
        <div id="vrtx-publication-1740713">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1740713">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sound actions: An embodied approach to a digital organology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5028137">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">What is an instrument in our increasingly electrified world? In this talk I will present a set of theoretical building blocks from my forthcoming book on &quot;musicking in an electronic world&quot;. At the core of the argument is the observation that the introduction of new music technologies has led to an increased separation between action and sound in musical performance. This has happened gradually, with pianos and organs being important early examples of instruments that introduced mechanical components between the performer and resonating objects. Today&#39;s network-based instruments represent an extreme case of a spatiotemporal dislocation between action and sound. They challenge our ideas of what an instrument can be, who can perform on them, and how they should be analyzed. In the lecture I will explain how we can use the concepts of action-sound couplings and mappings to structure our thinking about such instruments. This will be used at the heart of a new organology that embraces the qualities of both acoustic and electroacoustic instruments.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1726962" class="vrtx-external-publication">
        <div id="vrtx-publication-1726962">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1726962">
                Diaz, Ximena Alarcón &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        &quot;Ellos no están entendiendo nada&quot; (&quot;They are not understanding anything&quot;): Listening to Embodied Memories of Colombian Migrant Women Reflecting on Conflict and Migration.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4102131">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Exploring the role of the body as an interface that keeps memory of place, INTIMAL physical-virtual “embodied” system, integrates body movement, voice, and an oral archive, as an artistic platform for relational listening (Alarcón, 2019), using networking technologies for telematic sonic improvisatory performances, in the context of geographical migration. INTIMAL has been informed by a case study with nine Colombian migrant
women in Europe, listening to their migrations, and to an oral archive with testimonies of conflict and migration by other Colombian migrant women.1 The first two interfaces created for the system: MEMENTO (a spoken word retrieval system), and RESPIRO (for transmission and sonification of breathing data), have been tested by the research participants in a telematic sonic improvisatory public improvisatory performance between the cities of Oslo, Barcelona, and London. In the performance, proposed as a shared dream, a “complex narrative” (Grishakova &amp; Poulaki, 2019) emerged, for both the improvisers and the audiences. In this paper, we describe the conditions of the narrative environment, and the embodied expressions that
emerged—including body movement, voice, spoken word, and breathing—establishing connections between gendered migration, and Colombian conflict. We reflect on how distributed improvisatory embodied
expression, and relational listening through technological mediations, aids the process of collective remembering (Wertsch, 2001), in a complex context of conflict and migration.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1705098" class="vrtx-external-publication">
        <div id="vrtx-publication-1705098">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1705098">
                Støckert, Robin; Jensenius, Alexander Refsum; Sedo, Anna Xambo &amp; Brandtsegg, Øyvind
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        A case study in learning spaces for physical-virtual two-campus interaction.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4616649">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Norwegian University of Science and Technology (NTNU) Teaching Excellence is an integrated and
wide-ranging initiative aimed at helping NTNU to achieve its goal to providing education characterized
by quality at a high international level. The initiative consists of a portfolio of development measures,
with the purpose to develop innovative approaches to learning, teaching and assessment.
SALTO (Student Active Learning in a Two campus Organization) is one of the development projects
founded for the period 2018-2020. The project is based on a study where the students are divided into
two campuses. The aim is to develop effective pedagogy with activity at both campuses at the same
time, with particular emphasis on interaction, resource sharing and communication/collaboration. The
project aims to allow students and teachers to explore educational, methodological, and technological
solutions together.
A new joint master&#39;s program in &quot;Music, Communication and Technology&quot; (MCT) between NTNU and
University in Oslo (UiO), constitutes the framework for the SALTO project. The common pedagogy,
technology and shared learning space between the two Universities, is hereafter defined as the Portal.
SALTO will utilize the MCT Portal as an arena/living lab to evolve and optimize student active learning
scenarios. In this paper, we elaborate on the issues, challenges and potential with three different
scenarios, which emerged during the first 6 months of the project:
(1) The Opening Ceremony between NTNU and UiO, with a combo of talks and performance.
(2) A live Christmas concert connecting two high schools 500 km apart (Trondheim-Oslo).
(3) An intense cross-university course with a combo of preparations, lectures and hands-on exercises.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1701990" class="vrtx-external-publication">
        <div id="vrtx-publication-1701990">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1701990">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Technology and the voice.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4121287">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1701989" class="vrtx-external-publication">
        <div id="vrtx-publication-1701989">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1701989">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring the Spatiotemporal Matrix in Music-Dance Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3855257">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I will present the spatiotemporal matrix, a system for categorizing human actions into different spatial and temporal levels: micro, meso, macro. Most regular human actions would be categorized as meso-meso, that is, medium-sized motion within a timespan that fits our short-term memory. Exploring combinations of micro and macro levels in both space and time is challenging, but is also conceptually, practically and artistically interesting. I will show an example of this from the music-dance performance Sverm, and explain how the matrix was informed by my research into the effect of music on the micromotion observed when people try to stand still.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1732027" class="vrtx-external-publication">
        <div id="vrtx-publication-1732027">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1732027">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvordan fremme tverrfaglighet?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3799108">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1732016" class="vrtx-external-publication">
        <div id="vrtx-publication-1732016">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1732016">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hva kan kroppslig stillstand si om opplevelsen av musikk?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4543067">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1769000" class="vrtx-external-publication">
        <div id="vrtx-publication-1769000">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1769000">
                Jensenius, Alexander Refsum &amp; Lieungh, Erik
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        #26 Music Research.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Open Science Talk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4181359">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this episode, we talk about Music Research, and how it is to practice open research within this field.
Our guest is Alexander Jensenius, Associate Professor at the Department of Musicology
- Centre for Interdisciplinary Studies in Rhythm, Time and Motion (IMV) at the University of Oslo. He is also behind MusicLab, an event-based project where data is collected, during a musical performance, and analyzed on the fly.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1686927" class="vrtx-external-publication">
        <div id="vrtx-publication-1686927">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1686927">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Towards Convergence in Research Assessment.
                </span>
                            
                <span class="vrtx-pages">p. 14–14.</span>
            
            <a href="https://hdl.handle.net/11250/3443765">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Open Science is on everyone’s lips these days. There are many reasons why this shift is necessary and wanted, and also several hurdles. One big challenge is the lack of incentives and rewards. Underlying this is the question of what we want to incentivize and reward, which ultimately boils down to the way we assess research and researchers. This is not a small thing. After all, we are talking about the cornerstone of people’s careers, whether an inspiring academic gets a job, promotion, and project funding.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1709150" class="vrtx-external-publication">
        <div id="vrtx-publication-1709150">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1709150">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Reflections from a Carpentries Train the Trainer Workshop.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Carpentries.org.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4590140">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this post, Alexander Refsum Jensenius shares thoughts and takeaways from a recent Train the Trainer Workshop</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1686930" class="vrtx-external-publication">
        <div id="vrtx-publication-1686930">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1686930">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dagens system tar ikke høyde for at folk skal kunne ta kurs på 0,2 studiepoeng. Men hvorfor ikke?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5237811">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi må legge til rette for mye raskere, enklere, og smidigere prosesser for å registrere, insentivere og belønne ulike former for mikroutdanning, skriver førsteamanuensis ved Universitetet i Oslo, Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1775349" class="vrtx-external-publication">
        <div id="vrtx-publication-1775349">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1775349">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikkteknologi som døråpner til bedre forståelse.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3643834">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I seminaret vil du få presentert eksempler på hvordan musikkteknologi kan brukes i undervisning, både i små og store grupper. Både akustisk og elektronisk musikkteknologi fungerer godt for å forklare prinsipper også utenfor regulær musikkundervisning. I dette seminaret får du vite hvordan matematikk kan forklares med små synthesizere, fysikk med strengeinstrumenter og kroppskontroll med interaktiv musikk. Dette er prinsipper som kan tas i bruk med enkle virkemidler. Seminaret vil kunne være relevant for deg som ønsker tips til alternative tilnærminger overfor elever som strever med læring.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1711730" class="vrtx-external-publication">
        <div id="vrtx-publication-1711730">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1711730">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music Technology Developments at University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4408609">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1775348" class="vrtx-external-publication">
        <div id="vrtx-publication-1775348">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1775348">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Experimenting with Open Research Experiments.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3656108">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Is it possible to do experimental music research completely openly? And what can we gain by opening up the research process from beginning to end? In the talk I will present MusicLab, an open research project at the University of Oslo. The aim is to explore new methods for conducting research, research communication, and education. Each MusicLab event is organized around a public music performance, during which we collect data from both musicians and audience members. Here we explore different types of sensing systems that work in real-world contexts, such as breathing, heartbeat, muscle tension, or motion. The events also contain an edutainment element through panel discussions as well as &quot;data jockeying&quot; in the form of live data analysis. The collected data is made publicly available, and forms the basis for further analysis and publications after the event. Opening up the research process is conceptually, practically, and technologically challenging for everyone involved. The benefit is that it has helped us solve a number of issues when it comes to GDPR and copyright. It has also pushed our research in directions that we previously had never thought about, and helped us communicate this to new users.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1779593" class="vrtx-external-publication">
        <div id="vrtx-publication-1779593">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1779593">
                Sedo, Anna Xambo; Støckert, Robin; Jensenius, Alexander Refsum &amp; Saue, Sigurd
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Facilitating Team-Based Programming Learning with Web Audio.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3877229">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this paper, we present a course of audio programming using web audio technologies addressed to an interdisciplinary group of master students who are mostly beginners in programming. This course is held in two connected university campuses through a portal space and the students are expected to work in cross-campus teams. The workshop promotes both individual and group work and is based on ideas from science, technology, engineering, arts and mathematics (STEAM), team-based learning and project-based learning. We show the outcomes of this course, discuss the students’ feedback and reflect on the results. We found that it is important to provide individual vs. group work, to use the same code editor for consistent follow-up and to be able to share the screen to solve individual questions. Other aspects inherent to the master (intensity of the courses, coding in a research-oriented program) and to prior knowledge (web technologies) should be reconsidered. We conclude with a wider reflection on the challenges and potentials of using web audio as a programming environment for beginners in STEAM and distance-learning courses.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1779611" class="vrtx-external-publication">
        <div id="vrtx-publication-1779611">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1779611">
                Lan, Qichao; Cagri, Erdem &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Performing with QuaverSeries Live Coding Environment.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3900543">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1744087" class="vrtx-external-publication">
        <div id="vrtx-publication-1744087">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1744087">
                Sørbø, Solveig Isis; Good, Matthew &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO + UB = MusicLab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4524037">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">MusicLab er et samarbeidsprosjekt mellom UB og RITMO og en pilot på åpen forskning ved UiO. Konseptet kombinerer live musikk, live forskning og vitenskapsformidling. Det er mye vi har fått til med MusicLab, men i slikt nybrottsarbeid støter man også på nye typer utfordringer. Hvor langt kan man trekke åpenheten?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1698256" class="vrtx-external-publication">
        <div id="vrtx-publication-1698256">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1698256">
                Lan, Qichao; Tørresen, Jim &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RaveForce: A Deep Reinforcement Learning Environment for Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4790522">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1749083" class="vrtx-external-publication">
        <div id="vrtx-publication-1749083">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1749083">
                Becker, Artur; Herrebråden, Henrik; Sanchez, Victor Evaristo Gonzalez; Nymoen, Kristian; Freitas, Carla Maria Dal Sasso &amp; Tørresen, Jim
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1749083/contributors', 'vrtx-publication-contributors-1749083')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Functional Data Analysis of Rowing Technique Using Motion Capture Data.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4368755">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">We present an approach to analyzing the motion capture data of rowers using bivariate functional principal component analysis (bfPCA). The method has been applied on data from six elite rowers rowing on an ergometer. The analyses of the upper and lower body coordination during the rowing cycle revealed significant differences between the rowers, even though the data was normalized to account for differences in body dimensions. We make an argument for the use of bfPCA and other functional data analysis methods for the quantitative evaluation and description of technique in sports.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1722596" class="vrtx-external-publication">
        <div id="vrtx-publication-1722596">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1722596">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ett år med MCT-programmet ved IMV.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3821501">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1711398" class="vrtx-external-publication">
        <div id="vrtx-publication-1711398">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1711398">
                Sanchez, Victor Evaristo Gonzalez; Herrebråden, Henrik; Olimstad, Jostein &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Effects of acoustic pacing on the smoothness of rowing movements.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5143622">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The influence of acoustic stimuli and feedback in sport has been explored as means of optimizing technique, in particular during training. Interactive and adaptive acoustic systems have been evaluated for rowing, with results showing a significant increase in boat velocity. However, assessment of the effects of acoustic feedback and pacing in the technical aspects of rowing is still scarce. Previous studies on the smoothness of the stroke force profile have shown that smoothness metrics can qualitatively reflect movement coordination. In this study, we quantify and compare hand movement smoothness from rowers performing under three acoustic conditions: silence, verbal instructions, and acoustic pacing.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1743183" class="vrtx-external-publication">
        <div id="vrtx-publication-1743183">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1743183">
                Enli, Gunn &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mikroutdanninger gir nye muligheter for videreutdanning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3261709">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1743182" class="vrtx-external-publication">
        <div id="vrtx-publication-1743182">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1743182">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Scientific Computing Use Case: RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4029615">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1699346" class="vrtx-external-publication">
        <div id="vrtx-publication-1699346">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1699346">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Developing the self-playing guitars.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4368828">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The AAAI project holds a final workshop showcasing instruments developed and techniques explored. The workshop also consists of a performance with new pieces for augmented guitars, violins, double basses, ukuleles, as well as six self-playing guitars.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1699343" class="vrtx-external-publication">
        <div id="vrtx-publication-1699343">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1699343">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The RITMO Centre at University of Oslo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4927549">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1699342" class="vrtx-external-publication">
        <div id="vrtx-publication-1699342">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1699342">
                Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tutorial: Musical Gestures Toolbox.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3627560">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">An intensive PhD-level training course on sound and motion analysis with experts in sound and music computing from the Nordic countries.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1699345" class="vrtx-external-publication">
        <div id="vrtx-publication-1699345">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1699345">
                Jensenius, Alexander Refsum; Erdem, Cagri; Zelechowska, Agata; Lan, Qichao; Fuhrer, Julian Peter &amp; Sanchez, Victor Evaristo Gonzalez
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Entraining Guitars.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4904678">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1758880" class="vrtx-external-publication">
        <div id="vrtx-publication-1758880">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1758880">
                Diaz, Ximena Alarcón; Boddie, Paul; Erdem, Cagri; Aandahl, Eigil; Andersen, Elias Sukken &amp; Dahl, Eirik
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1758880/contributors', 'vrtx-publication-contributors-1758880')">
                    [Show all&nbsp;8&nbsp;contributors for this article]</a>
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sensing Place and Presence in an INTIMAL Long-Distance Improvisation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3732728">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">INTIMAL is an interactive system for relational listening, which integrates physical-virtual interfaces for people to sonically improvise between distant locations. The aim is to embrace two key aspects in the context of human migration: the sense of place and the sense of presence. This paper reflects on the use of INTIMAL in a long-distance improvisation between the cities of Oslo, Barcelona and London in May 2019. This improvisation was performed by nine Colombian migrant women, who had been involved in a research process using the Deep Listening® practice developed by Pauline Oliveros. Here we describe the performance setting and the implementation of the first two interfaces of the system: MEMENTO, an “embodied” navigator of an oral archive of Colombian women’s testimonies of conflict and migration; and RESPIRO, a sonification system that transmits and sonifies live, breathing signals between distant locations. We reflect on how the two interfaces facilitated and challenged the improvisers’ listening experiences and connections.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1702711" class="vrtx-external-publication">
        <div id="vrtx-publication-1702711">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1702711">
                Jensenius, Alexander Refsum; Schramm, Rodrigo; Coccioli, Lamberto; Mancini, Clara &amp; Lyons, Michael J.
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ethics at NIME.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3239379">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is intended to discuss how we think about and handle research ethics at NIME conferences. A number of NIME papers involved studies on/with humans. Most of these are on volunteering adults, but there are also examples of studies with children, and with patients. We also see an interest in the community to carry out research on/with animals. NIME’s current ethical guidelines do not take these perspectives into account. The Steering Committee therefore sees the need to develop better and more up-to-date ethical guidelines for the conference. This is to create an increased awareness about the needs for ethical considerations in the community, but also as guidelines for reviewers and conference chairs. NIME is proud of being a very heterogeneous community, covering people working in a large number of different scientific disciplines, artistic practices, as well as R&amp;D in the industry. Needless to say, this breadth of perspectives also means that it is difficult to impose the same guidelines on all studies. NIME researchers also have to abide to a number of different regulations at institutional, regional, national, continental and international levels. The workshop will consist of short introductions to some challenges faced when carrying out research on/with humans and animals, in both scientific and artistic contexts. This will be followed by group-based brainstorming and a final plenary discussion.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1703748" class="vrtx-external-publication">
        <div id="vrtx-publication-1703748">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1703748">
                Erdem, Cagri; Schia, Katja Henriksen &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vrengt: A Shared Body–Machine Instrument for Music–Dance Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3778266">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1702700" class="vrtx-external-publication">
        <div id="vrtx-publication-1702700">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1702700">
                Jensenius, Alexander Refsum; McPherson, Andrew; Sedo, Anna Xambo; Overholt, Dan; Pellerin, Guillaume &amp; Bukvic, Ivica Ico
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1702700/contributors', 'vrtx-publication-contributors-1702700')">
                    [Show all&nbsp;8&nbsp;contributors for this article]</a>
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Open Research Strategies and Tools in the NIME Community.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4738598">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This workshop is intended for discussing how we can develop more and better strategies and tools for opening up research processes and results within the NIME community. The development of more openness in research has been in progress for a fairly long time, and has recently received a lot of more political attention through the Plan S initiative, The Declaration on Research Assessment (DORA), EU&#39;s Horizon Europe, and so on. The NIME community has been positive to openness since the beginning, but still has not been able to fully explore this within the community. We call for a workshop to discuss how we can move forwards in making the NIME community (even) more open throughout all its activities.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1703875" class="vrtx-external-publication">
        <div id="vrtx-publication-1703875">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1703875">
                Erdem, Cagri; Schia, Katja Henriksen &amp; Jensenius, Alexander Refsum
            </span>(2019).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vrengt: A Shared Body–Machine Instrument for Music–Dance Performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3558363">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1621516" class="vrtx-external-publication">
        <div id="vrtx-publication-1621516">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1621516">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sophus Bugges Salong: Rytmer og følelser.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4266521">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor opplever vi rytmer og tid slik vi gjør? &quot;RITMO Senter for tverrfaglig forskning på rytme, tid og bevegelse&quot; vil gi oss svaret.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1584014" class="vrtx-external-publication">
        <div id="vrtx-publication-1584014">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1584014">
                Jensenius, Alexander Refsum &amp; Seres, Silvija
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        #89: Alexander Jensenius: Slik danser du din egen musikk.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Oslo Business Forum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3468055">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvordan kan teknologi og musikk bli til noe veldig spennende? Og hva er unikt med måten vi beveger oss på? Dagens podkastgjest er førsteamanuensis i musikkteknologi, Alexander Jensenius.

I episode #89 av podkastserien ‘De som bygger det nye Norge med Silvija Seres’ snakker Jensenius om hvordan tverrfaglighet mellom uvanlige fag og disipliner åpner nye måter å tenke på.

Han snakker også om hvordan han bruker «motion capture» til å studere mennesker i bevegelse, hva rytme betyr for mennesker og hans beste råd til unge forskere.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1597478" class="vrtx-external-publication">
        <div id="vrtx-publication-1597478">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1597478">
                Sanchez, Victor Evaristo Gonzalez &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Analysing the synchronisation of COM motion with music in human standing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5178989">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Introduction
Postural stability has been the focus of a number of studies on fall prevention and sports, with an emphasis on walking dynamics1 . Fewer studies have aimed at understanding the influence of sound stimuli in standing posture sway 2.
Although the vestibular system plays a fundamental role in the control of postural stability, it has also been shown to be key in embodied cognition processes 3.  It is in part through the vestibular system that music activates motor areas in the brain to induce movement, while body movement enhances the cognitive processing of sound and music 3.
This study explored the influence of music on postural control by measuring synchronization between body center of mass (COM) sway with music.
Methods
7 women (32 ± 4.39 years, 1.73 ± 0.04 m, mean ± SD), and 5 men (29.67 ± 4.63 years, 1.81 ±  0.04 m) participated in the study. Participants were asked to stand still for 6 minutes as they were presented with alternating segments of silence and music. COM movements were measured from the position of a passive marker placed in the midline of the sacrum, recorded using an infrared motion capture system. Radial and vertical COM movements were cross-correlated with the pulse clarity, RMS, and spectral centroid of the stimuli.
Results
Paired samples t-test revealed differences in COM radial and vertical sway between silent and music conditions to be significant at the 0.05 level.
A repeated measures ANOVA showed a significant effect of the stimuli on COM sway (p &lt; 0.05).
The effect of the stimuli on the lag of maximum cross-correlation (delay) between COM radial sway and RMS  was shown to be significant (p &lt; 0.05). Differences in delay between pulse clarity and COM vertical sway were significant between stimuli (p &lt; 0.05 ).
Discussion
Results suggest that the effect of RMS in music-induced postural sway might be predominant in the radial plane, with anticipatory behavior observed for stimuli with low RMS.
Vertical sway correspondence patterns suggest anticipatory vertical motion to music spectral centroid.
A more robust understanding of a range of music features and their links with induced movement could lead to insight into the role of the vestibular and sensory systems in balance control.

References
1 Cimolin, V., Galli, M. (2014). Summary measures for clinical gait analysis: A literature review. Gait &amp; Posture 39, 1005-1010.
2 Ross, J. M., Warlaumont, A. S., Abney, D. H., Rigoli, L. M., and Balasubramaniam, R. (2016). Influence of musical groove on postural sway. Journal of Experimental Psychology: Human Perception and Performance Advance online publication.
3 Todd, N. P. (1999). Motion in music: A neurobiological perspective. Music Perception: An Interdisciplinary Journal 17, 115–126.
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1611404" class="vrtx-external-publication">
        <div id="vrtx-publication-1611404">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611404">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Introduction to Musical Gestures Toolbox for Matlab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5053581">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1611412" class="vrtx-external-publication">
        <div id="vrtx-publication-1611412">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611412">
                Martin, Charles Patrick; Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata; Erdem, Cagri &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness under Tension.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3264339">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1678067" class="vrtx-external-publication">
        <div id="vrtx-publication-1678067">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1678067">
                Fuhrer, Julian; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interactive Animation of RITMO Logo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3565675">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1678068" class="vrtx-external-publication">
        <div id="vrtx-publication-1678068">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1678068">
                Fuhrer, Julian Peter; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interactive Animation of the RITMO Logo.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4879731">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">In this project the logo of RITMO is installed in an interactive animation. It is able to move in accordance with the frequency band of an audio input stream. That is to say, the RITMO logo interacts with the rhythmical streams of music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1611417" class="vrtx-external-publication">
        <div id="vrtx-publication-1611417">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611417">
                Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Rytme i tid og rom.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5056181">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1611450" class="vrtx-external-publication">
        <div id="vrtx-publication-1611450">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611450">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Hvilken type IKT-kompetanse trenger humanister og samfunnsvitere?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3275060">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1611415" class="vrtx-external-publication">
        <div id="vrtx-publication-1611415">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611415">
                Martin, Charles Patrick; Lesteberg, Mari; Jawad, Karolina; Aandahl, Eigil; Xambó, Anna &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness under Tension.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4714973">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1611422" class="vrtx-external-publication">
        <div id="vrtx-publication-1611422">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1611422">
                B&oslash;hn, Einar Duenger; Smajdor, Anna Colette &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Humaniora og teknologi.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4685657">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1592908" class="vrtx-external-publication">
        <div id="vrtx-publication-1592908">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1592908">
                Zelechowska, Agata; Sanchez, Victor Evaristo Gonzalez &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        How music moves us? Studying human body micromotion in music perception.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3881858">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music has the power to influence not only our thoughts and emotions, but also various physiological processes in our bodies. Furthermore, it often encourages physical movement of the listener. While there are numerous studies describing spontaneous psychophysiological responses to music that are linked with emotions, spontaneous body movement to music has become a topic of exploration relatively recently. Mostly, it has been studied in the context of free dance (Burger et al., 2013) or synchronization to musical rhythm while performing repetitive movements such as walking (Styns et al., 2007). But what can we observe if the participants are just standing still? In our project “MICRO - Human Bodily Micromotion in Music Perception and Interaction” we focus on movements so small that they can be unnoticed both by observer and performer, and that can happen involuntarily. This is what we call “micromotion” of the human body. To see how these small movements are affected by music, we develop different experiments using mainly motion capture technology, but also physiological measures such as electromyography (EMG). In this presentation I would like to describe some of our research methods, findings and plans. 
In one of the experiment paradigms, disguised as the “Norwegian Championship of Standstill”, we invite groups of participants to the laboratory and ask them to stand as still as possible while we present them with segments of selected music or silence. The head motion of each participant is captured using an infrared optical system. In 2012, 91 subjects stood on the floor for 3 minutes in silence and 3 minutes listening to music of increasing level of rhythmicality and energy (Jensenius et al., 2017). In 2017, 71 participants listened to 6 minutes consisting of segments of silence alternating with electronic dance music (EDM), classical Indian music or Norwegian fiddle music. In both studies we observed higher mean quantity of motion of the participants (QoM) in music condition compared to silence condition, and the effect was driven mostly by EDM. We also observed correlations between QoM and participant’s age, height and standing strategy (locked knees), although these results are mixed between the two studies. The future goal is to look more closely into specific features in music that correspond with observed movement, to search for signs of rhythmic entrainment, and to see what demographic and psychological factors might contribute to interpersonal differences in music induced body micromotion.

References:
Burger, B., Thompson, M. R., Luck, G., Saarikallio, S., &amp; Toiviainen, P. (2013). Influences of rhythm-and timbre-related musical features on characteristics of music-induced movement. Frontiers in psychology, 4, 183.
Jensenius, A. R., Zelechowska, A., &amp; Gonzalez Sanchez, V. E. (2017). The Musical Influence on People&#39;s Micromotion when Standing Still in Groups. In Proceedings of the SMC Conferences (pp. 195-200). Aalto University.
Styns, F., van Noorden, L., Moelants, D., &amp; Leman, M. (2007). Walking on music. Human movement science, 26(5), 769-785.
</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1605682" class="vrtx-external-publication">
        <div id="vrtx-publication-1605682">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1605682">
                Aronsen, Jan Magnus &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        «Røvertidsskriftene» er bare toppen av isfjellet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5125481">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Staten betaler for ny kunnskap, gir den bort gratis, og kjøper den deretter tilbake av internasjonale forlag.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1563038" class="vrtx-external-publication">
        <div id="vrtx-publication-1563038">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1563038">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kan musikk påvirke stillstanden din?                </span>
                <span class="vrtx-publisher publisher-other publisher-category-ARTICLEPOPULAR">
                        Forskning.no.
                </span>
                <span class="vrtx-issn">ISSN 1891-635X.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5020405">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Det er umulig å stå stille. Kroppen lever og beveger seg hele tiden. Selv om man forsøker å stå i ro, klarer man det ikke helt. Så hvor stille står vi egentlig? Og hvordan påvirker musikk oss når vi står stille?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1634523" class="vrtx-external-publication">
        <div id="vrtx-publication-1634523">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634523">
                Vestre, Eskil Olaf; Danielsen, Anne; Jensenius, Alexander Refsum; London, Justin; Schia, Katja Henriksen &amp; Abramczyk, Filip
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Rytmen er en danser.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3591284">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1586173" class="vrtx-external-publication">
        <div id="vrtx-publication-1586173">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1586173">
                Jensenius, Alexander Refsum; Adde, Lars &amp; Flydal, Lars O.
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskningsmøte mellom musikk og medisin.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Vårt Land.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4986086">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Tverrfaglig nytte: Forskning på kroppens rytmer og bevegelser har skapt nye diagnoseverktøy som gjør at cerebral parese kan påpekes tidligere.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1634524" class="vrtx-external-publication">
        <div id="vrtx-publication-1634524">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634524">
                Jensenius, Alexander Refsum; Danielsen, Anne; London, Justin; Schia, Katja Henriksen &amp; Abramczyk, Filip
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Intellectual warm-up: &quot;Rhythm&quot;.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3655972">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1634527" class="vrtx-external-publication">
        <div id="vrtx-publication-1634527">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634527">
                Schia, Katja Henriksen; Erdem, Cagri; Lan, Qichao &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music-Dance Performance: &quot;Rhythm&quot;.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5243748">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1578174" class="vrtx-external-publication">
        <div id="vrtx-publication-1578174">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1578174">
                Jensenius, Alexander Refsum; Sørbø, Solveig Isis &amp; Arvola, Jakob
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        MusicLab - et nytt konsept for forskning og forskningsformidling.
                </span>
                    [Radio].
                <span class="vrtx-publisher publisher-other publisher-category-PROGRAMPARTICIP">
                        NRK Klassisk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3986205">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1583721" class="vrtx-external-publication">
        <div id="vrtx-publication-1583721">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1583721">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Introduction to the SoundTracer Project.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3242644">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1597338" class="vrtx-external-publication">
        <div id="vrtx-publication-1597338">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1597338">
                Serafin, Stefania; Dahl, Sofia; Bresin, Roberto; Jensenius, Alexander Refsum; Unnthorsson, Runar &amp; Välimäki, Vesa
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        NordicSMC: A Nordic University Hub on Sound and Music Computing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4077647">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Sound and music computing (SMC) is still an emerging field in many institutions, and the challenge is often to gain critical mass for developing study programs and undertake more ambitious research projects. We report on how a long-term collaboration between small and medium-sized SMC groups have led to an ambitious undertaking in the form of the Nordic Sound and Music Computing Network (NordicSMC), funded by the Nordic Research Council and institutions from all of the five Nordic countries (Denmark, Finland, Iceland, Norway, and Sweden). The constellation is unique in that it covers the field of sound and music from the “soft” to the “hard,” including the arts and humanities, the social and natural sciences, and engineering. This paper describes the goals, activities, and expected results of the network, with the aim of inspiring the creation of other joint efforts within the SMC community.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1620257" class="vrtx-external-publication">
        <div id="vrtx-publication-1620257">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1620257">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forskerbloggen: Hvorfor får man lyst til å danse til musikk?                </span>
                    [TV].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        NRK.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4550367">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1575165" class="vrtx-external-publication">
        <div id="vrtx-publication-1575165">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1575165">
                Jensenius, Alexander Refsum &amp; Tønnesen, Eva
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Står mot hverandre om hva som er best for å sikre topp forskning.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4314626">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Forskningsmiljøer står mot hverandre i en debatt om hvordan best satse langsiktig på å utvikle forskningsmiljøer i verdenstoppen. Nå tar Arbeiderpartiets Nina Sandberg debatten inn i Stortinget.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1607784" class="vrtx-external-publication">
        <div id="vrtx-publication-1607784">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1607784">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fremtiden er analog - perspektiver på humaniora og teknologi.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3752190">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1583723" class="vrtx-external-publication">
        <div id="vrtx-publication-1583723">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1583723">
                Lartillot, Olivier; Jensenius, Alexander Refsum; Haugen, Toril &amp; Baarøy, Fillip-André
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Finner musikk ved å bevege hånden.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        HF aktuelt.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4288223">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1607781" class="vrtx-external-publication">
        <div id="vrtx-publication-1607781">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1607781">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunnskapsledelse av et Senter for fremragende forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3437849">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1607783" class="vrtx-external-publication">
        <div id="vrtx-publication-1607783">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1607783">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ny musikkforskning ved RITMO.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3502928">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1636600" class="vrtx-external-publication">
        <div id="vrtx-publication-1636600">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1636600">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The recording and use of videos from music events – how to balance ethical requirements and FAIR principles using data from live concerts, music performances and the like?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3289162">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1636605" class="vrtx-external-publication">
        <div id="vrtx-publication-1636605">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1636605">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Åpen forskning - et humanistisk-teknologisk perspektiv.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5073382">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1586318" class="vrtx-external-publication">
        <div id="vrtx-publication-1586318">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1586318">
                Lartillot, Olivier; Jensenius, Alexander Refsum &amp; Haugen, Toril
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Mobilen finner musikk når du beveger hånden.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Titan.uio.no.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4248787">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1580314" class="vrtx-external-publication">
        <div id="vrtx-publication-1580314">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1580314">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The importance of &quot;nothing&quot;: studying human music-related micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3669431">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none"> How much do people move when they try to stand still? Does listening to music influence your micromotion? Can we use micromotion in human-computer interaction? In this presentation, music technologist Alexander Refsum Jensenius (RITMO, UiO) will share some results from his research on human cognition on the boundaries between the conscious and the unconscious, the voluntary and the involuntary.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1580315" class="vrtx-external-publication">
        <div id="vrtx-publication-1580315">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1580315">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Studying &quot;nothing&quot;: complexities of human music-related micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5055521">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">How much do people move when they try to stand still? Does listening to music influence your micromotion? Can we use micromotion in human-computer interaction? In this presentation, music technologist Alexander Refsum Jensenius (RITMO, UiO) will share some results from his research on human cognition on the boundaries between the conscious and the unconscious, the voluntary and the involuntary.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1570966" class="vrtx-external-publication">
        <div id="vrtx-publication-1570966">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1570966">
                Jensenius, Alexander Refsum; Bjerkestrand, Kari Anne Vadstensvik; Johnson, Victoria; Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Jensenius, Francesca R.
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sverm-Pluck.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4883010">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1638074" class="vrtx-external-publication">
        <div id="vrtx-publication-1638074">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1638074">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Paneldeltagelse: Veien til åpen tilgang - konsekvenser og implementering av Plan S.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3569378">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1570208" class="vrtx-external-publication">
        <div id="vrtx-publication-1570208">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1570208">
                Jack, Robert; Jensenius, Alexander Refsum; Sanchez, Victor Evaristo Gonzalez; Bjerkestrand, Kari Anne Vadstensvik; Zelechowska, Agata &amp; Martin, Charles Patrick
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1570208/contributors', 'vrtx-publication-contributors-1570208')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sverm-Resonans: interactive installation with resonating guitars and Bela.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Bela blog.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3373968">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Sverm-Resonans is an interactive installation developed for the Ultima Contemporary Music Festival in Oslo which features 6 suspended guitars, each fitted with an actuator, distance sensor and a Bela.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1614664" class="vrtx-external-publication">
        <div id="vrtx-publication-1614664">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1614664">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Musical Gestures Toolbox for Matlab.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4863271">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The Musical Gestures Toolbox for Matlab (MGT) aims at
assisting music researchers with importing, preprocessing,
analyzing, and visualizing video, audio, and motion capture data in a coherent manner within Matlab.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1579307" class="vrtx-external-publication">
        <div id="vrtx-publication-1579307">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1579307">
                Jensenius, Alexander Refsum; Sparbo, Njål; Sen, Sagar &amp; Edvardsen, Elisabeth
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Breath, breathing and the sensing of breathing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3304038">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1638083" class="vrtx-external-publication">
        <div id="vrtx-publication-1638083">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1638083">
                Jensenius, Alexander Refsum; Sundquist, Jonas Hartford &amp; Eriksen, Siri Øverland
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        De er studenter ved to universiteter samtidig.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4530335">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">NTNU og Universitetet i Oslo har for første gang gått sammen om å lage et felles studieprogram. Resultatet er masteren Music, Communication and Technology. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1564950" class="vrtx-external-publication">
        <div id="vrtx-publication-1564950">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1564950">
                Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The Norwegian Centre of Excellence RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3345718">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1564949" class="vrtx-external-publication">
        <div id="vrtx-publication-1564949">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1564949">
                Jensenius, Alexander Refsum &amp; Vogt, Yngve
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Musikkprogram kan avsløre cerebral parese.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Apollon.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3661183">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Musikkforsker har laget et dataprogram for å måle bevegelsene til dansere. Nå bruker medisinere verktøyet hans til å avsløre om små babyer har cerebral parese.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1631851" class="vrtx-external-publication">
        <div id="vrtx-publication-1631851">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1631851">
                Jensenius, Alexander Refsum &amp; Tømte, Even
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Når bassisten står 500 kilometer unna.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Musikkultur.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3396891">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">På masterprogrammet Music, communication, and technology bruker studenter i Oslo og Trondheim avansert videokonferanseutstyr til å jamme i sanntid.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1634761" class="vrtx-external-publication">
        <div id="vrtx-publication-1634761">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634761">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Evaluering, merittering og forskningsvurdering.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4548401">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1634741" class="vrtx-external-publication">
        <div id="vrtx-publication-1634741">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634741">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Omvendt klasserom mellom to universiteter: Master Programme in Music, Communication &amp; Technology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4734043">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1563896" class="vrtx-external-publication">
        <div id="vrtx-publication-1563896">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1563896">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO - et nytt senter for fremragende forskning.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4936159">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1634759" class="vrtx-external-publication">
        <div id="vrtx-publication-1634759">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1634759">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Comments on Open Science - Do we need a national action plan?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4782828">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1610041" class="vrtx-external-publication">
        <div id="vrtx-publication-1610041">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1610041">
                Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Muscle activity response of the audience during an experimental music performance.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3582994">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This exploratory study investigates muscular activity characteristics of a group of audience members during an experimental music performance. The study was designed to be as ecologically valid as possible, collecting data in a concert venue and making use of low-invasive measurement techniques. Muscle activity (EMG) from the forearms of 8 participants revealed that sitting in a group could be an indication of a level of group engagement, while comparatively greater muscular activity from a participant sitting at close distance to the stage suggests performance-induced bodily responses. The self-reported measures rendered little evidence supporting the links between muscular activity and live music exposure, although a larger sample size and a wider range of music styles need to be included in future studies to provide conclusive results.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1581717" class="vrtx-external-publication">
        <div id="vrtx-publication-1581717">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1581717">
                Jensenius, Alexander Refsum; Duch, Michael Francis; Langdalen, Jørgen; Åse, Tone; Larsen, Edvine &amp; Østern, Tone Pernille
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kunsten å forske.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4153991">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hva er kunstnerisk forskning? Hvorfor heter det kunstnerisk utviklingsarbeid og ikke kunstnerisk forskning når det heter det i andre land? Er det sånn at kunstnerisk forskning skiller seg fra all annen type forskning, og i så fall hvorfor?</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593444" class="vrtx-external-publication">
        <div id="vrtx-publication-1593444">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593444">
                Jensenius, Alexander Refsum; Martin, Charles Patrick; Bjerkestrand, Kari Anne Vadstensvik &amp; Johnson, Victoria
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness under Tension.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4652834">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1593438" class="vrtx-external-publication">
        <div id="vrtx-publication-1593438">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593438">
                Sanchez, Victor Evaristo Gonzalez; Martin, Charles Patrick; Zelechowska, Agata; Bjerkestrand, Kari Anne Vadstensvik; Johnson, Victoria &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Bela-based augmented acoustic guitars for sonic microinteraction.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3850611">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This article describes the design and construction of a collection of digitally-controlled augmented acoustic guitars, and the use of these guitars in the installation <em>Sverm-Resonans</em>. The installation was built around the idea of exploring ‘inverse’ sonic microinteraction, that is, controlling sounds by the micromotion observed when attempting to stand still. It consisted of six acoustic guitars, each equipped with a Bela embedded computer for sound processing (in Pure Data), an infrared distance sensor to detect the presence of users, and an actuator attached to the guitar body to produce sound. With an attached battery pack, the result was a set of completely autonomous instruments that were easy to hang in a gallery space. The installation encouraged explorations on the boundary between the tactile and the kinesthetic, the body and the mind, and between motion and sound. The use of guitars, albeit with an untraditional ‘performance’ technique, made the experience both familiar and unfamiliar at the same time. Many users reported heightened sensations of stillness, sound, and vibration, and that the ‘inverse’ control of the instrument was both challenging and pleasant.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1640855" class="vrtx-external-publication">
        <div id="vrtx-publication-1640855">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1640855">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        How I try to do it FAIR.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4271758">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1640854" class="vrtx-external-publication">
        <div id="vrtx-publication-1640854">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1640854">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Motion Capture in Music Performance, Perception and Interaction.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3410250">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1677999" class="vrtx-external-publication">
        <div id="vrtx-publication-1677999">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1677999">
                Fuhrer, Julian; Glette, Kyrre &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Interactive Opening Animation.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4392241">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">With this project we installed the logo of RITMO in an interactive animation for the opening of the centre. The logo is enabled to receive audio input such that it is able to move in accordance with the frequency band of the input. That is to say, the logo is able to move along with rhythmic streams of the music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593442" class="vrtx-external-publication">
        <div id="vrtx-publication-1593442">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593442">
                Martin, Charles Patrick; Jensenius, Alexander Refsum &amp; Tørresen, Jim
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Composing an ensemble standstill work for Myo and Bela.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4602077">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes the process of developing a standstill performance work using the Myo gesture control armband and the Bela embedded computing platform. The combination of Myo and Bela allows a portable and extensible version of the standstill performance concept while introducing muscle tension as an additional control parameter. We describe the technical details of our setup and introduce Myo-to-Bela and Myo-to-OSC software bridges that assist with prototyping compositions using the Myo controller.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593441" class="vrtx-external-publication">
        <div id="vrtx-publication-1593441">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593441">
                Martin, Charles Patrick; Xambó, Anna; Visi, Federico; Morreale, Fabio &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Stillness under Tension.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3580659">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">“Stillness Under Tension” is an ensemble standstill work for Myo gesture control armband and Bela embedded music platform. Humans are incapable of standing completely still due to breathing and other involuntary micromotions. This work explores the expressive space of standing still through an inverse action-sound mapping: less movement leads to more sound. Four performers stand as still as possible on stage, each wearing a Myo armband connected to a Bela embedded sound processing platform. The Myo is used to measure the performers' movement, and the muscle activity in their forearm which they can use—both voluntarily and involuntarily—to control a synthesised sound world. Each performer uses one Myo and Bela in a musical space defined by their physical position and posture while standing still.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1593434" class="vrtx-external-publication">
        <div id="vrtx-publication-1593434">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1593434">
                Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        UiOs innovasjonspris til RITMO ved Anne Danielsen og Alexander Refsum Jensenius.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3917188">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">F?r prisen for tverrfaglig, banebrytende og innovativt arbeid. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1605099" class="vrtx-external-publication">
        <div id="vrtx-publication-1605099">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1605099">
                Lartillot, Olivier; Thedens, Hans-Hinrich &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Computational model of pitch detection, perceptive foundations, and application to Norwegian fiddle music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4416925">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Background
Automated detection of pitch in polyphonic music remains a difficult challenge (Benetos et al., 2013). Robust solutions can be found for simple cases such as monodies. Implementation of perceptive/cognitive models have been so far less successful than engineering methods, and in particular machine learning models. One reference model (Klapuri, 2006) preselects pitch candidates based on harmonic summation and searches for multiple pitches through cancellation.
Aims
The aim was to conceive a model for pitch detection in polyphonic music able to transcribe in details traditional Norwegian music played on Hardanger fiddle, where more than two strings are played at the same time. The new model should be applicable to other types of music as well. Perceptive and cognitive models should guide the improvement of the state of the art.
Main Contribution
The model is neither based on a machine-learning training on a given set of samples, nor explicitly relying on stylistic rules. Instead, the methodology consists in conceiving a set of rules as simple and general as possible while offering satisfying results for the chosen corpus of music. We follow some general principles of the model by (Klapuri 2006) while introducing new heuristics. We present a new method for harmonic summation that penalises harmonic series that are sparse, in particular when odd partials are absent, as it would indicate that the actual harmonic series is a multiple of the given pitch candidate. Besides, a multiple of a fundamental can be selected as pitch in addition to the fundamental itself if its attack phase is sufficiently distinctive. For that purpose, we introduce a concept of pitch percept that persists over the whole extent of the note, and that serves as a reference for the detection of higher pitches at harmonic intervals.
Results
The proposed method enables to obtain transcriptions of relatively good quality, with a low ratio of false positives and false negatives. The construction of the model is under refinement. We are applying this method to the analysis of recordings of Norwegian folk music, containing a large part of Hardinger fiddle pieces and a cappella singing.
Implications
Automated transcription is of high interest for musicology and music information retrieval. This enables for instance to build large corpora of scores for music analysis and opens news perspectives for computational musicology. By attempting to design computer models based on general rules as simple as possible rather than on machine learning, while resulting in a behaviour in terms of pitch detection that comes closer to human capabilities, we hypothesise that the underlying mechanisms thus modelled might suggest general computational capabilities that could be found in cognitive models as well. In the same time, an improvement of the model based on expertise in music perception and cognition is desired.
References
Benetos et al. (2013). Automatic music transcription: challenges and future directions. Journal of Intelligent Information Systems, 41, 407-434
Klapuri, Multiple Fundamental Frequency Estimation by Summing Harmonic Amplitudes. ISMIR 2006 Keywords: pitch, computational model, harmonic summation, Norwegian folk music, Hardanger fiddle.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1589120" class="vrtx-external-publication">
        <div id="vrtx-publication-1589120">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1589120">
                Kelkar, Tejaswinee; Roy, Udit &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Evaluating a collection of Sound-Tracing Data of Melodic Phrases.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4024849">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1606024" class="vrtx-external-publication">
        <div id="vrtx-publication-1606024">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1606024">
                Toft, Martin &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        UiO og NTNU har starta revolusjonerande studium i musikkteknologi.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4303668">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">I dag opna Universitetet i Oslo og NTNU eit felles masterprogram der studentane skal utvikla musikkteknologi i det felles elektroniske klasserommet Portalen. – Det er utruleg at det er blitt realisert to år etter at eg kom med ideen, seier musikkforskar Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1606033" class="vrtx-external-publication">
        <div id="vrtx-publication-1606033">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1606033">
                Jensenius, Alexander Refsum &amp; Klausen, Aslaug Olette
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Nytt musikkteknologstudie skal foregå i Oslo og Trondheim samtidig.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4328393">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Masterprogrammet er det første delte studiet i sitt slag, mellom NTNU og Universitetet i Oslo. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1606031" class="vrtx-external-publication">
        <div id="vrtx-publication-1606031">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1606031">
                Jensenius, Alexander Refsum &amp; Lie, Tove
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Ønsker lærernorm i høyere utdanning: Maksimalt 20 studenter per foreleser.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5225364">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Det er på tide å diskutere en lærernorm for høyere utdanning, mener førsteamanuensis ved Universitetet i Oslo, Alexander Refsum Jensenius.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1606029" class="vrtx-external-publication">
        <div id="vrtx-publication-1606029">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1606029">
                Jåre, Lisbet; Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        På sporet av rytmen.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Uniforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4728158">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvorfor får vi lyst til å bevege oss når vi hører musikk? Vinnerne av UiOs innovasjonspris, Anne Danielsen og Alexander Refsum Jensenius, finner forhåpentligvis svaret når de fordyper seg i mennesket og rytmens mysterier.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1546880" class="vrtx-external-publication">
        <div id="vrtx-publication-1546880">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1546880">
                Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Dans i musikkfaget i skolen.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3772879">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1450149" class="vrtx-external-publication">
        <div id="vrtx-publication-1450149">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1450149">
                Jensenius, Alexander Refsum &amp; Klausen, Aslaug Olette
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Vil finne magien i musikken.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Morgenbladet.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4726170">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Nytt forskningsprosjekt skal studere mikrobevegelser, og skaper ekte eksperimentell musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1507034" class="vrtx-external-publication">
        <div id="vrtx-publication-1507034">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1507034">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        From resonating strings to autonomous electronic instruments - towards a new organology.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3970784">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Vi lever i en spennende tid, med stadig nye varianter av både akustiske og elektroniske instrumenter. Disse passer sjelden inn i de tradisjonelle organologiske fremstillingene, noe som gjør at det er behov for en mer systematisk diskusjon av hvordan man kan klassifisere både instrumenter (i utvidet forstand) og deres spilleteknikk. I denne presentasjonen vil jeg forklare hovedelementene i en ny organologi som jeg holder på å utvikle, med utgangspunkt i det jeg kaller &quot;handling-lyd-koblinger&quot;.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1534032" class="vrtx-external-publication">
        <div id="vrtx-publication-1534032">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1534032">
                Jensenius, Alexander Refsum &amp; Sundquist, Jonas Hartford
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Forsker på musikk - ved å stå stille.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Khrono.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4265352">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Hvor stille kan du egentlig stå når du hører på musikk? Det forsøker Alexander Refsum Jensenius å finne svar på.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1480032" class="vrtx-external-publication">
        <div id="vrtx-publication-1480032">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1480032">
                Danielsen, Anne &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Centre for Interdisciplinary Studies in Rhythm, Time and Motion får status som Senter for fremragende forskning.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3473509">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Sentret ved institutt for musikkvitenskap på Universitetet i Oslo skal sammen med ni andre senter dele på til sammen 1,5 milliarder kroner over ti år for å drive forskning på internasjonalt toppnivå.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1480037" class="vrtx-external-publication">
        <div id="vrtx-publication-1480037">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1480037">
                Jensenius, Alexander Refsum &amp; Flydal, Lars O.
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Leter etter magien i musikken.
                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Vårt Land.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4709183">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius leter etter det magiske i musikken. Hans drøm er å hente ut kroppens egen musikk.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1480038" class="vrtx-external-publication">
        <div id="vrtx-publication-1480038">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1480038">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        What steps can government and university leaders take to promote Open Science?                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3776783">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1476754" class="vrtx-external-publication">
        <div id="vrtx-publication-1476754">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1476754">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        The importance of &quot;nothing&quot;: studying human music-related micromotion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4488647">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This presentation will focus on my research on human micromotion in musical contexts. My scientific research has focused on understanding more about the phenomenon of human standstill and how music influences our micromotion when standing still. My artistic research has focused on the exploration of micromotion in music and dance performance, and particularly how it is possible to set up systems for sonic microinteraction. My two separate &quot;tracks&quot; of research, the scientific and artistic, have positively reinforced each other, shedding light on a level of musical expressivity on the boundary between the conscious and the unconscious, the voluntary and the involuntary. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1476757" class="vrtx-external-publication">
        <div id="vrtx-publication-1476757">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1476757">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Micro, Meso, Macro: Music-related body motion at different spatiotemporal levels.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4028050">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Performance of acoustic instruments is often happening at a spatiotemporal micro-level. Violin performance, for example, is based on an extreme control of the spatial placement of the left-hand fingering and the right-hand bow strokes. Even though there are exceptions, many digital musical instruments (DMIs) are based on meso- or macro-level control, that is, fairly large and slow control actions compared to acoustic instruments. In this talk I will present a theoretical framework for sound-producing actions and a related organological model. This will be exemplified with some of my empirical results of music-induced dancing, &quot;air instrument&quot; performance and sonic microinteraction.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1480035" class="vrtx-external-publication">
        <div id="vrtx-publication-1480035">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1480035">
                Aronsen, Jan Magnus; Jensenius, Alexander Refsum; Emblem, Kyrre Eeg; Storeng, Katerini Tagmatarchi; Enberg, Katja &amp; Knutsen, Carl Henrik
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1480035/contributors', 'vrtx-publication-contributors-1480035')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Kvalitetsparadokset i den norske forskningspolitikken.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4004433">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">De fleste insentivene norske forskere opplever i sin hverdag, er preget av kortsiktige resultater og målstyring mot kvantitet heller enn kvalitet. Dette er et kvalitetsparadoks i norsk forskningspolitikk, skriver flere medlemmer i Akademiet for yngre forskere. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1480042" class="vrtx-external-publication">
        <div id="vrtx-publication-1480042">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1480042">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Online tools that will kickstart creativity.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5221140">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Music making has moved into the cloud. In this lecture-demonstration, Alexander Refsum Jensenius will show various tools for online music making, ranging from simple sound makers to advanced music programming. He will talk about the possibilities and limitations of various technologies, and propose a framework for understanding how online music making will shape the future of music.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1498071" class="vrtx-external-publication">
        <div id="vrtx-publication-1498071">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1498071">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        How Build An Eco-System? Panel discussion on how to best support a Norwegian music/tech eco-system.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4159107">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1498066" class="vrtx-external-publication">
        <div id="vrtx-publication-1498066">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1498066">
                Jensenius, Alexander Refsum &amp; Klausen, Aslaug Olette
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tar roboter over musikken?                </span>
                    [Journal].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Ballade.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4535808">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Ja, mener musikkforsker, som samtidig mener dette gir mer plass til mennesker. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1446378" class="vrtx-external-publication">
        <div id="vrtx-publication-1446378">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1446378">
                Jensenius, Alexander Refsum &amp; Svarstad, J?rgen
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Han brukte fire år på å få forskningsstøtte.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        Forskerforum.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3586826">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Alexander Refsum Jensenius synes ikke det er bortkastet tid å skrive søknader til Forskningsrådet som blir avvist. </p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1473449" class="vrtx-external-publication">
        <div id="vrtx-publication-1473449">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1473449">
                Jensenius, Alexander Refsum; Sanchez, Victor Evaristo Gonzalez; Zelechowska, Agata &amp; Bjerkestrand, Kari Anne Vadstensvik
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring the Myo controller for sonic microinteraction.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4984341">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper explores sonic microinteraction using muscle sensing through the Myo armband. The first part presents results from a small series of experiments aimed at finding the baseline micromotion and muscle activation data of people
being at rest or performing short/small actions. The second part presents the prototype instrument MicroMyo, built around the concept of making sound with little motion. The instrument plays with the convention that inputting more energy into an instrument results in more sound. MicroMyo, on the other hand, is built so that the less you move, the more it sounds. Our user study shows that while such an &quot;inverse instrument&quot; may seem puzzling at first, it also opens a space for interesting musical interactions.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1476048" class="vrtx-external-publication">
        <div id="vrtx-publication-1476048">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1476048">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        RITMO - Centre for Interdisciplinary Studies in Rhythm, Time and Motion.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4716126">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1471113" class="vrtx-external-publication">
        <div id="vrtx-publication-1471113">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1471113">
                Emblem, Kyrre Eeg; Enberg, Katja; Helland, Ingeborg Palm; Jensenius, Alexander Refsum; Rognes, Marie &amp; Storeng, Katerini Tagmatarchi
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/1471113/contributors', 'vrtx-publication-contributors-1471113')">
                    [Show all&nbsp;7&nbsp;contributors for this article]</a>
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Tenk nytt om tellekanter, Haugstad!                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4467274">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">En siteringsindikator kan være positivt for å måle forskningskvalitet fremfor -kvantitet, men vi trenger fornyelse av tellekantsystemet.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1515239" class="vrtx-external-publication">
        <div id="vrtx-publication-1515239">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1515239">
                Jensenius, Alexander Refsum &amp; Fotland, Margaret Louise
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        European Open Data Champions.
                </span>
                    [Internet].
                <span class="vrtx-publisher publisher-other publisher-category-MEDIAINTERVIEW">
                        SPARC Europe.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5059879">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">A research paper without accompanying data is incomplete.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1489095" class="vrtx-external-publication">
        <div id="vrtx-publication-1489095">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1489095">
                Jensenius, Alexander Refsum &amp; Kelkar, Tejaswinee
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Improvisation for Linnstrument, voice and Mogees.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4677992">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1487102" class="vrtx-external-publication">
        <div id="vrtx-publication-1487102">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1487102">
                Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Exploring melody and motion features in “sound-tracings”.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3706507">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">Pitch and spatial height are often associated when describing music. In this paper we present results from a sound tracing study in which we investigate such sound–motion relationships. The subjects were asked to move as if they were creating the melodies they heard, and their motion was captured with an infra-red, marker-based camera system. The analysis is focused on calculating feature vectors typically used for melodic contour analysis. We use these features to compare melodic contour typologies with motion contour typologies. This is based on using proposed feature sets that were made for melodic contour similarity measurement. We apply these features to both the melodies and the motion contours to establish whether there is a correspondence between the two, and find the features that match the most. We find a relationship between vertical motion and pitch contour when evaluated through features rather than simply comparing contours.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1487101" class="vrtx-external-publication">
        <div id="vrtx-publication-1487101">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1487101">
                Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Fremtidens musikk.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4299167">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1517978" class="vrtx-external-publication">
        <div id="vrtx-publication-1517978">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1517978">
                Jensenius, Alexander Refsum; Martin, Charles Patrick; Bjerkestrand, Kari Anne Vadstensvik &amp; Johnson, Victoria
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sverm-Muscle.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4050094">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1487105" class="vrtx-external-publication">
        <div id="vrtx-publication-1487105">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1487105">
                Kelkar, Tejaswinee &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Representation Strategies in Two-handed Melodic Sound-Tracing.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3517227">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This paper describes an experiment in which the subjects performed a sound-tracing task to vocal melodies. They could move freely in the air with two hands, and their motion was captured using an infrared, marker-based system. We present a typology of distinct strategies used by the recruited participants to represent their perception of the melodies. These strategies appear as ways to represent time and space through the finite motion possibilities of two hands moving freely in space. We observe these strategies and present their typology through qualitative analysis. Then we numerically verify the consistency of these strategies by conducting tests of significance between labeled and random samples.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1517976" class="vrtx-external-publication">
        <div id="vrtx-publication-1517976">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1517976">
                Jensenius, Alexander Refsum; Bjerkestrand, Kari Anne Vadstensvik; Donnarumma, Marco; Brean, Are &amp; Bruusgaard, Jo C.
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Panel: Biophysical Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/3373936">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">&quot;Biophysical Music&quot; is volume 1 of the new concept &quot;MusicLab&quot;, a series of events exploring the science of music from different perspectives. The idea is to mix research and edutainment through hands-on workshops, intellectual warm-ups, performances and data jockeying.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1484328" class="vrtx-external-publication">
        <div id="vrtx-publication-1484328">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1484328">
                Solberg, Ragnhild Torvanger &amp; Jensenius, Alexander Refsum
            </span>(2017).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Arm and Head Movements to Musical Passages of Electronic Dance Music.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/5091368">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2255771" class="vrtx-external-publication">
        <div id="vrtx-publication-2255771">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2255771">
                Bochynska, Agata; Bergstrøm, Rebecca Josefine Five &amp; Jensenius, Alexander Refsum
            </span>(2024).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Opphavsrettslige utfordringer ved overgangen til FAIR
forskningsdata ved UiO - Notat fra en QualiFAIR-arbeidsgruppe.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-REPORT">
                        QualiFair.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4684516">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-2239090" class="vrtx-external-publication">
        <div id="vrtx-publication-2239090">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2239090">
                Kwak, Dongho; Jensenius, Alexander Refsum; Danielsen, Anne; Scholz, Hanne &amp; Olsen, Petter Angell
            </span>(2023).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Music for cells? Rhythmic mechanical stimulations of cell cultures.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-DOCTORDISSERTAT">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/107913">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This dissertation investigates how acoustic parameters and musical elements can be generated and manipulated to induce beneficial mechanical stimulations and alterations in cell cultures. The research has been conducted as part of a life science convergence environment, and the theoretical framework and experimental method in this dissertation are derived from three different disciplines: biology, music technology, and physics. The theoretical discussions centre around biological cell sensing mechanisms, the physical limitations and potentiality of audible sound to be used as mechano-acoustic cellular stimuli, and the concept of rhythm from biology and music technology perspectives. The methods include audio signal processing, physical characterisation of the experimental setup, various biological assays, and microscopic image feature extraction. Such a radically interdisciplinary approach culminated in laboratory experiments involving sound vibrations of human cell cultures using a vertical vibration system controlled by synthesized audio signals. The experimental variables included: No Vibration (NV, control), Continuous Vibration (CV), Regular Pulse (RP), and Variable Pulse (VP). The CV condition was categorised as non-rhythmic in this dissertation, while RP and VP were categorised as rhythmic conditions. The results demonstrate alterations in F-actin filament structure (length, thickness, angle) and the tendency of increased levels of cells in the G1-phase cell cycle in vibrated cell cultures. The “effect” was more apparent under the non-rhythmic (CV) condition than rhythmic conditions (RP and VP). The results also show that F-actin filament structural properties are negatively correlated (r &lt; -.9), and the number of cells in the G1-phase cycle is positively correlated (r &gt; .9) in relation to the magnitudes of mechanical parameters (RMS acceleration and shear stress). 
Nevertheless, the biological mechanism(s) responsible for the observed effects has yet to be characterised. The results from this dissertation inspire further studies on the effects of rhythmic mechanoacoustic stimulation on cellular biological rhythms (e.g., regulation of CLOCK, PER, and CRY genes).</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-2162703" class="vrtx-external-publication">
        <div id="vrtx-publication-2162703">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-2162703">
                Ruud, Kenneth; Heggland, Ingrid; Limstrand, Ingunn; Pettersen, Klas Henning; Bøe, Gunnar &amp; Storsul, Tanja
                    <a href="javascript:void(0);" title="Get all contributors" onclick="addContributor('https://api.cristin.no/v2/nvaresults/2162703/contributors', 'vrtx-publication-contributors-2162703')">
                    [Show all&nbsp;16&nbsp;contributors for this article]</a>
            </span>(2022).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Investering i infrastrukturer for FAIR forskningsdata og særlig relevante forvaltningsdata for forskning.
Anbefalinger fra datainfrastrukturutvalget mai 2022.                </span>
                <span class="vrtx-publisher publisher-other publisher-category-REPORT">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=E6866DAE-7883-4DA6-A59F-86C3300C5816">Norges forskningsråd</a>.
                </span>
                <span class="vrtx-issn">ISBN 9788212039339.</span>
                            
            
            <a href="https://hdl.handle.net/11250/5004222">Full text in Research Archive</a>
        </div>
    </li>
      <li id="vrtx-external-publication-1878439" class="vrtx-external-publication">
        <div id="vrtx-publication-1878439">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1878439">
                Zelechowska, Agata; Jensenius, Alexander Refsum; Laeng, Bruno &amp; Vuoskoski, Jonna Katariina
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Irresistible Movement: The Role of Musical Sound, Individual Differences and Listening Context in Movement Responses to Music.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-DOCTORDISSERTAT">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4389638">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">This dissertation examines the phenomenon of spontaneous movement responses to music. It attempts to grasp and illustrate the complexity of this behaviour by viewing it from different perspectives. Unlike most previous studies on music and body movement, this dissertation places the focus on barely visible manifestations of movement, such as those that may occur when listening to music while standing still. The point of departure is a reflection on movement responses to music and why such responses are considered universal among humans. This is followed by a discussion on the different approaches to studying how music ‘inspires’ movement, and an overview of the different factors that can potentially contribute to the emergence of movement responses to music. The first goal of the empirical research was to verify the common conception that ‘music makes us move’ and examine whether such movement responses can be involuntary. Three of the five included papers show that music can, indeed, make people move, even when they try to stand as still as possible. The second goal is to explore different factors that contribute to movement responses to music. Throughout the included papers, several topics are examined, including rhythmic complexity, tempo, music genres, individual differences and playback systems. The theoretical chapters show how these topics fit into three broader components of the music experience: music, listener and context. Overall, the results suggest that several factors seem to increase movement responses to music: the clear underlying pulse in the sound stimuli, the rhythmic complexity, a tempo of around 120 beats per minute, listening on headphones rather than speakers and high empathy of the listener. All in all, this dissertation contributes to bridging several gaps in the literature on music-related body movement. 
It also broadens the perspective on why, how and when music moves us.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1844385" class="vrtx-external-publication">
        <div id="vrtx-publication-1844385">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1844385">
                Silva, Sembapperumaarachchige &amp; Jensenius, Alexander Refsum
            </span>(2020).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        Sonification of Standstill Recordings.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-THESISMASTER">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/10852/80753">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The goal of this thesis was to develop and experiment with a set of sonification tools to explore participant data from standstill competitions. Using data from the 2012 Norwegian Championship of Standstill, three sonification models were developed using the Max/MSP programming environment. The first section of the thesis introduces sonification as a method for data exploration and discusses different sonification strategies. Momentary Displacement of the position was derived from the position data and parameter mapping methods were used to map the data features with sound parameters. The displacement of position in the XY plane or the position changes along the Z-Axis can be mapped either to white-noise or to a sine tone. The data variables control the amplitude and a filter cut-off frequency of the white noise or the amplitude and frequency of the sine tone. Moreover, using sound spatialization together with sonification was explored by mapping position coordinates to spatial parameters of a sine tone. A “falling” effect of the standing posture was identified through the sonification. Also audible were the participants’ breathing patterns and postural adjustments. All in all, the implemented sonification methods can be effectively used to get an overview of the standstill dataset.</p>
                </span>
        </div>
    </li>
      <li id="vrtx-external-publication-1598159" class="vrtx-external-publication">
        <div id="vrtx-publication-1598159">
            <span class="vrtx-contributors" id="vrtx-publication-contributors-1598159">
                Wallace, Benedikte &amp; Jensenius, Alexander Refsum
            </span>(2018).
                <span class="vrtx-title title-other">
                    <!-- For readability. Too many underlined characters when both present -->
                        SoundTracer: A brief project summary.
                </span>
                <span class="vrtx-publisher publisher-other publisher-category-REPORT">
                        <a class="vrtx-publisher" href="https://kanalregister.hkdir.no/publiseringskanaler/info/forlag?pid=A04A15C5-1B21-46BB-BC1D-AA4EF9B6DEB9">Universitetet i Oslo</a>.
                </span>
                            
            
            <a href="https://hdl.handle.net/11250/4138494">Full text in Research Archive</a>
                <span class="vrtx-publication-summary">
                            <a href="#" aria-expanded="false" aria-label="Show summary" class="vrtx-publication-summary">Show summary</a>
                            <p class="vrtx-publication-summary" style="display:none">The SoundTracer project is a collaborative effort between the Norwegian National Library and the Department of Musicology at the University of Oslo. The goal of the project is to use the audio recordings collected by the folk music department of the National Library to create a query-by-gesture application that is able to search and retrieve specific pieces of music using motion detected by a mobile device. This brief summary will outline the objectives, the methods that are used, as well as the results of the SoundTracer project.</p>
                </span>
        </div>
    </li>
    </ul>
      <p class="vrtx-more-external-publications"><a href="https://nva.sikt.no/research-profile/1328">View all works in NVA</a></p>
    </div>

      </div>
    </div>



      
            
      
        <div class="vrtx-date-info">
        <span class="published-date-label">Published</span>
        <span class="published-date">May 19, 2006 12:00 AM </span>
        
        - <span class="last-modified-date">Last modified</span>
        <span class="last-modified-date">Feb. 27, 2026 5:38 PM</span>
        
        </div>
      
          </div>
        </div>
        <div id="vrtx-additional-content">
          
      
        
        
        
          
            
            
            
              
                
  <div class="vrtx-feed">
      <a class="feed-title" href="https://www.arj.no/">ARJ (English)</a>






        <ul class="items">
            <li class="item-1">








                <div class="vrtx-list-item-content">

      <a class="item-title" href="https://www.arj.no/2026/04/03/no-bullets-please/">No bullets, please!</a>




















      <span class="published-date">
Apr. 3, 2026 2:00 AM      </span>





























                </div>
            </li>
            <li class="item-2">








                <div class="vrtx-list-item-content">

      <a class="item-title" href="https://www.arj.no/2026/04/02/building-a-local-llm-driven-chat-for-my-web-page/">Building a local LLM-driven chat for my web page</a>




















      <span class="published-date">
Apr. 2, 2026 2:00 AM      </span>





























                </div>
            </li>
            <li class="item-3 item-last">








                <div class="vrtx-list-item-content">

      <a class="item-title" href="https://www.arj.no/2026/03/29/mishmash-bubble-animations/">More MishMash bubble animations</a>




















      <span class="published-date">
Mar. 29, 2026 1:00 AM      </span>





























                </div>
            </li>
        </ul>



  </div>


              
            
          
        
      
          

<div class="vrtx-projects vrtx-frontpage-box">
  <h2>Projects</h2>

  <div class="vrtx-box-content">
  <ul class="only-links">
      <li><a href="/english/research/strategic-research-areas/life-science/research/convergence-environments/autorhythm/index.html">AUTORHYTHM – the role of autophagy in healthy aging</a></li>
      <li><a href="/english/research/strategic-research-areas/life-science/research/convergence-environments/abino/index.html">Artificial Biomimetic systems – the Niche of Islet Organoids</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/abino/index.html">Artificial Biomimetic systems – the Niche of Islet Organoids</a></li>
      <li><a href="/ritmo/english/projects/Bodies-in-Concert/index.html">Bodies in Concert</a></li>
      <li><a href="/ritmo/english/projects/ambient/index.html">Bodily Entrainment to Audiovisual Rhythms (AMBIENT)</a></li>
      <li><a href="/english/research/strategic-research-areas/life-science/research/convergence-environments/itom/index.html">Integrated technologies for tracking organoid morphogenesis (ITOM)</a></li>
      <li><a href="/ritmo/english/projects/ZRob/index.html">Interactive Robotic System (ZRob)</a></li>
      <li><a href="/ritmo/english/projects/mirage/index.html">MIRAGE - A Comprehensive AI-Based System for Advanced Music Analysis</a></li>
      <li><a href="/ritmo/english/projects/modeling-and-robots/index.html">Modeling and robots</a></li>
      <li><a href="/ritmo/english/projects/musical-hci/index.html">Musical human-computer interaction</a></li>
      <li><a href="http://www.nime.org">NIME</a></li>
      <li><a href="/ritmo/english/projects/nordicsmc/index.html">Nordic Sound and Music Computing network (NordicSMC)</a></li>
      <li><a href="/ritmo/english/projects/self-playing-guitars/index.html">Self-playing Guitars</a></li>
      <li><a href="https://www.uv.uio.no/ils/english/research/projects/evir/index.html">e-Infrastructure for video research (eVIR)</a></li>
  </ul>

        <div id="vrtx-related-projects-completed" class="vrtx-related-projects-completed">
          <h3>Completed projects</h3>
          
          
          
  <ul class="only-links">
      <li><a href="/ritmo/english/projects/completed-projects/aaai/index.html">AAAI - Acoustically Active Augmented Instruments</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/autophagic-symphony/index.html">Autophagic Symphony</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/cima/index.html">Computer-based Infant Movement Assessment</a></li>
      <li><a href="/ritmo/english/projects/dr-squiggles/index.html">Dr. Squiggles</a></li>
      <li><a href="/ritmo/english/research/labs/fourms/research/projects/congas/index.html">Gesture Controlled Audio Systems (2004-2007)</a></li>
      <li><a href="https://www.sv.uio.no/psi/english/research/projects/human-time-data/index.html">Human Time Data project</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/intimal/index.html">INTIMAL: Interfaces for Relational Listening – Body, Memory, Migration, Telematics</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/micro/index.html">MICRO - Human Bodily Micromotion in Music Perception and Interaction </a></li>
      <li><a href="https://www.hf.uio.no/imv/english/research/projects/musicalgestures/index.html">Musical Gestures (2004-2007)</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/database/index.html">Musical Motion Database</a></li>
      <li><a href="/ritmo/english/research/labs/fourms/research/projects/database/index.html">Musical Motion Database</a></li>
      <li><a href="https://www.mn.uio.no/ifi/english/research/projects/semra/index.html">Semra: Sensing Music-Related Actions</a></li>
      <li><a href="https://www.hf.uio.no/imv/english/research/projects/sma/index.html">Sensing Music-related Actions</a></li>
      <li><a href="/ritmo/english/research/labs/fourms/research/projects/sid/index.html">Sonic Interaction Design (2008-2011)</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/soundtracer/index.html">SoundTracer</a></li>
      <li><a href="https://www.hf.uio.no/imv/english/research/projects/salto/index.html">Student active learning in two-campus organization: SALTO</a></li>
      <li><a href="/ritmo/english/projects/completed-projects/sverm/index.html">Sverm</a></li>
      <li><a href="/ritmo/english/projects/synchronized-robotics/index.html">Synchronized Robotics</a></li>
      <li><a href="/ritmo/english/research/labs/fourms/research/projects/instruments/transformation/index_old.html">Transformation (2009-2011)</a></li>
  </ul>
        </div>
        <span id="vrtx-related-projects-completed-toggle-wrapper" style="display: none">
          <a id="vrtx-related-projects-completed-toggle" href="javascript:void(0);">Show completed projects</a>
        </span>
  </div>
</div>



          <div class="vrtx-groups vrtx-frontpage-box">
  <h2>Research groups</h2>
    
  <div class="vrtx-box-content">
    <ul class="only-links">
          <li><a href="https://www.ub.uio.no/english/libraries/dsc/c2ho-creative-computing-hub-oslo/index.html">Creative Computing Hub Oslo (C2HO)</a></li>
          <li><a href="https://www.hf.uio.no/iln/english/research/networks/digital-humanities/index.html">Digital Humanities</a></li>
          <li><a href="http://www.uio.no/ritmo">RITMO</a></li>
          <li><a href="https://www.hf.uio.no/iln/english/research/groups/super-linguistics/index.html">Super Linguistics</a></li>
    </ul>
  </div>
</div>

          
      
      
        <div id="vrtx-related-content">
          <figure><figure><p><a href="/ritmo/english/research/labs/fourms/research/Datasets/oslo-standstill-database"><img src="/ritmo/english/people/management/alexanje/multi-nm-person2-crop_240.jpg" alt="Oslo Standstill Database" width="195" height="180" loading="lazy"/></a></p><figcaption><a href="/ritmo/english/research/labs/fourms/research/Datasets/oslo-standstill-database">Oslo Standstill Database</a>. More than 600 standstill recordings.</figcaption></figure><figure><p><a href="/ritmo/english/research/labs/fourms/research/software/musicalgesturestoolbox"><img src="/ritmo/english/people/management/alexanje/mgt-matlab-promo_square-crop_250.png" alt="Musical Gestures Toolbox" width="181" height="180" loading="lazy"/></a></p><figcaption><a href="/ritmo/english/research/labs/fourms/research/software/musicalgesturestoolbox">Musical Gestures Toolbox</a>. Software for video visualization.</figcaption></figure><figure><p><a href="/ritmo/english/projects/musiclab"><img src="/ritmo/english/people/management/alexanje/musiclablogo-oransje-bedre.png" alt="MusicLab" width="207" height="180" loading="lazy"/></a></p><figcaption><a href="/ritmo/english/projects/musiclab">MusicLab</a>. 
An Open Research innovation project.</figcaption></figure><p><strong>Alexander@</strong></p><ul><li><a href="http://uio.academia.edu/AlexanderRefsumJensenius">Academia.edu</a></li><li><a href="http://www.cristin.no/as/WebObjects/cristin.woa/5/wa/fres?erNordisk=1&amp;erUkjent=1&amp;visParametre=1&amp;erInternasjonalt=1&amp;erNasjonalt=1&amp;fornavn=alexander&amp;la=en&amp;action=sok&amp;bs=50&amp;erNorsk=1&amp;etternavn=jensenius&amp;sort=ukategorier">Cristin</a></li><li><a href="https://www.duo.uio.no/discover?scope=10852/87&amp;query=alexander%20jensenius%20refsum&amp;fq=author_filter%3Ajensenius%2C\+alexander\+refsum\|\|\|Jensenius%2C\+Alexander\+Refsum">DUO</a></li><li><a href="http://www.flickr.com/photos/alexarje/">Flickr</a></li><li><a href="https://github.com/alexarje">Github</a></li><li><a href="http://scholar.google.no/citations?user=nZS6vVMAAAAJ">Google Scholar</a></li><li><a href="http://www.linkedin.com/in/alexarje">LinkedIn</a></li><li><a href="https://mastodon.online/@arjensenius">Mastodon</a></li><li><a href="https://academic.microsoft.com/#/detail/29743983">MS Academic Search</a></li><li><a href="https://orcid.org/0000-0001-6171-8743">Orcid</a></li><li><a href="https://publons.com/researcher/1523071/alexander-refsum-jensenius/">Publons</a></li><li><a href="http://twitter.com/alexarje">Twitter</a></li><li><a href="https://vimeo.com/alexarje">Vimeo</a></li><li><a href="http://www.youtube.com/user/alexarje">YouTube</a></li></ul></figure>
        </div>
      
        </div>
      </div>
       <!--stopindex-->
     </main>
   </div>

    <!-- Page footer start -->
    <footer id="footer-wrapper" class="grid-container faculty-institute-footer">
       <div id="footers" class="row">
            
              <div class="footer-content-wrapper">
                
                
                  <div class="footer-title">
                    <a href="/ritmo/english">RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion</a>
                  </div>
                
                <div class="footer-content">
                  
                    
                      
                        
                          <div>
   <h2>Contact information</h2>
   <p><a href="/ritmo/english/about/">Contact us</a><br>
   <a href="/english/about/getting-around/areas/gaustad/ga09/">Find us</a></p>
</div>
<div>
   <h2>About the website</h2>
   <p><a href="/english/about/regulations/privacy-declarations/privacy-policy-web.html">Cookies</a><br>
   <a href="https://uustatus.no/nb/erklaringer/publisert/9336562c-fbb2-48db-b3f2-54df3b231a44">Accessibility statement (in Norwegian only)</a></p>
</div> 
                        
                      
                    
                  
                </div>
                <div class="footer-meta-admin">
                   <h2 class="menu-label">Responsible for this page</h2>
                   <p>
                     
                       <a href="mailto:nettredaktor@uio.no">Nettredaktør</a>
                     
                   </p>
                   




    <div class="vrtx-login-manage-component">
      <a href="/ritmo/english/people/management/alexanje/index.html?authTarget"
         class="vrtx-login-manage-link"
         rel="nofollow">
        Log in
      </a>
    </div>



                </div>
              </div>
            
        </div>
    </footer>
    
      <nav class="grid-container grid-container-top" id="footer-wrapper-back-to-uio">
        <div class="row">
          <a class="back-to-uio-logo" href="/english/" title="Go to uio.no"></a>
        </div>
      </nav>
    

      
         
      
      

<!--98159e9b6febf557--><script>
(function () {
  // Baidu "link submission" beacon: injects push.js so Baidu's crawler is
  // pinged about this URL.
  //
  // NOTE(review): this beacon — together with the gambling-site keywords fused
  // into the page <title> — looks like injected SEO spam rather than official
  // site code; confirm it belongs here at all before keeping it.
  //
  // Fix: the original sniffed window.location.protocol and loaded a plain-HTTP
  // copy (http://push.zhanzhang.baidu.com/push.js) on HTTP pages. The HTTPS
  // endpoint works on every page, so always use it: no mixed-content risk if
  // the page moves to HTTPS, and no protocol-branching logic to maintain.
  var beacon = document.createElement('script');
  beacon.src = 'https://zz.bdstatic.com/linksubmit/push.js';
  beacon.async = true; // explicit: never block HTML parsing for an analytics ping

  // Insert before the first existing <script> so it loads regardless of where
  // in <body> this snippet was injected.
  var firstScript = document.getElementsByTagName('script')[0];
  firstScript.parentNode.insertBefore(beacon, firstScript);
})();
</script><!--/98159e9b6febf557--></body>
</html>
