Audio Recording Action Button 🎤

After many attempts, I managed to add an audio action button that transcribes voice to text. I'm sharing it here.

It uses Google Chrome's free speech recognition API to transcribe your voice to text, shows the live transcription in the text box as you speak, and finally sends the message automatically by simulating an Enter key press. Here is the embed code; inject it into the footer section:

<script>
(function() {
  const MIC_CLASS = 'mic-button';

  // The function that creates and injects the microphone button
  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      // If it already exists, don't duplicate it
      if (container.querySelector(`button.${MIC_CLASS}`)) return;

      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px';
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition');
        }

        let finalTranscript = '';
        const rec = new webkitSpeechRecognition();
        rec.lang = 'es-ES';
        rec.interimResults = true;
        rec.maxAlternatives = 1;
        rec.start();

        rec.onresult = e => {
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();
          // Live-update the textarea
          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));
        };

        rec.onerror = err => console.error('SpeechRecognition Error:', err.error);

        rec.onend = () => {
          // Simulate Enter to send automatically
          ta.dispatchEvent(new KeyboardEvent('keydown', {
            bubbles: true, cancelable: true,
            key: 'Enter', code: 'Enter', which: 13, keyCode: 13
          }));
        };
      });

      container.appendChild(btn);
    });
  }

  // Initial injection
  injectMic();

  // Observe the main chat container so we can re-inject if it changes
  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic)
    .observe(target, { childList: true, subtree: true });
})();
</script>
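
For anyone curious what's actually doing the work: everything above is button plumbing around the browser's built-in speech engine. Here is a minimal sketch of just the core, assuming Chrome's vendor-prefixed webkitSpeechRecognition as in the script:

<script>
// Minimal core, no button or injection logic (a sketch, not the full widget)
const rec = new webkitSpeechRecognition();
rec.lang = 'es-ES';        // recognition language (any BCP 47 tag)
rec.interimResults = true; // emit partial results while you speak
rec.onresult = e => {
  // Concatenate everything recognized so far and log it live
  const text = Array.from(e.results).map(r => r[0].transcript).join('');
  console.log(text);
};
rec.onend = () => console.log('recognition stopped');
rec.start();               // the browser asks for mic permission on first use
</script>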

Try it and tell me how it goes. @nathaniel @admin_mike, is there any possibility of adding it natively?

~ all glory belongs to God

16 Likes

Wow, this is really cool – I’m assuming this would only work if the end user were using Chrome as their browser?

1 Like

Hello, very good observation. In that case, you can swap the Chrome API for Whisper or Deepgram; with that it will work in any browser.
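
If anyone wants to go that route, here is a rough sketch of the recording half. It assumes you stand up your own backend route (the /transcribe URL below is a placeholder) that forwards the audio to Whisper or Deepgram with your API key; never put the key itself in footer code:

<script>
// Sketch only: record mic audio, then post it to YOUR proxy endpoint for
// transcription. 'https://your-server.example.com/transcribe' is a placeholder.
async function recordAndTranscribe(ta) {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = e => chunks.push(e.data);
  const stopped = new Promise(resolve => { recorder.onstop = resolve; });

  recorder.start();
  await new Promise(r => setTimeout(r, 5000)); // fixed 5 s for the demo; wire this to a button in practice
  recorder.stop();
  await stopped;
  stream.getTracks().forEach(t => t.stop()); // release the microphone

  const form = new FormData();
  form.append('file', new Blob(chunks, { type: 'audio/webm' }), 'speech.webm');
  const res = await fetch('https://your-server.example.com/transcribe', { method: 'POST', body: form });
  const { text } = await res.json();

  // Drop the result into the chat textarea, same trick as the original script
  const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
  setter.call(ta, text);
  ta.dispatchEvent(new Event('input', { bubbles: true }));
}
</script>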

1 Like

@bruno12345 this is pretty cool. Thank you so much for sharing this.

Great work Bruno. I use a slightly different approach but yours is simpler… it does make my PAs more functional.

It looks super awesome! Yet I've tried to paste the code into the Footer section under the Settings tab, but it doesn't seem to work. I'm not an IT guy :slight_smile: Maybe I pasted it in the wrong area? Where should this code go?

I'm using my pickaxes embedded in a coaching platform. Maybe this script is only for studios? :thinking:

Spot on… This script area is only for Studios.

Bruno, Thank you so much! This works like a charm. I did convert mine to English. :slight_smile:

For anyone wanting to use this, you can follow these steps:

  1. Click on the studio where you want to apply the voice functionality.
  2. Click on Settings and scroll down to Footer.
  3. Paste in the code (you can use this one for English):
<script>
(function () {
  const MIC_CLASS = 'mic-button';

  // Function that creates and injects the microphone button
  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      // Avoid duplicating if the button already exists
      if (container.querySelector(`button.${MIC_CLASS}`)) return;

      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px';
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition.');
        }

        let finalTranscript = '';
        const rec = new webkitSpeechRecognition();
        rec.lang = 'en-US';            // main change: recognize English
        rec.interimResults = true;
        rec.maxAlternatives = 1;
        rec.start();

        rec.onresult = e => {
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();

          // Live‑update the textarea
          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));
        };

        rec.onerror = err => console.error('SpeechRecognition error:', err.error);

        rec.onend = () => {
          // Simulate Enter key to send automatically
          ta.dispatchEvent(new KeyboardEvent('keydown', {
            bubbles: true,
            cancelable: true,
            key: 'Enter',
            code: 'Enter',
            which: 13,
            keyCode: 13
          }));
        };
      });

      container.appendChild(btn);
    });
  }

  // Initial injection
  injectMic();

  // Observe the chat root so we can re‑inject if the DOM changes
  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic).observe(target, { childList: true, subtree: true });
})();
</script>
  4. It autosaves and is ready to use.
  5. Open one of the pickaxes in that studio and click the mic to start. You will need to grant permission the first time you use it.
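
If the mic doesn't appear after pasting, a quick sanity check is to open your browser's DevTools console on the chat page and run the two lines below. These selectors are simply what the script targets today; if Pickaxe updates its UI they may stop matching, and the script would need new selectors.

document.querySelector('div.flex.items-center.gap-x-2'); // the container the script injects into; null means the selector no longer matches
document.querySelector('button.mic-button');             // the injected button; null means injection never ran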
7 Likes

This is fabulous! Thank you!

1 Like

This is so awesome. I had Gemini amend the script to include a language dropdown so that your users can select the language they want to speak.

<script>
(function () {
  const MIC_CLASS = 'mic-button';
  const LANG_SELECT_CLASS = 'lang-select-dropdown';

  const AVAILABLE_LANGUAGES = {
    'en-US': 'English (US)',
    'en-GB': 'English (UK)',
    'es-ES': 'Español (España)',
    'es-MX': 'Español (México)',
    'fr-FR': 'Français (France)',
    'de-DE': 'Deutsch (Deutschland)',
    'it-IT': 'Italiano (Italia)',
    'ja-JP': '日本語 (日本)',
    'ko-KR': '한국어 (대한민국)',
    'pt-BR': 'Português (Brasil)',
    'ru-RU': 'Русский (Россия)',
    'zh-CN': '中文 (简体)', // Mandarin (Simplified, China)
    'zh-HK': '中文 (香港)', // Cantonese (Traditional, Hong Kong) - note: recognition for Cantonese can be tricky
    'zh-TW': '中文 (臺灣)', // Mandarin (Traditional, Taiwan)
    'hi-IN': 'हिन्दी (भारत)'  // Hindi (India)
    // Add more BCP 47 language tags: https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry
  };

  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      if (container.querySelector(`button.${MIC_CLASS}`) || container.querySelector(`select.${LANG_SELECT_CLASS}`)) {
        return;
      }

      // --- Create microphone button (first, as requested) ---
      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px'; // Original margin for the mic button
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      // --- Create language selector (second, as requested) ---
      const langSelect = document.createElement('select');
      langSelect.className = LANG_SELECT_CLASS;
      // Styling for the dropdown
      langSelect.style.marginLeft = '8px'; // Space between mic button and dropdown
      langSelect.style.padding = '6px 4px';
      langSelect.style.borderRadius = '6px';
      langSelect.style.border = '1px solid rgba(0,0,0,0.1)';
      langSelect.style.backgroundColor = 'rgba(0, 0, 0, 0.03)';
      langSelect.style.fontSize = '14px';
      langSelect.style.color = 'black'; // Set text color to black

      for (const [code, name] of Object.entries(AVAILABLE_LANGUAGES)) {
        const option = document.createElement('option');
        option.value = code;
        option.textContent = name;
        langSelect.appendChild(option);
      }

      const lastSelectedLang = localStorage.getItem('speechLang');
      if (lastSelectedLang && AVAILABLE_LANGUAGES[lastSelectedLang]) {
        langSelect.value = lastSelectedLang;
      } else {
        langSelect.value = 'en-US'; // Default to English (US) if nothing stored or invalid
      }

      langSelect.addEventListener('change', (e) => {
        localStorage.setItem('speechLang', e.target.value);
      });

      // --- Event listener for microphone button ---
      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition. Try Chrome or Edge.');
        }

        const selectedLang = langSelect.value; // Get language from the adjacent select

        let finalTranscript = '';
        const rec = new webkitSpeechRecognition();
        rec.lang = selectedLang;
        rec.interimResults = true;
        rec.maxAlternatives = 1;

        try {
          rec.start();
        } catch (e) {
          alert('Speech recognition could not be started. Ensure microphone permissions are granted and no other recognition is active.');
          console.error('Error starting speech recognition:', e);
          return;
        }

        btn.style.backgroundColor = 'rgba(255, 0, 0, 0.2)'; // Recording indicator

        rec.onresult = e => {
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();

          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));
        };

        rec.onerror = err => {
          console.error('SpeechRecognition error:', err.error, err.message);
          let alertMsg = `Speech recognition error: ${err.error}.`;
          if (err.error === 'no-speech') {
            alertMsg = 'No speech was detected. Please try again.';
          } else if (err.error === 'network') {
            alertMsg = 'A network error occurred. Please check your connection.';
          } else if (err.error === 'not-allowed' || err.error === 'service-not-allowed') {
            alertMsg = 'Microphone access was denied or the service is not allowed. Please check browser permissions.';
          } else if (err.error === 'aborted') {
            alertMsg = 'Speech recognition was aborted. This might happen if you click the mic again too quickly or due to an external factor.';
          } else if (err.error === 'language-not-supported') {
            alertMsg = `The selected language (${selectedLang}) is not supported by your browser's speech recognition engine.`;
          }
          alert(alertMsg);
          btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
        };

        rec.onend = () => {
          btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
          if (finalTranscript.trim().length > 0) {
            ta.dispatchEvent(new KeyboardEvent('keydown', {
              bubbles: true, cancelable: true, key: 'Enter', code: 'Enter', which: 13, keyCode: 13
            }));
          } else if (ta.value.trim() !== '' && finalTranscript.trim() === '') {
            // There was interim text but nothing finalized, so clear it.
            // If the user manually typed something, this logic might need adjustment;
            // for now we assume the textarea is mainly populated by speech.
            const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
            setter.call(ta, '');
            ta.dispatchEvent(new Event('input', { bubbles: true }));
          }
        };
      });

      // --- Append elements to the container in the new order ---
      // Mic button first, then language dropdown
      container.appendChild(btn);
      container.appendChild(langSelect);
    });
  }

  injectMic();

  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic).observe(target, { childList: true, subtree: true });
})();
</script>

2 Likes

I love this so much!! I would love to use it in my studio and have been playing around, but one issue: when I pause while I'm talking, it automatically stops.

Any successful workarounds in the script?

This is great and can be applied to many things, I think. Unfortunately, the system is not fully functional at this point: the voice recognition doesn't behave naturally and cuts you off while you are talking. This issue would frustrate my clients for sure, and they would say they wasted credits.

I will attempt to find a workaround though and hopefully we can get this awesome script working at 100%. Thanks for the share.

Improved versions of the default script:
Script with a 4-second pause before stopping:
:light_bulb: The timeout is in milliseconds, so if you want to change the pause (for example, to 5 or 6 seconds), just change 4000 to 5000 or 6000:

<script>
(function() {
  const MIC_CLASS = 'mic-button';

  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      if (container.querySelector(`button.${MIC_CLASS}`)) return;

      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px';
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition');
        }

        let finalTranscript = '';
        const rec = new webkitSpeechRecognition();
        rec.lang = 'es-ES'; // change to 'en-US' (or another BCP 47 tag) for your users' language
        rec.interimResults = true;
        rec.maxAlternatives = 1;
        rec.continuous = true;

        let pauseTimer = null;

        rec.start();

        rec.onresult = e => {
          clearTimeout(pauseTimer);
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();
          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));

          pauseTimer = setTimeout(() => {
            rec.stop();
          }, 4000);
        };

        rec.onerror = err => console.error('SpeechRecognition Error:', err.error);

        rec.onend = () => {
          clearTimeout(pauseTimer);
          ta.dispatchEvent(new KeyboardEvent('keydown', {
            bubbles: true, cancelable: true,
            key: 'Enter', code: 'Enter', which: 13, keyCode: 13
          }));
        };
      });

      container.appendChild(btn);
    });
  }

  injectMic();

  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic).observe(target, { childList: true, subtree: true });
})();
</script>

Still bothered by the waiting time?
Here is a version similar to WhatsApp's audio function: it keeps recording your voice and does not stop until you click the microphone button again:

<script>
(function() {
  const MIC_CLASS = 'mic-button';
  let recognition = null;
  let listening = false;

  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      if (container.querySelector(`button.${MIC_CLASS}`)) return;

      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px';
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition');
        }

        if (!recognition) {
          recognition = new webkitSpeechRecognition();
          recognition.lang = 'es-ES'; // change to 'en-US' (or another BCP 47 tag) for your users' language
          recognition.interimResults = true;
          recognition.maxAlternatives = 1;
          recognition.continuous = true;
        }

        if (listening) {
          recognition.stop();
          listening = false;
          btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
          return;
        }

        let finalTranscript = '';
        listening = true;
        btn.style.backgroundColor = 'rgba(255, 0, 0, 0.3)';
        recognition.start();

        recognition.onresult = e => {
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();
          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));
        };

        recognition.onerror = err => {
          console.error('SpeechRecognition Error:', err.error);
        };

        recognition.onend = () => {
          if (listening) {
            recognition.start();
          } else {
            btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
            ta.dispatchEvent(new KeyboardEvent('keydown', {
              bubbles: true, cancelable: true,
              key: 'Enter', code: 'Enter', which: 13, keyCode: 13
            }));
          }
        };
      });

      container.appendChild(btn);
    });
  }

  injectMic();

  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic).observe(target, { childList: true, subtree: true });
})();
</script>

Annoyed by the automatic message sending?
Here is a version that does not automatically send the message when the audio ends:

<script>
(function() {
  const MIC_CLASS = 'mic-button';
  let recognition = null;
  let listening = false;

  function injectMic() {
    document.querySelectorAll('div.flex.items-center.gap-x-2').forEach(container => {
      if (container.querySelector(`button.${MIC_CLASS}`)) return;

      const btn = document.createElement('button');
      btn.className = `${MIC_CLASS} outline-none w-8 h-8 flex items-center justify-center rounded-full duration-200 transition-colors ease-in-out`;
      btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
      btn.style.marginLeft = '4px';
      btn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24"
             class="h-6 w-6 shrink-0 duration-200 transition-colors ease-in-out"
             style="color: rgba(0, 0, 0, 0.5);">
          <path fill="currentColor"
                d="M12 14a3 3 0 0 0 3-3V6a3 3 0 0 0-6 0v5a3 3 0 0 0 3 3Zm5-3a5 5 0 0 1-10 0H5a7 7 0 0 0 14 0h-2ZM11 21h2v-2h-2v2Z"/>
        </svg>
      `;

      btn.addEventListener('click', () => {
        const ta = document.querySelector('textarea.resize-none');
        if (!ta || !('webkitSpeechRecognition' in window)) {
          return alert('Your browser does not support SpeechRecognition');
        }

        if (!recognition) {
          recognition = new webkitSpeechRecognition();
          recognition.lang = 'es-ES'; // change to 'en-US' (or another BCP 47 tag) for your users' language
          recognition.interimResults = true;
          recognition.maxAlternatives = 1;
          recognition.continuous = true;
        }

        if (listening) {
          recognition.stop();
          listening = false;
          btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
          return;
        }

        let finalTranscript = '';
        listening = true;
        btn.style.backgroundColor = 'rgba(255, 0, 0, 0.3)';
        recognition.start();

        recognition.onresult = e => {
          let interim = '';
          for (let i = e.resultIndex; i < e.results.length; i++) {
            const r = e.results[i];
            if (r.isFinal) finalTranscript += r[0].transcript + ' ';
            else interim += r[0].transcript;
          }
          const text = (finalTranscript + interim).trimEnd();
          const setter = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value').set;
          setter.call(ta, text);
          ta.dispatchEvent(new Event('input', { bubbles: true }));
        };

        recognition.onerror = err => {
          console.error('SpeechRecognition Error:', err.error);
        };

        recognition.onend = () => {
          btn.style.backgroundColor = 'rgba(0, 0, 0, 0.063)';
          if (listening) {
            recognition.start();
          }
        };
      });

      container.appendChild(btn);
    });
  }

  injectMic();

  const chatRoot = document.querySelector('div.fixed.flex.w-full.flex-col');
  const target = chatRoot || document.body;
  new MutationObserver(injectMic).observe(target, { childList: true, subtree: true });
})();
</script>

~ All the glory is to God.

2 Likes

Nice one @bruno12345 :clap:t3:

Will use this in my projects for sure