From 2ec5e2d2a62b3dced5899ce5b33e9fc8b83177aa Mon Sep 17 00:00:00 2001 From: thespad Date: Mon, 26 Jan 2026 17:34:58 +0000 Subject: [PATCH] Set auto lang & model as default --- README.md | 16 ++++++++++------ readme-vars.yml | 9 +++++---- root/etc/s6-overlay/s6-rc.d/svc-whisper/run | 5 +++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 923580f..9aacb67 100644 --- a/README.md +++ b/README.md @@ -94,10 +94,11 @@ services: - PUID=1000 - PGID=1000 - TZ=Etc/UTC - - WHISPER_MODEL=tiny-int8 + - DEBUG= #optional - LOCAL_ONLY= #optional - WHISPER_BEAM=1 #optional - - WHISPER_LANG=en #optional + - WHISPER_LANG=auto #optional + - WHISPER_MODEL=auto #optional volumes: - /path/to/faster-whisper/data:/config ports: @@ -113,10 +114,11 @@ docker run -d \ -e PUID=1000 \ -e PGID=1000 \ -e TZ=Etc/UTC \ - -e WHISPER_MODEL=tiny-int8 \ + -e DEBUG= `#optional` \ -e LOCAL_ONLY= `#optional` \ -e WHISPER_BEAM=1 `#optional` \ - -e WHISPER_LANG=en `#optional` \ + -e WHISPER_LANG=auto `#optional` \ + -e WHISPER_MODEL=auto `#optional` \ -p 10300:10300 \ -v /path/to/faster-whisper/data:/config \ --restart unless-stopped \ @@ -133,10 +135,11 @@ Containers are configured using parameters passed at runtime (such as those abov | `-e PUID=1000` | for UserID - see below for explanation | | `-e PGID=1000` | for GroupID - see below for explanation | | `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). | -| `-e WHISPER_MODEL=tiny-int8` | Whisper model that will be used for transcription. From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants | +| `-e DEBUG=` | If set to `true`, or any other value, the container will output debug logs. | | `-e LOCAL_ONLY=` | If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models. 
| | `-e WHISPER_BEAM=1` | Number of candidates to consider simultaneously during transcription. | -| `-e WHISPER_LANG=en` | Language that you will speak to the add-on. | +| `-e WHISPER_LANG=auto` | Two character code for the language that you will speak to the add-on. | +| `-e WHISPER_MODEL=auto` | Whisper model that will be used for transcription. From [here](https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L11-L31). | | `-v /config` | Local path for Whisper config files. | | `--read-only=true` | Run container with a read-only filesystem. Please [read the docs](https://docs.linuxserver.io/misc/read-only/). | @@ -302,6 +305,7 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64 ## Versions +* **26.01.26:** - Default to `auto` for model and language if not set. * **20.08.25:** - Add gpu-legacy branch for pre-Turing cards. * **10.08.25:** - Add support for local-only mode. * **05.12.24:** - Build from Github releases rather than Pypi. diff --git a/readme-vars.yml b/readme-vars.yml index 3ffcc15..eb4648a 100644 --- a/readme-vars.yml +++ b/readme-vars.yml @@ -18,9 +18,7 @@ development_versions_items: # container parameters common_param_env_vars_enabled: true param_container_name: "{{ project_name }}" -param_usage_include_env: true -param_env_vars: - - {env_var: "WHISPER_MODEL", env_value: "tiny-int8", desc: "Whisper model that will be used for transcription. 
From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants", env_options: ["tiny-int8", "tiny", "base-int8", "base", "small-int8", "small", "medium-int8"]} +param_usage_include_env: false param_usage_include_vols: true param_volumes: - {vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/data", desc: "Local path for Whisper config files."} @@ -30,9 +28,10 @@ param_ports: # optional container parameters opt_param_usage_include_env: true opt_param_env_vars: + - {env_var: "DEBUG", env_value: "", desc: "If set to `true`, or any other value, the container will output debug logs."} - {env_var: "LOCAL_ONLY", env_value: "", desc: "If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models."} - {env_var: "WHISPER_BEAM", env_value: "1", desc: "Number of candidates to consider simultaneously during transcription."} - - {env_var: "WHISPER_LANG", env_value: "en", desc: "Language that you will speak to the add-on."} + - {env_var: "WHISPER_LANG", env_value: "auto", desc: "Two character code for the language that you will speak to the add-on."} + - {env_var: "WHISPER_MODEL", env_value: "auto", desc: "Whisper model that will be used for transcription. From [here](https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L11-L31)."} readonly_supported: true # application setup block app_setup_block_enabled: true @@ -85,6 +85,7 @@ init_diagram: | "faster-whisper:gpu" <- Base Images # changelog changelogs: + - {date: "26.01.26:", desc: "Default to `auto` for model and language if not set."} - {date: "20.08.25:", desc: "Add gpu-legacy branch for pre-Turing cards."} - {date: "10.08.25:", desc: "Add support for local-only mode."} - {date: "05.12.24:", desc: "Build from Github releases rather than Pypi."} diff --git a/root/etc/s6-overlay/s6-rc.d/svc-whisper/run b/root/etc/s6-overlay/s6-rc.d/svc-whisper/run index 458a192..a24c70f 100755 --- a/root/etc/s6-overlay/s6-rc.d/svc-whisper/run +++ b/root/etc/s6-overlay/s6-rc.d/svc-whisper/run @@ -8,9 +8,10 @@ exec \ s6-setuidgid abc python3 -m wyoming_faster_whisper \ --uri 'tcp://0.0.0.0:10300' \ --device cuda \ - --model "${WHISPER_MODEL}" \ + --model "${WHISPER_MODEL:-auto}" \ --beam-size "${WHISPER_BEAM:-1}" \ - --language "${WHISPER_LANG:-en}" \ + --language "${WHISPER_LANG:-auto}" \ --data-dir /config \ --download-dir /config \ + ${DEBUG:+--debug} \ ${LOCAL_ONLY:+--local-files-only}