Skip to content

Commit 47ad669

Browse files
authored
Merge pull request #58 from linuxserver/gpu-legacy-autolang
2 parents 20644ec + 1fb514d commit 47ad669

3 files changed

Lines changed: 18 additions & 12 deletions

File tree

README.md

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -94,10 +94,11 @@ services:
9494
- PUID=1000
9595
- PGID=1000
9696
- TZ=Etc/UTC
97-
- WHISPER_MODEL=tiny-int8
97+
- DEBUG= #optional
9898
- LOCAL_ONLY= #optional
9999
- WHISPER_BEAM=1 #optional
100-
- WHISPER_LANG=en #optional
100+
- WHISPER_LANG=auto #optional
101+
- WHISPER_MODEL=auto #optional
101102
volumes:
102103
- /path/to/faster-whisper/data:/config
103104
ports:
@@ -113,10 +114,11 @@ docker run -d \
113114
-e PUID=1000 \
114115
-e PGID=1000 \
115116
-e TZ=Etc/UTC \
116-
-e WHISPER_MODEL=tiny-int8 \
117+
-e DEBUG= `#optional` \
117118
-e LOCAL_ONLY= `#optional` \
118119
-e WHISPER_BEAM=1 `#optional` \
119-
-e WHISPER_LANG=en `#optional` \
120+
-e WHISPER_LANG=auto `#optional` \
121+
-e WHISPER_MODEL=auto `#optional` \
120122
-p 10300:10300 \
121123
-v /path/to/faster-whisper/data:/config \
122124
--restart unless-stopped \
@@ -133,10 +135,11 @@ Containers are configured using parameters passed at runtime (such as those abov
133135
| `-e PUID=1000` | for UserID - see below for explanation |
134136
| `-e PGID=1000` | for GroupID - see below for explanation |
135137
| `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). |
136-
| `-e WHISPER_MODEL=tiny-int8` | Whisper model that will be used for transcription. From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants |
138+
| `-e DEBUG=` | If set to `true`, or any other value, the container will output debug logs. |
137139
| `-e LOCAL_ONLY=` | If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models. |
138140
| `-e WHISPER_BEAM=1` | Number of candidates to consider simultaneously during transcription. |
139-
| `-e WHISPER_LANG=en` | Language that you will speak to the add-on. |
141+
| `-e WHISPER_LANG=auto` | Two-character code for the language that you will speak to the add-on (or `auto` to detect automatically). |
142+
| `-e WHISPER_MODEL=auto` | Whisper model that will be used for transcription. From [here](https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L11-L31). |
140143
| `-v /config` | Local path for Whisper config files. |
141144
| `--read-only=true` | Run container with a read-only filesystem. Please [read the docs](https://docs.linuxserver.io/misc/read-only/). |
142145

@@ -302,6 +305,7 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64
302305

303306
## Versions
304307

308+
* **26.01.26:** - Default to `auto` for model and language if not set.
305309
* **20.08.25:** - Add gpu-legacy branch for pre-Turing cards.
306310
* **10.08.25:** - Add support for local-only mode.
307311
* **05.12.24:** - Build from Github releases rather than Pypi.

readme-vars.yml

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,7 @@ development_versions_items:
1818
# container parameters
1919
common_param_env_vars_enabled: true
2020
param_container_name: "{{ project_name }}"
21-
param_usage_include_env: true
22-
param_env_vars:
23-
- {env_var: "WHISPER_MODEL", env_value: "tiny-int8", desc: "Whisper model that will be used for transcription. From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants", env_options: ["tiny-int8", "tiny", "base-int8", "base", "small-int8", "small", "medium-int8"]}
21+
param_usage_include_env: false
2422
param_usage_include_vols: true
2523
param_volumes:
2624
- {vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/data", desc: "Local path for Whisper config files."}
@@ -30,9 +28,11 @@ param_ports:
3028
# optional container parameters
3129
opt_param_usage_include_env: true
3230
opt_param_env_vars:
31+
- {env_var: "DEBUG", env_value: "", desc: "If set to `true`, or any other value, the container will output debug logs."}
3332
- {env_var: "LOCAL_ONLY", env_value: "", desc: "If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models."}
3433
- {env_var: "WHISPER_BEAM", env_value: "1", desc: "Number of candidates to consider simultaneously during transcription."}
35-
- {env_var: "WHISPER_LANG", env_value: "en", desc: "Language that you will speak to the add-on."}
34+
- {env_var: "WHISPER_LANG", env_value: "auto", desc: "Two-character code for the language that you will speak to the add-on (or `auto` to detect automatically)."}
35+
- {env_var: "WHISPER_MODEL", env_value: "auto", desc: "Whisper model that will be used for transcription. From [here](https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L11-L31)."}
3636
readonly_supported: true
3737
# application setup block
3838
app_setup_block_enabled: true
@@ -85,6 +85,7 @@ init_diagram: |
8585
"faster-whisper:gpu-legacy" <- Base Images
8686
# changelog
8787
changelogs:
88+
- {date: "26.01.26:", desc: "Default to `auto` for model and language if not set."}
8889
- {date: "20.08.25:", desc: "Add gpu-legacy branch for pre-Turing cards."}
8990
- {date: "10.08.25:", desc: "Add support for local-only mode."}
9091
- {date: "05.12.24:", desc: "Build from GitHub releases rather than PyPI."}

root/etc/s6-overlay/s6-rc.d/svc-whisper/run

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,10 @@ exec \
88
s6-setuidgid abc python3 -m wyoming_faster_whisper \
99
--uri 'tcp://0.0.0.0:10300' \
1010
--device cuda \
11-
--model "${WHISPER_MODEL}" \
11+
--model "${WHISPER_MODEL:-auto}" \
1212
--beam-size "${WHISPER_BEAM:-1}" \
13-
--language "${WHISPER_LANG:-en}" \
13+
--language "${WHISPER_LANG:-auto}" \
1414
--data-dir /config \
1515
--download-dir /config \
16+
${DEBUG:+--debug} \
1617
${LOCAL_ONLY:+--local-files-only}

0 commit comments

Comments
 (0)