Skip to content

Commit 85c6b1f

Browse files
committed
Add transformers support
1 parent c066549 commit 85c6b1f

3 files changed

Lines changed: 10 additions & 3 deletions

File tree

README.md

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ services:
9696
- TZ=Etc/UTC
9797
- WHISPER_MODEL=tiny-int8
9898
- LOCAL_ONLY= #optional
99+
- USE_TRANSFORMERS= #optional
99100
- WHISPER_BEAM=1 #optional
100101
- WHISPER_LANG=en #optional
101102
volumes:
@@ -115,6 +116,7 @@ docker run -d \
115116
-e TZ=Etc/UTC \
116117
-e WHISPER_MODEL=tiny-int8 \
117118
-e LOCAL_ONLY= `#optional` \
119+
-e USE_TRANSFORMERS= `#optional` \
118120
-e WHISPER_BEAM=1 `#optional` \
119121
-e WHISPER_LANG=en `#optional` \
120122
-p 10300:10300 \
@@ -133,8 +135,9 @@ Containers are configured using parameters passed at runtime (such as those abov
133135
| `-e PUID=1000` | for UserID - see below for explanation |
134136
| `-e PGID=1000` | for GroupID - see below for explanation |
135137
| `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). |
136-
| `-e WHISPER_MODEL=tiny-int8` | Whisper model that will be used for transcription. From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants |
138+
| `-e WHISPER_MODEL=tiny-int8` | Whisper model that will be used for transcription. See [here](https://github.com/home-assistant/addons/blob/master/whisper/config.yaml#L25) for the available models; smaller models also have `-int8` compressed variants |
137139
| `-e LOCAL_ONLY=` | If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models. |
140+
| `-e USE_TRANSFORMERS=` | If set to `true`, or any other value, the container will interpret `WHISPER_MODEL` as a HuggingFace transformers model ID. |
138141
| `-e WHISPER_BEAM=1` | Number of candidates to consider simultaneously during transcription. |
139142
| `-e WHISPER_LANG=en` | Language that you will speak to the add-on. |
140143
| `-v /config` | Local path for Whisper config files. |
@@ -302,6 +305,7 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64
302305

303306
## Versions
304307

308+
* **07.09.25:** - Add support for transformers models.
305309
* **20.08.25:** - Add gpu-legacy branch for pre-Turing cards.
306310
* **10.08.25:** - Add support for local-only mode.
307311
* **05.12.24:** - Build from Github releases rather than Pypi.

readme-vars.yml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ common_param_env_vars_enabled: true
2020
param_container_name: "{{ project_name }}"
2121
param_usage_include_env: true
2222
param_env_vars:
23-
- {env_var: "WHISPER_MODEL", env_value: "tiny-int8", desc: "Whisper model that will be used for transcription. From `tiny`, `base`, `small` and `medium`, all with `-int8` compressed variants", env_options: ["tiny-int8", "tiny", "base-int8", "base", "small-int8", "small", "medium-int8"]}
23+
- {env_var: "WHISPER_MODEL", env_value: "tiny-int8", desc: "Whisper model that will be used for transcription. See [here](https://github.com/home-assistant/addons/blob/master/whisper/config.yaml#L25) for the available models; smaller models also have `-int8` compressed variants"}
2424
param_usage_include_vols: true
2525
param_volumes:
2626
- {vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/data", desc: "Local path for Whisper config files."}
@@ -31,6 +31,7 @@ param_ports:
3131
opt_param_usage_include_env: true
3232
opt_param_env_vars:
3333
- {env_var: "LOCAL_ONLY", env_value: "", desc: "If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models."}
34+
- {env_var: "USE_TRANSFORMERS", env_value: "", desc: "If set to `true`, or any other value, the container will interpret `WHISPER_MODEL` as a HuggingFace transformers model ID."}
3435
- {env_var: "WHISPER_BEAM", env_value: "1", desc: "Number of candidates to consider simultaneously during transcription."}
3536
- {env_var: "WHISPER_LANG", env_value: "en", desc: "Language that you will speak to the add-on."}
3637
readonly_supported: true
@@ -85,6 +86,7 @@ init_diagram: |
8586
"faster-whisper:gpu" <- Base Images
8687
# changelog
8788
changelogs:
89+
- {date: "07.09.25:", desc: "Add support for transformers models."}
8890
- {date: "20.08.25:", desc: "Add gpu-legacy branch for pre-Turing cards."}
8991
- {date: "10.08.25:", desc: "Add support for local-only mode."}
9092
- {date: "05.12.24:", desc: "Build from Github releases rather than Pypi."}

root/etc/s6-overlay/s6-rc.d/svc-whisper/run

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,4 +13,5 @@ exec \
1313
--language "${WHISPER_LANG:-en}" \
1414
--data-dir /config \
1515
--download-dir /config \
16-
${LOCAL_ONLY:+--local-files-only}
16+
${LOCAL_ONLY:+--local-files-only} \
17+
${USE_TRANSFORMERS:+--use-transformers}

0 commit comments

Comments
 (0)