diff --git a/.editorconfig b/.editorconfig
old mode 100755
new mode 100644
diff --git a/LICENSE b/LICENSE
old mode 100755
new mode 100644
diff --git a/README.md b/README.md
index 1ef4cd5..7601702 100644
--- a/README.md
+++ b/README.md
@@ -95,6 +95,7 @@ services:
       - PGID=1000
       - TZ=Etc/UTC
       - WHISPER_MODEL=tiny-int8
+      - LOCAL_ONLY= #optional
       - WHISPER_BEAM=1 #optional
       - WHISPER_LANG=en #optional
     volumes:
@@ -113,6 +114,7 @@ docker run -d \
   -e PGID=1000 \
   -e TZ=Etc/UTC \
   -e WHISPER_MODEL=tiny-int8 \
+  -e LOCAL_ONLY= `#optional` \
   -e WHISPER_BEAM=1 `#optional` \
   -e WHISPER_LANG=en `#optional` \
   -p 10300:10300 \
@@ -132,6 +134,7 @@ Containers are configured using parameters passed at runtime (such as those abov
 | `-e PGID=1000` | for GroupID - see below for explanation |
 | `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). |
 | `-e WHISPER_MODEL=tiny-int8` | Whisper model that will be used for transcription. From [here](https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L12-L31), all with `-int8` compressed variants |
+| `-e LOCAL_ONLY=` | If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models. |
 | `-e WHISPER_BEAM=1` | Number of candidates to consider simultaneously during transcription. |
 | `-e WHISPER_LANG=en` | Language that you will speak to the add-on. |
 | `-v /config` | Local path for Whisper config files. |
@@ -299,6 +302,7 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64
 
 ## Versions
 
+* **10.08.25:** - Add support for local-only mode.
 * **30.12.24:** - Add arm64 support for non-GPU builds.
 * **05.12.24:** - Build from Github releases rather than Pypi.
 * **18.07.24:** - Rebase to Ubuntu Noble.
diff --git a/readme-vars.yml b/readme-vars.yml
index 6663159..06ecf98 100644
--- a/readme-vars.yml
+++ b/readme-vars.yml
@@ -16,6 +16,7 @@ development_versions: true
 development_versions_items:
   - {tag: "latest", desc: "Stable releases"}
   - {tag: "gpu", desc: "Releases with Nvidia GPU support (amd64 only)"}
+  - {tag: "gpu-legacy", desc: "Legacy releases with Nvidia GPU support for pre-Turing cards (amd64 only)"}
 # container parameters
 common_param_env_vars_enabled: true
 param_container_name: "{{ project_name }}"
@@ -31,6 +32,7 @@ param_ports:
 # optional container parameters
 opt_param_usage_include_env: true
 opt_param_env_vars:
+  - {env_var: "LOCAL_ONLY", env_value: "", desc: "If set to `true`, or any other value, the container will not attempt to download models from HuggingFace and will only use locally-provided models."}
   - {env_var: "WHISPER_BEAM", env_value: "1", desc: "Number of candidates to consider simultaneously during transcription."}
   - {env_var: "WHISPER_LANG", env_value: "en", desc: "Language that you will speak to the add-on."}
 readonly_supported: true
@@ -85,6 +87,8 @@ init_diagram: |
   "faster-whisper:latest" <- Base Images
 # changelog
 changelogs:
+  - {date: "20.08.25:", desc: "Add gpu-legacy branch for pre-Turing cards."}
+  - {date: "10.08.25:", desc: "Add support for local-only mode."}
   - {date: "30.12.24:", desc: "Add arm64 support for non-GPU builds."}
   - {date: "05.12.24:", desc: "Build from Github releases rather than Pypi."}
   - {date: "18.07.24:", desc: "Rebase to Ubuntu Noble."}
diff --git a/root/etc/s6-overlay/s6-rc.d/svc-whisper/run b/root/etc/s6-overlay/s6-rc.d/svc-whisper/run
index 332e198..4147edd 100755
--- a/root/etc/s6-overlay/s6-rc.d/svc-whisper/run
+++ b/root/etc/s6-overlay/s6-rc.d/svc-whisper/run
@@ -9,4 +9,5 @@ exec \
     --beam-size "${WHISPER_BEAM:-1}" \
     --language "${WHISPER_LANG:-en}" \
     --data-dir /config \
-    --download-dir /config
+    --download-dir /config \
+    ${LOCAL_ONLY:+--local-files-only}
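The `run` script change above relies on the `${VAR:+word}` form of POSIX shell parameter expansion: `${LOCAL_ONLY:+--local-files-only}` expands to `--local-files-only` only when `LOCAL_ONLY` is set to a non-empty value, and to nothing at all otherwise. A minimal sketch of the behavior, using `echo` as a stand-in for the container's real entrypoint (which the hunk's context lines do not show):

```sh
#!/bin/sh
# ${LOCAL_ONLY:+--local-files-only} yields the flag only when LOCAL_ONLY
# is set AND non-empty; unset or empty values contribute nothing.

LOCAL_ONLY=true
echo entrypoint ${LOCAL_ONLY:+--local-files-only}
# -> entrypoint --local-files-only

LOCAL_ONLY=
echo entrypoint ${LOCAL_ONLY:+--local-files-only}
# -> entrypoint    (an empty value behaves like unset with :+)

unset LOCAL_ONLY
echo entrypoint ${LOCAL_ONLY:+--local-files-only}
# -> entrypoint
```

Leaving the expansion unquoted is deliberate: when `LOCAL_ONLY` is unset or empty the word vanishes from the argument list entirely, whereas a quoted `"${LOCAL_ONLY:+--local-files-only}"` would pass an empty string to the server as a spurious argument.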