@@ -3,19 +3,6 @@ const { exec } = require("node:child_process");
33const { promisify } = require ( "node:util" ) ;
44const asyncExec = promisify ( exec ) ;
55
6- /**
7- * Default API endpoint when not configured.
8- *
9- * @type {string }
10- */
11- const DEFAULT_ENDPOINT = "http://localhost:11434/v1" ;
12-
13- /**
14- * Default model name when not configured.
15- *
16- * @type {string }
17- */
18- const DEFAULT_MODEL = "gemma3" ;
196/**
207 * System prompt used for guiding the LLM's behavior.
218 *
@@ -157,8 +144,8 @@ async function generateCommitMessageWithLLM(endpoint, model, diff) {
157144 const firstChoice = Array . isArray ( choices ) ? choices [ 0 ] : undefined ;
158145 const content =
159146 firstChoice &&
160- firstChoice . message &&
161- typeof firstChoice . message . content === "string"
147+ firstChoice . message &&
148+ typeof firstChoice . message . content === "string"
162149 ? firstChoice . message . content
163150 : undefined ;
164151 if ( ! content ) {
@@ -220,8 +207,15 @@ async function generateAndApplyCommitMessage() {
220207 ) ;
221208 }
222209 const config = vscode . workspace . getConfiguration ( "llmCommitMsg" ) ;
223- const endpoint = config . get ( "endpoint" , DEFAULT_ENDPOINT ) ;
224- const model = config . get ( "model" , DEFAULT_MODEL ) ;
210+ /** @type {string | undefined } */
211+ const endpoint = config . get ( "endpoint" ) ;
212+ /** @type {string | undefined } */
213+ const model = config . get ( "model" ) ;
214+ if ( ! endpoint || ! model ) {
215+ throw new Error (
216+ "LLM Commit Message settings are not configured. Please set endpoint and model in Settings." ,
217+ ) ;
218+ }
225219 const message = await generateCommitMessageWithLLM ( endpoint , model , diff ) ;
226220 if ( repo ?. inputBox ) {
227221 repo . inputBox . value = message ;
@@ -273,7 +267,7 @@ function activate(context) {
273267 *
274268 * @returns {void } Nothing.
275269 */
276- function deactivate ( ) { }
270+ function deactivate ( ) { }
277271
278272module . exports = {
279273 activate,