#!/bin/bash
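#
# DocsGPT interactive setup: asks how you want to run DocsGPT, writes the
# matching configuration to a .env file next to this script, then starts
# the stack with Docker Compose.
#
# Usage: ./setup.sh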
# Color codes
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
DEFAULT_FG='\033[39m'
RED='\033[0;31m'
NC='\033[0m'
BOLD='\033[1m'
# Base Compose file (relative to script location)
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd -P)"
COMPOSE_FILE="${SCRIPT_DIR}/deployment/docker-compose-hub.yaml"
COMPOSE_FILE_LOCAL="${SCRIPT_DIR}/deployment/docker-compose.yaml"
ENV_FILE="${SCRIPT_DIR}/.env"
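# Every setup path below rewrites $ENV_FILE from scratch ('>' on the first
# write, '>>' after), so settings from a previous run never leak through.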
# Animation function
animate_dino() {
tput civis # Hide cursor
local dino_lines=(
" ######### "
" ############# "
" ##################"
" ####################"
" ######################"
" ####################### ######"
" ############################### "
" ################################## "
" ################ ############ "
" ################## ########## "
" ##################### ######## "
" ###################### ###### ### "
" ############ ########## #### ## "
" ############# ######### ##### "
" ############## ######### "
" ############## ########## "
"############ ####### "
" ###### ###### #### "
" ################ "
" ################# "
)
# Static DocsGPT text
local static_text=(
" ____ ____ ____ _____ "
" | _ \\ ___ ___ ___ / ___| _ \\_ _|"
" | | | |/ _ \\ / __/ __| | _| |_) || | "
" | |_| | (_) | (__\\__ \\ |_| | __/ | | "
" |____/ \\___/ \\___|___/\\____|_| |_| "
" "
)
# Print static text
clear
for line in "${static_text[@]}"; do
echo "$line"
done
tput sc # Save cursor position
# Build-up animation
for i in "${!dino_lines[@]}"; do
tput rc
for ((j=0; j<=i; j++)); do
echo "${dino_lines[$j]}"
done
sleep 0.05
done
sleep 0.5
tput rc # Restore saved cursor position
tput ed # Clear from cursor to end of screen
tput cnorm # Show cursor again
}
# Check and start Docker function
check_and_start_docker() {
# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
echo "Docker is not running. Starting Docker..."
# Check the operating system
case "$(uname -s)" in
Darwin)
open -a Docker
;;
Linux)
sudo systemctl start docker
;;
*)
echo "Unsupported platform. Please start Docker manually."
exit 1
;;
esac
# Wait for Docker to be fully operational with animated dots
echo -n "Waiting for Docker to start"
while ! docker system info > /dev/null 2>&1; do
for i in {1..3}; do
echo -n "."
sleep 1
done
echo -ne "\rWaiting for Docker to start "
done
echo -e "\nDocker has started!"
fi
}
# Function to prompt the user for the main menu choice
prompt_main_menu() {
echo -e "\n${DEFAULT_FG}${BOLD}Welcome to DocsGPT Setup!${NC}"
echo -e "${DEFAULT_FG}How would you like to proceed?${NC}"
echo -e "${YELLOW}1) Use DocsGPT Public API Endpoint (simple and free, uses pre-built Docker images from Docker Hub for fastest setup)${NC}"
echo -e "${YELLOW}2) Serve Local (with Ollama)${NC}"
echo -e "${YELLOW}3) Connect Local Inference Engine${NC}"
echo -e "${YELLOW}4) Connect Cloud API Provider${NC}"
echo -e "${YELLOW}5) Advanced: Build images locally (for developers)${NC}"
echo
echo -e "${DEFAULT_FG}By default, DocsGPT uses pre-built images from Docker Hub for a fast, reliable, and consistent experience. This avoids local build errors and speeds up onboarding. Advanced users can choose to build images locally if needed.${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-5): ${NC}")" main_choice
}
# Function to prompt for Local Inference Engine options
prompt_local_inference_engine_options() {
clear
echo -e "\n${DEFAULT_FG}${BOLD}Connect Local Inference Engine${NC}"
echo -e "${DEFAULT_FG}Choose your local inference engine:${NC}"
echo -e "${YELLOW}1) LLaMa.cpp${NC}"
echo -e "${YELLOW}2) Ollama${NC}"
echo -e "${YELLOW}3) Text Generation Inference (TGI)${NC}"
echo -e "${YELLOW}4) SGLang${NC}"
echo -e "${YELLOW}5) vLLM${NC}"
echo -e "${YELLOW}6) Aphrodite${NC}"
echo -e "${YELLOW}7) FriendliAI${NC}"
echo -e "${YELLOW}8) LMDeploy${NC}"
echo -e "${YELLOW}b) Back to Main Menu${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-8, or b): ${NC}")" engine_choice
}
# Function to prompt for Cloud API Provider options
prompt_cloud_api_provider_options() {
clear
echo -e "\n${DEFAULT_FG}${BOLD}Connect Cloud API Provider${NC}"
echo -e "${DEFAULT_FG}Choose your Cloud API Provider:${NC}"
echo -e "${YELLOW}1) OpenAI${NC}"
echo -e "${YELLOW}2) Google (Vertex AI, Gemini)${NC}"
echo -e "${YELLOW}3) Anthropic (Claude)${NC}"
echo -e "${YELLOW}4) Groq${NC}"
echo -e "${YELLOW}5) HuggingFace Inference API${NC}"
echo -e "${YELLOW}6) Azure OpenAI${NC}"
echo -e "${YELLOW}7) Novita${NC}"
echo -e "${YELLOW}b) Back to Main Menu${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-7, or b): ${NC}")" provider_choice
}
# Function to prompt for Ollama CPU/GPU options
prompt_ollama_options() {
clear
echo -e "\n${DEFAULT_FG}${BOLD}Serve Local with Ollama${NC}"
echo -e "${DEFAULT_FG}Choose how to serve Ollama:${NC}"
echo -e "${YELLOW}1) CPU${NC}"
echo -e "${YELLOW}2) GPU${NC}"
echo -e "${YELLOW}b) Back to Main Menu${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-2, or b): ${NC}")" ollama_choice
}
# ========================
# Advanced Settings Functions
# ========================
# Vector Store configuration
configure_vector_store() {
echo -e "\n${DEFAULT_FG}${BOLD}Vector Store Configuration${NC}"
echo -e "${DEFAULT_FG}Choose your vector store:${NC}"
echo -e "${YELLOW}1) FAISS (default, local)${NC}"
echo -e "${YELLOW}2) Elasticsearch${NC}"
echo -e "${YELLOW}3) Qdrant${NC}"
echo -e "${YELLOW}4) Milvus${NC}"
echo -e "${YELLOW}5) LanceDB${NC}"
echo -e "${YELLOW}6) PGVector${NC}"
echo -e "${YELLOW}b) Back${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-6, or b): ${NC}")" vs_choice
case "$vs_choice" in
1)
echo "VECTOR_STORE=faiss" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to FAISS.${NC}"
;;
2)
echo "VECTOR_STORE=elasticsearch" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Elasticsearch URL (e.g. http://localhost:9200): ${NC}")" elastic_url
[ -n "$elastic_url" ] && echo "ELASTIC_URL=$elastic_url" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Elasticsearch Cloud ID (leave empty if using URL): ${NC}")" elastic_cloud_id
[ -n "$elastic_cloud_id" ] && echo "ELASTIC_CLOUD_ID=$elastic_cloud_id" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Elasticsearch username (leave empty if none): ${NC}")" elastic_user
[ -n "$elastic_user" ] && echo "ELASTIC_USERNAME=$elastic_user" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Elasticsearch password (leave empty if none): ${NC}")" elastic_pass
[ -n "$elastic_pass" ] && echo "ELASTIC_PASSWORD=$elastic_pass" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Elasticsearch index name (default: docsgpt): ${NC}")" elastic_index
echo "ELASTIC_INDEX=${elastic_index:-docsgpt}" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to Elasticsearch.${NC}"
;;
3)
echo "VECTOR_STORE=qdrant" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Qdrant URL (e.g. http://localhost:6333): ${NC}")" qdrant_url
[ -n "$qdrant_url" ] && echo "QDRANT_URL=$qdrant_url" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Qdrant API key (leave empty if none): ${NC}")" qdrant_key
[ -n "$qdrant_key" ] && echo "QDRANT_API_KEY=$qdrant_key" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Qdrant collection name (default: docsgpt): ${NC}")" qdrant_collection
echo "QDRANT_COLLECTION_NAME=${qdrant_collection:-docsgpt}" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to Qdrant.${NC}"
;;
4)
echo "VECTOR_STORE=milvus" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Milvus URI (default: ./milvus_local.db): ${NC}")" milvus_uri
echo "MILVUS_URI=${milvus_uri:-./milvus_local.db}" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Milvus token (leave empty if none): ${NC}")" milvus_token
[ -n "$milvus_token" ] && echo "MILVUS_TOKEN=$milvus_token" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Milvus collection name (default: docsgpt): ${NC}")" milvus_collection
echo "MILVUS_COLLECTION_NAME=${milvus_collection:-docsgpt}" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to Milvus.${NC}"
;;
5)
echo "VECTOR_STORE=lancedb" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter LanceDB path (default: ./data/lancedb): ${NC}")" lancedb_path
echo "LANCEDB_PATH=${lancedb_path:-./data/lancedb}" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter LanceDB table name (default: docsgpts): ${NC}")" lancedb_table
echo "LANCEDB_TABLE_NAME=${lancedb_table:-docsgpts}" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to LanceDB.${NC}"
;;
6)
echo "VECTOR_STORE=pgvector" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter PGVector connection string (e.g. postgresql://user:pass@host:5432/db): ${NC}")" pgvector_conn
[ -n "$pgvector_conn" ] && echo "PGVECTOR_CONNECTION_STRING=$pgvector_conn" >> "$ENV_FILE"
echo -e "${GREEN}Vector store set to PGVector.${NC}"
;;
b|B) return ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
}
# Embeddings configuration
configure_embeddings() {
echo -e "\n${DEFAULT_FG}${BOLD}Embeddings Configuration${NC}"
echo -e "${DEFAULT_FG}Choose your embeddings provider:${NC}"
echo -e "${YELLOW}1) HuggingFace (default, local)${NC}"
echo -e "${YELLOW}2) OpenAI Embeddings${NC}"
echo -e "${YELLOW}3) Custom Remote Embeddings (OpenAI-compatible API)${NC}"
echo -e "${YELLOW}b) Back${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-3, or b): ${NC}")" emb_choice
case "$emb_choice" in
1)
echo "EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2" >> "$ENV_FILE"
echo -e "${GREEN}Embeddings set to HuggingFace (local).${NC}"
;;
2)
echo "EMBEDDINGS_NAME=openai_text-embedding-ada-002" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Embeddings API key (leave empty to reuse LLM API_KEY): ${NC}")" emb_key
[ -n "$emb_key" ] && echo "EMBEDDINGS_KEY=$emb_key" >> "$ENV_FILE"
echo -e "${GREEN}Embeddings set to OpenAI.${NC}"
;;
3)
read -p "$(echo -e "${DEFAULT_FG}Enter embeddings model name: ${NC}")" emb_name
[ -n "$emb_name" ] && echo "EMBEDDINGS_NAME=$emb_name" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter remote embeddings API base URL: ${NC}")" emb_url
[ -n "$emb_url" ] && echo "EMBEDDINGS_BASE_URL=$emb_url" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter embeddings API key (leave empty if none): ${NC}")" emb_key
[ -n "$emb_key" ] && echo "EMBEDDINGS_KEY=$emb_key" >> "$ENV_FILE"
echo -e "${GREEN}Custom remote embeddings configured.${NC}"
;;
b|B) return ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
}
# Authentication configuration
configure_auth() {
echo -e "\n${DEFAULT_FG}${BOLD}Authentication Configuration${NC}"
echo -e "${DEFAULT_FG}Choose authentication type:${NC}"
echo -e "${YELLOW}1) None (default, no authentication)${NC}"
echo -e "${YELLOW}2) Simple JWT${NC}"
echo -e "${YELLOW}3) Session JWT${NC}"
echo -e "${YELLOW}b) Back${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-3, or b): ${NC}")" auth_choice
case "$auth_choice" in
1)
echo -e "${GREEN}Authentication disabled (default).${NC}"
;;
2)
echo "AUTH_TYPE=simple_jwt" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter JWT secret key (leave empty to auto-generate): ${NC}")" jwt_key
if [ -n "$jwt_key" ]; then
echo "JWT_SECRET_KEY=$jwt_key" >> "$ENV_FILE"
else
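# No key supplied: generate a random hex secret, preferring openssl and
# falling back to /dev/urandom when openssl is unavailable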
generated_key=$(openssl rand -hex 32 2>/dev/null || head -c 64 /dev/urandom | od -An -tx1 | tr -d ' \n')
echo "JWT_SECRET_KEY=$generated_key" >> "$ENV_FILE"
echo -e "${YELLOW}Auto-generated JWT secret key.${NC}"
fi
echo -e "${GREEN}Authentication set to Simple JWT.${NC}"
;;
3)
echo "AUTH_TYPE=session_jwt" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter JWT secret key (leave empty to auto-generate): ${NC}")" jwt_key
if [ -n "$jwt_key" ]; then
echo "JWT_SECRET_KEY=$jwt_key" >> "$ENV_FILE"
else
generated_key=$(openssl rand -hex 32 2>/dev/null || head -c 64 /dev/urandom | od -An -tx1 | tr -d ' \n')
echo "JWT_SECRET_KEY=$generated_key" >> "$ENV_FILE"
echo -e "${YELLOW}Auto-generated JWT secret key.${NC}"
fi
echo -e "${GREEN}Authentication set to Session JWT.${NC}"
;;
b|B) return ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
}
# Integrations configuration
configure_integrations() {
echo -e "\n${DEFAULT_FG}${BOLD}Integrations Configuration${NC}"
echo -e "${YELLOW}1) Google Drive${NC}"
echo -e "${YELLOW}2) GitHub${NC}"
echo -e "${YELLOW}b) Back${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-2, or b): ${NC}")" int_choice
case "$int_choice" in
1)
read -p "$(echo -e "${DEFAULT_FG}Enter Google OAuth Client ID: ${NC}")" google_id
[ -n "$google_id" ] && echo "GOOGLE_CLIENT_ID=$google_id" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter Google OAuth Client Secret: ${NC}")" google_secret
[ -n "$google_secret" ] && echo "GOOGLE_CLIENT_SECRET=$google_secret" >> "$ENV_FILE"
echo -e "${GREEN}Google Drive integration configured.${NC}"
;;
2)
read -p "$(echo -e "${DEFAULT_FG}Enter GitHub Personal Access Token (with repo read access): ${NC}")" github_token
[ -n "$github_token" ] && echo "GITHUB_ACCESS_TOKEN=$github_token" >> "$ENV_FILE"
echo -e "${GREEN}GitHub integration configured.${NC}"
;;
b|B) return ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
}
# Document Processing configuration
configure_doc_processing() {
echo -e "\n${DEFAULT_FG}${BOLD}Document Processing Configuration${NC}"
read -p "$(echo -e "${DEFAULT_FG}Parse PDF pages as images for better table/chart extraction? (y/N): ${NC}")" pdf_image
if [[ "$pdf_image" =~ ^[yY]$ ]]; then
echo "PARSE_PDF_AS_IMAGE=true" >> "$ENV_FILE"
echo -e "${GREEN}PDF-as-image parsing enabled.${NC}"
fi
read -p "$(echo -e "${DEFAULT_FG}Enable OCR for document processing (Docling)? (y/N): ${NC}")" ocr_enabled
if [[ "$ocr_enabled" =~ ^[yY]$ ]]; then
echo "DOCLING_OCR_ENABLED=true" >> "$ENV_FILE"
echo -e "${GREEN}Docling OCR enabled.${NC}"
fi
}
# Text-to-Speech configuration
configure_tts() {
echo -e "\n${DEFAULT_FG}${BOLD}Text-to-Speech Configuration${NC}"
echo -e "${DEFAULT_FG}Choose TTS provider:${NC}"
echo -e "${YELLOW}1) Google TTS (default, free)${NC}"
echo -e "${YELLOW}2) ElevenLabs${NC}"
echo -e "${YELLOW}b) Back${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-2, or b): ${NC}")" tts_choice
case "$tts_choice" in
1)
echo "TTS_PROVIDER=google_tts" >> "$ENV_FILE"
echo -e "${GREEN}TTS set to Google TTS.${NC}"
;;
2)
echo "TTS_PROVIDER=elevenlabs" >> "$ENV_FILE"
read -p "$(echo -e "${DEFAULT_FG}Enter ElevenLabs API key: ${NC}")" elevenlabs_key
[ -n "$elevenlabs_key" ] && echo "ELEVENLABS_API_KEY=$elevenlabs_key" >> "$ENV_FILE"
echo -e "${GREEN}TTS set to ElevenLabs.${NC}"
;;
b|B) return ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
}
# Generate INTERNAL_KEY for worker-to-backend auth if not already present
ensure_internal_key() {
if ! grep -q "^INTERNAL_KEY=" "$ENV_FILE" 2>/dev/null; then
local internal_key
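# Same fallback chain as the JWT secret: openssl first, then /dev/urandom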
internal_key=$(openssl rand -hex 32 2>/dev/null || head -c 64 /dev/urandom | od -An -tx1 | tr -d ' \n')
echo "INTERNAL_KEY=$internal_key" >> "$ENV_FILE"
fi
}
# Main advanced settings menu
prompt_advanced_settings() {
ensure_internal_key
echo
read -p "$(echo -e "${DEFAULT_FG}Would you like to configure advanced settings? (y/N): ${NC}")" configure_advanced
if [[ ! "$configure_advanced" =~ ^[yY]$ ]]; then
return
fi
while true; do
echo -e "\n${DEFAULT_FG}${BOLD}Advanced Settings${NC}"
echo -e "${YELLOW}1) Vector Store ${NC}${DEFAULT_FG}(default: faiss)${NC}"
echo -e "${YELLOW}2) Embeddings ${NC}${DEFAULT_FG}(default: HuggingFace local)${NC}"
echo -e "${YELLOW}3) Authentication ${NC}${DEFAULT_FG}(default: none)${NC}"
echo -e "${YELLOW}4) Integrations ${NC}${DEFAULT_FG}(Google Drive, GitHub)${NC}"
echo -e "${YELLOW}5) Document Processing ${NC}${DEFAULT_FG}(PDF as image, OCR)${NC}"
echo -e "${YELLOW}6) Text-to-Speech ${NC}${DEFAULT_FG}(default: Google TTS)${NC}"
echo -e "${YELLOW}s) Save and Continue with Docker setup${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-6, or s): ${NC}")" adv_choice
case "$adv_choice" in
1) configure_vector_store ;;
2) configure_embeddings ;;
3) configure_auth ;;
4) configure_integrations ;;
5) configure_doc_processing ;;
6) configure_tts ;;
s|S) break ;;
*) echo -e "\n${RED}Invalid choice.${NC}" ; sleep 1 ;;
esac
done
}
# 1) Use DocsGPT Public API Endpoint (simple and free)
use_docs_public_api_endpoint() {
echo -e "\n${NC}Setting up DocsGPT Public API Endpoint...${NC}"
echo "LLM_PROVIDER=docsgpt" > "$ENV_FILE"
echo "VITE_API_STREAMING=true" >> "$ENV_FILE"
echo -e "${GREEN}.env file configured for DocsGPT Public API.${NC}"
prompt_advanced_settings
check_and_start_docker
echo -e "\n${NC}Starting Docker Compose...${NC}"
docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" pull && docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" up -d
docker_compose_status=$? # Capture exit status of docker compose
echo "Docker Compose Exit Status: $docker_compose_status"
if [ "$docker_compose_status" -ne 0 ]; then
echo -e "\n${RED}${BOLD}Error starting Docker Compose. Please ensure Docker Compose is installed and in your PATH.${NC}"
echo -e "${RED}Refer to Docker documentation for installation instructions: https://docs.docker.com/compose/install/${NC}"
exit 1 # Indicate failure and EXIT SCRIPT
fi
echo -e "\n${GREEN}DocsGPT is now running on http://localhost:5173${NC}"
echo -e "${YELLOW}You can stop the application by running: docker compose -f \"${COMPOSE_FILE}\" down${NC}"
}
# 2) Serve Local (with Ollama)
serve_local_ollama() {
local ollama_choice model_name
local docker_compose_file_suffix
local default_model="llama3.2:1b"
get_model_name_ollama() {
read -p "$(echo -e "${DEFAULT_FG}Enter Ollama Model Name (leave empty for default: ${default_model} (1.3GB)): ${NC}")" model_name_input
if [ -z "$model_name_input" ]; then
model_name="$default_model" # Set default model if input is empty
else
model_name="$model_name_input" # Use user-provided model name
fi
}
while true; do
clear
prompt_ollama_options
case "$ollama_choice" in
1) # CPU
docker_compose_file_suffix="cpu"
get_model_name_ollama
break ;;
2) # GPU
echo -e "\n${YELLOW}For this option to work correctly you need to have a supported GPU and configure Docker to utilize it.${NC}"
echo -e "${YELLOW}Refer to: https://hub.docker.com/r/ollama/ollama for more information.${NC}"
read -p "$(echo -e "${DEFAULT_FG}Continue with GPU setup? (y/b): ${NC}")" confirm_gpu
case "$confirm_gpu" in
y|Y)
docker_compose_file_suffix="gpu"
get_model_name_ollama
break ;;
b|B) clear; return 1 ;; # Back to Main Menu
*) echo -e "\n${RED}Invalid choice. Please choose y or b.${NC}" ; sleep 1 ;;
esac
;;
b|B) clear; return 1 ;; # Back to Main Menu
*) echo -e "\n${RED}Invalid choice. Please choose 1-2, or b.${NC}" ; sleep 1 ;;
esac
done
echo -e "\n${NC}Configuring for Ollama ($(echo "$docker_compose_file_suffix" | tr '[:lower:]' '[:upper:]'))...${NC}" # Using tr for uppercase - more compatible
echo "API_KEY=xxxx" > "$ENV_FILE" # Placeholder API Key
echo "LLM_PROVIDER=openai" >> "$ENV_FILE"
echo "LLM_NAME=$model_name" >> "$ENV_FILE"
echo "VITE_API_STREAMING=true" >> "$ENV_FILE"
echo "OPENAI_BASE_URL=http://ollama:11434/v1" >> "$ENV_FILE"
echo "EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2" >> "$ENV_FILE"
echo -e "${GREEN}.env file configured for Ollama ($(echo "$docker_compose_file_suffix" | tr '[:lower:]' '[:upper:]')${NC}${GREEN}).${NC}"
prompt_advanced_settings
check_and_start_docker
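# Base Compose file plus the optional Ollama overlay (CPU or GPU variant)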
local compose_files=(
-f "${COMPOSE_FILE}"
-f "$(dirname "${COMPOSE_FILE}")/optional/docker-compose.optional.ollama-${docker_compose_file_suffix}.yaml"
)
echo -e "\n${NC}Starting Docker Compose with Ollama (${docker_compose_file_suffix})...${NC}"
docker compose --env-file "${ENV_FILE}" "${compose_files[@]}" pull
docker compose --env-file "${ENV_FILE}" "${compose_files[@]}" up -d
docker_compose_status=$?
echo "Docker Compose Exit Status: $docker_compose_status" # Debug output
if [ "$docker_compose_status" -ne 0 ]; then
echo -e "\n${RED}${BOLD}Error starting Docker Compose. Please ensure Docker Compose is installed and in your PATH.${NC}"
echo -e "${RED}Refer to Docker documentation for installation instructions: https://docs.docker.com/compose/install/${NC}"
exit 1 # Indicate failure and EXIT SCRIPT
fi
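# Poll the list of running services until the ollama container is up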
echo "Waiting for Ollama container to be ready..."
OLLAMA_READY=false
while ! $OLLAMA_READY; do
CONTAINER_STATUS=$(docker compose --env-file "${ENV_FILE}" "${compose_files[@]}" ps --services --filter "status=running")
if [[ "$CONTAINER_STATUS" == *"ollama"* ]]; then # Check if 'ollama' service is in running services
OLLAMA_READY=true
echo "Ollama container is running."
else
echo "Ollama container not yet ready, waiting..."
sleep 5
fi
done
echo "Pulling $model_name model for Ollama..."
docker compose --env-file "${ENV_FILE}" "${compose_files[@]}" exec -it ollama ollama pull "$model_name"
echo -e "\n${GREEN}DocsGPT is now running with Ollama (${docker_compose_file_suffix}) on http://localhost:5173${NC}"
printf -v compose_files_escaped "%q " "${compose_files[@]}"
echo -e "${YELLOW}You can stop the application by running: docker compose ${compose_files_escaped}down${NC}"
}
# 3) Connect Local Inference Engine
connect_local_inference_engine() {
local engine_choice
local engine_name model_name openai_base_url
get_model_name() {
read -p "$(echo -e "${DEFAULT_FG}Enter Model Name (leave empty to set later as None): ${NC}")" model_name
if [ -z "$model_name" ]; then
model_name="None"
fi
}
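# Each engine below exposes an OpenAI-compatible endpoint; host.docker.internal
# lets the DocsGPT containers reach an inference server running on the host.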
while true; do
clear
prompt_local_inference_engine_options
case "$engine_choice" in
1) # LLaMa.cpp
engine_name="LLaMa.cpp"
openai_base_url="http://host.docker.internal:8000/v1"
get_model_name
break ;;
2) # Ollama
engine_name="Ollama"
openai_base_url="http://host.docker.internal:11434/v1"
get_model_name
break ;;
3) # TGI
engine_name="TGI"
openai_base_url="http://host.docker.internal:8080/v1"
get_model_name
break ;;
4) # SGLang
engine_name="SGLang"
openai_base_url="http://host.docker.internal:30000/v1"
get_model_name
break ;;
5) # vLLM
engine_name="vLLM"
openai_base_url="http://host.docker.internal:8000/v1"
get_model_name
break ;;
6) # Aphrodite
engine_name="Aphrodite"
openai_base_url="http://host.docker.internal:2242/v1"
get_model_name
break ;;
7) # FriendliAI
engine_name="FriendliAI"
openai_base_url="http://host.docker.internal:8997/v1"
get_model_name
break ;;
8) # LMDeploy
engine_name="LMDeploy"
openai_base_url="http://host.docker.internal:23333/v1"
get_model_name
break ;;
b|B) clear; return 1 ;; # Back to Main Menu
*) echo -e "\n${RED}Invalid choice. Please choose 1-8, or b.${NC}" ; sleep 1 ;;
esac
done
echo -e "\n${NC}Configuring for Local Inference Engine: ${BOLD}${engine_name}...${NC}"
echo "API_KEY=None" > "$ENV_FILE"
echo "LLM_PROVIDER=openai" >> "$ENV_FILE"
echo "LLM_NAME=$model_name" >> "$ENV_FILE"
echo "VITE_API_STREAMING=true" >> "$ENV_FILE"
echo "OPENAI_BASE_URL=$openai_base_url" >> "$ENV_FILE"
echo "EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2" >> "$ENV_FILE"
echo -e "${GREEN}.env file configured for ${BOLD}${engine_name}${NC}${GREEN} with OpenAI API format.${NC}"
echo -e "${YELLOW}Note: MODEL_NAME is set to '${BOLD}$model_name${NC}${YELLOW}'. You can change it later in the .env file.${NC}"
prompt_advanced_settings
check_and_start_docker
echo -e "\n${NC}Starting Docker Compose...${NC}"
docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" pull && docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" up -d
docker_compose_status=$?
echo "Docker Compose Exit Status: $docker_compose_status" # Debug output
if [ "$docker_compose_status" -ne 0 ]; then
echo -e "\n${RED}${BOLD}Error starting Docker Compose. Please ensure Docker Compose is installed and in your PATH.${NC}"
echo -e "${RED}Refer to Docker documentation for installation instructions: https://docs.docker.com/compose/install/${NC}"
exit 1 # Indicate failure and EXIT SCRIPT
fi
echo -e "\n${GREEN}DocsGPT is now configured to connect to ${BOLD}${engine_name}${NC}${GREEN} at ${BOLD}$openai_base_url${NC}"
echo -e "${YELLOW}Ensure your ${BOLD}${engine_name} inference server is running at that address${NC}"
echo -e "\n${GREEN}DocsGPT is running at http://localhost:5173${NC}"
echo -e "${YELLOW}You can stop the application by running: docker compose -f \"${COMPOSE_FILE}\" down${NC}"
}
# 4) Connect Cloud API Provider
connect_cloud_api_provider() {
local provider_choice api_key llm_provider provider_name model_name
get_api_key() {
echo -e "${YELLOW}Your API key will be stored locally in the .env file and will not be sent anywhere else${NC}"
read -p "$(echo -e "${DEFAULT_FG}Please enter your API key: ${NC}")" api_key
}
while true; do
clear
prompt_cloud_api_provider_options
case "$provider_choice" in
1) # OpenAI
provider_name="OpenAI"
llm_provider="openai"
model_name="gpt-4o"
get_api_key
break ;;
2) # Google
provider_name="Google (Vertex AI, Gemini)"
llm_provider="google"
model_name="gemini-2.0-flash"
get_api_key
break ;;
3) # Anthropic
provider_name="Anthropic (Claude)"
llm_provider="anthropic"
model_name="claude-3-5-sonnet-latest"
get_api_key
break ;;
4) # Groq
provider_name="Groq"
llm_provider="groq"
model_name="llama-3.1-8b-instant"
get_api_key
break ;;
5) # HuggingFace Inference API
provider_name="HuggingFace Inference API"
llm_provider="huggingface"
model_name="meta-llama/Llama-3.1-8B-Instruct"
get_api_key
break ;;
6) # Azure OpenAI
provider_name="Azure OpenAI"
llm_provider="azure_openai"
model_name="gpt-4o"
get_api_key
echo -e "\n${DEFAULT_FG}${BOLD}Azure OpenAI requires additional configuration:${NC}"
read -p "$(echo -e "${DEFAULT_FG}Enter Azure OpenAI API base URL (e.g. https://your-resource.openai.azure.com/): ${NC}")" azure_api_base
read -p "$(echo -e "${DEFAULT_FG}Enter Azure OpenAI API version (e.g. 2024-02-15-preview): ${NC}")" azure_api_version
read -p "$(echo -e "${DEFAULT_FG}Enter Azure deployment name for chat: ${NC}")" azure_deployment
read -p "$(echo -e "${DEFAULT_FG}Enter Azure deployment name for embeddings (leave empty to skip): ${NC}")" azure_emb_deployment
break ;;
7) # Novita
provider_name="Novita"
llm_provider="novita"
model_name="moonshotai/kimi-k2.5"
get_api_key
break ;;
b|B) clear; return 1 ;; # Clear screen and Back to Main Menu
*) echo -e "\n${RED}Invalid choice. Please choose 1-7, or b.${NC}" ; sleep 1 ;;
esac
done
echo -e "\n${NC}Configuring for Cloud API Provider: ${BOLD}${provider_name}...${NC}"
echo "API_KEY=$api_key" > "$ENV_FILE"
echo "LLM_PROVIDER=$llm_provider" >> "$ENV_FILE"
echo "LLM_NAME=$model_name" >> "$ENV_FILE"
echo "VITE_API_STREAMING=true" >> "$ENV_FILE"
# Azure OpenAI additional settings
if [ "$llm_provider" = "azure_openai" ]; then
[ -n "$azure_api_base" ] && echo "OPENAI_API_BASE=$azure_api_base" >> "$ENV_FILE"
[ -n "$azure_api_version" ] && echo "OPENAI_API_VERSION=$azure_api_version" >> "$ENV_FILE"
[ -n "$azure_deployment" ] && echo "AZURE_DEPLOYMENT_NAME=$azure_deployment" >> "$ENV_FILE"
[ -n "$azure_emb_deployment" ] && echo "AZURE_EMBEDDINGS_DEPLOYMENT_NAME=$azure_emb_deployment" >> "$ENV_FILE"
fi
echo -e "${GREEN}.env file configured for ${BOLD}${provider_name}${NC}${GREEN}.${NC}"
prompt_advanced_settings
check_and_start_docker
echo -e "\n${NC}Starting Docker Compose...${NC}"
docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" pull && docker compose --env-file "${ENV_FILE}" -f "${COMPOSE_FILE}" up -d
docker_compose_status=$?
echo "Docker Compose Exit Status: $docker_compose_status" # Debug output
if [ "$docker_compose_status" -ne 0 ]; then
echo -e "\n${RED}${BOLD}Error starting Docker Compose. Please ensure Docker Compose is installed and in your PATH.${NC}"
echo -e "${RED}Refer to Docker documentation for installation instructions: https://docs.docker.com/compose/install/${NC}"
exit 1 # Indicate failure and EXIT SCRIPT
fi
echo -e "\n${GREEN}DocsGPT is now configured to use ${BOLD}${provider_name}${NC}${GREEN} on http://localhost:5173${NC}"
echo -e "${YELLOW}You can stop the application by running: docker compose -f \"${COMPOSE_FILE}\" down${NC}"
}
# Main script execution
animate_dino
# Check if .env file exists and is not empty
if [ -f "$ENV_FILE" ] && [ -s "$ENV_FILE" ]; then
echo -e "\n${YELLOW}${BOLD}Warning:${NC}${YELLOW} An existing .env file was found with the following settings:${NC}"
head -3 "$ENV_FILE" | while IFS= read -r line; do echo -e "${DEFAULT_FG} $line${NC}"; done
total_lines=$(wc -l < "$ENV_FILE")
if [ "$total_lines" -gt 3 ]; then
echo -e "${DEFAULT_FG} ... and $((total_lines - 3)) more lines${NC}"
fi
echo
read -p "$(echo -e "${YELLOW}Running setup will overwrite this file. Continue? (y/N): ${NC}")" confirm_overwrite
if [[ ! "$confirm_overwrite" =~ ^[yY]$ ]]; then
echo -e "${GREEN}Setup cancelled. Your .env file was not modified.${NC}"
exit 0
fi
fi
while true; do # Main menu loop
clear # Clear screen before showing main menu again
prompt_main_menu
case $main_choice in
1) # Use DocsGPT Public API Endpoint (Docker Hub images)
COMPOSE_FILE="${SCRIPT_DIR}/deployment/docker-compose-hub.yaml"
use_docs_public_api_endpoint
break ;;
2) # Serve Local (with Ollama)
serve_local_ollama && break ;;
3) # Connect Local Inference Engine
connect_local_inference_engine && break ;;
4) # Connect Cloud API Provider
connect_cloud_api_provider && break ;;
5) # Advanced: Build images locally
echo -e "\n${YELLOW}You have selected to build images locally. This is recommended for developers or if you want to test local changes.${NC}"
COMPOSE_FILE="$COMPOSE_FILE_LOCAL"
use_docs_public_api_endpoint
break ;;
*)
echo -e "\n${RED}Invalid choice. Please choose 1-5.${NC}" ; sleep 1 ;;
esac
done
echo -e "\n${GREEN}${BOLD}DocsGPT Setup Complete.${NC}"
exit 0