@@ -327,7 +327,7 @@ sources/whisper.cpp/build/src/libwhisper.a: sources/whisper.cpp
 	cd sources/whisper.cpp && cmake $(WHISPER_CMAKE_ARGS) . -B ./build
 	cd sources/whisper.cpp/build && cmake --build . --config Release
 
-get-sources: sources/go-piper sources/stablediffusion-ggml.cpp sources/bark.cpp sources/whisper.cpp backend/cpp/llama-cpp/llama.cpp
+get-sources: sources/go-piper sources/stablediffusion-ggml.cpp sources/bark.cpp sources/whisper.cpp
 
 replace:
 	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
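For context: the `replace` recipe pins the Go module to the vendored checkout under sources/. A minimal sketch of its effect on go.mod, assuming GOCMD=go and a hypothetical checkout path /workspace/LocalAI:

	$ go mod edit -replace github.com/ggerganov/whisper.cpp=/workspace/LocalAI/sources/whisper.cpp
	$ grep '^replace' go.mod
	replace github.com/ggerganov/whisper.cpp => /workspace/LocalAI/sources/whisper.cpp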
@@ -360,9 +360,7 @@ clean: ## Remove build related file
 	rm -rf backend-assets/*
 	$(MAKE) -C backend/cpp/grpc clean
 	$(MAKE) -C backend/go/bark-cpp clean
-	$(MAKE) -C backend/cpp/llama-cpp clean
 	$(MAKE) -C backend/go/image/stablediffusion-ggml clean
-	rm -rf backend/cpp/llama-cpp-* || true
 	$(MAKE) dropreplace
 	$(MAKE) protogen-clean
 	rmdir pkg/grpc/proto || true
@@ -403,18 +401,6 @@ backend-assets/lib:
 	mkdir -p backend-assets/lib
 
 dist:
-	$(MAKE) backend-assets/grpc/llama-cpp-avx2
-ifeq ($(DETECT_LIBS),true)
-	scripts/prepare-libs.sh backend-assets/grpc/llama-cpp-avx2
-endif
-ifeq ($(OS),Darwin)
-	BUILD_TYPE=none $(MAKE) backend-assets/grpc/llama-cpp-fallback
-else
-	$(MAKE) backend-assets/grpc/llama-cpp-cuda
-	$(MAKE) backend-assets/grpc/llama-cpp-hipblas
-	$(MAKE) backend-assets/grpc/llama-cpp-sycl_f16
-	$(MAKE) backend-assets/grpc/llama-cpp-sycl_f32
-endif
 	GO_TAGS="tts p2p" $(MAKE) build
 ifeq ($(DETECT_LIBS),true)
 	scripts/prepare-libs.sh backend-assets/grpc/piper
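With the llama-cpp variants removed, `dist` reduces to the tagged Go build plus the optional piper library step. A minimal invocation sketch, assuming DETECT_LIBS is only set from the environment and not forced elsewhere in the Makefile:

	$ DETECT_LIBS=true make dist    # also runs scripts/prepare-libs.sh backend-assets/grpc/piper
	$ DETECT_LIBS=false make dist   # skips the prepare-libs step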
@@ -679,31 +665,6 @@ ifneq ($(UPX),)
 	$(UPX) backend-assets/grpc/huggingface
 endif
 
-backend/cpp/llama-cpp/llama.cpp:
-	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama-cpp llama.cpp
-
-INSTALLED_PACKAGES=$(CURDIR)/backend/cpp/grpc/installed_packages
-INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
-ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
-	-DProtobuf_DIR=${INSTALLED_LIB_CMAKE}/protobuf \
-	-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
-	-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
-	-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
-build-llama-cpp-grpc-server:
-# Conditionally build grpc for the llama backend to use if needed
-ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
-	$(MAKE) -C backend/cpp/grpc build
-	_PROTOBUF_PROTOC=${INSTALLED_PACKAGES}/bin/proto \
-	_GRPC_CPP_PLUGIN_EXECUTABLE=${INSTALLED_PACKAGES}/bin/grpc_cpp_plugin \
-	PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
-	CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
-	LLAMA_VERSION=$(CPPLLAMA_VERSION) \
-	$(MAKE) -C backend/cpp/${VARIANT} grpc-server
-else
-	echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
-	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/${VARIANT} grpc-server
-endif
-
 backend-assets/grpc/bark-cpp: protogen-go replace backend/go/bark-cpp/libbark.a backend-assets/grpc
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/backend/go/bark-cpp/ LIBRARY_PATH=$(CURDIR)/backend/go/bark-cpp/ \
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bark-cpp ./backend/go/bark-cpp/
@@ -795,6 +756,48 @@ docker-image-intel-xpu:
 		--build-arg GRPC_BACKENDS="$(GRPC_BACKENDS)" \
 		--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
 
+backend-images:
+	mkdir -p backend-images
+
+docker-build-llama-cpp:
+	docker build -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
+
+docker-save-llama-cpp: backend-images
+	docker save local-ai-backend:llama-cpp -o backend-images/llama-cpp.tar
+
+
+docker-build-rerankers:
+	docker build -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
+
+docker-build-vllm:
+	docker build -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
+
+docker-build-transformers:
+	docker build -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
+
+docker-build-diffusers:
+	docker build -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
+
+docker-build-kokoro:
+	docker build -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
+
+docker-build-faster-whisper:
+	docker build -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
+
+docker-build-coqui:
+	docker build -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
+
+docker-build-bark:
+	docker build -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
+
+docker-build-chatterbox:
+	docker build -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox .
+
+docker-build-exllama2:
+	docker build -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
+
+docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
+
 .PHONY: swagger
 swagger:
 	swag init -g core/http/app.go --output swagger
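The new per-backend targets allow a single backend image to be built and exported without rebuilding the main image. A minimal usage sketch, assuming a local Docker daemon; the `docker load` output line is illustrative:

	$ make docker-build-llama-cpp docker-save-llama-cpp
	$ docker load -i backend-images/llama-cpp.tar
	Loaded image: local-ai-backend:llama-cpp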