# You can also run models locally or on your own server and use them instead, as long as they are compatible with the HuggingFace API
# For local models select HF_API as the type because they use the HuggingFace API
# Most probably you don't need a key for your local model
# But if you have some kind of authentication compatible with the HuggingFace API you can set it here
HF_API_KEY=None
# The main use case for local models is locally running LLMs
# You can serve any model using Text Generation Inference from HuggingFace
# https://github.com/huggingface/text-generation-inference
# This project uses the Messages API, which is compatible with the OpenAI API and lets you just plug and play OS models
# Don't forget to add '/v1' to the end of the URL
# Assuming you have the Meta-Llama-3-8B-Instruct model running on your local server, your configuration will look like this
LLM_URL=http://192.168.1.1:8080/v1
LLM_TYPE=HF_API
LLM_NAME=Meta-Llama-3-8B-Instruct
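# As a sanity check, once the server is up you can talk to it through the
# OpenAI-compatible Messages API. A minimal Python sketch (using the openai
# client is an assumption here, not a dependency of this project):
#
#   from openai import OpenAI
#
#   client = OpenAI(base_url="http://192.168.1.1:8080/v1", api_key="none")
#   response = client.chat.completions.create(
#       model="Meta-Llama-3-8B-Instruct",
#       messages=[{"role": "user", "content": "Say hello"}],
#   )
#   print(response.choices[0].message.content)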
# Running an STT model locally is not straightforward
# But you can, for example, run one of the Whisper models on your laptop
# It requires a simple wrapper over the model to make it compatible with the HuggingFace API. Maybe I will share one in the future
# But assuming you manage to run a local whisper server, your configuration will look like this
STT_URL=http://127.0.0.1:5000/transcribe
STT_TYPE=HF_API
STT_NAME=whisper-base.en
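# For reference, a minimal sketch of such a wrapper (hypothetical; assumes
# Flask and the openai-whisper package, and mimics the HF Inference API shape:
# raw audio bytes in, JSON with a 'text' field out):
#
#   import tempfile
#   import whisper
#   from flask import Flask, request, jsonify
#
#   app = Flask(__name__)
#   model = whisper.load_model("base.en")
#
#   @app.route("/transcribe", methods=["POST"])
#   def transcribe():
#       # The HF API sends raw audio bytes as the request body
#       with tempfile.NamedTemporaryFile(suffix=".wav") as f:
#           f.write(request.data)
#           f.flush()
#           result = model.transcribe(f.name)
#       return jsonify({"text": result["text"]})
#
#   app.run(port=5000)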
# I don't see much value in running TTS models locally given the quality of online models
# But if you have some kind of TTS model running on your local server, you can configure it here
TTS_URL=http://127.0.0.1:5001/read
TTS_TYPE=HF_API
TTS_NAME=my-tts-model
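# Such a server would be expected to behave like the HF Inference API for
# text-to-speech: JSON with the text in, raw audio bytes out. A hypothetical
# client-side call (the payload shape is an assumption):
#
#   import requests
#
#   response = requests.post(
#       "http://127.0.0.1:5001/read",
#       json={"inputs": "Hello from my local TTS model"},
#   )
#   with open("speech.wav", "wb") as f:
#       f.write(response.content)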