forked from open-webui/open-webui
Merge branch 'main' into dev
This commit is contained in:
commit
d4f783cbb0
2 changed files with 24 additions and 18 deletions
|
@ -20,9 +20,10 @@ This configuration allows Ollama to accept connections from any source.
|
||||||
|
|
||||||
Ensure that the Ollama URL is correctly formatted in the application settings. Follow these steps:
|
Ensure that the Ollama URL is correctly formatted in the application settings. Follow these steps:
|
||||||
|
|
||||||
|
- If your Ollama runs on a different host than the Web UI, make sure the Ollama host address is provided when running the Web UI container via the `OLLAMA_API_BASE_URL` environment variable. [(e.g. OLLAMA_API_BASE_URL=http://192.168.1.1:11434/api)](https://github.com/ollama-webui/ollama-webui#accessing-external-ollama-on-a-different-server)
|
||||||
- Go to "Settings" within the Ollama WebUI.
|
- Go to "Settings" within the Ollama WebUI.
|
||||||
- Navigate to the "General" section.
|
- Navigate to the "General" section.
|
||||||
- Verify that the Ollama URL is in the following format: `http://localhost:11434/api`.
|
- Verify that the Ollama Server URL is set to: `/ollama/api`.
|
||||||
|
|
||||||
It is crucial to include `/api` at the end of the URL so that the Ollama Web UI can communicate with the server.
|
It is crucial to include `/api` at the end of the URL so that the Ollama Web UI can communicate with the server.
|
||||||
|
|
||||||
|
|
|
@ -59,27 +59,32 @@ def proxy(path):
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# Make a request to the target server
|
try:
|
||||||
target_response = requests.request(
|
# Make a request to the target server
|
||||||
method=request.method,
|
target_response = requests.request(
|
||||||
url=target_url,
|
method=request.method,
|
||||||
data=data,
|
url=target_url,
|
||||||
headers=headers,
|
data=data,
|
||||||
stream=True, # Enable streaming for server-sent events
|
headers=headers,
|
||||||
)
|
stream=True, # Enable streaming for server-sent events
|
||||||
|
)
|
||||||
|
|
||||||
# Proxy the target server's response to the client
|
target_response.raise_for_status()
|
||||||
def generate():
|
|
||||||
for chunk in target_response.iter_content(chunk_size=8192):
|
|
||||||
yield chunk
|
|
||||||
|
|
||||||
response = Response(generate(), status=target_response.status_code)
|
# Proxy the target server's response to the client
|
||||||
|
def generate():
|
||||||
|
for chunk in target_response.iter_content(chunk_size=8192):
|
||||||
|
yield chunk
|
||||||
|
|
||||||
# Copy headers from the target server's response to the client's response
|
response = Response(generate(), status=target_response.status_code)
|
||||||
for key, value in target_response.headers.items():
|
|
||||||
response.headers[key] = value
|
|
||||||
|
|
||||||
return response
|
# Copy headers from the target server's response to the client's response
|
||||||
|
for key, value in target_response.headers.items():
|
||||||
|
response.headers[key] = value
|
||||||
|
|
||||||
|
return response
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"detail": "Server Connection Error", "message": str(e)}), 400
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
Loading…
Reference in a new issue