forked from open-webui/open-webui
Merge branch 'main' into dev
commit d4f783cbb0
2 changed files with 24 additions and 18 deletions
@@ -20,9 +20,10 @@ This configuration allows Ollama to accept connections from any source.

 Ensure that the Ollama URL is correctly formatted in the application settings. Follow these steps:

+- If your Ollama runs on a different host than the Web UI, make sure the Ollama host address is provided when running the Web UI container via the `OLLAMA_API_BASE_URL` environment variable. [(e.g. OLLAMA_API_BASE_URL=http://192.168.1.1:11434/api)](https://github.com/ollama-webui/ollama-webui#accessing-external-ollama-on-a-different-server)
 - Go to "Settings" within the Ollama WebUI.
 - Navigate to the "General" section.
-- Verify that the Ollama URL is in the following format: `http://localhost:11434/api`.
+- Verify that the Ollama Server URL is set to: `/ollama/api`.

 It is crucial to include the `/api` at the end of the URL to ensure that the Ollama Web UI can communicate with the server.
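The steps above boil down to one check: the value the Web UI ends up using must be a reachable Ollama API base URL ending in `/api`. A minimal sketch of that check, not part of this commit, assuming the standard Ollama `GET /api/tags` endpoint and the same `OLLAMA_API_BASE_URL` variable described above:

```python
import os

import requests

# Hypothetical connectivity check for the configured Ollama URL.
# OLLAMA_API_BASE_URL must include the trailing /api, e.g. http://192.168.1.1:11434/api.
base_url = os.environ.get("OLLAMA_API_BASE_URL", "http://localhost:11434/api")

try:
    # /tags lists the locally available models; any 2xx response means the
    # base URL is reachable and correctly formatted.
    resp = requests.get(f"{base_url}/tags", timeout=5)
    resp.raise_for_status()
    models = resp.json().get("models", [])
    print(f"Ollama reachable at {base_url}: {len(models)} model(s) found")
except requests.RequestException as exc:
    print(f"Could not reach Ollama at {base_url}: {exc}")
```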
@@ -59,27 +59,32 @@ def proxy(path):
     else:
         pass

-    # Make a request to the target server
-    target_response = requests.request(
-        method=request.method,
-        url=target_url,
-        data=data,
-        headers=headers,
-        stream=True,  # Enable streaming for server-sent events
-    )
+    try:
+        # Make a request to the target server
+        target_response = requests.request(
+            method=request.method,
+            url=target_url,
+            data=data,
+            headers=headers,
+            stream=True,  # Enable streaming for server-sent events
+        )

-    # Proxy the target server's response to the client
-    def generate():
-        for chunk in target_response.iter_content(chunk_size=8192):
-            yield chunk
+        target_response.raise_for_status()

-    response = Response(generate(), status=target_response.status_code)
+        # Proxy the target server's response to the client
+        def generate():
+            for chunk in target_response.iter_content(chunk_size=8192):
+                yield chunk

-    # Copy headers from the target server's response to the client's response
-    for key, value in target_response.headers.items():
-        response.headers[key] = value
+        response = Response(generate(), status=target_response.status_code)

-    return response
+        # Copy headers from the target server's response to the client's response
+        for key, value in target_response.headers.items():
+            response.headers[key] = value
+
+        return response
+    except Exception as e:
+        return jsonify({"detail": "Server Connection Error", "message": str(e)}), 400


 if __name__ == "__main__":
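The practical effect of the new `try`/`except` is that a dead or misconfigured Ollama backend now surfaces as an HTTP 400 with a JSON body (`detail` and `message` fields) instead of an unhandled exception in the Flask handler. A hedged client-side sketch of what that looks like, assuming the Web UI proxy is reachable at `http://localhost:8080` and mounted under the `/ollama/api` prefix mentioned in the docs hunk above:

```python
import requests

# Hypothetical client call through the Web UI proxy; the host and port are assumed.
proxy_url = "http://localhost:8080/ollama/api/tags"

resp = requests.get(proxy_url, stream=True)
if resp.status_code == 400:
    # The shape of this error body comes from the except branch added in this commit.
    err = resp.json()
    print(f"{err['detail']}: {err['message']}")
else:
    # Otherwise the proxy streams Ollama's response through in 8 KB chunks.
    for chunk in resp.iter_content(chunk_size=8192):
        print(chunk.decode("utf-8", errors="replace"), end="")
```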