Changed timeouts; also changed the inputs and outputs of the interface

This commit is contained in:
alpcentaur 2024-10-07 16:52:44 +02:00
parent 167cb29c3d
commit 9b3980a99e
11 changed files with 123 additions and 25 deletions

View file

@ -35,12 +35,19 @@
"de": "basabuuka",
"it": "basabuuka"
},
"index_translating": {
"en": "The algorithm is translating.. this can take up to 50 seconds the first time. All further translations take much less time, depending on the length of the paragraph..",
"fr": "L'algorithme traduit... ceci peut prendre jusqu'à 50 secondes la première fois. Les traductions ultérieures prennent beaucoup moins de temps, en fonction de la longueur du paragraphe..",
"de": "Der Algorithmus übersetzt den Absatz.. Dies kann beim ersten Mal je nach Länge des Absatzes bis zu 50 Sekunden dauern. Alle weiteren Übersetzungen jedoch dauern kürzer, sobald die Modelle in die RAM geladen sind..",
"it": "L'algoritmo sta traducendo... ciò può richiedere fino a 50 secondi la prima volta. Le traduzioni successive richiederanno molto meno tempo, a seconda della lunghezza del paragrafo..."
},
"index_title2": {
"en": "Open Language!",
"fr": "ouvrir la langue?",
"de": "Sprache Öffnen?",
"it": "Aprire le lingue?"
},
"index_title3": {
"en": "How does pluriton work?",
"fr": "Comme pluriton functionne?",

View file

@ -125,6 +125,7 @@
window.onload = function() {
// retrieved from server-side template
let lang = "{{ lang }}";
let translating = "{{ "index_translating"|tr(lang) }}";
document.getElementById('langs').value=lang;
document.getElementById("send-text").style.display = "unset";
let btn2 = document.getElementById("send-text-btn");
@ -142,7 +143,7 @@
};
xhr1.send(JSON.stringify(json));
document.getElementById("datextarea").value = "...";
document.getElementById("output1").value = translating;
xhr1.onreadystatechange = function () {
if (xhr1.readyState == 4) {
@ -154,7 +155,7 @@
};
};
document.getElementById("datextarea").value = "...";
document.getElementById("datextarea").value = text;
});
}
@ -208,14 +209,7 @@
<a style="width: 50vw; background: deeppink;" id="send-text-btn" class="ncstyle-button margin-bottom">{{ "index_search_button"|tr(lang) }}</a>
</div>
<div class="div_10"></div>
<div class="grid-container">
<div class="item1">
<textarea id="output1" placeholder="{{"index_description3"|tr(lang)}}" style="font-size:18px; outline:none; resize: none; overflow:auto; width:38vw; height:55vh; border-width:2vw border-width: 2vw; border-style:solid"></textarea>
</div>
<div class="item2">
<textarea id="output2" placeholder="{{"index_description4"|tr(lang)}}" style="font-size:18px; outline:none; resize: none; overflow:auto; width:38vw; height:55vh; border-width:2vw border-width: 2vw; border-style:solid"></textarea>
</div>
</div>
<textarea id="output1" placeholder="{{"index_description3"|tr(lang)}}" style="font-size:18px; outline:none; resize: none; overflow:auto; width:80vw; height:60vh; border-width:2vw border-width: 2vw; border-style:solid"></textarea>
<div class="div_10"></div>
</center>

View file

@ -9,11 +9,11 @@ ENV PYTHONDONTWRITEBYTECODE=1
# the application crashes without emitting any logs due to buffering.
ENV PYTHONUNBUFFERED=1
WORKDIR /app
WORKDIR /opt/fastapiserver
# Create a non-privileged user that the app will run under.
# See https://docs.docker.com/go/dockerfile-user-best-practices/
ARG UID=10001
#ARG UID=10001
#RUN adduser \
# --disabled-password \
# --gecos "" \
@ -27,14 +27,16 @@ ARG UID=10001
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
# Leverage a bind mount to requirements.txt to avoid having to copy them into
# into this layer.
RUN --mount=type=cache,target=/root/.cache/pip
# --mount=type=bind,source=requirements.txt,target=requirements.txt \
# python -m pip install -r requirements.txt
#RUN --mount=type=cache,target=/root/.cache/pip \
#RUN --mount=type=cache,target=/root/.cache/pip
# --mount=type=bind,source=requirements.txt,target=requirements.txt \
# python -m pip install -r requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
--mount=type=bind,source=requirements.txt,target=requirements.txt \
python -m pip install -r requirements.txt
# Switch to the non-privileged user to run the application.
#USER appuser
CMD /bin/sh -c "while true; do sleep 30; done"
CMD python -m uvicorn fastapi_server:app --reload --host 0.0.0.0 --port 8001
#CMD /bin/sh -c "while true; do sleep 30; done"

View file

@ -4,6 +4,15 @@ from fastapi import FastAPI, Request, Response
import asyncio
import httpx
import requests
import ast
import ollama
from ollama import AsyncClient
app = FastAPI()
@app.post("/print_and_respond")
@ -14,13 +23,46 @@ async def print_and_respond(request: Request):
# Print the received message (or do any processing needed)
print(f"Received from first server: {data}")
# Respond back to the first server
return Response(
content=str(data).encode(),
media_type="application/json",
status_code=200,
headers={"Content-Type": "application/json"}
)
message = 'Forme bitte folgenden Satz in mehrere Sätze um, sodass jeder Satz genau eine Aussage enthält. Nach jeder Aussage soll ein Punkt kommen. Subjekte und Objekte dürfen sich nicht vertauschen. Bei der Umformung darf kein Wort dazukommen, und es darf auch kein Wort wegfallen. Gib keinerlei Erklärung oder andere Aussagen, sondern gib nur die resultierenden Sätze mit einer Aussage pro Satz wieder. Versuche auch logische Zusammenhänge im Resultat beizubehalten. Das ist der Satz, der umgeformt werden soll:' + data["data"]
#message = {'role': 'user', 'content': 'Why is the sky blue?'}
#preprocessed = await AsyncClient(host='http://ollama:11434').chat(model='qwen2.5:14b', messages=[message])
#print(preprocessed)
#data["data"] = preprocessed["response"]
try:
# Prepare the request to the Ollama API
ollama_api_url = f"http://ollama:11434/api/generate"
payload = {"prompt": message,"model":'qwen2.5:14b',"stream":False}
#print(payload)
# Send the request to the Ollama API
response = requests.post(ollama_api_url, json=payload)
response.raise_for_status()
# Check if the request was successful
if response.status_code == 200:
#print(response.text)
print(response.json()["response"])
data["data"] = response.json()["response"]
return Response(
content=str(data).encode(),
media_type="application/json",
status_code=200,
headers={"Content-Type": "application/json"}
)
else:
raise HTTPException(status_code=response.status_code, detail="Error in Ollama API response")
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
import uvicorn

View file

@ -0,0 +1,19 @@
annotated-types==0.7.0
anyio==4.6.0
certifi==2024.8.30
charset-normalizer==3.3.2
click==8.1.7
fastapi==0.115.0
h11==0.14.0
httpcore==1.0.6
httpx==0.27.2
idna==3.10
ollama==0.3.3
pydantic==2.9.2
pydantic_core==2.23.4
requests==2.32.3
sniffio==1.3.1
starlette==0.38.6
typing_extensions==4.12.2
urllib3==2.2.3
uvicorn==0.31.0

View file

@ -0,0 +1,13 @@
import ollama
import asyncio
from ollama import AsyncClient


async def chat():
    """Send one hard-coded test prompt to the local Ollama service.

    Talks to the `ollama` container at http://ollama:11434 and requests a
    chat completion from the `llama3.1` model. The completion is awaited
    but otherwise discarded — this script only exercises the round trip.
    """
    prompt = {'role': 'user', 'content': 'Why is the sky blue?'}
    client = AsyncClient(host='http://ollama:11434')
    # NOTE(review): the response is intentionally unused here; add a
    # print() if you need to inspect the model output during testing.
    await client.chat(model='llama3.1', messages=[prompt])


asyncio.run(chat())

View file

@ -0,0 +1,19 @@
annotated-types==0.7.0
anyio==4.6.0
certifi==2024.8.30
charset-normalizer==3.3.2
click==8.1.7
fastapi==0.115.0
h11==0.14.0
httpcore==1.0.6
httpx==0.27.2
idna==3.10
ollama==0.3.3
pydantic==2.9.2
pydantic_core==2.23.4
requests==2.32.3
sniffio==1.3.1
starlette==0.38.6
typing_extensions==4.12.2
urllib3==2.2.3
uvicorn==0.31.0

View file

@ -48,7 +48,7 @@ async def root(data: Request):
#data = {"key": "oioioi und oi."}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(timeout=100.0) as client:
response_from_second_server = await client.post(SECOND_SERVER_URL, data=json_data)
print('oi oi oi oi')
@ -56,6 +56,8 @@ async def root(data: Request):
print(output)
print(output["data"])
#output = output["data"]
output = prototype.translate(output["data"])
daresponse = output + '?&?&' + "Future Input"