FROM tensorflow/tensorflow:2.3.0-gpu

# Why 2.3? Each TensorFlow release only works with certain CUDA versions; a good
# compatibility table is at
# https://stackoverflow.com/questions/50622525/which-tensorflow-and-cuda-version-combinations-are-compatible
# To find the CUDA version installed on the host, run:
#   docker run --runtime=nvidia --rm nvidia/cuda:9.0-base nvidia-smi

RUN useradd -ms /bin/bash pluritonian

COPY Translations.txt /home/pluritonian/Translations.txt

COPY test_runwithgen.py /home/pluritonian/test_runwithgen.py
COPY test_runwithload.py /home/pluritonian/test_runwithload.py
COPY generateModels.py /home/pluritonian/generateModels.py

COPY req.js /home/pluritonian/req.js

COPY postcommand /home/pluritonian/postcommand

COPY updateDatabase.py /home/pluritonian/updateDatabase.py

COPY FASTsearch.py /home/pluritonian/FASTsearch.py

COPY fastapi_server.py /home/pluritonian/fastapi_server.py

#USER pluritonian

WORKDIR /home/pluritonian

RUN apt-get update && apt-get install -y nano

RUN pip install joblib scikit-learn hickle==3.4.9 fastapi "uvicorn[standard]"

RUN pip install idna==2.9 python-multipart==0.0.5

# Generate the models at build time so the container starts up ready to serve.
RUN python generateModels.py

# Keep the container running by serving the FastAPI app
# (uvicorn listens on port 8000 by default):

CMD uvicorn --host 0.0.0.0 fastapi_server:app

# Alternatively, keep the container alive without starting the server:
#ENTRYPOINT ["tail"]
#CMD ["-f","/dev/null"]