# syntax=docker/dockerfile:1
# Cyberlaywer/build/tfgpu-cyberlaywer/Dockerfile
FROM tensorflow/tensorflow:2.3.0-gpu
# Why 2.3? TF and CUDA versions must match; compatibility matrices:
# https://stackoverflow.com/questions/50622525/which-tensorflow-and-cuda-version-combinations-are-compatible
# https://www.tensorflow.org/install/source#gpu
# Find the CUDA version installed on the host with:
#   docker run --runtime=nvidia --rm nvidia/cuda:9.0-base nvidia-smi

# Non-root account that owns the application directory.
RUN useradd -ms /bin/bash pluritonian

# COPY commands are no longer needed: the application files are organized
# as an overlay on the host system. (Bind mounts create problems on
# Windows, but COPY caused line-ending/formatting problems too.)
# Kept commented out as documentation of the expected file layout:
#COPY Translations.txt /home/pluritonian/Translations.txt
#COPY test_runwithgen.py /home/pluritonian/test_runwithgen.py
#COPY test_runwithload.py /home/pluritonian/test_runwithload.py
#COPY generateModels.py /home/pluritonian/generateModels.py
#COPY req.js /home/pluritonian/req.js
#COPY postcommand /home/pluritonian/postcommand
#COPY EndDokumente /home/pluritonian/EndDokumente
#COPY german.model.big /home/pluritonian/german.model.big
#COPY updateDatabase.py /home/pluritonian/updateDatabase.py
#COPY txtFilesInDir2PythonListInTxtFile.py /home/pluritonian/txtFilesInDir2PythonListInTxtFile.py
#COPY pythonlistInTxtFile2NoStopwords.py /home/pluritonian/pythonlistInTxtFile2NoStopwords.py
#COPY pythonlistInTxtFile2lowercase.py /home/pluritonian/pythonlistInTxtFile2lowercase.py
#COPY pythonlistInTxtFile2wordClouds.py /home/pluritonian/pythonlistInTxtFile2wordClouds.py
#COPY FASTsearch.py /home/pluritonian/FASTsearch.py
#COPY fastapi_server.py /home/pluritonian/fastapi_server.py
#USER pluritonian
WORKDIR /home/pluritonian

# -y is required: a plain "apt-get install nano" prompts for confirmation,
# reads EOF in a non-interactive build, and aborts. Clean the apt lists in
# the same layer so the package cache does not persist in the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends nano \
 && rm -rf /var/lib/apt/lists/*

# Quote "uvicorn[standard]" so the shell cannot glob-expand the brackets.
# --no-cache-dir keeps pip's download cache out of the layer.
# Install order is preserved (idna==2.9 / python-multipart pins come after
# fastapi so they win over any transitively pulled versions).
RUN pip install --no-cache-dir joblib scikit-learn hickle==3.4.9 fastapi "uvicorn[standard]" \
 && pip install --no-cache-dir idna==2.9 python-multipart==0.0.5 \
 && pip install --no-cache-dir nltk gensim

# NOTE(review): generateModels.py is never COPY'd into the image — this step
# assumes the project overlay is present in /home/pluritonian at build time.
# Confirm the build is actually run that way, otherwise this layer fails.
RUN python generateModels.py

# NOTE(review): the image still runs as root; consider re-enabling
# "USER pluritonian" above once file ownership in the overlay allows it.
# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["uvicorn", "--host", "0.0.0.0", "fastapi_server:app"]

# To keep the container alive for interactive debugging (docker exec) instead,
# swap in these two lines:
#ENTRYPOINT ["tail"]
#CMD ["-f","/dev/null"]