Commit cc4fd7c9 authored by chenpangpang's avatar chenpangpang
Browse files

合并了paddle分支

parents e670fa96 ec35df8c
...@@ -15,7 +15,15 @@ ARG TORCHAUDIO_VERSION ...@@ -15,7 +15,15 @@ ARG TORCHAUDIO_VERSION
# ----- tensorflow args ----- # ----- tensorflow args -----
ARG TENSORFLOW_VERSION ARG TENSORFLOW_VERSION
ARG CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh" #ARG CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh"
# ----- paddlepaddle args -----
# paddlepaddle-gpu version to install; the paddle install steps are skipped when empty
ARG PADDLEPADDLE_VERSION
# paddlenlp version to install; the paddlenlp install step is skipped when empty
ARG PADDLENLP_VERSION
# NOTE(review): CUDA_VERSION is not referenced in any visible step — confirm it is used downstream
ARG CUDA_VERSION
# optional wheel index / find-links URL for paddlepaddle-gpu; when empty the Tsinghua PyPI mirror is used
ARG PADDLE_URL
# ----- conda and python args ----
# miniconda installer URL; conda is installed only when this is non-empty
ARG CONDA_URL
#ARG CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh"
ARG SOURCES="-i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn" ARG SOURCES="-i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn"
ENV TZ=Asia/Shanghai ENV TZ=Asia/Shanghai
ENV SHELL=/bin/bash \ ENV SHELL=/bin/bash \
...@@ -43,8 +51,9 @@ RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak \ ...@@ -43,8 +51,9 @@ RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak \
&& apt-get install --no-install-recommends -y vim openssl libssl-dev net-tools locales wget git git-lfs sudo openssh-client openssh-server \ && apt-get install --no-install-recommends -y vim openssl libssl-dev net-tools locales wget git git-lfs sudo openssh-client openssh-server \
&& locale-gen en_US.UTF-8 && locale-gen en_US.UTF-8
# ----- conda and python install -----
RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \ RUN if [ -n "$CONDA_URL" ];then \
mkdir -p /tmp/conda-extension \ mkdir -p /tmp/conda-extension \
&& cd /tmp/conda-extension \ && cd /tmp/conda-extension \
&& wget $CONDA_URL \ && wget $CONDA_URL \
...@@ -53,6 +62,15 @@ RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \ ...@@ -53,6 +62,15 @@ RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
&& cd .. \ && cd .. \
&& rm -rf /tmp/conda-extension; fi && rm -rf /tmp/conda-extension; fi
#RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
# mkdir -p /tmp/conda-extension \
# && cd /tmp/conda-extension \
# && wget $CONDA_URL \
# && bash $(echo $CONDA_URL | awk -F "/" '{print $NF}') -b -p /opt/conda \
# && echo "export PATH=\$PATH:/opt/conda/bin" >> /etc/profile.d/sothisai.sh \
# && cd .. \
# && rm -rf /tmp/conda-extension; fi
ENV PATH=$PATH:/opt/conda/bin ENV PATH=$PATH:/opt/conda/bin
RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES} \ RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES} \
...@@ -60,7 +78,6 @@ RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES ...@@ -60,7 +78,6 @@ RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES
&& mv /etc/apt/sources.list.bak /etc/apt/sources.list \ && mv /etc/apt/sources.list.bak /etc/apt/sources.list \
&& mv /etc/apt/sources.list.d.bak /etc/apt/sources.list.d && mv /etc/apt/sources.list.d.bak /etc/apt/sources.list.d
RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ] && [ -n "$TORCH_VERSION" ];then \ RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ] && [ -n "$TORCH_VERSION" ];then \
pip3 install torch==$TORCH_VERSION torchvision==$TORCHVISION_VERSION torchaudio==$TORCHAUDIO_VERSION \ pip3 install torch==$TORCH_VERSION torchvision==$TORCHVISION_VERSION torchaudio==$TORCHAUDIO_VERSION \
--index-url https://download.pytorch.org/whl/cu$(echo "$BASE_IMAGE" | awk -F'[:-]' '{n=split($2,a,"."); print a[1] a[2]}') \ --index-url https://download.pytorch.org/whl/cu$(echo "$BASE_IMAGE" | awk -F'[:-]' '{n=split($2,a,"."); print a[1] a[2]}') \
...@@ -76,6 +93,23 @@ RUN if [ -n "$TENSORFLOW_VERSION" ]; then \ ...@@ -76,6 +93,23 @@ RUN if [ -n "$TENSORFLOW_VERSION" ]; then \
apt-get update -y && \ apt-get update -y && \
apt-get install --no-install-recommends -y libnvinfer8 libnvjitlink-12-3 libnvjpeg-12-3 libnvinfer-plugin8; fi apt-get install --no-install-recommends -y libnvinfer8 libnvjitlink-12-3 libnvjpeg-12-3 libnvinfer-plugin8; fi
# ----- paddlepaddle install -----
# Install paddlepaddle-gpu from a caller-supplied wheel location when both the
# version and the URL build args are set.
# NOTE(review): PADDLE_URL is passed as both --find-links (-f) and --index-url
# (-i); paddle's wheel pages are normally find-links only — confirm the URL is
# valid in both roles.
# --no-cache-dir keeps the pip cache out of the layer (DL3042); rm -rf (instead
# of rm -r) does not fail the build when the cache dir does not exist.
RUN if [ -n "$PADDLEPADDLE_VERSION" ] && [ -n "$PADDLE_URL" ]; then \
        pip install --no-cache-dir paddlepaddle-gpu==$PADDLEPADDLE_VERSION -f $PADDLE_URL -i $PADDLE_URL \
        && rm -rf /root/.cache/pip; \
    fi
# Fallback: install paddlepaddle-gpu from the Tsinghua PyPI mirror when no
# custom wheel URL was provided.
# --no-cache-dir keeps the pip cache out of the layer (DL3042); rm -rf (instead
# of rm -r) does not fail the build when the cache dir does not exist.
RUN if [ -n "$PADDLEPADDLE_VERSION" ] && [ -z "$PADDLE_URL" ]; then \
        pip install --no-cache-dir paddlepaddle-gpu==$PADDLEPADDLE_VERSION -i https://pypi.tuna.tsinghua.edu.cn/simple \
        && rm -rf /root/.cache/pip; \
    fi
# Install paddlenlp (pinned) plus ppdiffusers and huggingface_hub, then upgrade
# ppdiffusers without pulling in new dependencies (--no-deps).
# Both pip calls use --no-cache-dir and the Tsinghua mirror for consistency;
# rm -rf (instead of rm -r) does not fail when the cache dir does not exist.
RUN if [ -n "$PADDLENLP_VERSION" ] ; then \
        pip install --no-cache-dir paddlenlp==$PADDLENLP_VERSION ppdiffusers huggingface_hub -i https://pypi.tuna.tsinghua.edu.cn/simple && \
        pip install --no-cache-dir --upgrade ppdiffusers --no-deps -i https://pypi.tuna.tsinghua.edu.cn/simple && \
        rm -rf /root/.cache/pip; \
    fi
COPY ./python-requirements.txt /tmp/ COPY ./python-requirements.txt /tmp/
RUN pip install --no-cache-dir -r /tmp/python-requirements.txt RUN pip install --no-cache-dir -r /tmp/python-requirements.txt
......
#!/bin/bash #!/bin/bash
# 框架 # 框架
framework=$1 framework=$1
# 输出镜像tag # 输出镜像tag
...@@ -38,10 +37,11 @@ cp -f ./Dockerfile.${framework}_ubuntu ./tmp/${tmp_dockerfile} ...@@ -38,10 +37,11 @@ cp -f ./Dockerfile.${framework}_ubuntu ./tmp/${tmp_dockerfile}
echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/" echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/"
docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/ docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/
#echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/" #echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/"
build_status=$?
rm -r ./tmp rm -r ./tmp
if [[ $? -eq 0 ]];then if [[ build_status -eq 0 ]];then
echo -e "\033[32mBuild Image Successfully !\033[0m" echo -e "\033[32mBuild Image Successfully !\033[0m"
else else
echo -e "\033[32mBuild Image fail!\033[0m" echo -e "\033[32mBuild Image fail!\033[0m"
exit 1 exit 1
fi fi
\ No newline at end of file
...@@ -2,4 +2,4 @@ setuptools ...@@ -2,4 +2,4 @@ setuptools
ipywidgets ipywidgets
wheel wheel
matplotlib matplotlib
git-lfs git-lfs
\ No newline at end of file
...@@ -34,11 +34,12 @@ elif [[ "$1" == *"tensorflow"* ]]; then ...@@ -34,11 +34,12 @@ elif [[ "$1" == *"tensorflow"* ]]; then
print(\"tensorflow cuda available: \", tf.test.is_gpu_available()); \ print(\"tensorflow cuda available: \", tf.test.is_gpu_available()); \
os.system('nvcc -V | tail -n 2') os.system('nvcc -V | tail -n 2')
" "
# Paddle images: mount the paddle test suite into the container under test and
# run its base smoke test (TARGET_DIR is assigned as its own command here, so
# the later $TARGET_DIR expansion is correct).
elif [[ "$1" == *"paddle"* ]]; then
TARGET_DIR=gpu-base-image-test/paddletest
docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python base_test.py
else else
echo "ERROR: no supported test shell" echo "ERROR: no supported test shell"
exit 1 exit 1
fi fi
...@@ -10,4 +10,8 @@ if [[ "$1" == *"pytorch"* ]]; then \ ...@@ -10,4 +10,8 @@ if [[ "$1" == *"pytorch"* ]]; then \
docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/pytorch/gpt2 $1 python infer.py; fi docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/pytorch/gpt2 $1 python infer.py; fi
if [[ "$1" == *"tensorflow"* ]]; then \ if [[ "$1" == *"tensorflow"* ]]; then \
docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/tensorflow/bert $1 python infer.py; fi docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/tensorflow/bert $1 python infer.py; fi
# Paddle smoke test: mount the paddletest dir and run the inference sample.
# NOTE: TARGET_DIR must be assigned as a separate command (terminated with ';'),
# not as a one-shot env prefix on the docker-run line — a prefix assignment does
# not affect expansion of $TARGET_DIR in that same command's arguments, so the
# mount path would have been empty.
if [[ "$1" == *"paddle"* ]]; then \
    TARGET_DIR=gpu-base-image-test/paddletest; \
    docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python text.py; fi
...@@ -11,3 +11,8 @@ if [[ "$1" == *"pytorch"* ]]; then \ ...@@ -11,3 +11,8 @@ if [[ "$1" == *"pytorch"* ]]; then \
if [[ "$1" == *"tensorflow"* ]]; then \ if [[ "$1" == *"tensorflow"* ]]; then \
docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/tensorflow/mnist $1 python train.py; fi docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/tensorflow/mnist $1 python train.py; fi
# Paddle smoke test: mount the paddletest dir and run the training sample.
# NOTE: TARGET_DIR must be assigned as a separate command (terminated with ';'),
# not as a one-shot env prefix on the docker-run line — a prefix assignment does
# not affect expansion of $TARGET_DIR in that same command's arguments, so the
# mount path would have been empty.
if [[ "$1" == *"paddle"* ]]; then \
    TARGET_DIR=gpu-base-image-test/paddletest; \
    docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python image.py; fi
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment