chenpangpang / gpu-base-image-build · Commits

Commit e8414297
Authored Oct 14, 2024 by chenpangpang
Parent 33cf7c0a

Add paddle-type Dockerfile and test scripts
Showing 5 changed files with 57 additions and 29 deletions (+57 -29):
build_space/Dockerfile.jupyterlab_ubuntu   +48 -6
build_space/build_ubuntu.sh                +3  -3
script/1_base_test.sh                      +2  -16
script/2_text_generate_test.sh             +2  -2
script/3_image_generate_test.sh            +2  -2
build_space/Dockerfile.jupyterlab_ubuntu

@@ -4,12 +4,25 @@ FROM $BASE_IMAGE
 ARG BASE_IMAGE
 ARG DEBIAN_FRONTEND=noninteractive
 LABEL module="jupyter"
+# ----- torch args -----
 # whether the build is based on a torch image
 ARG BASE_IMAGE_IS_TORCH=0
-ARG TORCH_VERSION="2.0.1"
-ARG TORCHVISION_VERSION="0.15.2"
-ARG TORCHAUDIO_VERSION="2.0.2"
-ARG CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh"
+ARG TORCH_VERSION
+ARG TORCHVISION_VERSION
+ARG TORCHAUDIO_VERSION
+# ----- tensorflow args -----
+ARG TENSORFLOW_VERSION
+# ----- paddlepaddle args -----
+ARG PADDLEPADDLE_VERSION
+ARG PADDLENLP_VERSION
+ARG CUDA_VERSION
+ARG PADDLE_URL
+# ----- conda and python args ----
+ARG CONDA_URL
+#ARG CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh"
 ARG SOURCES="-i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn"
 ENV TZ=Asia/Shanghai
 ENV SHELL=/bin/bash \
...
@@ -37,8 +50,9 @@ RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak \
 && apt-get install --no-install-recommends -y vim openssl libssl-dev net-tools locales wget git git-lfs sudo openssh-client openssh-server \
 && locale-gen en_US.UTF-8
+# ----- conda and python install -----
-RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
+RUN if [ -n "$CONDA_URL" ];then \
 mkdir -p /tmp/conda-extension \
 && cd /tmp/conda-extension \
 && wget $CONDA_URL \
...
@@ -47,6 +61,15 @@ RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
 && cd .. \
 && rm -rf /tmp/conda-extension; fi
+#RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
+# mkdir -p /tmp/conda-extension \
+# && cd /tmp/conda-extension \
+# && wget $CONDA_URL \
+# && bash $(echo $CONDA_URL | awk -F "/" '{print $NF}') -b -p /opt/conda \
+# && echo "export PATH=\$PATH:/opt/conda/bin" >> /etc/profile.d/sothisai.sh \
+# && cd .. \
+# && rm -rf /tmp/conda-extension; fi
 ENV PATH=$PATH:/opt/conda/bin
 RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES} \
...
@@ -54,11 +77,30 @@ RUN pip3 install --upgrade pip ${SOURCES} || pip install --upgrade pip ${SOURCES}
 && mv /etc/apt/sources.list.bak /etc/apt/sources.list \
 && mv /etc/apt/sources.list.d.bak /etc/apt/sources.list.d
-RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ];then \
+RUN if [ $BASE_IMAGE_IS_TORCH -eq 0 ] && [ -n "$TORCH_VERSION" ];then \
 pip3 install torch==$TORCH_VERSION torchvision==$TORCHVISION_VERSION torchaudio==$TORCHAUDIO_VERSION \
 --index-url https://download.pytorch.org/whl/cu$(echo "$BASE_IMAGE" | awk -F'[:-]' '{n=split($2,a,"."); print a[1] a[2]}') \
 && rm -r /root/.cache/pip; fi
+RUN if [ -n "$TORCH_VERSION" ];then \
+pip install --no-cache-dir transformers accelerate diffusers; fi
+RUN if [ -n "$TENSORFLOW_VERSION" ]; then \
+tf_version_minor=$(echo $TENSORFLOW_VERSION | cut -d'.' -f1-2 ) && \
+pip install --no-cache-dir tensorflow[and-cuda]==$TENSORFLOW_VERSION \
+tensorflow-text==$tf_version_minor.* tf-models-official==$tf_version_minor.* tensorrt; fi
+# ----- paddlepaddle install -----
+RUN if [ -n "$PADDLEPADDLE_VERSION" ] && [ -n "$PADDLE_URL" ]; then \
+pip install paddlepaddle-gpu==$PADDLEPADDLE_VERSION -f $PADDLE_URL \
+&& rm -r /root/.cache/pip; \
+fi
+RUN if [ -n "$PADDLENLP_VERSION" ] && [ -z "$PADDLE_URL" ]; then \
+pip install paddlenlp==$PADDLENLP_VERSION -i https://pypi.tuna.tsinghua.edu.cn/simple \
+&& rm -r /root/.cache/pip; \
+fi
 COPY ./python-requirements.txt /tmp/
 RUN pip install --no-cache-dir -r /tmp/python-requirements.txt
...
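Note on how the new arguments are consumed: each install step is gated on its ARG being non-empty, so a paddle variant can in principle be built by supplying only the paddle-related build args. The following is a minimal sketch, not taken from this repository: the base image tag, version numbers, wheel index URL, and output tag are placeholder assumptions, and it presumes python-requirements.txt is present in the build context.

    # Sketch only; all values below are hypothetical, not defined by this commit.
    docker build -f build_space/Dockerfile.jupyterlab_ubuntu \
        --build-arg BASE_IMAGE=nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 \
        --build-arg CONDA_URL="https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py310_24.7.1-0-Linux-x86_64.sh" \
        --build-arg PADDLEPADDLE_VERSION=2.6.1 \
        --build-arg PADDLE_URL="https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html" \
        -t jupyterlab:paddle-test build_space/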
build_space/build_ubuntu.sh

 #!/bin/bash
 # framework
 framework=$1
 # output image tag
...
@@ -38,10 +37,11 @@ cp -f ./Dockerfile.${framework}_ubuntu ./tmp/${tmp_dockerfile}
 echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/"
 docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/
 #echo "docker build -f ./tmp/${tmp_dockerfile} -t ${image_tag} $build_args ./tmp/"
+build_status=$?
 rm -r ./tmp
-if [[ $? -eq 0 ]]; then
+if [[ build_status -eq 0 ]]; then
 echo -e "\033[32mBuild Image Successfully !\033[0m"
 else
 echo -e "\033[32mBuild Image fail!\033[0m"
 exit 1
 fi
\ No newline at end of file
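As I read the build_status change, the point is that the old script tested $? only after rm -r ./tmp, so the reported result was the exit status of rm rather than of docker build; capturing the status right after the build preserves it across cleanup. A minimal sketch of that pattern, with a placeholder build command:

    docker build -f ./tmp/Dockerfile -t example:latest ./tmp/   # placeholder command
    build_status=$?        # capture immediately; the next command would overwrite $?
    rm -r ./tmp            # cleanup no longer determines the reported outcome
    if [[ $build_status -eq 0 ]]; then
        echo "Build Image Successfully !"
    else
        echo "Build Image fail!"
        exit 1
    fi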
script/1_base_test.sh

 #!/bin/bash
+TARGET_DIR=gpu-base-image-test/paddletest
-docker run --rm --platform=linux/amd64 --gpus all $1 python -c \
-"import os; \
-os.system(\"cat /etc/issue\"); \
-import sys; \
-print(\"python version:\", sys.version); \
-import torch; \
-print(\"torch version:\", torch.__version__); \
-print(\"torch cuda available:\", torch.cuda.is_available()); \
-print(\"torch cuda version:\", torch.version.cuda); \
-print(\"torch cudnn version:\",torch.backends.cudnn.version()); \
-import torchvision; \
-print(\"torchvision version:\", torchvision.__version__); \
-import torchaudio; \
-print(\"torchaudio version:\", torchaudio.__version__);"
+docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python base_test.py
\ No newline at end of file
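The new script delegates the checks to base_test.py, which lives in the mounted gpu-base-image-test/paddletest directory and is not part of this diff. As a stand-in sanity check in the same spirit as the removed inline torch probe, one could run PaddlePaddle's built-in self-test; the image tag here is hypothetical:

    # Stand-in check only; the actual contents of base_test.py are not shown in this commit.
    docker run --rm --gpus all jupyterlab:paddle-test \
        python -c "import paddle; print(paddle.__version__); paddle.utils.run_check()"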
script/2_text_generate_test.sh

 #!/bin/bash
-TARGET_DIR=gpu-base-image-test
+TARGET_DIR=gpu-base-image-test/paddletest
-docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/gpt2 $1 python infer.py
+docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python text.py
\ No newline at end of file
script/3_image_generate_test.sh

 #!/bin/bash
-TARGET_DIR=gpu-base-image-test
+TARGET_DIR=gpu-base-image-test/paddletest
-docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace/stable-diffusion-v1-4 $1 python infer.py
+docker run --rm --platform=linux/amd64 --gpus all -v ./$TARGET_DIR:/workspace --workdir /workspace $1 python image.py
\ No newline at end of file
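Taken together, the three scripts now expect their test payloads under ./gpu-base-image-test/paddletest relative to the caller's working directory, since that path is bind-mounted to /workspace. A usage sketch, with a placeholder image tag:

    # base_test.py, text.py, and image.py must already exist in ./gpu-base-image-test/paddletest.
    image=jupyterlab:paddle-test
    bash script/1_base_test.sh "$image"
    bash script/2_text_generate_test.sh "$image"
    bash script/3_image_generate_test.sh "$image"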