diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..140e743
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,61 @@
+# This Dockerfile uses the NVIDIA CUDA container but will work without CUDA too.
+
+FROM nvidia/cuda:12.1.0-devel-ubuntu20.04 AS build
+
+# Build-time knobs, overridable with --build-arg (CUDAARCHS is read by CMake).
+ARG CUDAARCHS='60'
+ARG TRIMESH_VERSION='2020.03.04'
+ARG CMAKE_VERSION='3.20.4'
+
+WORKDIR /installation/
+
+# Combine update + install in one layer and clean the apt lists so a stale
+# package index is never cached and the layer stays small.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        build-essential \
+        git \
+        libglm-dev \
+        libglu1-mesa-dev \
+        libgomp1 \
+        libxi-dev \
+        mesa-common-dev \
+        ninja-build \
+        wget \
+    && rm -rf /var/lib/apt/lists/*
+
+# Ubuntu 20.04 ships an older CMake, so install the pinned release into /cmake.
+RUN wget -q -O ./cmake-install.sh https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-x86_64.sh \
+    && chmod u+x ./cmake-install.sh \
+    && mkdir /cmake \
+    && ./cmake-install.sh --skip-license --prefix=/cmake \
+    && rm ./cmake-install.sh
+# CMake was installed with --prefix=/cmake, so /cmake/bin (not $HOME/cmake/bin)
+# is the directory that has to be on PATH. A plain `RUN PATH=...` would not
+# persist between layers; ENV is the only form that does.
+ENV PATH="${PATH}:/cmake/bin"
+
+# Build the trimesh2 dependency at a pinned tag.
+RUN git clone --single-branch --depth 1 -b ${TRIMESH_VERSION} https://github.com/Forceflow/trimesh2.git trimesh2
+WORKDIR /installation/trimesh2
+RUN make all -j $(nproc)
+
+# Copy the sources last so the dependency layers above stay cached across code changes.
+WORKDIR /installation/cuda_voxelizer
+COPY . .
+
+RUN cmake -GNinja \
+    -DTrimesh2_INCLUDE_DIR="/installation/trimesh2/include" \
+    -DTrimesh2_LINK_DIR="/installation/trimesh2/lib.Linux64" \
+    -S . -B ./build \
+    && cmake --build ./build --parallel $(nproc)
+
+FROM nvidia/cuda:12.1.0-devel-ubuntu20.04
+
+WORKDIR /app/
+
+COPY --from=build /installation/cuda_voxelizer/build/cuda_voxelizer ./
+COPY --from=build /installation/cuda_voxelizer/test_models/bunny.OBJ ./
+
+# Smoke test; without a GPU at build time this presumably exercises the CPU path.
+RUN ./cuda_voxelizer -o binvox -thrust -f ./bunny.OBJ -s 256
+
+ENTRYPOINT ["/app/cuda_voxelizer"]
diff --git a/README.md b/README.md
index 1c2bd06..3590c65 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,22 @@ The project has the following build dependencies:
 * [GLM](http://glm.g-truc.net/0.9.8/index.html) for vector math. Any recent version will do.
 * [OpenMP](https://www.openmp.org/) for multi-threading.
+### Docker Compose
+1. Install [Docker](https://docs.docker.com/get-docker/) & [Docker Compose](https://docs.docker.com/compose/)
+2. Install the [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker)
+
+Then you can use the app like this:
+```bash
+docker compose run cuda_voxelizer cuda_voxelizer -o binvox -thrust -f ./test_models/bunny.OBJ -s 256
+
+```
+This should create a binvox file in the test_models folder.
+If you have a CUDA-enabled GPU it will be used (you may have to lower the CUDA arch variable in the Dockerfile).
+You can always mount more folders in the docker-compose.yml.
+
+This way, you don't have to install all the dependencies yourself.
+For the following build methods, you have to install the dependencies described in the Dockerfile yourself.
+
 ### Build using CMake (Windows, Linux)
 
 After installing dependencies, do `mkdir build` and `cd build`, followed by:
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..9cd268e
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3'
+services:
+  cuda_voxelizer:
+    image: forceflow/cuda_voxelizer
+    build: .
+    volumes:
+      - ./test_models/:/app/test_models/