
Commit

Merge pull request #279 from bl1231/273-adapt-for-nersc
273 adapt for nersc
dsclassen authored Apr 26, 2024
2 parents e3608d6 + 686ee2b commit b6f0e44
Showing 7 changed files with 1,160 additions and 2 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -112,4 +112,5 @@ build
*.zip

test/
-test-data/
+test-data/
+*.slurm
2 changes: 1 addition & 1 deletion Dockerfile
@@ -7,7 +7,7 @@ RUN apt-get update && \
# -----------------------------------------------------------------------------
# Build stage 2 - Configure CHARMM
FROM builder AS build_charmm
-ARG CHARMM_VER=c47b2
+ARG CHARMM_VER=c48b2

# Combine the mkdir, tar extraction, and cleanup into a single RUN command
COPY ./charmm/${CHARMM_VER}.tar.gz /usr/local/src/
131 changes: 131 additions & 0 deletions NERSC.dockerfile
@@ -0,0 +1,131 @@
# -----------------------------------------------------------------------------
# Build stage 1 - Install build tools
FROM ubuntu:22.04 AS builder
RUN apt-get update && \
    apt-get install -y cmake gcc gfortran g++

# -----------------------------------------------------------------------------
# Build stage 2 - Configure CHARMM
FROM builder AS build_charmm
ARG CHARMM_VER=c48b2

# Combine the mkdir, tar extraction, and cleanup into a single RUN command
COPY ./charmm/${CHARMM_VER}.tar.gz /usr/local/src/
RUN mkdir -p /usr/local/src && \
    tar -zxvf /usr/local/src/${CHARMM_VER}.tar.gz -C /usr/local/src && \
    rm /usr/local/src/${CHARMM_VER}.tar.gz

# Configure CHARMM in the same layer as the extraction if possible
WORKDIR /usr/local/src/charmm
RUN ./configure

# Build CHARMM
RUN make -j8 -C build/cmake install

# -----------------------------------------------------------------------------
# Build stage 3 - Copy CHARMM binary
# I'm not sure if this needs to be a separate step.
FROM build_charmm AS bilbomd-worker-step1
COPY --from=build_charmm /usr/local/src/charmm/bin/charmm /usr/local/bin/

# -----------------------------------------------------------------------------
# Build stage 4 - Install NodeJS
FROM bilbomd-worker-step1 AS bilbomd-worker-step2
ARG NODE_MAJOR=20
RUN apt-get update && \
    apt-get install -y gpg curl && \
    mkdir -p /etc/apt/keyrings && \
    curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
    echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_MAJOR}.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \
    apt-get update && \
    apt-get install -y nodejs

# -----------------------------------------------------------------------------
# Build stage 5 - Install Miniconda3
FROM bilbomd-worker-step2 AS bilbomd-worker-step3

# Libraries needed by CHARMM
RUN apt-get update && \
    apt-get install -y wget bzip2 ncat gfortran libgl1-mesa-dev libarchive13 && \
    rm -rf /var/lib/apt/lists/*

# Download and install Miniforge3
RUN wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" && \
    bash Miniforge3-$(uname)-$(uname -m).sh -b -p "/miniforge3" && \
    rm Miniforge3-$(uname)-$(uname -m).sh

# Add Conda to PATH
ENV PATH="/miniforge3/bin/:${PATH}"

# Update conda
RUN conda update -y -n base -c defaults conda

# Copy in the environment.yml file
COPY environment.yml /tmp/environment.yml

# Update existing conda base env from environment.yml
RUN conda env update -f /tmp/environment.yml && \
    rm /tmp/environment.yml

# -----------------------------------------------------------------------------
# Build stage 6 - Install BioXTAS
FROM bilbomd-worker-step3 AS bilbomd-worker-step4

# Install deps
RUN apt-get update && \
    apt-get install -y zip build-essential libarchive13

# Copy the BioXTAS GitHub master zip file
# 1e2b05c74bbc595dc84e64ee962680b700b258be
WORKDIR /tmp
# RUN git clone https://github.com/jbhopkins/bioxtasraw.git
COPY bioxtas/bioxtasraw-master.zip .
RUN unzip bioxtasraw-master.zip && rm bioxtasraw-master.zip


# Install BioXTAS RAW into local Python environment
WORKDIR /tmp/bioxtasraw-master
RUN python setup.py build_ext --inplace && \
    pip install .

# -----------------------------------------------------------------------------
# Build stage 7 - IMP & worker app
FROM bilbomd-worker-step4 AS bilbomd-worker
ARG USER_ID=1001
ARG GROUP_ID=1001

RUN apt-get update && \
    apt-get install -y wget && \
    echo "deb https://integrativemodeling.org/latest/download jammy/" >> /etc/apt/sources.list && \
    wget -O /etc/apt/trusted.gpg.d/salilab.asc https://salilab.org/~ben/pubkey256.asc && \
    apt-get update && \
    apt-get install -y imp


RUN mkdir -p /app/node_modules
RUN mkdir -p /bilbomd/uploads
# VOLUME [ "/bilbomd/uploads" ]
WORKDIR /app

# Create a user and group with the provided IDs
RUN mkdir -p /home/bilbo
RUN groupadd -g $GROUP_ID bilbomd && useradd -u $USER_ID -g $GROUP_ID -d /home/bilbo -s /bin/bash bilbo

# Change ownership of directories to the user and group
RUN chown -R bilbo:bilbomd /app /bilbomd/uploads /home/bilbo

# Switch to the non-root user
USER bilbo:bilbomd

# Copy over the package*.json files
COPY --chown=bilbo:bilbomd package*.json .

# Install dependencies
RUN npm ci

# Copy the app code
COPY --chown=bilbo:bilbomd . .

# Fire that bad boy up.
# Not needed for NERSC
# CMD ["npm", "start"]
23 changes: 23 additions & 0 deletions scripts/README.md
@@ -0,0 +1,23 @@
# The `bilbomd-worker` scripts directory

Some notes on stuff in the scripts directory.

## `/topper`

This directory contains all of the CHARMM topology and parameter files. They are copied into the container when the image is built.
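
For orientation, here is a hypothetical listing of the kind of files found there (the actual contents may differ):

```bash
ls scripts/topper
# hypothetical example output -- actual filenames may differ:
# par_all36m_prot.prm  top_all36_prot.rtf  toppar_water_ions.str
```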

## `/nersc`

This directory will contain various scripts used for submitting NERSC jobs.
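
As a rough sketch, a submission script could wrap a single worker step in the migrated container image via `podman-hpc run`. Everything below (queue settings, image tag, bind mount, and CHARMM input file) is an illustrative assumption, not one of the actual scripts:

```bash
#!/bin/bash
#SBATCH --qos=regular
#SBATCH --constraint=cpu
#SBATCH --nodes=1
#SBATCH --time=01:00:00

# Hypothetical example: run one CHARMM step inside the migrated
# bilbomd-worker image. The image tag, bind mount, and input file
# are assumptions for illustration only.
podman-hpc run --rm \
  --volume "$SCRATCH/bilbomd/uploads:/bilbomd/uploads" \
  bilbomd/bilbomd-worker:0.0.2 \
  charmm -i /bilbomd/uploads/minimize.inp
```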

## Notes to build docker image on Perlmutter login node

Since all jobs on Perlmutter will use Docker containers to run `python`, `charmm`, `foxs`, and `multi_foxs` in our well-defined container environment, we need to use [podman-hpc](https://docs.nersc.gov/development/containers/podman-hpc/podman-beginner-tutorial/#podman-hpc-for-beginners-tutorial) to build our container images, and then "deploy/migrate" them to `$SCRATCH`.

You will build the images locally on a login node. If you'd like to use an image in a job (or access it on any other login node), you'll need to migrate it onto the `$SCRATCH` filesystem.

```bash
cd ~/projects/bilbomd/bilbomd-worker
podman-hpc build -t bilbomd/bilbomd-worker:0.0.2 --build-arg CHARMM_VER=c48b2 --build-arg USER_ID=$UID -f NERSC.dockerfile
podman-hpc migrate bilbomd/bilbomd-worker:0.0.2
```
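
To confirm that the build and migration succeeded, `podman-hpc images` should list the tag (output format may vary):

```bash
podman-hpc images | grep bilbomd-worker
```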