FROM nvcr.io/nvidia/pytorch:23.02-py3 AS base

WORKDIR /wdr

# Copy the requirements file alone first so the dependency layer stays
# cached across source-code changes.
COPY requirements.txt .

RUN pip install --no-cache-dir --trusted-host pypi.python.org -r requirements.txt

# Copy the rest of the project source.
COPY . .

# Pin flash-attn to the release whose extensions are built from source below.
RUN pip install triton flash-attn==0.2.8

# The flash-attn v0.2.8 wheel does not ship the fused_dense_lib, fused_softmax,
# and layer_norm C/CUDA extensions, so build them from source. Checking out the
# v0.2.8 tag assumes the upstream tag name matches the PyPI version.
RUN git clone https://github.com/HazyResearch/flash-attention.git && \
    cd flash-attention && git checkout v0.2.8 && \
    cd csrc/fused_dense_lib && python setup.py install && \
    cd ../fused_softmax && python setup.py install && \
    cd ../layer_norm && python setup.py install
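
# Optional sanity check that the extensions built above import cleanly.
# The module names (fused_dense_lib, dropout_layer_norm) are assumptions
# based on the extension names in flash-attention's csrc setup.py files.
RUN python -c "import flash_attn, fused_dense_lib, dropout_layer_norm"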

# Build the fftconv CUDA extension shipped with this repo.
RUN cd /wdr/csrc/fftconv && python setup.py install
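
# Optional import check for the local extension; the fftconv module name is
# an assumption based on the extension name in csrc/fftconv/setup.py.
RUN python -c "import fftconv"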

