# The LOCAL_HF package preset is copied from this list
# Remember to bump the LOCAL_HF code env preset version when updating this list
# Keep in sync with HuggingFaceKernelClient.VLLM_VERSION static variable
vllm==0.10.1.1
transformers[torch]==4.55.4

# embedding
sentence-transformers==5.1.0
timm==1.0.19

# fine-tuning
peft==0.17.1
trl==0.21.0
datasets==4.0.0 # also pulled by vllm

# transitive dependencies

# Use torch 2.7.1 with CUDA 12.8 in order to support Blackwell GPUs on Linux
--extra-index-url https://download.pytorch.org/whl/cu128
torch==2.7.1; platform_system == "Linux"
# Use torch 2.7.0 for other systems to match vLLM requirements (CPU)
torch==2.7.0; platform_system != "Linux"

tokenizers==0.21.4  # (sentence-transformers, transformers, vllm) version pinned by vllm, so we pin it here to make sure we use the same version with/without vllm
pillow==11.3.0
scipy>=1.10,<1.15  # (sentence-transformers) security issue with <1.10
cachetools==6.1.0  # transitive dependency left unpinned by vLLM; pinned here because cachetools 6.0.0 introduced a breaking change

# runtime requirements
hf-transfer==0.1.9
accelerate==1.10.0  # multi-gpu support with transformers
bitsandbytes==0.47.0; platform_system == "Linux"  # macOS x86_64 binaries are no longer built for releases >=0.43
protobuf==5.29.5  # required for fine-tuning mistral/llama models on ARM (also pulled by vllm)
sentencepiece==0.2.1
tiktoken==0.11.0
einops==0.8.1

# image generation requirements
diffusers==0.35.1
compel==2.1.1 # used for prompt weighting

# analytics
psutil==7.0.0
py-cpuinfo==9.0.0
