# Cargo.toml

[package]
authors = ["guillaumelagrange <[email protected]>"]
license = "MIT OR Apache-2.0"
name = "llama-burn"
version = "0.1.0"
edition = "2021"
description = "Llama 3 large language model with Burn"

[features]
default = ["pretrained"]
pretrained = ["burn/network", "dep:dirs"]
llama3 = ["dep:tiktoken-rs", "dep:rustc-hash", "dep:base64"]
tiny = ["dep:tokenizers"]
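
# The tokenizer features above gate code paths through conditional compilation.
# A minimal Rust sketch of the idea (module names are hypothetical, not this
# crate's actual layout):
#
#   #[cfg(feature = "llama3")]
#   mod tiktoken_tokenizer; // Tiktoken BPE tokenizer used by Llama 3
#
#   #[cfg(feature = "tiny")]
#   mod sentencepiece_tokenizer; // SentencePiece tokenizer used by TinyLlama / Llama 2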

# Backend selection feature flags (used by the examples)
tch-cpu = ["burn/tch"]
tch-gpu = ["burn/tch"]
cuda = ["burn/cuda-jit"]
wgpu = ["burn/wgpu"]
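
# A backend is chosen at build time by enabling exactly one of the flags above,
# e.g. (assuming an example target such as a `chat` example; adjust to taste):
#
#   cargo run --release --features llama3,tch-gpu --example chat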

# To import PyTorch weights
import = ["burn-import"]
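
# A hedged sketch of loading a PyTorch checkpoint with burn-import; the record
# type, file path, and device are placeholders, not this crate's actual code:
#
#   use burn::record::FullPrecisionSettings;
#   use burn_import::pytorch::PyTorchFileRecorder;
#
#   // `MyModelRecord` stands in for a model-specific record type.
#   let record: MyModelRecord<B> = PyTorchFileRecorder::<FullPrecisionSettings>::default()
#       .load("model.pt".into(), &device)
#       .expect("failed to load PyTorch weights");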

[dependencies]
burn = { version = "0.16.0", default-features = false, features = ["std", "fusion"] }
burn-import = { version = "0.16.0", optional = true }
itertools = { version = "0.12.1", default-features = false, features = [
    "use_alloc",
] }
dirs = { version = "5.0.1", optional = true }
serde = { version = "1.0.192", default-features = false, features = [
    "derive",
    "alloc",
] } # alloc for no_std builds, derive for #[derive(Serialize, Deserialize)]
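
# A minimal Rust sketch of what the "derive" feature enables (the struct is
# hypothetical, not one of this crate's types):
#
#   #[derive(serde::Serialize, serde::Deserialize)]
#   struct SamplerConfig { temperature: f64, top_p: f64 }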

# Tiktoken tokenizer (Llama 3)
tiktoken-rs = { version = "0.5.8", optional = true }
base64 = { version = "0.22.1", optional = true }
rustc-hash = { version = "1.1.0", optional = true }
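
# A hedged note on the grouping above: base64 and rustc-hash presumably support
# loading the tiktoken vocabulary, where base64-encoded token bytes from the
# .model file are decoded into the FxHashMap-based tables that tiktoken-rs
# expects. This is inferred from the llama3 feature's dependency list.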

# SentencePiece tokenizer (TinyLlama / Llama 2)
tokenizers = { version = "0.19.1", default-features = false, features = [
    "onig",
], optional = true }
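
# A hedged sketch of the tokenizers API in use (`tokenizer.json` is a
# placeholder path, not a file shipped with this crate):
#
#   use tokenizers::Tokenizer;
#
#   let tokenizer = Tokenizer::from_file("tokenizer.json").expect("failed to load");
#   let encoding = tokenizer.encode("Hello, Llama!", true).expect("encode failed");
#   let ids = encoding.get_ids(); // token ids as &[u32]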

rand = { version = "0.8.5", default-features = false, features = [
    "std_rng",
] } # std_rng provides StdRng without requiring rand's std feature
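
# A minimal Rust sketch of seeded sampling with StdRng (illustrative only):
#
#   use rand::{rngs::StdRng, Rng, SeedableRng};
#
#   let mut rng = StdRng::seed_from_u64(42); // reproducible RNG for sampling
#   let sample: f64 = rng.gen(); // uniform in [0, 1)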

[dev-dependencies]
burn = { version = "0.16.0", default-features = false }
clap = { version = "4.5.4", features = ["derive"] }
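
# A hedged sketch of how a downstream crate might depend on llama-burn (the
# path and feature selection are illustrative):
#
#   [dependencies]
#   llama-burn = { path = "../llama-burn", default-features = false, features = ["llama3", "wgpu"] }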