# example-local-jsonl.yaml
###############################################
##
## Trainer settings are kept minimal
## with checkpoint / model saving disabled,
## as this tutorial focuses only on datasets
##
## It only runs 10 steps, enough to prove that
## the dataset configs are valid
##
## See the full `config-example.yaml` for more
## details on the trainer/model configs
##
###############################################
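# A minimal usage sketch: a config like this is typically passed to the
# trainer entrypoint. The script name below is an assumption based on the
# repo's usual layout, not something this file specifies:
#
#   python3 lightning_trainer.py fit -c ./example-local-jsonl.yaml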
trainer:
  max_steps: 10
  # Reasonable batch size, for a more realistic it/s rate
  target_batch_size: 32
model:
  load_model: ../model/L6-D512-world-init.pth
  ctx_len: 1024
  lr_init: 3e-4
  # BPTT learning; this allows you to run the trainer against datasets
  # larger than the training context length
  bptt_learning: true
  bptt_learning_range: -1
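  # A worked example of the above (assuming bptt_learning_range: -1 means
  # "no limit", as the settings here suggest): with ctx_len 1024, a
  # 4096-token sample is processed as 4 sequential 1024-token segments,
  # carrying state across segment boundaries instead of truncating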
########################################
## Training data settings
########################################
data:
  # data_path points to the prebuilt dataset, loaded using HF `load_from_disk()`
  #
  # Use this if you have built your own dataset and saved it with `save_to_disk()`,
  # with source left as null. Otherwise, configure this as the directory in which
  # the dataset will be built and tokenized by the huggingface dataset process.
  data_path: ../datapath/sample-mem-train/
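  # A minimal sketch (using the HuggingFace `datasets` library, with the paths
  # from this config) of how such a prebuilt dataset could be produced:
  #
  #   from datasets import load_dataset
  #   # load every .jsonl file in the directory as a "train" split
  #   ds = load_dataset("json", data_dir="../dataset/dataset-config/jsonl/")
  #   # persist it so the trainer can reload it via load_from_disk()
  #   ds.save_to_disk("../datapath/sample-mem-train/")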
  # Otherwise, provide the source path, which is used as the huggingface dataset
  # path; this will be used to populate the data_path
  #
  # Use one of the following:
  # - huggingface dataset name
  # - Path to a directory containing dataset files
  # - Path to a single dataset file
  # - huggingface dataset mode (ie: text, csv, etc - use source_data_dir to configure the path)
  # - null
  #
  # If source is null, all other params except data_path are ignored
  # (illustrative values for each option are sketched below)
  source: "json"
  # Use source_data_dir if you are using source=text/json/etc;
  # this should be relative to the trainer script path
  source_data_dir: "../dataset/dataset-config/jsonl/"
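  # For reference, each line in the .jsonl files is expected to be a standalone
  # JSON object; a single "text" field is assumed here as the typical layout:
  #
  #   {"text": "Hello world, this is a sample training document"}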
  # Tokenizer to use: either the inbuilt 'neox' or 'world' tokenizer
  # If using a custom tokenizer, provide the tokenizer file path
  # ---
  tokenizer: world
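  # e.g. a custom tokenizer (hypothetical path, for illustration only):
  # tokenizer: ../tokenizer/my-tokenizer.json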
  # After loading the dataset, split out test data used for validation.
  # This process is skipped if the dataset already includes a test split,
  # or if this is set to zero
  test_split: 0.01
  test_split_shuffle: false
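  # e.g. test_split: 0.01 holds out roughly 1% of samples for validation
  # (100 records out of a hypothetical 10,000-record dataset)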