nohup.out
2021-10-16 12:34:20.869286: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-10-16 12:34:20.869327: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
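As the second message notes, the missing libcudart.so.11.0 is harmless on a CPU-only machine; for GPU runs the library must be on the dynamic loader's search path. A minimal sanity check, assuming TensorFlow 2.x is installed in the active environment (this command was not part of the original run):

    python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"

An empty list means TensorFlow sees no GPU and will fall back to CPU.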
usage: run_summarization.py [-h] --model_name_or_path MODEL_NAME_OR_PATH
                            [--config_name CONFIG_NAME]
                            [--tokenizer_name TOKENIZER_NAME]
                            [--cache_dir CACHE_DIR]
                            [--use_fast_tokenizer [USE_FAST_TOKENIZER]]
                            [--no_use_fast_tokenizer]
                            [--model_revision MODEL_REVISION]
                            [--use_auth_token [USE_AUTH_TOKEN]]
                            [--resize_position_embeddings RESIZE_POSITION_EMBEDDINGS]
                            [--dataset_name DATASET_NAME]
                            [--dataset_config_name DATASET_CONFIG_NAME]
                            [--text_column TEXT_COLUMN]
                            [--summary_column SUMMARY_COLUMN]
                            [--train_file TRAIN_FILE]
                            [--validation_file VALIDATION_FILE]
                            [--test_file TEST_FILE]
                            [--overwrite_cache [OVERWRITE_CACHE]]
                            [--preprocessing_num_workers PREPROCESSING_NUM_WORKERS]
                            [--max_source_length MAX_SOURCE_LENGTH]
                            [--max_target_length MAX_TARGET_LENGTH]
                            [--val_max_target_length VAL_MAX_TARGET_LENGTH]
                            [--pad_to_max_length [PAD_TO_MAX_LENGTH]]
                            [--max_train_samples MAX_TRAIN_SAMPLES]
                            [--max_eval_samples MAX_EVAL_SAMPLES]
                            [--max_predict_samples MAX_PREDICT_SAMPLES]
                            [--num_beams NUM_BEAMS]
                            [--ignore_pad_token_for_loss [IGNORE_PAD_TOKEN_FOR_LOSS]]
                            [--no_ignore_pad_token_for_loss]
                            [--source_prefix SOURCE_PREFIX] --output_dir
                            OUTPUT_DIR
                            [--overwrite_output_dir [OVERWRITE_OUTPUT_DIR]]
                            [--do_train [DO_TRAIN]] [--do_eval [DO_EVAL]]
                            [--do_predict [DO_PREDICT]]
                            [--evaluation_strategy {no,steps,epoch}]
                            [--prediction_loss_only [PREDICTION_LOSS_ONLY]]
                            [--per_device_train_batch_size PER_DEVICE_TRAIN_BATCH_SIZE]
                            [--per_device_eval_batch_size PER_DEVICE_EVAL_BATCH_SIZE]
                            [--per_gpu_train_batch_size PER_GPU_TRAIN_BATCH_SIZE]
                            [--per_gpu_eval_batch_size PER_GPU_EVAL_BATCH_SIZE]
                            [--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS]
                            [--eval_accumulation_steps EVAL_ACCUMULATION_STEPS]
                            [--learning_rate LEARNING_RATE]
                            [--weight_decay WEIGHT_DECAY]
                            [--adam_beta1 ADAM_BETA1]
                            [--adam_beta2 ADAM_BETA2]
                            [--adam_epsilon ADAM_EPSILON]
                            [--max_grad_norm MAX_GRAD_NORM]
                            [--num_train_epochs NUM_TRAIN_EPOCHS]
                            [--max_steps MAX_STEPS]
                            [--lr_scheduler_type {linear,cosine,cosine_with_restarts,polynomial,constant,constant_with_warmup}]
                            [--warmup_ratio WARMUP_RATIO]
                            [--warmup_steps WARMUP_STEPS]
                            [--log_level {debug,info,warning,error,critical,passive}]
                            [--log_level_replica {debug,info,warning,error,critical,passive}]
                            [--log_on_each_node [LOG_ON_EACH_NODE]]
                            [--no_log_on_each_node]
                            [--logging_dir LOGGING_DIR]
                            [--logging_strategy {no,steps,epoch}]
                            [--logging_first_step [LOGGING_FIRST_STEP]]
                            [--logging_steps LOGGING_STEPS]
                            [--logging_nan_inf_filter LOGGING_NAN_INF_FILTER]
                            [--save_strategy {no,steps,epoch}]
                            [--save_steps SAVE_STEPS]
                            [--save_total_limit SAVE_TOTAL_LIMIT]
                            [--save_on_each_node [SAVE_ON_EACH_NODE]]
                            [--no_cuda [NO_CUDA]] [--seed SEED]
                            [--fp16 [FP16]] [--fp16_opt_level FP16_OPT_LEVEL]
                            [--fp16_backend {auto,amp,apex}]
                            [--fp16_full_eval [FP16_FULL_EVAL]]
                            [--local_rank LOCAL_RANK]
                            [--xpu_backend {mpi,ccl}]
                            [--tpu_num_cores TPU_NUM_CORES]
                            [--tpu_metrics_debug [TPU_METRICS_DEBUG]]
                            [--debug DEBUG]
                            [--dataloader_drop_last [DATALOADER_DROP_LAST]]
                            [--eval_steps EVAL_STEPS]
                            [--dataloader_num_workers DATALOADER_NUM_WORKERS]
                            [--past_index PAST_INDEX] [--run_name RUN_NAME]
                            [--disable_tqdm DISABLE_TQDM]
                            [--remove_unused_columns [REMOVE_UNUSED_COLUMNS]]
                            [--no_remove_unused_columns]
                            [--label_names LABEL_NAMES [LABEL_NAMES ...]]
                            [--load_best_model_at_end [LOAD_BEST_MODEL_AT_END]]
                            [--metric_for_best_model METRIC_FOR_BEST_MODEL]
                            [--greater_is_better GREATER_IS_BETTER]
                            [--ignore_data_skip [IGNORE_DATA_SKIP]]
                            [--sharded_ddp SHARDED_DDP]
                            [--deepspeed DEEPSPEED]
                            [--label_smoothing_factor LABEL_SMOOTHING_FACTOR]
                            [--adafactor [ADAFACTOR]]
                            [--group_by_length [GROUP_BY_LENGTH]]
                            [--length_column_name LENGTH_COLUMN_NAME]
                            [--report_to REPORT_TO [REPORT_TO ...]]
                            [--ddp_find_unused_parameters DDP_FIND_UNUSED_PARAMETERS]
                            [--dataloader_pin_memory [DATALOADER_PIN_MEMORY]]
                            [--no_dataloader_pin_memory]
                            [--skip_memory_metrics [SKIP_MEMORY_METRICS]]
                            [--no_skip_memory_metrics]
                            [--use_legacy_prediction_loop [USE_LEGACY_PREDICTION_LOOP]]
                            [--push_to_hub [PUSH_TO_HUB]]
                            [--resume_from_checkpoint RESUME_FROM_CHECKPOINT]
                            [--hub_model_id HUB_MODEL_ID]
                            [--hub_strategy {end,every_save,checkpoint,all_checkpoints}]
                            [--hub_token HUB_TOKEN]
                            [--gradient_checkpointing [GRADIENT_CHECKPOINTING]]
                            [--push_to_hub_model_id PUSH_TO_HUB_MODEL_ID]
                            [--push_to_hub_organization PUSH_TO_HUB_ORGANIZATION]
                            [--push_to_hub_token PUSH_TO_HUB_TOKEN]
                            [--mp_parameters MP_PARAMETERS]
                            [--sortish_sampler [SORTISH_SAMPLER]]
                            [--predict_with_generate [PREDICT_WITH_GENERATE]]
                            [--generation_max_length GENERATION_MAX_LENGTH]
                            [--generation_num_beams GENERATION_NUM_BEAMS]
run_summarization.py: error: the following arguments are required: --model_name_or_path, --output_dir
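The run aborted because the two required arguments, --model_name_or_path and --output_dir, were omitted. A minimal re-run sketch built only from flags listed in the usage text above; the model name, dataset, and output path are illustrative placeholders, not values recovered from the original command:

    nohup python run_summarization.py \
        --model_name_or_path t5-small \
        --dataset_name cnn_dailymail \
        --dataset_config_name "3.0.0" \
        --source_prefix "summarize: " \
        --output_dir ./summarization_output \
        --do_train \
        --do_eval \
        --predict_with_generate &

--source_prefix is included because T5-style checkpoints expect a task prefix such as "summarize: "; for models that do not use one, it can be dropped.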