rm http_proxy #2586

Open. Wants to merge 11 commits into base: develop.
models/PaddleNLP/CI/daily_case.sh (2 changes: 0 additions & 2 deletions)

@@ -979,8 +979,6 @@ python -m paddle.distributed.launch run_cmrc2018.py \
--max_steps 1 \
--output_dir ./tmp >${log_path}/clue-mrc >>${log_path}/clue-mrc 2>&1
print_info $? clue-mrc
-export http_proxy=${http_proxy};
-export https_proxy=${http_proxy}
}

taskflow (){
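The two deleted lines re-exported http_proxy and https_proxy at the end of the clue-mrc case. If a case has to run with the proxy disabled while the rest of daily_case.sh keeps its proxy settings, one way to avoid trailing re-exports is to scope the override to a subshell. A minimal sketch, not taken from daily_case.sh; the helper name and the example command are illustrative:

    # Hypothetical helper: run a single command with the proxy disabled,
    # without changing the proxy settings of the calling shell.
    run_without_proxy () {
        (
            unset http_proxy https_proxy   # affects only this subshell
            "$@"
        )
    }

    # Illustrative usage inside a case function:
    # run_without_proxy python -m paddle.distributed.launch run_cmrc2018.py --max_steps 1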
@@ -3,11 +3,11 @@ case:
train:
-
name: prepare
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: wget https://paddlenlp.bj.bcebos.com/datasets/tax.tar.gz && tar -zxvf tax.tar.gz && mv tax data && rm -f tax.tar.gz
-
name: label_studio
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python ../label_studio.py
params:
- --label_studio_file ./data/label_studio.json
@@ -16,7 +16,7 @@ case:
- --task_type ext
-
name: fintune
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python -u -m paddle.distributed.launch finetune.py
params:
- --device gpu
@@ -49,7 +49,7 @@ case:
eval:
-
name: evaluate
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python evaluate.py
params:
- --device 'gpu'
@@ -73,11 +73,11 @@ case:
train:
-
name: prepare
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: wget https://paddlenlp.bj.bcebos.com/datasets/tax.tar.gz && tar -zxvf tax.tar.gz && ren tax data
-
name: label_studio
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python ../label_studio.py
params:
- --label_studio_file ./data/label_studio.json
@@ -86,7 +86,7 @@ case:
- --task_type ext
-
name: fintune
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python -u -m paddle.distributed.launch finetune.py
params:
- --device gpu
@@ -119,7 +119,7 @@ case:
eval:
-
name: evaluate
-path: applications/information_extraction/document
+path: legacy/applications/information_extraction/document
cmd: python evaluate.py
params:
- --device 'gpu'
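Every path: entry in this case file moves from applications/information_extraction/document to legacy/applications/information_extraction/document, matching the relocation of these applications under a legacy/ directory in PaddleNLP. When the same prefix change has to be applied across several case YAMLs, a bulk rewrite along these lines is one option; the *.yaml glob and the working directory are assumptions about how the CI configs are stored, so verify them before running:

    # Rewrite 'path: applications/...' to 'path: legacy/applications/...' in-place,
    # keeping a .bak copy of each file. Glob and directory are illustrative.
    for f in ./*.yaml; do
        sed -i.bak -E 's#(path:[[:space:]]*)applications/#\1legacy/applications/#' "$f"
    done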
@@ -3,11 +3,11 @@ case:
train:
-
name: prepare
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: wget https://bj.bcebos.com/paddlenlp/datasets/military.tar.gz && tar -xvf military.tar.gz && mv military data
-
name: label_studio
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python ../label_studio.py
params:
- --label_studio_file ./data/label_studio.json
@@ -17,7 +17,7 @@ case:
- --task_type ext
-
name: fintune
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python -u -m paddle.distributed.launch finetune.py
params:
- --device gpu
@@ -51,7 +51,7 @@ case:
evaluation: "="
-
name: data_distill
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python data_distill.py
params:
- --data_path ../data
@@ -66,7 +66,7 @@ case:
evaluation: "="
-
name: distill_train_student
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python train.py
params:
- --task_type relation_extraction
@@ -80,7 +80,7 @@ case:
eval:
-
name: evaluate
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python evaluate.py
params:
- --model_path ./checkpoint/model_best
@@ -94,7 +94,7 @@ case:
evaluation: "="
-
name: distill_evaluate_teacher
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python evaluate_teacher.py
params:
- --task_type relation_extraction
@@ -103,7 +103,7 @@ case:
- --model_path ../checkpoint/model_best
-
name: distill_evaluate_student
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python evaluate.py
params:
- --model_path ./checkpoint/model_39
@@ -119,11 +119,11 @@ case:
train:
-
name: prepare
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: wget https://bj.bcebos.com/paddlenlp/datasets/military.tar.gz && tar -xvf military.tar.gz && ren military data
-
name: label_studio
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python ../label_studio.py
params:
- --label_studio_file ./data/label_studio.json
@@ -133,7 +133,7 @@ case:
- --task_type ext
-
name: fintune
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python -u -m paddle.distributed.launch finetune.py
params:
- --device gpu
@@ -167,7 +167,7 @@ case:
evaluation: "="
-
name: data_distill
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python data_distill.py
params:
- --data_path ../data
@@ -182,7 +182,7 @@ case:
evaluation: "="
-
name: distill_train_student
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python train.py
params:
- --task_type relation_extraction
@@ -196,7 +196,7 @@ case:
eval:
-
name: evaluate
-path: applications/information_extraction/text
+path: legacy/applications/information_extraction/text
cmd: python evaluate.py
params:
- --model_path ./checkpoint/model_best
@@ -210,7 +210,7 @@ case:
evaluation: "="
-
name: distill_evaluate_teacher
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python evaluate_teacher.py
params:
- --task_type relation_extraction
@@ -219,7 +219,7 @@ case:
- --model_path ../checkpoint/model_best
-
name: distill_evaluate_student
-path: applications/information_extraction/text/data_distill
+path: legacy/applications/information_extraction/text/data_distill
cmd: python evaluate.py
params:
- --model_path ./checkpoint/model_39
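The text-extraction cases get the same legacy/ prefix on every path: entry, including the data_distill sub-directories. A quick way to catch any stale path after a migration like this is to check each referenced directory against the PaddleNLP checkout the CI uses. A rough sketch; the YAML filename and the PADDLENLP_ROOT variable are placeholders, not names from this PR:

    # List every 'path:' value in a case YAML and report entries that do not
    # exist under the PaddleNLP checkout. Filename and root are illustrative.
    PADDLENLP_ROOT=${PADDLENLP_ROOT:-$HOME/PaddleNLP}
    grep -Eo 'path:[[:space:]]*[^[:space:]]+' uie_text_case.yaml | awk '{print $2}' | sort -u |
    while read -r p; do
        [ -d "$PADDLENLP_ROOT/$p" ] || echo "missing: $p"
    done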
@@ -3,11 +3,11 @@ case:
train:
-
name: prepare
-path: applications/question_answering/unsupervised_qa
-cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/dev.json
+path: legacy/applications/question_answering/unsupervised_qa
+cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/dev.json
-
name: run_qa_pairs_generation
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_qa_pairs_generation.py
params:
- --source_file_path data/source_file.txt
@@ -29,23 +29,23 @@ case:
- --do_debug
-
name: run_data_preprocess_train
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_data_preprocess.py
params:
- --source_file_path data/train.json
- --target_dir data/finetune
- --do_answer_prompt
-
name: run_data_preprocess_dev
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_data_preprocess.py
params:
- --source_file_path data/dev.json
- --target_dir data/finetune
- --do_answer_prompt
-
name: answer_extraction_and_roundtrip_filtration_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/answer_extraction finetune/answer_extraction_and_roundtrip_filtration/finetune.py
params:
- --train_path data/finetune/answer_extraction/train.json
@@ -62,7 +62,7 @@ case:
- --device gpu
-
name: question_generation_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/question_generation finetune/question_generation/train.py
params:
- --train_file=data/finetune/question_generation/train.json
@@ -89,7 +89,7 @@ case:
- --device=gpu
-
name: filtration_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/filtration finetune/answer_extraction_and_roundtrip_filtration/finetune.py
params:
- --train_path=data/finetune/filtration/train.json
@@ -107,7 +107,7 @@ case:
eval:
-
name: answer_extraction
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py
params:
- --model_path=log/answer_extraction/checkpoints/model_best
@@ -117,7 +117,7 @@ case:
- --limit=0.01
-
name: filtration
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py
params:
- --model_path=log/filtration/checkpoints/model_best
@@ -133,11 +133,11 @@ case:
train:
-
name: prepare
-path: applications/question_answering/unsupervised_qa
-cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/dev.json
+path: legacy/applications/question_answering/unsupervised_qa
+cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/dev.json
-
name: run_qa_pairs_generation
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_qa_pairs_generation.py
params:
- --source_file_path data/source_file.txt
@@ -159,23 +159,23 @@ case:
- --do_debug
-
name: run_data_preprocess_train
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_data_preprocess.py
params:
- --source_file_path data/train.json
- --target_dir data/finetune
- --do_answer_prompt
-
name: run_data_preprocess_dev
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u run_data_preprocess.py
params:
- --source_file_path data/dev.json
- --target_dir data/finetune
- --do_answer_prompt
-
name: answer_extraction_and_roundtrip_filtration_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/answer_extraction finetune/answer_extraction_and_roundtrip_filtration/finetune.py
params:
- --train_path data/finetune/answer_extraction/train.json
@@ -192,7 +192,7 @@ case:
- --device gpu
-
name: question_generation_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/question_generation finetune/question_generation/train.py
params:
- --train_file=data/finetune/question_generation/train.json
@@ -219,7 +219,7 @@ case:
- --device=gpu
-
name: filtration_finetune
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python -u -m paddle.distributed.launch --log_dir log/filtration finetune/answer_extraction_and_roundtrip_filtration/finetune.py
params:
- --train_path=data/finetune/filtration/train.json
@@ -237,7 +237,7 @@ case:
eval:
-
name: answer_extraction_evaluate
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py
params:
- --model_path=log/answer_extraction/checkpoints/model_best
@@ -247,7 +247,7 @@ case:
- --limit=0.01
-
name: evaluate_filtration
-path: applications/question_answering/unsupervised_qa
+path: legacy/applications/question_answering/unsupervised_qa
cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py
params:
- --model_path=log/filtration/checkpoints/model_best
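Besides the path: entries, this file also rewrites the BOS download URLs with a legacy/ prefix (paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/...). It is worth confirming that the objects are actually served at the new location before the CI's wget steps depend on them; a simple reachability probe, offered only as an illustrative check, could look like this:

    # Probe the rewritten dataset URLs; report any that do not answer with a
    # successful HTTP status. URLs mirror the ones used in this case file.
    status=0
    for u in \
        https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/source_file.txt \
        https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/train.json \
        https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/dev.json
    do
        curl -sfIL "$u" >/dev/null || { echo "unreachable: $u"; status=1; }
    done
    exit $status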