update to tech report version (#10)

* feat(run_eval): add checkpoint resume functionality and update example documentation
- update new bootcamp benchmark dataset

* refactor(data_pipeline): optimize data generation pipeline; add multiple preset configurations for data generation

* docs: update bootcamp list and add new scripts

- Update Fulllist_InternBootcamp.md with new bootcamps and categories
- Add new scripts to .gitignore:
  - examples/pipelines/filter_autogen_configs.py
  - examples/pipelines/quickgen_data_configs_from_eval_meta.py
- Update dependencies in setup.py:
  - Add scipy and scikit-learn

* refactor(internbootcamp): update bootcamp modules and improve error handling

- Update import statements in __init__.py files
- Add timestamp to target directory name in verl_data_preprocess.py
- Improve error handling and scoring logic in bootcamp_judger.py
- Remove unnecessary comments and update puzzle descriptions in multiple files
This commit is contained in:
Yongkang Chen 2025-08-28 12:39:47 +08:00 committed by GitHub
parent 125a7818e0
commit a8249acc18
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2952 changed files with 105460 additions and 17649 deletions

View file

@ -1,15 +1,12 @@
import json
import jsonlines
import os
import argparse
import os
import json
import glob
import re
# 每个puzzle的gen数量
train_sample_number = 10000
test_sample_number = 100
def checkpath(target_dir):
# 检查目录是否存在
@ -26,11 +23,12 @@ def checkpath(target_dir):
except Exception as e:
print(f"创建目录 {target_dir} 时出现错误: {e}")
def process_data_config():
def process_data_config(config_dir, train_sample_number, test_sample_number):
"""
# 遍历data_config目录下所有符合条件的json文件
data_dir = 'examples/pipelines/puzzle_configs'
"""
json_files = os.listdir(data_dir)
json_files = os.listdir(config_dir)
train_data = []
test_data = []
json_files.sort(key=lambda x: x.capitalize())
@ -69,6 +67,11 @@ def process_data_config():
test_data.append(entry_test)
save_dir = 'examples/pipelines/data_configs'
# 不区分大小写排序
train_data.sort(key=lambda x: x['bootcamp_cls_name'].lower())
test_data.sort(key=lambda x: x['bootcamp_cls_name'].lower())
# 检查dir
checkpath(save_dir)
output_file_train = f'{save_dir}/data_config_train.jsonl'
@ -83,4 +86,9 @@ def process_data_config():
if __name__ == '__main__':
process_data_config()
parser = argparse.ArgumentParser(description='Process data config files.')
parser.add_argument('--config_dir', type=str, default='examples/pipelines/puzzle_configs', help='Directory containing config files')
parser.add_argument('--train_sample_number', type=int, default=1000, help='Number of training samples per task')
parser.add_argument('--test_sample_number', type=int, default=0, help='Number of test samples per task')
args = parser.parse_args()
process_data_config(args.config_dir, args.train_sample_number, args.test_sample_number)