From 9f1b0c87c3920d67b9b83c0fb2ccf903c219540b Mon Sep 17 00:00:00 2001
From: Yilun Huang
Date: Fri, 6 Dec 2024 16:59:30 +0800
Subject: [PATCH 1/6] Update to v1.0.1

---
 data_juicer/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data_juicer/__init__.py b/data_juicer/__init__.py
index 7c02d6d0e..91ce93bae 100644
--- a/data_juicer/__init__.py
+++ b/data_juicer/__init__.py
@@ -1,4 +1,4 @@
-__version__ = '1.0.0'
+__version__ = '1.0.1'
 
 import os
 import subprocess

From 4b8b436c20b8dc0443bd3c4c3e1c10b17327c04f Mon Sep 17 00:00:00 2001
From: Yilun Huang
Date: Mon, 9 Dec 2024 15:33:32 +0800
Subject: [PATCH 2/6] Patch for Perf Bench (#506)

* refine perf bench workflow
* fix wrong var in sphinx docs
* set python version matrix to include only 3.9 and 3.10
* hide unnecessary logs
* update mem_required for image tagging models
* enable unittests for 3 OPs after fixing their dependencies
* add two dependencies required by librosa

---
 .github/workflows/deploy_sphinx_docs.yml                 | 5 ++++-
 .github/workflows/perf-bench.yml                         | 6 +++---
 configs/config_all.yaml                                  | 3 +++
 environments/minimal_requires.txt                        | 4 ++++
 tests/benchmark_performance/configs/video.yaml           | 1 +
 tests/benchmark_performance/run.sh                       | 2 +-
 tests/ops/filter/test_audio_duration_filter.py           | 5 +----
 tests/ops/filter/test_audio_nmf_snr_filter.py            | 5 +----
 tests/ops/mapper/test_video_tagging_from_audio_mapper.py | 5 +----
 9 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/.github/workflows/deploy_sphinx_docs.yml b/.github/workflows/deploy_sphinx_docs.yml
index 9c8ae89a0..5cf0205ae 100644
--- a/.github/workflows/deploy_sphinx_docs.yml
+++ b/.github/workflows/deploy_sphinx_docs.yml
@@ -12,13 +12,16 @@ on:
 jobs:
   pages:
     runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        python-version: [ "3.9", "3.10" ]
     steps:
       - name: Checkout
         uses: actions/checkout@v4
       - name: Setup Python ${{ matrix.python-version }}
         uses: actions/setup-python@master
         with:
-          python_version: ${{ matrix.python-version }}
+          python-version: ${{ matrix.python-version }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip

diff --git a/.github/workflows/perf-bench.yml b/.github/workflows/perf-bench.yml
index 2a4d6658b..4094070db 100644
--- a/.github/workflows/perf-bench.yml
+++ b/.github/workflows/perf-bench.yml
@@ -16,8 +16,8 @@ env:
   ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
 
 jobs:
-  unittest-single:
-    runs-on: [self-hosted, linux]
+  perf_bench:
+    runs-on: [GPU, unittest]
     environment: Testing
     steps:
       - uses: actions/checkout@v3
@@ -42,7 +42,7 @@ jobs:
       - name: Run performance benchmark standalone
         working-directory: dj-${{ github.run_id }}/.github/workflows/docker
         run: |
-          docker compose exec ray-head python tests/benchmark_performance/run.sh ${{ secrets.INTERNAL_WANDB_URL }} ${{ secrets.INTERNAL_WANDB_API_KEY }}
+          docker compose exec ray-head bash tests/benchmark_performance/run.sh ${{ secrets.INTERNAL_WANDB_URL }} ${{ secrets.INTERNAL_WANDB_API_KEY }}
 
       - name: Remove docker compose
         working-directory: dj-${{ github.run_id }}/.github/workflows/docker
diff --git a/configs/config_all.yaml b/configs/config_all.yaml
index 4019b66a5..756cadd81 100644
--- a/configs/config_all.yaml
+++ b/configs/config_all.yaml
@@ -212,6 +212,7 @@ process:
       radius: 2  # radius of blur kernel
   - image_tagging_mapper:  # Mapper to generate image tags.
       tag_field_name: '__dj__image_tags__'  # the field name to store the tags. It's "__dj__image_tags__" in default.
+      mem_required: '9GB'
   - nlpaug_en_mapper:  # simply augment texts in English based on the nlpaug library
       sequential: false  # whether combine all augmentation methods to a sequence. If it's True, a sample will be augmented by all opened augmentation methods sequentially. If it's False, each opened augmentation method would generate its augmented samples independently.
       aug_num: 1  # number of augmented samples to be generated. If `sequential` is True, there will be total aug_num augmented samples generated. If it's False, there will be (aug_num * #opened_aug_method) augmented samples generated.
@@ -382,6 +383,7 @@ process:
       frame_sampling_method: 'all_keyframes'  # sampling method of extracting frame images from the videos. Should be one of ["all_keyframes", "uniform"]. The former one extracts all key frames and the latter one extract specified number of frames uniformly from the video. Default: "all_keyframes".
      frame_num: 3  # the number of frames to be extracted uniformly from the video. Only works when frame_sampling_method is "uniform". If it's 1, only the middle frame will be extracted. If it's 2, only the first and the last frames will be extracted. If it's larger than 2, in addition to the first and the last frames, other frames will be extracted uniformly within the video duration.
       tag_field_name: '__dj__video_frame_tags__'  # the field name to store the tags. It's "__dj__video_frame_tags__" in default.
+      mem_required: '9GB'
   - whitespace_normalization_mapper:  # normalize different kinds of whitespaces to English whitespace.
 
 # Filter ops
@@ -614,6 +616,7 @@ process:
       frame_num: 3  # the number of frames to be extracted uniformly from the video. Only works when frame_sampling_method is "uniform". If it's 1, only the middle frame will be extracted. If it's 2, only the first and the last frames will be extracted. If it's larger than 2, in addition to the first and the last frames, other frames will be extracted uniformly within the video duration.
       tag_field_name: '__dj__video_frame_tags__'  # the field name to store the tags. It's "__dj__video_frame_tags__" in default.
       any_or_all: any  # keep this sample when any/all videos meet the filter condition
+      mem_required: '9GB'
   - words_num_filter:  # filter text with number of words out of specific range
       lang: en  # sample in which language
       tokenization: false  # whether to use model to tokenize documents
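All three `mem_required: '9GB'` hints above belong to OPs that load the large image/video tagging model. Roughly speaking, Data-Juicer uses such hints to bound how many worker processes an OP may run on the current machine; the sketch below only illustrates that idea and is not the library's actual scheduler code (the machine figures are invented):

```python
# Illustrative only: how a mem_required-style hint can cap an OP's
# parallelism so that all workers' models fit into memory at once.
import math


def max_procs_for_op(mem_required_gb: float, available_gb: float,
                     cpu_count: int) -> int:
    """Bound the worker count by memory first, then by available CPUs."""
    if mem_required_gb <= 0:
        return cpu_count  # no hint given: fall back to the CPU count
    fit_by_memory = math.floor(available_gb / mem_required_gb)
    return max(1, min(cpu_count, fit_by_memory))


# A 9GB tagging model on a 32GB machine with 16 cores -> at most 3 workers.
print(max_procs_for_op(9, 32, 16))  # 3
```

Too small a hint lets the runtime schedule more concurrent model instances than memory can actually hold, which is what raising the value to 9GB guards against.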
diff --git a/environments/minimal_requires.txt b/environments/minimal_requires.txt
index df76b1358..414458edc 100644
--- a/environments/minimal_requires.txt
+++ b/environments/minimal_requires.txt
@@ -4,7 +4,11 @@ pandas
 numpy
 av==13.1.0
 soundfile
+# need to install two dependencies by librosa to avoid lazy_loader error
 librosa>=0.10
+samplerate
+resampy
+# need to install two dependencies by librosa to avoid lazy_loader error
 loguru
 tabulate
 tqdm

diff --git a/tests/benchmark_performance/configs/video.yaml b/tests/benchmark_performance/configs/video.yaml
index a7df19639..28fb3b98a 100644
--- a/tests/benchmark_performance/configs/video.yaml
+++ b/tests/benchmark_performance/configs/video.yaml
@@ -14,6 +14,7 @@ process:
       score_threshold: 1.0
       mem_required: '1GB'
   - video_tagging_from_frames_mapper:
+      mem_required: '9GB'
   - video_duration_filter:
   - video_split_by_key_frame_mapper:
       keep_original_sample: false

diff --git a/tests/benchmark_performance/run.sh b/tests/benchmark_performance/run.sh
index 4104967b8..1ec839d57 100644
--- a/tests/benchmark_performance/run.sh
+++ b/tests/benchmark_performance/run.sh
@@ -11,7 +11,7 @@ MODALITIES=("text" "image" "video" "audio")
 cd $BENCH_PATH
 
 # 1. prepare dataset
-wget http://dail-wlcb.oss-cn-wulanchabu.aliyuncs.com/data_juicer/perf_bench_data/perf_bench_data.tar.gz && tar zxvf perf_bench_data.tar.gz
+wget -q http://dail-wlcb.oss-cn-wulanchabu.aliyuncs.com/data_juicer/perf_bench_data/perf_bench_data.tar.gz && tar zxf perf_bench_data.tar.gz
 
 # 2. run the benchmark
 for modality in ${MODALITIES[@]}

diff --git a/tests/ops/filter/test_audio_duration_filter.py b/tests/ops/filter/test_audio_duration_filter.py
index 5b367f0ec..64a5c05c8 100644
--- a/tests/ops/filter/test_audio_duration_filter.py
+++ b/tests/ops/filter/test_audio_duration_filter.py
@@ -5,11 +5,8 @@
 from data_juicer.ops.filter.audio_duration_filter import AudioDurationFilter
 from data_juicer.utils.constant import Fields
-from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase, TEST_TAG, SKIPPED_TESTS
+from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase, TEST_TAG
 
-# skip due to conflicts when run lazy_load in multiprocessing in librosa
-# tests passed locally.
-@SKIPPED_TESTS.register_module()
 class AudioDurationFilterTest(DataJuicerTestCaseBase):
 
     data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',

diff --git a/tests/ops/filter/test_audio_nmf_snr_filter.py b/tests/ops/filter/test_audio_nmf_snr_filter.py
index 384435828..d0dec38b8 100644
--- a/tests/ops/filter/test_audio_nmf_snr_filter.py
+++ b/tests/ops/filter/test_audio_nmf_snr_filter.py
@@ -5,11 +5,8 @@
 from data_juicer.ops.filter.audio_nmf_snr_filter import AudioNMFSNRFilter
 from data_juicer.utils.constant import Fields
-from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase, SKIPPED_TESTS
+from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase
 
-# skip due to conflicts when run lazy_load in multiprocessing in librosa
-# tests passed locally.
-@SKIPPED_TESTS.register_module()
 class AudioNMFSNRFilterTest(DataJuicerTestCaseBase):
 
     data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',

diff --git a/tests/ops/mapper/test_video_tagging_from_audio_mapper.py b/tests/ops/mapper/test_video_tagging_from_audio_mapper.py
index 5cace0b7a..8bbf05933 100644
--- a/tests/ops/mapper/test_video_tagging_from_audio_mapper.py
+++ b/tests/ops/mapper/test_video_tagging_from_audio_mapper.py
@@ -6,11 +6,8 @@
     VideoTaggingFromAudioMapper
 from data_juicer.utils.constant import Fields
 from data_juicer.utils.mm_utils import SpecialTokens
-from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase, SKIPPED_TESTS
+from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase
 
-# skip due to conflicts when run lazy_load in multiprocessing in librosa
-# tests passed locally.
-@SKIPPED_TESTS.register_module()
 class VideoTaggingFromAudioMapperTest(DataJuicerTestCaseBase):
     data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                              'data')
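Patch 2 above re-enables three audio tests and pins `samplerate` and `resampy`, two backends that librosa only resolves lazily. To check locally that the lazy backends now resolve, a smoke test along these lines should work (written against librosa >= 0.10; not part of the patch):

```python
# Smoke test: exercise librosa's lazily loaded resampling backends.
# 'kaiser_fast' is served by resampy and 'sinc_fastest' by samplerate,
# so this fails fast if either package is missing.
import librosa
import numpy as np

y = np.zeros(22050, dtype=np.float32)  # one second of silence at 22.05 kHz
via_resampy = librosa.resample(y, orig_sr=22050, target_sr=8000,
                               res_type='kaiser_fast')
via_samplerate = librosa.resample(y, orig_sr=22050, target_sr=8000,
                                  res_type='sinc_fastest')
print(via_resampy.shape, via_samplerate.shape)  # (8000,) (8000,)
```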
From 10242c4dfaacdaf26ea7b81913343f36bd97dbfa Mon Sep 17 00:00:00 2001
From: Haibin <1400012807@pku.edu.cn>
Date: Tue, 10 Dec 2024 16:49:08 +0800
Subject: [PATCH 3/6] install by recipe

---
 README.md                                 |  12 ++
 README_ZH.md                              |  11 ++
 data_juicer/utils/auto_install_mapping.py | 159 +++++++++------------
 tools/install_by_recipe.py                |  65 ++++++++++
 4 files changed, 155 insertions(+), 92 deletions(-)
 create mode 100644 tools/install_by_recipe.py

diff --git a/README.md b/README.md
index eb34e17ba..3ede912b0 100644
--- a/README.md
+++ b/README.md
@@ -197,6 +197,18 @@ The dependency options are listed below:
 | `.[tools]`   | Install dependencies for dedicated tools, such as quality classifiers. |
 | `.[sandbox]` | Install all dependencies for sandbox.                                  |
 
+- Install dependencies for specific OPs
+
+With the growth of the number of OPs, the dependencies of all OPs become very heavy. Instead of using the command `pip install -v -e .[sci]` to install all dependencies,
+we provide two alternative, lighter options:
+
+  - Automatic Minimal Dependency Installation: During the execution of Data-Juicer, minimal dependencies will be installed automatically. This allows for immediate execution, but may lead to dependency conflicts.
+
+  - Manual Minimal Dependency Installation: To manually install minimal dependencies tailored to a specific execution configuration, run the following command:
+    ```shell
+    python tools/install_by_recipe.py --config path_to_your_data-juicer_config_file
+    ```
+
 ### Using pip
 
 - Run the following command to install the latest released `data_juicer` using `pip`:
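Both installation options in the README hunk above hinge on a static op-to-package mapping, introduced below as `OPS_TO_PKG`. As a concrete illustration of the resolution step the tool performs, here is a minimal standalone sketch; the two-op recipe literal stands in for a parsed YAML config, and the mapping excerpt copies entries from this patch:

```python
# Minimal sketch of recipe -> package resolution.
from itertools import chain

# stand-in for cfg.process as parsed from a YAML recipe
process = [
    {'language_id_score_filter': {'lang': 'en'}},
    {'perplexity_filter': {'lang': 'en'}},
]

# small excerpt of the OPS_TO_PKG mapping introduced in this patch
OPS_TO_PKG = {
    'language_id_score_filter': ['fasttext-wheel'],
    'perplexity_filter': ['kenlm', 'sentencepiece'],
}

op_names = [next(iter(op)) for op in process]
packages = sorted(set(chain.from_iterable(OPS_TO_PKG[n] for n in op_names)))
print(packages)  # ['fasttext-wheel', 'kenlm', 'sentencepiece']
```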
diff --git a/README_ZH.md b/README_ZH.md
index 905a4e1a2..a0439cee2 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -178,6 +178,17 @@ pip install -v -e .[tools]  # install dependencies for dedicated tools
 | `.[tools]`   | Install dependencies for dedicated tools (e.g. quality classifiers) |
 | `.[sandbox]` | Install basic dependencies for the sandbox |
 
+* Install dependencies for specific OPs only
+
+As the number of OPs grows, the dependencies of all OPs become very heavy. We therefore provide two lighter alternatives to installing everything with `pip install -v -e .[sci]`:
+
+  * Automatic minimal dependency installation: minimal dependencies will be installed automatically during the execution of Data-Juicer. That is, you can start executing directly, but this approach may cause dependency conflicts.
+
+  * Manual minimal dependency installation: the minimal dependencies for a specific execution configuration can be installed manually with the following command:
+  ```shell
+  python tools/install_by_recipe.py --config path_to_your_data-juicer_config_file
+  ```
+
 ### Using pip
 
 * Run the following command to install the latest released version of `data_juicer` with `pip`:

diff --git a/data_juicer/utils/auto_install_mapping.py b/data_juicer/utils/auto_install_mapping.py
index 96a54b437..7f685a05a 100644
--- a/data_juicer/utils/auto_install_mapping.py
+++ b/data_juicer/utils/auto_install_mapping.py
@@ -10,99 +10,74 @@
     'simhash': ['simhash-pybind'],
 }
 
-# Packages to corresponding ops that require them
-PKG_TO_OPS = {
-    'torch': [
-        'image_aesthetics_filter', 'image_nsfw_filter',
-        'image_text_matching_filter', 'image_text_similarity_filter',
-        'image_watermark_filter', 'phrase_grounding_recall_filter',
-        'video_aesthetics_filter', 'video_frames_text_similarity_filter',
-        'video_nsfw_filter', 'video_tagging_from_frames_filter',
-        'video_watermark_filter', 'generate_qa_from_text_mapper',
-        'generate_qa_from_examples_mapper', 'image_captioning_mapper',
-        'image_diffusion_mapper', 'image_tagging_mapper',
-        'optimize_query_mapper', 'optimize_response_mapper',
-        'optimize_qa_mapper', 'video_captioning_from_frames_mapper',
-        'video_captioning_from_summarizer_mapper',
-        'video_captioning_from_video_mapper',
-        'video_tagging_from_audio_mapper', 'video_tagging_from_frames_mapper'
-    ],
-    'torchaudio': [
-        'video_captioning_from_summarizer_mapper',
-        'video_tagging_from_audio_mapper'
-    ],
-    'easyocr': ['video_ocr_area_ratio_filter'],
-    'fasttext-wheel': ['language_id_score_filter'],
-    'kenlm': ['perplexity_filter'],
-    'sentencepiece': [
-        'flagged_words_filter', 'perplexity_filter', 'stopwords_filter',
-        'word_repetition_filter', 'words_num_filter'
-    ],
-    'scipy': ['document_minhash_deduplicator'],
-    'ftfy': ['fix_unicode_mapper'],
-    'simhash-pybind': [
-        'document_simhash_deduplicator', 'image_captioning_mapper',
-        'image_diffusion_mapper', 'video_captioning_from_frames_mapper',
-        'video_captioning_from_summarizer_mapper',
-        'video_captioning_from_video_mapper'
-    ],
-    'selectolax': ['clean_html_mapper'],
-    'nlpaug': ['nlpaug_en_mapper'],
+# Extra packages required by each op
+OPS_TO_PKG = {
+    'video_aesthetics_filter':
+    ['simple-aesthetics-predictor', 'torch', 'transformers'],
+    'document_simhash_deduplicator': ['simhash-pybind'],
     'nlpcda': ['nlpcda'],
-    'nltk': ['phrase_grounding_recall_filter', 'sentence_split_mapper'],
-    'transformers': [
-        'alphanumeric_filter', 'image_aesthetics_filter', 'image_nsfw_filter',
-        'image_text_matching_filter', 'image_text_similarity_filter',
-        'image_watermark_filter', 'phrase_grounding_recall_filter',
-        'token_num_filter', 'video_aesthetics_filter',
-        'video_frames_text_similarity_filter', 'video_nsfw_filter',
-        'generate_qa_from_text_mapper',
-        'generate_qa_from_examples_mapper',
-        'image_captioning_mapper', 'image_diffusion_mapper',
-        'optimize_query_mapper', 'optimize_response_mapper',
-        'optimize_qa_mapper', 'video_captioning_from_audio_mapper',
-        'video_captioning_from_frames_mapper',
-        'video_captioning_from_summarizer_mapper',
-        'video_captioning_from_video_mapper', 'video_tagging_from_audio_mapper'
-    ],
-    'transformers_stream_generator': [
-        'video_captioning_from_audio_mapper',
-        'video_captioning_from_summarizer_mapper'
-    ],
-    'einops': [
-        'video_captioning_from_audio_mapper',
-        'video_captioning_from_summarizer_mapper'
-    ],
-    'accelerate': [
-        'video_captioning_from_audio_mapper',
-        'video_captioning_from_summarizer_mapper'
-    ],
-    'tiktoken': [
-        'video_captioning_from_audio_mapper',
-        'video_captioning_from_summarizer_mapper'
-    ],
-    'opencc': ['chinese_convert_mapper'],
-    'imagededup': ['image_deduplicator', 'ray_image_deduplicator'],
-    'spacy-pkuseg': ['text_action_filter', 'text_entity_dependency_filter'],
-    'diffusers': ['image_diffusion_mapper'],
-    'simple-aesthetics-predictor':
-    ['image_aesthetics_filter', 'video_aesthetics_filter'],
-    'scenedetect[opencv]': ['video_split_by_scene_mapper'],
-    'ffmpeg-python': [
-        'audio_ffmpeg_wrapped_mapper', 'video_ffmpeg_wrapped_mapper',
-        'video_resize_aspect_ratio_mapper', 'video_resize_resolution_mapper'
-    ],
-    'opencv-python': [
-        'image_face_ratio_filter', 'video_motion_score_filter',
-        'image_face_blur_mapper', 'video_face_blur_mapper',
-        'video_remove_watermark_mapper'
-    ],
-    'vllm': [
-        'generate_qa_from_text_mapper',
-        'generate_qa_from_examples_mapper',
-        'optimize_query_mapper',
-        'optimize_response_mapper',
-        'optimize_qa_mapper',
-    ],
-    'rouge': ['generate_qa_from_examples_mapper'],
-    'ram': ['image_tagging_mapper', 'video_tagging_from_frames_mapper']
+    'image_aesthetics_filter':
+    ['simple-aesthetics-predictor', 'torch', 'transformers'],
+    'video_nsfw_filter': ['torch', 'transformers'],
+    'video_face_blur_mapper': ['opencv-python'],
+    'stopwords_filter': ['sentencepiece'],
+    'fix_unicode_mapper': ['ftfy'],
+    'token_num_filter': ['transformers'],
+    'optimize_qa_mapper': ['torch', 'transformers', 'vllm'],
+    'video_motion_score_filter': ['opencv-python'],
+    'image_tagging_mapper': ['ram', 'torch'],
+    'video_resize_aspect_ratio_mapper': ['ffmpeg-python'],
+    'video_captioning_from_audio_mapper': [
+        'accelerate', 'einops', 'tiktoken', 'transformers',
+        'transformers_stream_generator'
+    ],
+    'clean_html_mapper': ['selectolax'],
+    'video_tagging_from_audio_mapper': ['torch', 'torchaudio', 'transformers'],
+    'image_deduplicator': ['imagededup'],
+    'image_diffusion_mapper':
+    ['diffusers', 'simhash-pybind', 'torch', 'transformers'],
+    'image_text_similarity_filter': ['torch', 'transformers'],
+    'alphanumeric_filter': ['transformers'],
+    'image_nsfw_filter': ['torch', 'transformers'],
+    'image_watermark_filter': ['torch', 'transformers'],
+    'ray_image_deduplicator': ['imagededup'],
+    'video_captioning_from_frames_mapper':
+    ['simhash-pybind', 'torch', 'transformers'],
+    'video_tagging_from_frames_filter': ['torch'],
+    'video_resize_resolution_mapper': ['ffmpeg-python'],
+    'optimize_query_mapper': ['torch', 'transformers', 'vllm'],
+    'sentence_split_mapper': ['nltk'],
+    'image_text_matching_filter': ['torch', 'transformers'],
+    'phrase_grounding_recall_filter': ['nltk', 'torch', 'transformers'],
+    'video_split_by_scene_mapper': ['scenedetect[opencv]'],
+    'image_face_blur_mapper': ['opencv-python'],
+    'image_face_ratio_filter': ['opencv-python'],
+    'document_minhash_deduplicator': ['scipy'],
+    'flagged_words_filter': ['sentencepiece'],
+    'language_id_score_filter': ['fasttext-wheel'],
+    'words_num_filter': ['sentencepiece'],
+    'chinese_convert_mapper': ['opencc'],
+    'video_frames_text_similarity_filter': ['torch', 'transformers'],
+    'generate_qa_from_text_mapper': ['torch', 'transformers', 'vllm'],
+    'video_ffmpeg_wrapped_mapper': ['ffmpeg-python'],
+    'image_captioning_mapper': ['simhash-pybind', 'torch', 'transformers'],
+    'video_ocr_area_ratio_filter': ['easyocr'],
+    'video_captioning_from_video_mapper':
+    ['simhash-pybind', 'torch', 'transformers'],
+    'video_remove_watermark_mapper': ['opencv-python'],
+    'text_action_filter': ['spacy-pkuseg'],
+    'nlpaug_en_mapper': ['nlpaug'],
+    'word_repetition_filter': ['sentencepiece'],
+    'video_watermark_filter': ['torch'],
+    'video_captioning_from_summarizer_mapper': [
+        'accelerate', 'einops', 'simhash-pybind', 'tiktoken', 'torch',
+        'torchaudio', 'transformers', 'transformers_stream_generator'
+    ],
+    'audio_ffmpeg_wrapped_mapper': ['ffmpeg-python'],
+    'perplexity_filter': ['kenlm', 'sentencepiece'],
+    'generate_qa_from_examples_mapper':
+    ['rouge', 'torch', 'transformers', 'vllm'],
+    'video_tagging_from_frames_mapper': ['ram', 'torch'],
+    'text_entity_dependency_filter': ['spacy-pkuseg'],
+    'optimize_response_mapper': ['torch', 'transformers', 'vllm']
 }

diff --git a/tools/install_by_recipe.py b/tools/install_by_recipe.py
new file mode 100644
index 000000000..54b0b3dd3
--- /dev/null
+++ b/tools/install_by_recipe.py
@@ -0,0 +1,65 @@
+import os
+import subprocess
+import sys
+import tempfile
+
+from loguru import logger
+
+from data_juicer.config import init_configs
+from data_juicer.utils.auto_install_mapping import OPS_TO_PKG
+
+require_version_paths = ['./environments/science_requires.txt']
+
+
+def main():
+    cfg = init_configs()
+
+    # get the ops in the recipe
+    op_names = [list(op.keys())[0] for op in cfg.process]
+    recipe_reqs = []
+    for op_name in op_names:
+        recipe_reqs.extend(OPS_TO_PKG[op_name])
+    recipe_reqs = list(set(recipe_reqs))
+
+    # get the package version limit of Data-Juicer
+    version_map, reqs = {}, []
+    for path in require_version_paths:
+        if not os.path.exists(path):
+            logger.warning(f'target file does not exist: {path}')
+        else:
+            with open(path, 'r', encoding='utf-8') as fin:
+                reqs += [x.strip() for x in fin.read().splitlines()]
+    for req in reqs:
+        # strip version specifiers like '<', '>', '=' to get the bare name
+        clean_req = req.replace('<',
+                                ' ').replace('>',
+                                             ' ').replace('=',
+                                                          ' ').split(' ')[0]
+        version_map[clean_req] = req
+
+    # generate require file for the recipe
+    with tempfile.NamedTemporaryFile(delete=False, mode='w') as temp_file:
+        temp_file_path = temp_file.name
+        for req in recipe_reqs:
+            if req in version_map:
+                temp_file.write(version_map[req] + '\n')
+            else:
+                temp_file.write(req + '\n')
+
+    # install by calling 'pip install -r ...'
+    try:
+        subprocess.check_call(
+            [sys.executable, '-m', 'pip', 'install', '-r', temp_file_path])
+        logger.info('Requirements were installed successfully.')
+    except subprocess.CalledProcessError as e:
+        logger.error(
+            f'An error occurred while installing the requirements: {e}')
+        if os.path.exists(temp_file_path):
+            os.remove(temp_file_path)
+        sys.exit(1)
+    finally:
+        if os.path.exists(temp_file_path):
+            os.remove(temp_file_path)
+
+
+if __name__ == '__main__':
+    main()
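One detail of `install_by_recipe.py` worth noting: before calling pip, bare package names from the recipe are swapped for the pinned requirement strings declared in `science_requires.txt`. The same pinning step in isolation looks roughly like this (the declared pins below are invented examples, not Data-Juicer's actual version constraints):

```python
# Map bare package names to pinned requirement strings, mirroring the
# version_map logic in the script above.
import re

declared = ['torch>=1.11.0', 'transformers>=4.37.0', 'kenlm']  # invented pins
version_map = {re.split(r'[<>=]', req, maxsplit=1)[0]: req for req in declared}

recipe_reqs = ['transformers', 'kenlm', 'ftfy']
pinned = [version_map.get(req, req) for req in recipe_reqs]
print(pinned)  # ['transformers>=4.37.0', 'kenlm', 'ftfy']
```

Packages that appear in no requirements file, like `ftfy` here, fall through unpinned, which matches the `if req in version_map` branch in the tool.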
From b46d105997a9eb3bb593a7b0aca51095344aac71 Mon Sep 17 00:00:00 2001
From: Haibin <1400012807@pku.edu.cn>
Date: Thu, 12 Dec 2024 19:15:12 +0800
Subject: [PATCH 4/6] change to dj_install

---
 README.md                                     | 2 +-
 README_ZH.md                                  | 2 +-
 setup.py                                      | 1 +
 tools/{install_by_recipe.py => dj_install.py} | 0
 4 files changed, 3 insertions(+), 2 deletions(-)
 rename tools/{install_by_recipe.py => dj_install.py} (100%)

diff --git a/README.md b/README.md
index 3ede912b0..9f9b3bac1 100644
--- a/README.md
+++ b/README.md
@@ -206,7 +206,7 @@ we provide two alternative, lighter options:
 
   - Manual Minimal Dependency Installation: To manually install minimal dependencies tailored to a specific execution configuration, run the following command:
     ```shell
-    python tools/install_by_recipe.py --config path_to_your_data-juicer_config_file
+    python tools/dj_install.py --config path_to_your_data-juicer_config_file
     ```
 
 ### Using pip

diff --git a/README_ZH.md b/README_ZH.md
index a0439cee2..172373db8 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -186,7 +186,7 @@ pip install -v -e .[tools]  # install dependencies for dedicated tools
 
   * Manual minimal dependency installation: the minimal dependencies for a specific execution configuration can be installed manually with the following command:
   ```shell
-  python tools/install_by_recipe.py --config path_to_your_data-juicer_config_file
+  python tools/dj_install.py --config path_to_your_data-juicer_config_file
   ```
 
 ### Using pip

diff --git a/setup.py b/setup.py
index 3df3d0170..d0ec5b546 100644
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@ def get_install_requirements(require_f_paths, env_dir='environments'):
         'console_scripts': [
             'dj-process = data_juicer.tools.process_data:main',
             'dj-analyze = data_juicer.tools.analyze_data:main',
+            'dj-install = data_juicer.tools.dj_install:main',
         ]
     },
     install_requires=min_requires,

diff --git a/tools/install_by_recipe.py b/tools/dj_install.py
similarity index 100%
rename from tools/install_by_recipe.py
rename to tools/dj_install.py

From a0da444478fdbb0959892c1315d23c9052478e3c Mon Sep 17 00:00:00 2001
From: Haibin <1400012807@pku.edu.cn>
Date: Thu, 12 Dec 2024 19:21:32 +0800
Subject: [PATCH 5/6] change to dj_install

---
 README.md    | 4 ++++
 README_ZH.md | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 9f9b3bac1..518e54713 100644
--- a/README.md
+++ b/README.md
@@ -206,7 +206,11 @@ we provide two alternative, lighter options:
 
   - Manual Minimal Dependency Installation: To manually install minimal dependencies tailored to a specific execution configuration, run the following command:
     ```shell
+    # only for installation from source
     python tools/dj_install.py --config path_to_your_data-juicer_config_file
+
+    # use the command-line tool
+    dj-install --config path_to_your_data-juicer_config_file
     ```
 
 ### Using pip

diff --git a/README_ZH.md b/README_ZH.md
index 172373db8..366fcb004 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -186,7 +186,11 @@ pip install -v -e .[tools]  # install dependencies for dedicated tools
 
   * Manual minimal dependency installation: the minimal dependencies for a specific execution configuration can be installed manually with the following command:
   ```shell
+  # only for installation from source
   python tools/dj_install.py --config path_to_your_data-juicer_config_file
+
+  # use the command-line tool
+  dj-install --config path_to_your_data-juicer_config_file
   ```
 
 ### Using pip
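Patch 6 below closes the loop for OP developers: a new OP whose dependencies come from `science_requires.txt` must register them in `OPS_TO_PKG`, otherwise `dj-install` cannot discover them. The guide's `text_length_filter` example needs no extra packages; for a hypothetical model-based OP, the entry to add would take this shape (the OP name and its package list are made up for illustration):

```python
# Shape of the entry to add in data_juicer/utils/auto_install_mapping.py;
# 'my_video_sharpness_filter' and its packages are hypothetical examples.
OPS_TO_PKG = {
    # ... existing entries stay as they are ...
    'my_video_sharpness_filter': ['opencv-python', 'torch'],
}
```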
From 02f8dda38d103140e3df82df520fc5b94b0f7af2 Mon Sep 17 00:00:00 2001
From: Haibin <1400012807@pku.edu.cn>
Date: Thu, 12 Dec 2024 19:29:55 +0800
Subject: [PATCH 6/6] developer doc done

---
 docs/DeveloperGuide.md    | 8 +++++---
 docs/DeveloperGuide_ZH.md | 8 +++++---
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/docs/DeveloperGuide.md b/docs/DeveloperGuide.md
index e736b5ade..734f1201a 100644
--- a/docs/DeveloperGuide.md
+++ b/docs/DeveloperGuide.md
@@ -209,7 +209,9 @@ __all__ = [
 ]
 ```
 
-4. Now you can use this new OP with custom arguments in your own config files!
+4. When an operator has package dependencies listed in `environments/science_requires.txt`, you need to add the corresponding dependency packages to the `OPS_TO_PKG` dictionary in `data_juicer/utils/auto_install_mapping.py` to support dependency installation at the operator level.
+
+5. Now you can use this new OP with custom arguments in your own config files!
 
 ```yaml
 # other configs
 ...
 process:
   ...
   - text_length_filter:
       min_len: 10
       max_len: 1000
 ```
 
-5. (Strongly Recommend) It's better to add corresponding tests for your own OPs. For `TextLengthFilter` above, you would like to add `test_text_length_filter.py` into `tests/ops/filter/` directory as below.
+6. (Strongly Recommend) It's better to add corresponding tests for your own OPs. For `TextLengthFilter` above, you would like to add `test_text_length_filter.py` into `tests/ops/filter/` directory as below.
 
 ```python
 import unittest
 from data_juicer.ops.filter.text_length_filter import TextLengthFilter
 from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase
 
 class TextLengthFilterTest(DataJuicerTestCaseBase):
 
     def test_func1(self):
         pass
 
     def test_func2(self):
         pass
 
     def test_func3(self):
         pass
 
 if __name__ == '__main__':
     unittest.main()
 ```
 
-6. (Strongly Recommend) In order to facilitate the use of other users, we also need to update this new OP information to
+7. (Strongly Recommend) In order to facilitate the use of other users, we also need to update this new OP information to
 the corresponding documents, including the following docs:
    1. `configs/config_all.yaml`: this complete config file contains a list of all OPs and their arguments, serving as an important document for users to refer to all available OPs. Therefore, after adding the new OP, we need to add it to the process

diff --git a/docs/DeveloperGuide_ZH.md b/docs/DeveloperGuide_ZH.md
index e9d746d7c..fcc76aafe 100644
--- a/docs/DeveloperGuide_ZH.md
+++ b/docs/DeveloperGuide_ZH.md
@@ -202,7 +202,9 @@ __all__ = [
 ]
 ```
 
-4. All done! Now you can use the newly added OP in your own config files:
+4. When an OP has package dependencies listed in `environments/science_requires.txt`, you need to add the corresponding dependency packages to `OPS_TO_PKG` in `data_juicer/utils/auto_install_mapping.py` to support dependency installation at the OP level.
+
+5. All done! Now you can use the newly added OP in your own config files:
 
 ```yaml
 # other configs
 ...
 process:
   ...
   - text_length_filter:
       min_len: 10
       max_len: 1000
 ```
 
-5. (Strongly Recommend) It's best to add unit tests for the newly added OP. For the `TextLengthFilter` OP above, it's recommended to implement a test file like `test_text_length_filter.py` in `tests/ops/filter/`:
+6. (Strongly Recommend) It's best to add unit tests for the newly added OP. For the `TextLengthFilter` OP above, it's recommended to implement a test file like `test_text_length_filter.py` in `tests/ops/filter/`:
 
 ```python
 import unittest
 from data_juicer.ops.filter.text_length_filter import TextLengthFilter
 from data_juicer.utils.unittest_utils import DataJuicerTestCaseBase
 
 class TextLengthFilterTest(DataJuicerTestCaseBase):
 
     def test_func1(self):
         pass
 
     def test_func2(self):
         pass
 
     def test_func3(self):
         pass
 
 if __name__ == '__main__':
     unittest.main()
 ```
 
-6. (Strongly Recommend) To make it easier for other users, we also need to update the new OP's information in the corresponding documents, including:
+7. (Strongly Recommend) To make it easier for other users, we also need to update the new OP's information in the corresponding documents, including:
    1. `configs/config_all.yaml`: this complete config file keeps a list of all OPs and their arguments, and serves as an important reference of available OPs for users. Therefore, after adding a new OP, it should be added to the process list in this document (grouped by OP type and sorted alphabetically):
 ```yaml