Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LFX Mentorship Autumn 2024 Pretest - FuryMartin #139

Closed
wants to merge 10 commits into from
3 changes: 3 additions & 0 deletions core/common/constant.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ class DatasetFormat(Enum):
CSV = "csv"
TXT = "txt"
JSON = "json"
JSONL = "jsonl"


class ParadigmType(Enum):
Expand All @@ -35,13 +36,15 @@ class ParadigmType(Enum):
INCREMENTAL_LEARNING = "incrementallearning"
MULTIEDGE_INFERENCE = "multiedgeinference"
LIFELONG_LEARNING = "lifelonglearning"
JOINT_INFERENCE = "jointinference"


class ModuleType(Enum):
"""
Algorithm module type.
"""
BASEMODEL = "basemodel"
APIMODEL = "apimodel"

# HEM
HARD_EXAMPLE_MINING = "hard_example_mining"
Expand Down
4 changes: 4 additions & 0 deletions core/testcasecontroller/algorithm/algorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
IncrementalLearning,
MultiedgeInference,
LifelongLearning,
JointInference
)
from core.testcasecontroller.generation_assistant import get_full_combinations

Expand Down Expand Up @@ -103,6 +104,9 @@ def paradigm(self, workspace: str, **kwargs):

if self.paradigm_type == ParadigmType.LIFELONG_LEARNING.value:
return LifelongLearning(workspace, **config)

if self.paradigm_type == ParadigmType.JOINT_INFERENCE.value:
return JointInference(workspace, **config)

return None

Expand Down
1 change: 1 addition & 0 deletions core/testcasecontroller/algorithm/paradigm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,4 @@
from .singletask_learning import SingleTaskLearning
from .multiedge_inference import MultiedgeInference
from .lifelong_learning import LifelongLearning
from .joint_inference import JointInference
12 changes: 11 additions & 1 deletion core/testcasecontroller/algorithm/paradigm/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

from sedna.core.incremental_learning import IncrementalLearning
from sedna.core.lifelong_learning import LifelongLearning

from .sedna_joint_inference import JointInference
from core.common.constant import ModuleType, ParadigmType


Expand Down Expand Up @@ -125,5 +125,15 @@ def build_paradigm_job(self, paradigm_type):
# pylint: disable=E1101
if paradigm_type == ParadigmType.MULTIEDGE_INFERENCE.value:
return self.modules_funcs.get(ModuleType.BASEMODEL.value)()

if paradigm_type == ParadigmType.JOINT_INFERENCE.value:
return JointInference(
estimator=self.module_instances.get(
ModuleType.BASEMODEL.value),
cloud=self.module_instances.get(
ModuleType.APIMODEL.value),
hard_example_mining=self.module_instances.get(
ModuleType.HARD_EXAMPLE_MINING.value)
)

return None
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Copyright 2022 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=missing-module-docstring
from .joint_inference import JointInference
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# Copyright 2022 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Cloud-Edge Joint Inference"""

# Ianvs imports
import os
from copy import deepcopy
from core.common.constant import ParadigmType
from core.testcasecontroller.algorithm.paradigm.base import ParadigmBase
from sedna.common.class_factory import ClassFactory, ClassType
# from sedna_joint_inference import JointInference as SednaJointInference

class JointInference(ParadigmBase):
    """
    Cloud-Edge Joint Inference:
    provide the flow of the cloud-edge joint inference paradigm.
    Notes:
        1. Ianvs serves as testing tools for test objects, e.g., algorithms.
        2. Ianvs does NOT include code directly on test object.
        3. Algorithms serve as typical test objects in Ianvs
        and detailed algorithms are thus NOT included in this Ianvs python file.
        4. As for the details of example test objects, e.g., algorithms,
        please refer to third party packages in Ianvs example.
        For example, AI workflow and interface pls refer to sedna
        (sedna docs: https://sedna.readthedocs.io/en/latest/api/lib/index.html),
        and module implementation pls refer to `examples' test algorithms`,
        e.g., basemodel.py, hard_example_mining.py.

    Parameters
    ---------
    workspace: string
        the output required for the joint inference paradigm.
    kwargs: dict
        config required for the test process of the joint inference paradigm,
        e.g.: algorithm modules, dataset, initial model, etc.

    """

    def __init__(self, workspace, **kwargs):
        ParadigmBase.__init__(self, workspace, **kwargs)
        self.kwargs = kwargs
        # Controls when hard-example mining runs relative to edge inference;
        # default matches the sedna JointInference "mining-then-inference" flow.
        self.hard_example_mining_mode = kwargs.get(
            "hard_example_mining_mode", "mining-then-inference"
        )

    def run(self):
        """
        run the test flow of the cloud-edge joint inference paradigm.

        Returns
        ------
        test result: list
            one entry per test sample, as produced by the sedna
            JointInference job.
        system metric info: dict
            information needed to compute system metrics.

        """
        job = self.build_paradigm_job(ParadigmType.JOINT_INFERENCE.value)

        inference_result = self._inference(job)

        return inference_result, self.system_metric_info

    def _inference(self, job):
        """Run joint inference on every sample of the test dataset.

        Each returned element is the tuple produced by the sedna
        JointInference API: (is_hard_example, res, edge_result, cloud_result).
        """
        # Ianvs API
        inference_dataset = self.dataset.load_data(self.dataset.test_url, "inference")
        inference_output_dir = os.path.join(self.workspace, "output/inference/")
        # Create the save directory before advertising it to the modules,
        # otherwise consumers of RESULT_SAVED_URL may fail on a missing path.
        os.makedirs(inference_output_dir, exist_ok=True)
        os.environ["RESULT_SAVED_URL"] = inference_output_dir

        # inference via sedna JointInference API, one call per sample
        return [
            job.inference(data, mining_mode=self.hard_example_mining_mode)
            for data in inference_dataset.x
        ]
Loading
Loading