Skip to content

Commit

Permalink
Removed all references to apriori_training
Browse files Browse the repository at this point in the history
Also fixed some whitespace issues (e.g., ensuring a trailing newline at the end of each file)
  • Loading branch information
asgibson committed Nov 8, 2023
1 parent 4df130a commit bb494be
Show file tree
Hide file tree
Showing 8 changed files with 31 additions and 121 deletions.
17 changes: 2 additions & 15 deletions onair/src/ai_components/ai_plugin_abstract/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@

from abc import ABC, abstractmethod
"""This object serves as a proxy for all plug-ins.
Therefore, the AIPlugIn object is meant to induce
standards and structures of compliance for user-created
Therefore, the AIPlugIn object is meant to induce
standards and structures of compliance for user-created
and/or imported plug-ins/libraries
"""
class AIPlugIn(ABC):
Expand All @@ -22,19 +22,6 @@ def __init__(self, _name, _headers):
self.component_name = _name
self.headers = _headers

@abstractmethod
def apriori_training(self, batch_data=[]):
    """Learn any priors needed for realtime diagnosis from the given batch.

    Abstract: concrete plugins must override this. Implementations are not
    expected to return anything; learned state should be kept on the plugin
    instance itself so users can pull training data from the construct.

    :param batch_data: (list) batch of training data; format is plugin-specific
        and still an open design question per the original author's notes.
        NOTE(review): the mutable default is harmless here (never mutated),
        but None would be the conventional sentinel — confirm before changing
        the signature, since subclass overrides mirror it.
    :raises NotImplementedError: always, when not overridden.
    """
    # Open design questions carried over from the original author:
    # - whether empty frames/batches from updates should be allowed
    # - the batch data format may change as the tutorial fleshes out
    raise NotImplementedError

@abstractmethod
def update(self, low_level_data=[], high_level_data={}):
"""
Expand Down
4 changes: 0 additions & 4 deletions onair/src/ai_components/learners_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,6 @@ def __init__(self, headers, _learner_plugins={}):
assert(len(headers)>0), 'Headers are required'
self.headers = headers
self.ai_constructs = import_plugins(self.headers, _learner_plugins)

def apriori_training(self, batch_data):
    """Forward the training batch to every learner plugin.

    Each construct in self.ai_constructs receives the same batch_data,
    in order; nothing is returned.
    """
    for learner in self.ai_constructs:
        learner.apriori_training(batch_data)

def update(self, low_level_data, high_level_data):
for plugin in self.ai_constructs:
Expand Down
8 changes: 1 addition & 7 deletions plugins/generic/generic_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,6 @@
from onair.src.ai_components.ai_plugin_abstract.core import AIPlugIn

class Plugin(AIPlugIn):
def apriori_training(self, batch_data=[]):
    """No-op stub: the generic plugin learns no priors.

    :param batch_data: (list) training batch; ignored.
    :return: None
    """
    return None

def update(self,low_level_data=[], high_level_data={}):
"""
Given streamed data point, system should update internally
Expand All @@ -27,4 +21,4 @@ def render_reasoning(self):
"""
System should return its diagnosis
"""
pass
pass
21 changes: 9 additions & 12 deletions plugins/kalman/kalman_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,6 @@ def __init__(self, name, headers, window_size=3):
observation_noise = 1.0) # R

#### START: Classes mandated by plugin architecture
def apriori_training(self):
    """No-op stub: the Kalman plugin performs no a-priori training."""
    return None

def update(self, frame):
"""
:param frame: (list of floats) input sequence of len (input_dim)
Expand All @@ -42,11 +39,11 @@ def update(self, frame):
if len(self.frames) < len(frame): # If the frames variable is empty, append each data point in frame to it, each point wrapped as a list
# This is done so the data can have each attribute grouped in one list before being passed to kalman
# Ex: [[1:00, 1:01, 1:02, 1:03, 1:04, 1:05], [1, 2, 3, 4, 5]]
self.frames.append([frame[data_point_index]])
self.frames.append([frame[data_point_index]])
else:
self.frames[data_point_index].append(frame[data_point_index])
if len(self.frames[data_point_index]) > self.window_size: # If after adding a point to the frame, that attribute is larger than the window_size, take out the first element
self.frames[data_point_index].pop(0)
self.frames[data_point_index].pop(0)

def render_reasoning(self):
"""
Expand Down Expand Up @@ -84,20 +81,20 @@ def predictions_for_given_data(self, data):
returned_data = []
initial_val = data[0]
for item in range(len(data)-1):
predicted = self.predict(data[0:item+1], 1, initial_val)
predicted = self.predict(data[0:item+1], 1, initial_val)
actual_next_state = data[item+1]
pred_mean = predicted.observations.mean
returned_data.append(pred_mean)
if(len(returned_data) == 0): # If there's not enough data just set it to 0
returned_data.append(0)
return returned_data
# Get data, make predictions, and then find the errors for these predictions

# Get data, make predictions, and then find the errors for these predictions
def generate_residuals_for_given_data(self, data):
residuals = []
initial_val = data[0]
for item in range(len(data)-1):
predicted = self.predict(data[0:item+1], 1, initial_val)
predicted = self.predict(data[0:item+1], 1, initial_val)
actual_next_state = data[item+1]
pred_mean = predicted.observations.mean
residual_error = float(self.residual(pred_mean, actual_next_state))
Expand All @@ -107,18 +104,18 @@ def generate_residuals_for_given_data(self, data):
return residuals

#Info: takes a chunk of data of n size. Walks through it and gets residual errors.
#Takes the mean of the errors and determines if they're too large overall in order to determine whether or not there's a chunk in said error.
#Takes the mean of the errors and determines if they're too large overall in order to determine whether or not there's a chunk in said error.
def current_attribute_chunk_get_error(self, data):
    """Return True when one attribute's data chunk shows too much error.

    Generates prediction residuals for the chunk and flags it as erroneous
    when the magnitude of the mean residual reaches the threshold.

    :param data: (list of floats) one attribute's chunk of data points
    :return: (bool) True when abs(mean residual) >= 1.5, else False
    """
    residuals = self.generate_residuals_for_given_data(data)
    # abs() applied once — the original wrapped an already-absolute value in
    # a second, redundant abs(). Boolean returned directly instead of the
    # verbose if/return-True/False ladder.
    # NOTE(review): 1.5 is a magic threshold inherited from the original
    # implementation — confirm against calibration data.
    return abs(self.mean(residuals)) >= 1.5

def frame_diagnosis(self, frame, headers):
    """Return the header names whose attribute data looks broken.

    Runs the per-attribute chunk error check on every attribute in the
    frame and collects the corresponding header names, skipping any header
    named 'TIME' (case-insensitive).
    """
    broken = []
    for idx, chunk in enumerate(frame):
        has_error = self.current_attribute_chunk_get_error(chunk)
        if has_error and headers[idx].upper() != 'TIME':
            broken.append(headers[idx])
    return broken
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,6 @@ class FakeAIPlugIn(AIPlugIn):
def __init__(self, _name, _headers):
return super().__init__(_name, _headers)

def apriori_training(self):
return None

def update(self):
return None

Expand All @@ -35,25 +32,21 @@ class BadFakeAIPlugIn(AIPlugIn):
def __init__(self, _name, _headers):
return super().__init__(_name, _headers)

def apriori_training(self):
return super().apriori_training()

def update(self):
return super().update()

def render_reasoning(self):
return super().render_reasoning()

# abstract methods tests
def test_AIPlugIn_raises_error_because_of_unimplemented_abstract_methods():
    """AIPlugIn is abstract: direct instantiation must raise TypeError
    naming every unimplemented abstract method."""
    # Arrange - None
    # Act
    # __new__ is enough to trigger the abstract-class check; no __init__ needed.
    with pytest.raises(TypeError) as e_info:
        cut = AIPlugIn.__new__(AIPlugIn)

    # Assert
    # NOTE(review): the exact TypeError wording varies across CPython
    # versions — this assumes the "... with abstract methods ..." phrasing.
    assert "Can't instantiate abstract class AIPlugIn with" in e_info.__str__()
    assert "apriori_training" in e_info.__str__()
    assert "update" in e_info.__str__()
    assert "render_reasoning" in e_info.__str__()

Expand All @@ -63,10 +56,9 @@ def test_AIPlugIn_raises_error_when_an_inherited_class_is_instantiated_because_a
# Act
with pytest.raises(TypeError) as e_info:
cut = IncompleteFakeAIPlugIn.__new__(IncompleteFakeAIPlugIn)

# Assert
assert "Can't instantiate abstract class IncompleteFakeAIPlugIn with" in e_info.__str__()
assert "apriori_training" in e_info.__str__()
assert "update" in e_info.__str__()
assert "render_reasoning" in e_info.__str__()

Expand All @@ -75,7 +67,7 @@ def test_AIPlugIn_raises_error_when_an_inherited_class_calls_abstract_methods_in
cut = BadFakeAIPlugIn.__new__(BadFakeAIPlugIn)

# populate list with the functions that should raise exceptions when called.
not_implemented_functions = [cut.update, cut.apriori_training, cut.render_reasoning]
not_implemented_functions = [cut.update, cut.render_reasoning]
for fnc in not_implemented_functions:
with pytest.raises(NotImplementedError) as e_info:
fnc()
Expand All @@ -89,7 +81,7 @@ def test_AIPlugIn_does_not_raise_error_when_an_inherited_class_is_instantiated_b
fake_ic = FakeAIPlugIn.__new__(FakeAIPlugIn)
except:
exception_raised = True

# Assert
assert exception_raised == False

Expand Down
36 changes: 1 addition & 35 deletions test/onair/src/ai_components/test_learners_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_se
forced_return_ai_constructs = MagicMock()

mocker.patch(learners_interface.__name__ + '.import_plugins', return_value=forced_return_ai_constructs)


cut = LearnersInterface.__new__(LearnersInterface)

Expand All @@ -53,40 +53,6 @@ def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_se
assert learners_interface.import_plugins.call_args_list[0].args == (arg_headers, arg__learner_plugins)
assert cut.ai_constructs == forced_return_ai_constructs

# apriori_training tests
def test_LearnersInterface_apriori_training_does_nothing_when_instance_ai_constructs_is_empty():
    """With no learner plugins registered, apriori_training is a no-op
    that returns None."""
    # Arrange
    fake_batch = MagicMock()
    cut = LearnersInterface.__new__(LearnersInterface)
    cut.ai_constructs = []

    # Act
    outcome = cut.apriori_training(fake_batch)

    # Assert
    assert outcome is None

def test_LearnersInterface_apriori_training_calls_apriori_training_on_each_ai_constructs_item(mocker):
    """Every plugin in ai_constructs receives exactly one apriori_training
    call carrying the given batch data; the interface itself returns None."""
    # Arrange
    fake_batch = MagicMock()
    num_constructs = pytest.gen.randint(1, 10)  # arbitrary, 1 to 10 (0 has its own test)

    cut = LearnersInterface.__new__(LearnersInterface)
    cut.ai_constructs = [MagicMock() for _ in range(num_constructs)]

    # Act
    outcome = cut.apriori_training(fake_batch)

    # Assert
    for construct in cut.ai_constructs:
        assert construct.apriori_training.call_count == 1
        assert construct.apriori_training.call_args_list[0].args == (fake_batch, )
    assert outcome is None

# update tests
def test_LearnersInterface_update_does_nothing_when_instance_ai_constructs_is_empty():
# Arrange
Expand Down
15 changes: 2 additions & 13 deletions test/plugins/generic/test_generic_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,6 @@

from plugins.generic.generic_plugin import Plugin

# test apriori training
def test_apriori_training_does_nothing():
    """The generic plugin's apriori_training stub must return None."""
    # Arrange
    cut = Plugin.__new__(Plugin)

    # Act
    outcome = cut.apriori_training()

    # Assert
    assert outcome is None

def test_update_does_nothing():
# Arrange
cut = Plugin.__new__(Plugin)
Expand All @@ -37,9 +26,9 @@ def test_update_does_nothing():
def test_render_reasoning_does_nothing():
# Arrange
cut = Plugin.__new__(Plugin)

# Act
result = cut.render_reasoning()

# Assert
assert result == None
assert result == None
33 changes: 11 additions & 22 deletions test/plugins/kalman/test_kalman_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def __init__(self, state_transition, process_noise, observation_model, observati
assert cut.kf.process_noise == forced_diag_return_value
assert cut.kf.observation_model == forced_array_return_value
assert cut.kf.observation_noise == 1.0

def test_Kalman__init__initializes_variables_to_expected_values_when_given_all_args(mocker):
# Arrange
arg_name = MagicMock()
Expand Down Expand Up @@ -90,17 +90,6 @@ def __init__(self, state_transition, process_noise, observation_model, observati
assert cut.kf.process_noise == forced_diag_return_value
assert cut.kf.observation_model == forced_array_return_value
assert cut.kf.observation_noise == 1.0

# test apiori training
def test_Kalman_apiori_training_returns_none():
    """The Kalman plugin's apriori_training stub must return None.

    NOTE: the 'apiori' typo in this test's name is preserved so the public
    test id stays stable for test selection/reporting.
    """
    # Arrange
    cut = Kalman.__new__(Kalman)

    # Act & Assert
    assert cut.apriori_training() is None

# test update
def test_Kalman_update_does_not_mutate_frames_attribute_when_arg_frame_is_empty():
Expand Down Expand Up @@ -141,7 +130,7 @@ def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and
len_fake_frames = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5
fake_frames = [[MagicMock()]] * len_fake_frames
fake_window_size = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10

len_arg_frame = pytest.gen.randint(6, 10) # arbitrary int greater than max len of fake_frames, from 6 to 10
arg_frame = [MagicMock()] * len_arg_frame

Expand Down Expand Up @@ -171,7 +160,7 @@ def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and
len_fake_frames = pytest.gen.randint(6, 10) # arbitrary int greater than max len of arg_frame, from 6 to 10
fake_frames = [[MagicMock()]] * len_fake_frames
fake_window_size = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10

len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5
arg_frame = [MagicMock()] * len_arg_frame

Expand Down Expand Up @@ -261,13 +250,13 @@ def test_Kalman_render_reasoning_returns_value_returned_by_frame_diagnosis_funct
fake_frames = MagicMock()
fake_headers = MagicMock()
forced_frame_diagnose_return = MagicMock()

cut = Kalman.__new__(Kalman)
cut.frames = fake_frames
cut.headers = fake_headers

mocker.patch.object(cut, 'frame_diagnosis', return_value=forced_frame_diagnose_return)

# Act
result = cut.render_reasoning()

Expand Down Expand Up @@ -358,7 +347,7 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun
assert fake_kf.smooth.call_args_list[0].args == (arg_data, )
assert fake_kf.predict.call_count == 1
assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps)

def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_data_is_empty_and_initial_val_is_not_None(mocker):
# Arrange
arg_data = []
Expand Down Expand Up @@ -413,7 +402,7 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun
assert fake_kf.smooth.call_args_list[0].args == (arg_data, )
assert fake_kf.predict.call_count == 1
assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps)

def test_Kalman_predict_when_not_given_initial_val_arg_sets_initial_val_arg_equal_to_None(mocker):
# Arrange
len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
Expand Down Expand Up @@ -441,7 +430,7 @@ def test_Kalman_predict_when_not_given_initial_val_arg_sets_initial_val_arg_equa
assert fake_kf.smooth.call_args_list[0].args == (arg_data, )
assert fake_kf.predict.call_count == 1
assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps)

def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_initial_val_is_not_None(mocker):
# Arrange
len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
Expand Down Expand Up @@ -565,7 +554,7 @@ def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg
assert cut.predict.call_count == len_data - 1
for i in range(len_data - 1):
cut.predict.call_args_list[i].args == (arg_data[0:i+1], 1, arg_data[0])

# test generate_residuals_for_given_data
def test_Kalman_generate_residuals_for_given_data_raises_error_when_data_arg_is_empty(mocker):
# Arrange
Expand Down Expand Up @@ -659,7 +648,7 @@ def test_Kalman_current_attribute_chunk_get_error_returns_true_when_abs_of_mean_
assert kalman_plugin.abs.call_count == 2
assert kalman_plugin.abs.call_args_list[0].args == (forced_mean_return_value, )
assert kalman_plugin.abs.call_args_list[1].args == (forced_abs_return_value, )

def test_Kalman_current_attribute_chunk_get_error_returns_false_when_abs_of_mean_residuals_less_than_one_point_five(mocker):
# Arrange
arg_data = MagicMock()
Expand Down Expand Up @@ -788,4 +777,4 @@ def test_Kalman_frame_diagnosis_returns_expected_sublist_of_headers_when_headers
result = cut.frame_diagnosis(arg_frame, arg_headers)

# Assert
assert result == expected_result
assert result == expected_result

0 comments on commit bb494be

Please sign in to comment.