
Commit

Merge remote-tracking branch 'origin/main'
# Conflicts:
#	fairpyx/algorithms/heterogeneous_matroid_constraints_algorithms.py
Abodi-Massarwa committed Aug 29, 2024
2 parents 7f2084d + 55da4fe commit 6d0f64e
Showing 27 changed files with 2,980 additions and 28 deletions.
@@ -1,3 +1,5 @@
import experiments_csv

from fairpyx.algorithms.fractional_egalitarian import fractional_egalitarian_allocation
from fairpyx.algorithms.heterogeneous_matroid_constraints_algorithms import *
from fairpyx.utils.test_heterogeneous_matroid_constraints_algorithms_utils import *
@@ -102,13 +104,14 @@ def run_experiment(equal_capacities:bool,equal_valuations:bool,binary_valuations
capped_round_robin: {'alloc', 'item_categories', 'agent_category_capacities',
'initial_agent_order', 'target_category'},
two_categories_capped_round_robin: {'alloc', 'item_categories', 'agent_category_capacities', 'initial_agent_order','target_category_pair'},
per_category_capped_round_robin: {'alloc', 'agent_category_capacities', 'item_categories', 'initial_agent_order'},
iterated_priority_matching: {'alloc', 'item_categories', 'agent_category_capacities'},
egalitarian_algorithm:{'instance'},
utilitarian_algorithm:{'instance'},
iterated_maximum_matching:{'alloc'}

}

#print(f'algorithm{algorithm.__name__} , binary valuations ->{binary_valuations}')
instance, agent_category_capacities, categories, initial_agent_order = random_instance(
equal_capacities=equal_capacities,
equal_valuations=equal_valuations,
@@ -134,6 +137,7 @@ def run_experiment(equal_capacities:bool,equal_valuations:bool,binary_valuations
current_algorithm_bundle_sum,current_algorithm_bundle_min_value = utilitarian_algorithm(instance)
# our algorithm
else:# one of our algorithms then !
print(f'filtered kwargs->{filtered_kwargs["alloc"].instance._valuations}')
algorithm(**filtered_kwargs)
current_algorithm_bundle_min_value=min(alloc.agent_bundle_value(agent,bundle) for agent,bundle in alloc.bundles.items())# to compare with egalitarian algorithm
current_algorithm_bundle_sum=sum(alloc.agent_bundle_value(agent,bundle)for agent,bundle in alloc.bundles.items())# to compare with utilitarian
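Side note (not part of the diff): the mapping above pairs each algorithm with the keyword arguments it accepts, and the call algorithm(**filtered_kwargs) implies that a full kwargs dict is filtered against that set first. A hypothetical, self-contained sketch of that filtering step (the names all_kwargs and required_kwargs are made up for illustration):

    # Toy kwargs dict and the required-argument set of one algorithm
    # (matching the iterated_priority_matching entry in the mapping above).
    all_kwargs = {'alloc': 'ALLOC', 'item_categories': 'CATS',
                  'agent_category_capacities': 'CAPS',
                  'initial_agent_order': 'ORDER', 'target_category': 'c1'}
    required_kwargs = {'alloc', 'item_categories', 'agent_category_capacities'}
    filtered_kwargs = {name: value for name, value in all_kwargs.items()
                       if name in required_kwargs}
    # filtered_kwargs == {'alloc': 'ALLOC', 'item_categories': 'CATS',
    #                     'agent_category_capacities': 'CAPS'}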
@@ -159,33 +163,43 @@ def utilitarian_algorithm(instance):


def egalitarian_algorithm(instance):
# Egalitarian algorithm
# Step 1: Form the valuation matrix
valuation_matrix = [
[instance.agent_item_value(agent, item) for item in instance.items]
for agent in instance.agents
]

# Step 2: Compute the fractional egalitarian allocation
not_rounded_egal = fractional_egalitarian_allocation(
Instance(valuation_matrix), normalize_utilities=False
)

# Step 3: Multiply the fractions by the original valuation matrix
not_rounded_egalitarian_valuations_matrix = [
not_rounded_egalitarian_bundle_matrix = [
[
not_rounded_egal[agent][item] * valuation_matrix[agent][item]
for item in range(len(instance.items))
]
for agent in range(len(instance.agents))
]
min_egalitarian_algorithm_value = min(not_rounded_egalitarian_valuations_matrix) # egalitarian value
total_sum = sum(sum(row) for row in not_rounded_egalitarian_valuations_matrix) # sum of bundles (for the sake of comparison with utilitarian algorithm)

return total_sum, min_egalitarian_algorithm_value
# Step 4: Calculate the total value each agent receives from their allocation
agent_total_values = [
sum(not_rounded_egalitarian_bundle_matrix[agent])
for agent in range(len(instance.agents))
]

# Step 5: Find the minimum value among these totals
min_egalitarian_algorithm_value = min(agent_total_values)

# Step 6: Calculate the total sum of all allocations (for comparison)
total_sum = sum(agent_total_values)

return total_sum, min_egalitarian_algorithm_value
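Side note (not part of the diff): the replaced lines applied min() directly to the nested bundle matrix, which compares whole rows rather than agent values; Steps 4-6 in the new version take the minimum over per-agent totals instead. A minimal standalone sketch of the corrected computation, with a hard-coded toy fractional allocation standing in for fractional_egalitarian_allocation:

    # Two agents, two items; each agent fully receives "their" item.
    valuation_matrix = [[10, 0],
                        [0, 10]]
    fractions = [[1.0, 0.0],   # agent 0 receives all of item 0
                 [0.0, 1.0]]   # agent 1 receives all of item 1
    bundle_matrix = [
        [fractions[a][i] * valuation_matrix[a][i] for i in range(2)]
        for a in range(2)
    ]
    agent_totals = [sum(row) for row in bundle_matrix]   # [10.0, 10.0]
    min_egalitarian_value = min(agent_totals)            # 10.0
    total_sum = sum(agent_totals)                        # 20.0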

if __name__ == '__main__':
#experiments_csv.logger.setLevel(logging.INFO)
compare_heterogeneous_matroid_constraints_algorithms_egalitarian_utilitarian()
#compare_heterogeneous_matroid_constraints_algorithms_egalitarian_utilitarian()
experiments_csv.single_plot_results('results/egalitarian_utilitarian_comparison_heterogeneous_constraints_algorithms_bigData.csv',filter={},x_field='num_of_agents',y_field='current_algorithm_bundle_min_value',z_field='algorithm',save_to_file='results/egalitarian_comparison_heterogeneous_constraints_algorithms_bigData.png') # egalitarian ratio plot
experiments_csv.single_plot_results('results/egalitarian_utilitarian_comparison_heterogeneous_constraints_algorithms_bigData.csv',filter={},x_field='num_of_agents',y_field='current_algorithm_bundle_sum',z_field='algorithm',save_to_file='results/utilitarian_comparison_heterogeneous_constraints_algorithms_bigData.png') # utilitarian ratio plot
experiments_csv.single_plot_results('results/egalitarian_utilitarian_comparison_heterogeneous_constraints_algorithms_bigData.csv',filter={},x_field='num_of_agents',y_field='runtime',z_field='algorithm',save_to_file='results/runtime_comparison_heterogeneous_constraints_algorithms_bigData.png') # runtime plot
224 changes: 224 additions & 0 deletions experiments/compare_high_multiplicity.py
@@ -0,0 +1,224 @@
"""
Compare the performance of high_multiplicity_fair_allocation and related algorithms.
Programmer: Naor Ladani & Elor Israeli
Since: 2024-06
"""
import experiments_csv
from pandas import read_csv
import matplotlib.pyplot as plt
from fairpyx import divide, AgentBundleValueMatrix, Instance
import fairpyx.algorithms.high_multiplicity_fair_allocation as high
import fairpyx.algorithms.improved_high_multiplicity as imp
from typing import *
import numpy as np
from eefpy import Objective, EnvyNotion
from eefpy import solve as solve

max_value = 1000
normalized_sum_of_values = 100
TIME_LIMIT = 60

algorithms_plot = [
"high_multiplicity_fair_allocation",
"solve",
"improved_high_multiplicity_fair_allocation",
]
# Define the specific algorithm you want to check
algorithms = [
high.high_multiplicity_fair_allocation,
solve,
imp.improved_high_multiplicity_fair_allocation,
]


######### EXPERIMENT WITH UNIFORMLY-RANDOM DATA ##########


def evaluate_algorithm_on_instance(algorithm, instance):

if algorithm is solve:
agent_valuations = [[int(instance.agent_item_value(agent, item)) for item in instance.items] for agent in
instance.agents]
print(f' agent valuations: {agent_valuations}')
items = [instance.item_capacity(item) for item in instance.items]

alloc = solve(num_agents=instance.num_of_agents
, num_types=instance.num_of_items
, agent_utils=agent_valuations,
items=items,
envy=EnvyNotion.EF, obj=Objective.NONE)
allocation = {}
for i, agent in enumerate(instance.agents):
allocation[agent] = []
for j, item in enumerate(instance.items):
if alloc == []:
allocation[agent] = []
else:
for sum in range(alloc[i][j]):
allocation[agent].append(item)

else:
allocation = divide(algorithm, instance)
matrix = AgentBundleValueMatrix(instance, allocation)
matrix.use_normalized_values()
return {
"utilitarian_value": matrix.utilitarian_value(),
"egalitarian_value": matrix.egalitarian_value(),

}
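A minimal sketch (not part of the file) of the count-matrix-to-allocation conversion done in the solve branch above, with toy data; the dict comprehension is equivalent to the nested loop, assuming solve returns a non-empty agents-by-item-types matrix of copy counts:

    items = ["c1", "c2", "c3"]
    agents = ["s1", "s2"]
    counts = [[2, 0, 1],   # s1 receives c1 twice and c3 once
              [0, 3, 0]]   # s2 receives c2 three times
    allocation = {
        agent: [item for j, item in enumerate(items) for _ in range(counts[i][j])]
        for i, agent in enumerate(agents)
    }
    # {'s1': ['c1', 'c1', 'c3'], 's2': ['c2', 'c2', 'c2']}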


def course_allocation_with_random_instance_uniform(
num_of_agents: int, num_of_items: int,
value_noise_ratio: float,
algorithm: Callable,
random_seed: int):
agent_capacity_bounds = [1000, 1000]
item_capacity_bounds = [2, 10]
np.random.seed(random_seed)
instance = Instance.random_uniform(
num_of_agents=num_of_agents, num_of_items=num_of_items,
normalized_sum_of_values=normalized_sum_of_values,
agent_capacity_bounds=agent_capacity_bounds,
item_capacity_bounds=item_capacity_bounds,
item_base_value_bounds=[1, max_value],
item_subjective_ratio_bounds=[1 - value_noise_ratio, 1 + value_noise_ratio]
)
return evaluate_algorithm_on_instance(algorithm, instance)


def run_uniform_experiment():
# Run on uniformly-random data:
experiment = experiments_csv.Experiment("results/", "high_multi.csv", backup_folder="results/backup/")
input_ranges = {
"num_of_agents": [2, 3, 4, 5],
"num_of_items": [2, 3, 5, 6],
"value_noise_ratio": [0, 0.2, 0.5, 0.8, 1],
"algorithm": algorithms,
"random_seed": range(2),
}
experiment.run_with_time_limit(course_allocation_with_random_instance_uniform, input_ranges, time_limit=TIME_LIMIT)
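Side calculation (not in the file): the Cartesian product of these ranges gives 4 × 4 × 5 × 3 × 2 = 480 parameter combinations, each run under the 60-second TIME_LIMIT.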


######### EXPERIMENT WITH DATA SAMPLED FROM THE naor_input DATA ##########


import json

filename = "data/naor_input.json"
with open(filename, "r", encoding="utf-8") as file:
naor_input = json.load(file)


def course_allocation_with_random_instance_sample(
max_total_agent_capacity: int,
algorithm: Callable,
random_seed: int, ):
np.random.seed(random_seed)

(agent_capacities, item_capacities, valuations) = \
(naor_input["agent_capacities"], naor_input["item_capacities"], naor_input["valuations"])
instance = Instance.random_sample(
max_num_of_agents=max_total_agent_capacity,
max_total_agent_capacity=max_total_agent_capacity,
prototype_agent_conflicts=[],
prototype_agent_capacities=agent_capacities,
prototype_valuations=valuations,
item_capacities=item_capacities,
item_conflicts=[])

return evaluate_algorithm_on_instance(algorithm, instance)


def run_naor_experiment():
# Run on the naor_input sample data:
experiment = experiments_csv.Experiment("results/", "course_allocation_naor.csv", backup_folder="results/backup/")
input_ranges = {
"max_total_agent_capacity": [12], # in reality: 1115
"algorithm": algorithms,
"random_seed": range(2),
}
experiment.run_with_time_limit(course_allocation_with_random_instance_sample, input_ranges, time_limit=TIME_LIMIT)


def create_plot_naor_experiment():
csv_file = 'results/course_allocation_naor.csv'
data = read_csv(csv_file)

print(data.head())
algorithms_for_plot = data['algorithm'].unique()

fig, axes = plt.subplots(1, 3, figsize=(21, 6))

for algorithm_p in algorithms_for_plot:
df_algo = data[data['algorithm'] == algorithm_p]
axes[0].plot(df_algo['utilitarian_value'], marker='o', linestyle='-', label=algorithm_p)
axes[1].plot(df_algo['egalitarian_value'], marker='o', linestyle='-', label=algorithm_p)
axes[2].plot(df_algo['runtime'], marker='o', linestyle='-', label=algorithm_p)

axes[0].set_title('Utilitarian Value Comparison')
axes[0].set_xlabel('')
axes[0].set_ylabel('Utilitarian Value')
axes[0].legend()

axes[1].set_title('Egalitarian Value Comparison')
axes[1].set_xlabel('')
axes[1].set_ylabel('Egalitarian Value')
axes[1].legend()

axes[2].set_title('runtime Comparison')
axes[2].set_xlabel('')
axes[2].set_ylabel('runtime')
axes[2].legend()

plt.tight_layout()
plt.savefig('results/naor_and_elor_plot.png')


def create_plot_uniform():
csv_file = 'results/high_multi.csv'
data = read_csv(csv_file)

print(data.head())
algorithms_for_plot = data['algorithm'].unique()

fig, axes = plt.subplots(1, 3, figsize=(21, 6))

for algorithm_p in algorithms_for_plot:
df_algo = data[data['algorithm'] == algorithm_p]
axes[0].plot(df_algo['utilitarian_value'], marker='o', linestyle='-', label=algorithm_p)
axes[1].plot(df_algo['egalitarian_value'], marker='o', linestyle='-', label=algorithm_p)
axes[2].plot(df_algo['runtime'], marker='o', linestyle='-', label=algorithm_p)

axes[0].set_title('Utilitarian Value Comparison')
axes[0].set_xlabel('')
axes[0].set_ylabel('Utilitarian Value')
axes[0].legend()

axes[1].set_title('Egalitarian Value Comparison')
axes[1].set_xlabel('')
axes[1].set_ylabel('Egalitarian Value')
axes[1].legend()

axes[2].set_title('runtime Comparison')
axes[2].set_xlabel('')
axes[2].set_ylabel('runtime')
axes[2].legend()

plt.tight_layout()

plt.savefig('results/high_multiplicity_uniform_plot.png')


######### MAIN PROGRAM ##########


if __name__ == "__main__":
import logging

experiments_csv.logger.setLevel(logging.DEBUG)
# run_naor_experiment()
# create_plot_naor_experiment()
# run_uniform_experiment()
create_plot_uniform()
10 changes: 10 additions & 0 deletions experiments/data/naor_input.json
@@ -0,0 +1,10 @@
{
"agent_capacities": {"s1": 23, "s2": 23, "s3": 23, "s4": 23},
"item_capacities": {"c1": 5, "c2": 6, "c3": 2, "c4": 3, "c5": 5, "c6": 2},
"valuations": {
"s1": {"c1": 152, "c2": 86, "c3": 262, "c4": 68, "c5": 263, "c6": 169},
"s2": {"c1": 124, "c2": 70, "c3": 98, "c4": 244, "c5": 329, "c6": 135},
"s3": {"c1": 170, "c2": 235, "c3": 295, "c4": 91, "c5": 91, "c6": 118},
"s4": {"c1": 158, "c2": 56, "c3": 134, "c4": 255, "c5": 192, "c6": 205}
}
}
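Arithmetic aside (not part of the data file): each agent's valuations sum to 1000, e.g. for s1: 152 + 86 + 262 + 68 + 263 + 169 = 1000, so the four agents' preferences share a common total.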
20 changes: 20 additions & 0 deletions experiments/requirements.txt
@@ -0,0 +1,20 @@
numpy>=1.21.3,<2.0.0
scipy>=1.6.1
networkz~=1.0.6
cvxpy_base>=1.1.17
prtpy
# cmake
# matplotlib
# repackage
# pytest
# cvxpy_leximin>=0.4.4
# prtpy>=0.7.0
# more_itertools
# scs
experiments_csv
cvxpy~=1.5.1
setuptools~=67.4.0
matplotlib~=3.9.0
pandas~=2.2.2
pytest~=8.2.2
eefpy~=0.1.5
7 changes: 7 additions & 0 deletions experiments/results/course_allocation_naor.csv
@@ -0,0 +1,7 @@
max_total_agent_capacity,algorithm,random_seed,utilitarian_value,egalitarian_value,runtime
12,high_multiplicity_fair_allocation,0,103.02000000000001,92.30000000000001,29.27266180400011
12,high_multiplicity_fair_allocation,1,0.0,0,968.108752568
12,solve,0,104.24000000000001,94.0,1.5066105169998991
12,solve,1,0.0,0,178.2857671109996
12,improved_high_multiplicity_fair_allocation,0,103.02000000000001,92.30000000000001,27.41918799999985
12,improved_high_multiplicity_fair_allocation,1,0.0,0,922.6537668400006
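Aside (not part of the commit): a minimal pandas sketch for summarizing these results per algorithm, assuming the CSV is read from experiments/results/course_allocation_naor.csv:

    import pandas as pd

    # Average utilitarian value, egalitarian value and runtime per algorithm.
    df = pd.read_csv("experiments/results/course_allocation_naor.csv")
    summary = df.groupby("algorithm")[["utilitarian_value", "egalitarian_value", "runtime"]].mean()
    print(summary)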