Implemented from: https://github.com/MaastrichtU-CDS/v6-healthai-dashboard-py
Showing 5 changed files with 173 additions and 1 deletion.
@@ -0,0 +1,42 @@
import json
import logging
from typing import Any, Dict

from .algo import FederatedServerAlgo

logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)


class StatsFederatedServerAlgo(FederatedServerAlgo):

    def __init__(self, params: Dict[str, Any]):
        super().__init__(name="stats", params=params, model_suffix="json")

    def initialize(self):
        # parameters to be passed to the worker
        self.params["cutoff"] = self.params.get("cutoff", 730)
        self.params["delta"] = self.params.get("delta", 30)
        logger.debug("Parameters to be passed to the worker have been initialized: %s", self.params)

        # We definitely don't need an initial model for this algorithm, but the
        # server makes workers start training by sharing an initial model, so we
        # create an empty file here.
        # Hopefully it's clear by now that this whole thing is just a PoC.
        # This is not an actual proper FL implementation with IDS/TSG!
        with open(self.model_aggregated_path, "w+") as f:
            f.write("")

    def aggregate(self, current_round):
        # no real aggregation, just concatenate all partial results
        # (open each file in a context manager so handles are closed)
        aggregated_results = []
        for file in self.round_partial_models[current_round]:
            with open(file) as f:
                aggregated_results.append(json.load(f))
        logger.info("Concatenated %s partial results", len(aggregated_results))
        logger.info("Saving final compiled stats")
        with open(self.model_aggregated_path, "w+") as f:
            json.dump(aggregated_results, f)

        # we only require one round for this algorithm, so aggregation happens
        # only once and we can signal we are done
        return True
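Since the "aggregation" above is just concatenation of the workers' JSON payloads, the pattern can be sketched standalone, outside the FederatedServerAlgo base class. The file names and payload values below are hypothetical:

import json
import tempfile
from pathlib import Path

# two hypothetical partial results, as each worker would write them
partials = [
    {"organisation": "centre_a", "nids": 120},
    {"organisation": "centre_b", "nids": 95},
]

workdir = Path(tempfile.mkdtemp())
partial_paths = []
for i, payload in enumerate(partials):
    path = workdir / f"partial_{i}.json"
    path.write_text(json.dumps(payload))
    partial_paths.append(path)

# the same concatenation step aggregate() performs
aggregated = [json.loads(p.read_text()) for p in partial_paths]
(workdir / "aggregated.json").write_text(json.dumps(aggregated))
print(aggregated)  # [{'organisation': 'centre_a', ...}, {'organisation': 'centre_b', ...}]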
@@ -0,0 +1,121 @@
import json
import logging
from typing import Any, Dict, Optional

import pandas as pd

from .algo import FederatedWorkerAlgo

logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)


class StatsFederatedWorkerAlgo(FederatedWorkerAlgo):
    def __init__(self, params: Optional[Dict[str, Any]] = None):
        # avoid a mutable default argument for params
        self.data: Optional[pd.DataFrame] = None
        super().__init__(name="stats", params=params or {}, model_suffix="json")

    def initialize(self):
        # defaults match the ones the server sets before dispatching
        self.cutoff = self.params.get('cutoff', 730)
        self.delta = self.params.get('delta', 30)

        # will be used to store results during "training"
        self.results = {'logs': ''}

        logger.info("Reading data from %s.csv", self.key)
        # "training" will read from here (self.data)
        self.data = pd.read_csv(f"{self.key}.csv")
        logger.info("Data shape: %s", self.data.shape)
        logger.info("Initialized, ready for training")

    def survival_rate(self, df: pd.DataFrame, cutoff: int, delta: int) -> list:
        """Compute survival rate at certain time points after diagnosis

        Parameters
        ----------
        df
            DataFrame with TNM data
        cutoff
            Maximum number of days for the survival rate profile
        delta
            Number of days between the time points in the profile

        Returns
        -------
        survival_rates
            Survival rate profile
        """
        # Get survival days; here we assume the date of last follow-up as death date
        df['date_of_diagnosis'] = pd.to_datetime(df['date_of_diagnosis'])
        df['date_of_fu'] = pd.to_datetime(df['date_of_fu'])
        df['survival_days'] = (df['date_of_fu'] - df['date_of_diagnosis']).dt.days

        # Get survival rate after a certain number of days
        times = list(range(0, cutoff, delta))
        all_alive = len(df[df['vital_status'] == 'alive'])
        all_dead = len(df[df['vital_status'] == 'dead'])
        survival_rates = []
        for time in times:
            dead = len(
                df[(df['survival_days'] <= time) & (df['vital_status'] == 'dead')]
            )
            alive = (all_dead - dead) + all_alive
            survival_rates.append(alive / len(df))

        return survival_rates

    def train(self, callback=None):
        # statistics adapted from: https://github.com/MaastrichtU-CDS/v6-healthai-dashboard-py
        logger.info('Getting centre name')
        column = 'centre'
        if column in self.data.columns:
            centre = self.data[column].unique()[0]
            self.results['organisation'] = centre
        else:
            self.results['organisation'] = None
            self.results['logs'] += f'Column {column} not found in the data\n'

        logger.info('Counting number of unique ids')
        column = 'id'
        if column in self.data.columns:
            nids = self.data[column].nunique()
            self.results['nids'] = nids
        else:
            self.results['logs'] += f'Column {column} not found in the data\n'

        logger.info('Counting number of unique ids per stage')
        column = 'stage'
        if column in self.data.columns:
            self.data[column] = self.data[column].str.upper()
            stages = self.data.groupby([column])['id'].nunique().reset_index()
            self.results[column] = stages.to_dict()
        else:
            self.results['logs'] += f'Column {column} not found in the data\n'

        logger.info('Counting number of unique ids per vital status')
        column = 'vital_status'
        if column in self.data.columns:
            vital_status = self.data.groupby([column])['id'].nunique().reset_index()
            self.results[column] = vital_status.to_dict()
        else:
            self.results['logs'] += f'Column {column} not found in the data\n'

        logger.info('Getting survival rates')
        columns = ['date_of_diagnosis', 'date_of_fu']
        if (columns[0] in self.data.columns) and (columns[1] in self.data.columns):
            survival = self.survival_rate(self.data, self.cutoff, self.delta)
            self.results['survival'] = survival
        else:
            self.results['logs'] += \
                f'Columns {columns[0]} and/or {columns[1]} not found in the data\n'

        # Save results
        logger.info("Saving local statistics results")
        with open(self.model_path, "w+") as f:
            json.dump(self.results, f)
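To see what survival_rate produces, here is a self-contained sketch that runs the same computation on a tiny synthetic DataFrame. The column names match the worker's expectations; the patient records and the coarser time grid are made up for a short printout:

import pandas as pd

df = pd.DataFrame({
    'id': [1, 2, 3, 4],
    'vital_status': ['alive', 'dead', 'dead', 'alive'],
    'date_of_diagnosis': ['2020-01-01', '2020-01-01', '2020-01-01', '2020-01-01'],
    'date_of_fu': ['2021-06-01', '2020-03-01', '2020-12-01', '2021-06-01'],
})

df['date_of_diagnosis'] = pd.to_datetime(df['date_of_diagnosis'])
df['date_of_fu'] = pd.to_datetime(df['date_of_fu'])
df['survival_days'] = (df['date_of_fu'] - df['date_of_diagnosis']).dt.days

cutoff, delta = 730, 180  # coarser grid than the defaults (730, 30)
times = list(range(0, cutoff, delta))
all_alive = len(df[df['vital_status'] == 'alive'])
all_dead = len(df[df['vital_status'] == 'dead'])
rates = []
for time in times:
    dead = len(df[(df['survival_days'] <= time) & (df['vital_status'] == 'dead')])
    rates.append(((all_dead - dead) + all_alive) / len(df))

print(dict(zip(times, rates)))
# {0: 1.0, 180: 0.75, 360: 0.5, 540: 0.5}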
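For reference, a single worker's partial result (the JSON written at the end of train()) has roughly this shape. The keys come from the code above; the values are illustrative, not real data:

# illustrative only: keys from train() above, values made up
partial_result = {
    "logs": "",  # accumulates 'Column ... not found' messages
    "organisation": "centre_a",  # first value of the 'centre' column
    "nids": 120,  # number of unique patient ids
    # groupby(...).nunique().reset_index().to_dict() yields column -> {row: value}
    "stage": {"stage": {0: "I", 1: "II"}, "id": {0: 70, 1: 50}},
    "vital_status": {"vital_status": {0: "alive", 1: "dead"}, "id": {0: 80, 1: 40}},
    "survival": [1.0, 0.98, 0.95],  # one rate per time point in the profile
}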