Commit

Initial commit.
Julia Kruetzmann committed Apr 30, 2020
1 parent d61df8f commit d875ab5
Showing 37 changed files with 2,143 additions and 0 deletions.
33 changes: 33 additions & 0 deletions .gitignore
@@ -0,0 +1,33 @@
torch/data/*
torch/runs/*
torch/checkpoints/*
*/runs/*
*/data/*
*/checkpoints/*
.directory

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# Distribution / packaging
bin/
build/
lib/
lib64/
var/

# Logging:
*.log
*.out

models/
weights/
rl_models/
rf_models/
runs/
ga_runs/
process_control_model/params/

preprocessing/.ipynb_checkpoints/
preprocessing/checkpoints/
39 changes: 39 additions & 0 deletions README.md
@@ -0,0 +1,39 @@
# adaptive-spreading

Code for the paper "Learning Controllers for Adaptive Spreading of Carbon Fiber Tows".
The preprocessed data and a pretrained process model can be downloaded here:
https://figshare.com/s/1a3e9b1ac16362b46cf9

## Preprocessing

Scripts to preprocess the data are located in the subdirectory `./preprocessing`.
The major preprocessing steps (sketched below) are:
+ Removing NaNs
+ Applying a Savitzky-Golay filter
+ Averaging consecutive measurements
+ Shifting the tow to the middle of each measurement
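
A minimal sketch of these steps, assuming the data is a 2-D array of measured profiles. The function name, the parameter defaults, and the centre-of-mass shift in step 4 are illustrative assumptions; only `scipy.signal.savgol_filter` is an actual library call:

```python
import numpy as np
from scipy.signal import savgol_filter


def preprocess_profiles(profiles, window=31, polyorder=3, avg_n=4):
    """Hypothetical sketch of the four preprocessing steps."""
    # 1. Remove measurements that contain NaNs
    profiles = profiles[~np.isnan(profiles).any(axis=1)]
    # 2. Smooth each profile with a Savitzky-Golay filter
    profiles = savgol_filter(profiles, window, polyorder, axis=1)
    # 3. Average blocks of avg_n consecutive measurements
    n_full = (len(profiles) // avg_n) * avg_n
    profiles = profiles[:n_full].reshape(-1, avg_n, profiles.shape[1]).mean(axis=1)
    # 4. Shift each profile so the tow's centre of mass sits in the middle
    mid = profiles.shape[1] // 2
    weights = np.clip(profiles, 0.0, None) + 1e-9  # non-negative weights
    centres = (weights * np.arange(profiles.shape[1])).sum(axis=1) / weights.sum(axis=1)
    return np.array([np.roll(row, mid - int(c)) for row, c in zip(profiles, centres)])
```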

## Tow Prediction / Process Model

### Feedforward Neural Networks

### Random Forests

## Process Control Model

### Reward/Fitness Function

The reward function consists of three parts:
+ Target height
+ Target width
+ Bar movement

`cost = -(k_h * abs(target_height - mean_current_height) + k_w * abs(target_width - current_width) + k_m * total_bar_movement)`

The three criteria can be weighted individually via the factors `k_h`, `k_w`, and `k_m`.
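
A direct transcription of this cost into Python (a sketch; the signature and default weights are assumptions, not the repository's exact interface):

```python
def cost(mean_current_height, current_width, total_bar_movement,
         target_height, target_width, k_h=1.0, k_w=1.0, k_m=1.0):
    """Negative weighted sum of the three criteria; higher is better."""
    return -(k_h * abs(target_height - mean_current_height)
             + k_w * abs(target_width - current_width)
             + k_m * total_bar_movement)
```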

### Algorithms

+ Genetic Algorithm (`start_neuroevolution.py`): a deliberately simple GA that relies on mutation alone; no crossover is provided. A sketch follows.
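
A minimal sketch of such a mutation-only GA. Truncation selection, Gaussian mutation, and all hyperparameters here are assumptions for illustration; `start_neuroevolution.py` may organise this differently:

```python
import numpy as np


def evolve(fitness_fn, param_dim, pop_size=50, n_elite=10,
           sigma=0.1, generations=100, seed=0):
    """Keep the elite each generation; refill the population with
    Gaussian-perturbed copies of randomly chosen elite individuals."""
    rng = np.random.default_rng(seed)
    pop = rng.normal(size=(pop_size, param_dim))
    for _ in range(generations):
        scores = np.array([fitness_fn(ind) for ind in pop])
        elite = pop[np.argsort(scores)[-n_elite:]]  # highest fitness survives
        parents = elite[rng.integers(n_elite, size=pop_size - n_elite)]
        pop = np.vstack([elite, parents + sigma * rng.normal(size=parents.shape)])
    scores = np.array([fitness_fn(ind) for ind in pop])
    return pop[np.argmax(scores)]  # best parameter vector found
```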
20 changes: 20 additions & 0 deletions bar_setups.txt
@@ -0,0 +1,20 @@
BAR_CONFIG = {
    'Setup0': [],
    'Setup1': [17.2, 16.1, 17.4, 16.1, 17.2],
    'Setup2': [17.2, 16.1, 22.4, 16.1, 17.2],
    'Setup3': [17.2, 11.2, 22.4, 11.1, 17.2],
    'Setup4': [22.2, 11.2, 22.4, 11.1, 22.4],
    'Setup5': [17.1, 11.2, 27.5, 11.1, 17.3],
    'Setup6': [17.2, 16.1, 17.2, 16.1, 17.3],
    'Setup7': [17.2, 16.1, 27.3, 16.1, 17.3],
    'Setup8': [17.2, 16.1, 32.2, 16.1, 17.3],
    'Setup9': [17.2, 16.1, 37.2, 16.1, 17.3],
    'Setup10': [22.1, 16.1, 37.2, 16.1, 22.1],
    'Setup11': [22.1, 16.1, 32.2, 16.1, 22.1],
    'Setup12': [27.3, 16.1, 17.6, 16.1, 27.3],
    'Setup13': [27.3, 11.0, 12.5, 11.0, 27.3],
    'Setup14': [27.3, 11.0, 31.2, 16.2, 22.2],
    'Setup15': [27.3, 19.5, 31.2, 7.8, 22.2],
    'Setup16': [27.3, 11.0, 12.5, 11.0, 27.3],
    'Setup17': [22.1, 16.1, 37.2, 16.1, 22.1],
    'Setup18': [17.2, 16.1, 32.2, 16.1, 17.3]}
Empty file added plotter/__init__.py
Empty file.
20 changes: 20 additions & 0 deletions plotter/input_output_plotter.py
@@ -0,0 +1,20 @@
import pandas as pd
import matplotlib.pyplot as plt


def plot_profile(x):
    plt.ylim((0., 2.))
    plt.plot(x, linewidth=1.4)
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    input_file = '/media/julia/Data/datasets/lufpro/real/simple_preprocess3/Razieh05.09_1Tow_Setup8_6,0m_min_3.csv'
    df = pd.read_csv(input_file, header=None)
    # columns 0-4: setup parameters, 5-804: input profile (800 values), 805+: output profile
    x_data = df.loc[:, 5:804].to_numpy()
    y_data = df.loc[:, 805:].to_numpy()
    del df
    plot_profile(x_data[30])
    plot_profile(y_data[30])
32 changes: 32 additions & 0 deletions plotter/preprocessing_plotter.py
@@ -0,0 +1,32 @@
from pathlib import Path
import pandas as pd
import numpy as np
from preprocessing.simple_preprocessing import subproc
from process_model.model_comparison import plot_samples

if __name__ == '__main__':
    filename = Path('/media/julia/Data/datasets/lufpro/real/Razieh11.09_1Tow_Setup10_8,0m_min_3.csv')
    # preprocessed_data = subproc(filename)
    sensor_dim = 800
    params_dim = 17

    data = pd.read_csv(filename)
    data.columns = range(data.shape[1])
    num = data._get_numeric_data()
    num[num < -100] = np.nan  # treat implausible values (below -100) as missing
    xmean_val = np.nanmean(num.loc[:, params_dim:sensor_dim + params_dim - 1])
    ymean_val = np.nanmean(num.loc[:, sensor_dim + params_dim:])
    x = num.values
    params_dim = x.shape[-1] - 2 * sensor_dim  # infer parameter count from the column total

    y = x[:, (sensor_dim + params_dim):]  # - ymean_val
    x = x[:, params_dim:(sensor_dim + params_dim)]  # - xmean_val

    # x[x < xmean_val - 0.5] = np.nan
    # x[x > xmean_val + 0.5] = np.nan
    # y[y < ymean_val - 0.5] = np.nan
    # y[y > ymean_val + 0.5] = np.nan
    select_data = np.random.choice(x.shape[0], 8, replace=False)
    # preprocessed_data[select_data, 5:805]
    plot_samples([x[select_data], y[select_data]], legend=['raw', 'preprocessed'],
                 title='Pre-processing', plot_edges=False)
31 changes: 31 additions & 0 deletions plotter/profile_plotter.py
@@ -0,0 +1,31 @@
import numpy as np
import torch
import matplotlib.pyplot as plt


def plot_samples(model_name, sensor_dim, data, meta=None):
    # plot predicted vs. target profiles for a random subset of samples
    num_samples = 10
    ixs = np.random.randint(len(data[0]), size=num_samples)
    preds = data[0].view(-1, sensor_dim)
    targets = data[1].view(-1, sensor_dim)
    print(ixs)
    plot_i = 1
    for i in ixs:
        if meta:
            plt.subplot(num_samples, 1, plot_i,
                        title=f'{meta[0][i]}, line {int(meta[1][i].item())}')
            plt.subplots_adjust(hspace=.5)
        else:
            plt.subplot(num_samples, 1, plot_i)
        plt.plot(targets[i], linewidth=0.4)
        plt.plot(preds[i], linewidth=0.4)
        plot_i += 1
    plt.legend(['target', 'prediction'])
    plt.suptitle(model_name)
    plt.show()


def plot_samples_np_wrapper(preds, targets, sensor_dim, name='Random forest', meta=None):
    # convert numpy arrays (e.g. random-forest output) to tensors for plot_samples
    if meta is not None:
        meta = meta[0], torch.tensor(meta[1])
    plot_samples(name, sensor_dim, (torch.tensor(preds), torch.tensor(targets)), meta=meta)
36 changes: 36 additions & 0 deletions plotter/rl_plotter.py
@@ -0,0 +1,36 @@
from _tkinter import TclError
import matplotlib.pyplot as plt

from preprocessing.tape_detection import get_tape_edges
from process_control_model.rl_utils import create_target


def render(tape_in, tape_out, bar_positions, target_width, target_height, sensor_dim=800, setup_dim=5):
    target = create_target(target_width, target_height, dim=1, sensor_dim=sensor_dim).squeeze()
    fig, axs = plt.subplots(3)
    fig.set_size_inches(16, 14)
    plt.show(block=False)
    raising_edges, falling_edges = get_tape_edges(tape_out)
    for start, row, e1, e2, action in zip(tape_in, tape_out, raising_edges, falling_edges,
                                          bar_positions):
        try:
            axs[0].clear()
            axs[0].plot(start, '-b', label='start values')
            axs[0].legend()
            axs[0].set_ylim([-0.3, 0.6])
            axs[1].clear()
            axs[1].plot(target, color='red', label='target values')
            axs[1].plot(row, '-bo', markerfacecolor='r',
                        markevery=[e1, e2], label='end values')
            axs[1].legend()
            axs[1].set_ylim([-0.3, 0.6])
            axs[2].clear()
            axs[2].plot(action, '-ko', markerfacecolor='r', markevery=list(range(setup_dim)),
                        label='bar position')
            axs[2].legend()
            axs[2].set_ylim([10, 40])
            fig.canvas.draw()
            plt.pause(0.001)
        except (KeyboardInterrupt, TclError):
            break
    plt.close(fig)
Empty file added preprocessing/__init__.py
Empty file.
62 changes: 62 additions & 0 deletions preprocessing/datasetloader.py
@@ -0,0 +1,62 @@
import logging
from pathlib import Path
import torch
from torch.utils.data.dataloader import DataLoader

from preprocessing.datasets import DefaultDataset, ExtendedMetaDataset
from preprocessing.split_datasets import (get_test_set_respecting_files,
                                          split_data_respecting_files_including_meta,
                                          get_test_set_including_meta)
import utils.paths as dirs


LOGGER = logging.getLogger(__name__)


def load_data_and_meta(device, batch_size, *args):
    train_data, test_data, val_data = split_data_respecting_files_including_meta(*args)

    def get_dataloader(data_x, data_y, data_filenames, data_index, shuffle=False, batchsize=1):
        return DataLoader(ExtendedMetaDataset(torch.from_numpy(data_x).float().to(device),
                                              torch.from_numpy(data_y).float().to(device),
                                              data_filenames, data_index),
                          batch_size=batchsize, shuffle=shuffle)

    train_loader = get_dataloader(*train_data, shuffle=True, batchsize=batch_size)
    validation_loader = get_dataloader(*val_data)
    test_loader = get_dataloader(*test_data)
    return train_loader, validation_loader, test_loader


def load_test_data_and_meta(device, datapath, sensor_dim, setup_dim, random_seed=42):
    test_x, test_y, filenames, indices = get_test_set_including_meta(datapath, sensor_dim,
                                                                     setup_dim, random_seed)
    return DataLoader(ExtendedMetaDataset(torch.from_numpy(test_x).float().to(device),
                                          torch.from_numpy(test_y).float().to(device),
                                          filenames, indices))


def load_data(device, split_function, batch_size, *args):
    train_x, test_x, val_x, train_y, test_y, val_y = split_function(*args)
    LOGGER.debug(f'size training set: {train_x.shape}, test set: {test_y.shape}')
    train_loader = DataLoader(DefaultDataset(torch.from_numpy(train_x).float().to(device),
                                             torch.from_numpy(train_y).float().to(device)),
                              batch_size=batch_size, shuffle=True)
    validation_loader = DataLoader(DefaultDataset(torch.from_numpy(val_x).float().to(device),
                                                  torch.from_numpy(val_y).float().to(device)))
    test_loader = DataLoader(DefaultDataset(torch.from_numpy(test_x).float().to(device),
                                            torch.from_numpy(test_y).float().to(device)))
    return train_loader, validation_loader, test_loader


def load_test_data(device, datapath, sensor_dim, setup_dim, random_seed=42):
    test_x, test_y = get_test_set_respecting_files(
        datapath, sensor_dim, setup_dim, random_seed)
    return DataLoader(DefaultDataset(torch.from_numpy(test_x).float().to(device),
                                     torch.from_numpy(test_y).float().to(device)))


if __name__ == '__main__':
    DEVICE = torch.device("cpu")
    SPLIT_ARGS = [Path(dirs.DATA), 800, 5, False, 1111]
    _, _, TEST_LOADER = load_data_and_meta(DEVICE, 20, *SPLIT_ARGS)
    _, _, FILENAME, INDEX = next(iter(TEST_LOADER))
    print(FILENAME[0], INDEX[0].item())
32 changes: 32 additions & 0 deletions preprocessing/datasets.py
@@ -0,0 +1,32 @@
from torch.utils.data import Dataset


class DefaultDataset(Dataset):
    """Pairs of input and target tow profiles."""

    def __init__(self, input_profiles, target_profiles):
        super(DefaultDataset, self).__init__()
        self.input_profiles = input_profiles
        self.target_profiles = target_profiles
        self.datasetsize = len(self.input_profiles)

    def __getitem__(self, index):
        return self.input_profiles[index], self.target_profiles[index]

    def __len__(self):
        return self.datasetsize


class ExtendedMetaDataset(Dataset):
    """Profile pairs plus metadata: source filename and line index of each sample."""

    def __init__(self, input_profiles, target_profiles, filenames, lines):
        super(ExtendedMetaDataset, self).__init__()
        self.input_profiles = input_profiles
        self.target_profiles = target_profiles
        self.filenames = filenames
        self.lines = lines
        self.dataset_size = len(self.input_profiles)

    def __getitem__(self, index):
        return self.input_profiles[index], self.target_profiles[index], \
            self.filenames[index], self.lines[index]

    def __len__(self):
        return self.dataset_size