Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

dfs fixed #414

Draft
wants to merge 5 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions interrogate_badge.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
7 changes: 4 additions & 3 deletions src/qililab/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,17 +219,18 @@ class RESULTSDATAFRAME:
SOFTWARE_AVG_INDEX = "software_avg_index"
SEQUENCE_INDEX = "sequence_index"
LOOP_INDEX = "loop_index_"
QUBIT_INDEX = "qubit_index"
RESULTS_INDEX = "results_index"
BINS_INDEX = "bins_index"
SEQUENCER = "sequencer"
BIN = "bin"
CIRCUIT = "circuit"
SCOPE_INDEX = "scope_index"
ACQUISITION_INDEX = "acquisition_index"
P0 = "p0"
P1 = "p1"
I = "i" # noqa: E741
Q = "q"
AMPLITUDE = "amplitude"
PHASE = "phase"
BINARY_CLASSIFICATION = "binary_classification"


UNITS = {"frequency": "Hz"}
Expand Down
3 changes: 1 addition & 2 deletions src/qililab/result/acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ class Acquisition:
Args:
pulse_length (int): Duration (in ns) of the pulse.
i_values: (NDArray[numpy.float32]): I data normalized
q_values: (NDArray[numpy.float32]): I data normalize
q_values: (NDArray[numpy.float32]): Q data normalized
amplitude_values: (NDArray[numpy.float32]): amplitude values from I/Q normalized
phase_values: (NDArray[numpy.float32]): phase values from I/Q normalized

Expand Down Expand Up @@ -48,7 +48,6 @@ def _create_acquisition(self) -> pd.DataFrame:
amplitude, phase.
For multiple values you may need to redefine this method.
"""

return pd.DataFrame(
{
RESULTSDATAFRAME.I: self.i_values,
Expand Down
10 changes: 4 additions & 6 deletions src/qililab/result/acquisitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from qililab.constants import RESULTSDATAFRAME
from qililab.result.acquisition import Acquisition
from qililab.utils.dataframe_manipulation import concatenate_creating_new_name_index
from qililab.utils.dataframe_manipulation import concatenate_creating_new_concatenation_index_name


@dataclass
Expand All @@ -21,13 +21,11 @@ class Acquisitions:
data_dataframe_indices: set[str] = field(init=False, default_factory=set)

def acquisitions(self) -> pd.DataFrame:
"""return the acquisitions with a structure
I, Q, Amplitude, Phase
"""
"""return the acquisitions with a structure: qubit_index, bin, I, Q, Amplitude, Phase"""
acquisition_list = [acquisition.acquisition for acquisition in self._acquisitions]

return concatenate_creating_new_name_index(
dataframe_list=acquisition_list, new_index_name=RESULTSDATAFRAME.ACQUISITION_INDEX
return concatenate_creating_new_concatenation_index_name(
dataframe_list=acquisition_list, new_concatenation_index_name=RESULTSDATAFRAME.SEQUENCER
)

def probabilities(self) -> dict[str, float]:
Expand Down
6 changes: 3 additions & 3 deletions src/qililab/result/qblox_results/bins_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,13 @@ def __post_init__(self):
self.path1 = [value for value in self.path1 if not np.isnan(value)]

integration: QbloxIntegrationData
threshold: list
binary_classification: list
avg_cnt: list

def __post_init__(self):
"""Remove nan values.""" # FIXME: Since we cannot do ascending loops in Qpysequence, we need to
# use always a number of bins = num_loops + 1. Thus the first bin is always a nan.
self.threshold = [value for value in self.threshold if not np.isnan(value)]
self.binary_classification = [value for value in self.binary_classification if not np.isnan(value)]
self.avg_cnt = [value for value in self.avg_cnt if not np.isnan(value)]

def __len__(self) -> int:
Expand All @@ -39,4 +39,4 @@ def __len__(self) -> int:
Returns:
int: Length of the QbloxIntegrationData.
"""
return len(self.threshold)
return len(self.binary_classification)
28 changes: 24 additions & 4 deletions src/qililab/result/qblox_results/qblox_bins_acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

from dataclasses import dataclass

import numpy as np
import numpy.typing as npt
import pandas as pd

from qililab.constants import RESULTSDATAFRAME
Expand All @@ -10,13 +12,31 @@

@dataclass
class QbloxBinAcquisition(Acquisition):
"""Qblox Bin Acquisition normalized"""
"""Qblox Bin Acquisition normalized
Args:
binary_classification_values (NDArray[np.float32]): thresholded classification result (0 or 1) for each bin
"""

binary_classification_values: npt.NDArray[np.float32]

def __post_init__(self):
"""Create acquisitions"""
super().__post_init__()
self.data_dataframe_indices.add(RESULTSDATAFRAME.BINARY_CLASSIFICATION)

def _create_acquisition(self) -> pd.DataFrame:
"""transposes each of the acquired results arrays so that we have for each value
a structure with i, q, amplitude, phase.
a structure with: bin, i, q, amplitude, phase.
"""
acquisition_dataframe = super()._create_acquisition()
acquisition_dataframe.index.rename(RESULTSDATAFRAME.BINS_INDEX, inplace=True)
acquisition_dataframe = pd.DataFrame(
{
RESULTSDATAFRAME.I: self.i_values,
RESULTSDATAFRAME.Q: self.q_values,
RESULTSDATAFRAME.AMPLITUDE: self.amplitude_values,
RESULTSDATAFRAME.PHASE: self.phase_values,
RESULTSDATAFRAME.BINARY_CLASSIFICATION: self.binary_classification_values,
}
)
acquisition_dataframe.index.rename(RESULTSDATAFRAME.BIN, inplace=True)
acquisition_dataframe.reset_index(inplace=True)
return acquisition_dataframe
14 changes: 10 additions & 4 deletions src/qililab/result/qblox_results/qblox_bins_acquisitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,13 @@ def _build_bin_acquisition(self, bins_data: BinsData, integration_length: int):
"""build a bin acquisition"""
i_values = np.array(bins_data.integration.path0, dtype=np.float32)
q_values = np.array(bins_data.integration.path1, dtype=np.float32)
return QbloxBinAcquisition(integration_length=integration_length, i_values=i_values, q_values=q_values)
binary_classification_values = np.array(bins_data.binary_classification, dtype=np.float32)
return QbloxBinAcquisition(
integration_length=integration_length,
i_values=i_values,
q_values=q_values,
binary_classification_values=binary_classification_values,
)

def counts(self) -> Counts:
"""Return the counts of measurements in each state.
Expand All @@ -46,9 +52,9 @@ def counts(self) -> Counts:
# TODO: Add limitations to check we are doing single-shot for multi qubit?
counts_object = Counts(n_qubits=len(self.bins))
for bin_idx in range(num_bins):
# The threshold inside of a qblox bin is the name they use for already classified data as a value between
# 0 and 1, not the value used in the comparator to perform such classification.
measurement_as_list = [int(bins_data.threshold[bin_idx]) for bins_data in self.bins]
# The binary_classification inside of a qblox bin is the name they use for already classified data as a
# value between 0 and 1, and threshold is the value used in the comparator to perform such classification.
measurement_as_list = [int(bins_data.binary_classification[bin_idx]) for bins_data in self.bins]
measurement = "".join(str(bit) for bit in measurement_as_list)
counts_object.add_measurement(state=measurement)
return counts_object
2 changes: 1 addition & 1 deletion src/qililab/result/qblox_results/qblox_result.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ class QbloxResult(Result):
- integration: integration data.
- path_0: input path 0 integration result bin list.
- path_1: input path 1 integration result bin list.
- threshold: threshold result bin list.
- binary_classification: thresholded result bin list.
- valid: list of valid indications per bin.
- avg_cnt: list of number of averages per bin.
Args:
Expand Down
23 changes: 17 additions & 6 deletions src/qililab/result/results.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
"""Results class."""
from collections import Counter
from copy import deepcopy
from dataclasses import dataclass, field

Expand All @@ -11,7 +10,10 @@
from qililab.result.qblox_results.qblox_result import QbloxResult
from qililab.result.result import Result
from qililab.utils import coordinate_decompose
from qililab.utils.dataframe_manipulation import concatenate_creating_new_name_index
from qililab.utils.dataframe_manipulation import (
concatenate_creating_new_concatenation_index_name,
concatenate_creating_new_index_name_and_concatenation_index_name,
)
from qililab.utils.factory import Factory
from qililab.utils.loop import Loop
from qililab.utils.util_loops import compute_ranges_from_loops, compute_shapes_from_loops
Expand Down Expand Up @@ -91,7 +93,10 @@ def to_dataframe(self) -> pd.DataFrame:
"""

result_dataframes = [result.to_dataframe() for result in self.results]
return concatenate_creating_new_name_index(dataframe_list=result_dataframes, new_index_name="result_index")
return concatenate_creating_new_concatenation_index_name(
dataframe_list=result_dataframes,
new_concatenation_index_name=RESULTSDATAFRAME.CIRCUIT,
)

def _build_empty_result_dataframe(self):
"""Builds an empty result dataframe, with the minimal number of columns and nans as values"""
Expand All @@ -110,8 +115,10 @@ def _concatenate_acquisition_dataframes(self):
result.acquisitions().reset_index(drop=True) if result is not None else self._build_empty_result_dataframe()
for result in self.results
]
return concatenate_creating_new_name_index(
dataframe_list=result_acquisition_list, new_index_name=RESULTSDATAFRAME.RESULTS_INDEX
return concatenate_creating_new_index_name_and_concatenation_index_name(
dataframe_list=result_acquisition_list,
new_index_name=RESULTSDATAFRAME.RESULTS_INDEX,
new_concatenation_index_name=RESULTSDATAFRAME.CIRCUIT,
)

def _generate_new_acquisition_column_names(self):
Expand Down Expand Up @@ -191,7 +198,11 @@ def acquisitions(self, mean: bool = False) -> pd.DataFrame:
expanded_acquisition_df = self._add_meaningful_acquisition_indices(
result_acquisition_dataframe=result_acquisition_df
)
return self._process_acquisition_dataframe_if_needed(result_dataframe=expanded_acquisition_df, mean=mean)
processed_acquisition_df = self._process_acquisition_dataframe_if_needed(
result_dataframe=expanded_acquisition_df, mean=mean
)

return processed_acquisition_df.drop(columns=[RESULTSDATAFRAME.RESULTS_INDEX])

def _fill_missing_values(self):
"""Fill with None the missing values."""
Expand Down
96 changes: 88 additions & 8 deletions src/qililab/utils/dataframe_manipulation.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,29 @@
"""Utilities for usual dataframe manipulation"""
import pandas as pd


import pandas as pd
def concatenate_creating_new_index_name(dataframe_list: list[pd.DataFrame], new_index_name: str) -> pd.DataFrame:
"""Concatenates an ordered list of dataframes into a single one, adding a new named column copying the index of
the final concatenated dataframe.

The result dataframe will have as columns the union of the original dataframes plus the new column with a copy
of the final index.

def concatenate_creating_new_name_index(dataframe_list: list[pd.DataFrame], new_index_name: str) -> pd.DataFrame:
"""Concatenates an ordered list of dataframes into a single one, adding a new named column containing the index of
the dataframe the data came from in the original list.
The result dataframe will have as columns the union of the original dataframes plus the new column with the
indices.
The index structure of the dataframes will not be considered when creating the new one (as opposed to when
normally using a dataframe, where you would get a multiindex if previous index was structured).

Example:
new_index_name (copy of left index)
v
df0 df1 |NEW' BIN Q I A Phase
| BIN Q I A Phase | BIN Q I A Phase 0 | 0 ' 0 1 1 1 0 ] df0
0 | 0 1 1 1 0 + 0 | 0 0 0 0 0 = 1 | 1 ' 1 1 1 1 0 ]
1 | 1 1 1 1 0 1 | 1 0 0 0 0 2 | 2 ' 0 0 0 0 0 ] df1
3 | 3 ' 1 0 0 0 0 ]

Args:
dataframe_list: list of dataframes to concatenate
new_index_name: name to be given to the new column containing the indices
dataframe_list (list[df]): list of dataframes to concatenate
new_index_name (str): name to be given to the new column containing the copy of the final index

Returns:
pd.Dataframe: the new dataframe
Expand All @@ -23,3 +32,74 @@ def concatenate_creating_new_name_index(dataframe_list: list[pd.DataFrame], new_
concatenated_df.index.rename(new_index_name, inplace=True)
concatenated_df.reset_index(inplace=True)
return concatenated_df


def concatenate_creating_new_concatenation_index_name(
    dataframe_list: list[pd.DataFrame], new_concatenation_index_name: str
) -> pd.DataFrame:
    """Concatenates an ordered list of dataframes into a single one, adding a new named column containing the
    position each row's source dataframe had in ``dataframe_list``.

    The result dataframe will have as columns the union of the original dataframes' columns plus, as first column,
    the new column that tells you from which element of ``dataframe_list`` each row came. The index structure of
    the input dataframes is not preserved: the result gets a fresh 0..N-1 index.

    The input dataframes are not mutated; the new column is added to copies before concatenation.

    Example:
            new_concatenation_index_name (position of concatenation)
             v
        |NEW' BIN  Q  I  A  Phase
      0 | 0 ' 0    1  1  1  0      ] df0
      1 | 0 ' 1    1  1  1  0      ]
      2 | 1 ' 0    0  0  0  0      ] df1
      3 | 1 ' 1    0  0  0  0      ]

    Args:
        dataframe_list (list[pd.DataFrame]): list of dataframes to concatenate.
        new_concatenation_index_name (str): name of the new column containing the position from ``dataframe_list``.

    Returns:
        pd.DataFrame: the new concatenated dataframe.

    Raises:
        ValueError: if ``dataframe_list`` is empty (propagated from ``pd.concat``).
    """
    annotated_dataframes = []
    for position, dataframe in enumerate(dataframe_list):
        # Work on a copy so the caller's dataframes are left untouched (the previous
        # implementation added the column to the inputs in place, a surprising side effect).
        annotated = dataframe.copy()
        # Insert at position 0 so the concatenation index is always the first column.
        annotated.insert(0, new_concatenation_index_name, position)
        annotated_dataframes.append(annotated)

    return pd.concat(annotated_dataframes, ignore_index=True)


def concatenate_creating_new_index_name_and_concatenation_index_name(
    dataframe_list: list[pd.DataFrame], new_index_name: str, new_concatenation_index_name: str
) -> pd.DataFrame:
    """Concatenates an ordered list of dataframes into a single one, adding two new named columns: one copying the
    index of the final concatenated dataframe and one containing the position each row's source dataframe had in
    ``dataframe_list``.

    The result dataframe will have as columns the union of the original dataframes' columns plus the two new
    columns produced by ``concatenate_creating_new_index_name`` and
    ``concatenate_creating_new_concatenation_index_name`` respectively.

    The index structure of the input dataframes will not be considered when creating the new one (as opposed to
    when normally using a dataframe, where you would get a multiindex if the previous index was structured).

    Example: new_index_name
              v    new_concatenation_index_name
              v     v
        |NEW1 NEW2' BIN  Q  I  A  Phase
      0 | 0    0  ' 0    1  1  1  0      ] df0
      1 | 1    0  ' 1    1  1  1  0      ]
      2 | 2    1  ' 0    0  0  0  0      ] df1
      3 | 3    1  ' 1    0  0  0  0      ]

    Args:
        dataframe_list (list[pd.DataFrame]): list of dataframes to concatenate.
        new_index_name (str): name to be given to the new column containing the copy of the final index.
        new_concatenation_index_name (str): name of the new column containing the position from ``dataframe_list``.

    Returns:
        pd.DataFrame: the new dataframe.
    """
    # First pass: concatenate everything, tagging each row with the position of its source dataframe.
    tagged_dataframe = concatenate_creating_new_concatenation_index_name(
        dataframe_list=dataframe_list,
        new_concatenation_index_name=new_concatenation_index_name,
    )

    # Second pass: copy the flat index of the already-concatenated dataframe into its own named column.
    return concatenate_creating_new_index_name(dataframe_list=[tagged_dataframe], new_index_name=new_index_name)
Loading