[1]:
import copy
import inspect
import os
import shutil
from datetime import date

import numpy as np
import pandas as pd
import plotly.express as px
from datasets import Dataset
from kaggle.api.kaggle_api_extended import KaggleApi
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm

from cyclops.data.slicer import SliceSpec
from cyclops.evaluate.metrics import create_metric
from cyclops.evaluate.metrics.experimental.metric_dict import MetricDict
from cyclops.report import ModelCardReport
from cyclops.report.plot.classification import ClassificationPlotter
from cyclops.report.utils import flatten_results_dict

Report Generation for Heart Failure Prediction#

Here’s an example demonstrating how we can generate a report as we proceed through the steps of training and evaluating a model. For this purpose, we use Kaggle’s Heart Failure Prediction dataset and gradually populate the report with information about the dataset, the model, and the results.

Create Model Card Report#

First, we should create a ModelCardReport object to fill in the fields and sections after training.

[2]:
report = ModelCardReport()
[3]:
# Constants
DATA_DIR = "./data"
RANDOM_SEED = 21

Data Loading#

Before starting, make sure to install the Kaggle API client by running pip install kaggle. To use the Kaggle API, you need to sign up for a Kaggle account at https://www.kaggle.com. Then go to the ‘Account’ tab of your user profile (https://www.kaggle.com/<username>/account) and select ‘Create API Token’. This triggers the download of kaggle.json, a file containing your API credentials. Place this file at ~/.kaggle/kaggle.json on your machine.
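
If you prefer not to store a credentials file, the Kaggle client can also read your credentials from environment variables. Since os is already imported above, a minimal sketch with placeholder values (replace them with your own) looks like:

os.environ["KAGGLE_USERNAME"] = "<your-kaggle-username>"
os.environ["KAGGLE_KEY"] = "<your-api-key>"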

[4]:
api = KaggleApi()
api.authenticate()
api.dataset_download_files(
    "fedesoriano/heart-failure-prediction",
    path=DATA_DIR,
    unzip=True,
)
[5]:
df = pd.read_csv(os.path.join(DATA_DIR, "heart.csv"))
df.head()
[5]:
Age Sex ChestPainType RestingBP Cholesterol FastingBS RestingECG MaxHR ExerciseAngina Oldpeak ST_Slope HeartDisease
0 40 M ATA 140 289 0 Normal 172 N 0.0 Up 0
1 49 F NAP 160 180 0 Normal 156 N 1.0 Flat 1
2 37 M ATA 130 283 0 ST 98 N 0.0 Up 0
3 48 F ASY 138 214 0 Normal 108 Y 1.5 Flat 1
4 54 M NAP 150 195 0 Normal 122 N 0.0 Up 0
[6]:
df.describe().T
[6]:
count mean std min 25% 50% 75% max
Age 918.0 53.510893 9.432617 28.0 47.00 54.0 60.0 77.0
RestingBP 918.0 132.396514 18.514154 0.0 120.00 130.0 140.0 200.0
Cholesterol 918.0 198.799564 109.384145 0.0 173.25 223.0 267.0 603.0
FastingBS 918.0 0.233115 0.423046 0.0 0.00 0.0 0.0 1.0
MaxHR 918.0 136.809368 25.460334 60.0 120.00 138.0 156.0 202.0
Oldpeak 918.0 0.887364 1.066570 -2.6 0.00 0.6 1.5 6.2
HeartDisease 918.0 0.553377 0.497414 0.0 0.00 1.0 1.0 1.0
[7]:
fig = px.pie(df, names="Sex")

fig.update_layout(
    title="Sex Distribution",
)

fig.show()

Adding figures to report#

We can add figures and diagrams to the report, along with a caption and the section of the report that each figure belongs to. Since we are exploring the distribution of different features in the dataset, we add this figure to the datasets section:

[8]:
report.log_plotly_figure(
    fig=fig,
    caption="Sex Distribution",
    section_name="datasets",
)

Age distribution figure#

We plot a histogram of ages similarly and add the figure to our report, where it will also appear under the Datasets section:

[9]:
fig = px.histogram(df, x="Age")
fig.update_layout(
    title="Age Distribution",
    xaxis_title="Age",
    yaxis_title="Count",
    bargap=0.2,
)

fig.show()
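
Assuming we want this histogram in the report as well, we log it to the same datasets section, mirroring the earlier log_plotly_figure call:

report.log_plotly_figure(
    fig=fig,
    caption="Age Distribution",
    section_name="datasets",
)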

Outcome distribution#

Plot the outcome distribution and add it to the report, again under the Datasets section:

[10]:
df["outcome"] = df["HeartDisease"].astype("int")
df = df.drop(columns=["HeartDisease"])
[11]:
fig = px.pie(df, names="outcome")
fig.update_traces(textinfo="percent+label")
fig.update_layout(title_text="Outcome Distribution")
fig.update_traces(
    hovertemplate="Outcome: %{label}<br>Count: \
    %{value}<br>Percent: %{percent}",
)
fig.show()
[12]:
report.log_plotly_figure(
    fig=fig,
    caption="Outcome Distribution",
    section_name="datasets",
)
[13]:
class_counts = df["outcome"].value_counts()
class_ratio = class_counts[0] / class_counts[1]
print(class_ratio, class_counts)
0.8070866141732284 outcome
1    508
0    410
Name: count, dtype: int64

Data Preprocessing#

This dataset does not contain any null values, so we can move straight to feature scaling. The string columns in the dataframe are stored as the object dtype, so we convert them to the string dtype before working with them:
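
As a quick sanity check (not part of the original cells), we can confirm the absence of missing values before preprocessing:

assert df.isnull().sum().sum() == 0  # every column is fully populated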

[14]:
string_col = df.select_dtypes(include="object").columns
df[string_col] = df[string_col].astype("string")
string_col = df.select_dtypes("string").columns.to_list()

We one-hot encode the categorical features using pd.get_dummies:

[15]:
target = "outcome"

df_processed = pd.get_dummies(df, columns=string_col, drop_first=False)
df_processed.head()
[15]:
Age RestingBP Cholesterol FastingBS MaxHR Oldpeak outcome Sex_F Sex_M ChestPainType_ASY ... ChestPainType_NAP ChestPainType_TA RestingECG_LVH RestingECG_Normal RestingECG_ST ExerciseAngina_N ExerciseAngina_Y ST_Slope_Down ST_Slope_Flat ST_Slope_Up
0 40 140 289 0 172 0.0 0 False True False ... False False False True False True False False False True
1 49 160 180 0 156 1.0 1 True False False ... True False False True False True False False True False
2 37 130 283 0 98 0.0 0 False True False ... False False False False True True False False False True
3 48 138 214 0 108 1.5 1 True False True ... False False False True False False True False True False
4 54 150 195 0 122 0.0 0 False True False ... True False False True False True False False False True

5 rows × 21 columns

[16]:
df.head()
[16]:
Age Sex ChestPainType RestingBP Cholesterol FastingBS RestingECG MaxHR ExerciseAngina Oldpeak ST_Slope outcome
0 40 M ATA 140 289 0 Normal 172 N 0.0 Up 0
1 49 F NAP 160 180 0 Normal 156 N 1.0 Flat 1
2 37 M ATA 130 283 0 ST 98 N 0.0 Up 0
3 48 F ASY 138 214 0 Normal 108 Y 1.5 Flat 1
4 54 M NAP 150 195 0 Normal 122 N 0.0 Up 0
[17]:
feature_cols = df_processed.columns.to_list()
feature_cols.remove(target)

Before training, let’s document the dataset in the model card. This can be done using the log_dataset method, which takes the following arguments:

  • description: A description of the dataset.

  • citation: The citation for the dataset.

  • link: A link to a resource for the dataset.

  • license_id: The SPDX license identifier for the dataset.

  • version: The version of the dataset.

  • features: A list of features in the dataset.

  • split: The split of the dataset (train, test, validation, etc.).

  • sensitive_features: A list of sensitive features used to train/evaluate the model.

  • sensitive_feature_justification: A justification for the sensitive features used to train/evaluate the model.

[18]:
report.log_dataset(
    description="""This dataset was created by combining different datasets
    already available independently but not combined before. In this dataset,
    5 heart datasets are combined over 11 common features. Every dataset used
    can be found under the Index of heart disease datasets from UCI
    Machine Learning Repository on the following link:
    https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/.""",
    citation=inspect.cleandoc(
        """
        @misc{fedesoriano,
          title={Heart Failure Prediction Dataset.},
          author={Fedesoriano, F},
          year={2021},
          publisher={Kaggle}
        }
    """,
    ),
    link="""
    https://www.kaggle.com/datasets/fedesoriano/heart-failure-prediction
    """,
    license_id="CC0-1.0",
    version="Version 1",
    features=[col for col in df.columns.to_list() if col != target],
    sensitive_features=["Sex", "Age"],
    sensitive_feature_justification="Demographic information like age and gender \
        often have a strong correlation with health outcomes. For example, older \
        patients are more likely to have a higher risk of heart disease.",
)

Create HuggingFace Dataset#

We convert our processed pandas dataframe into a Hugging Face dataset for later evaluation with CyclOps metrics.

[19]:
dataset = Dataset.from_pandas(df_processed)
dataset.cleanup_cache_files()
print(dataset)
Dataset({
    features: ['Age', 'RestingBP', 'Cholesterol', 'FastingBS', 'MaxHR', 'Oldpeak', 'outcome', 'Sex_F', 'Sex_M', 'ChestPainType_ASY', 'ChestPainType_ATA', 'ChestPainType_NAP', 'ChestPainType_TA', 'RestingECG_LVH', 'RestingECG_Normal', 'RestingECG_ST', 'ExerciseAngina_N', 'ExerciseAngina_Y', 'ST_Slope_Down', 'ST_Slope_Flat', 'ST_Slope_Up'],
    num_rows: 918
})
[20]:
# Split dataframe into inputs and outputs
X, y = df_processed[feature_cols], df_processed[target]

Training#

[21]:
# Splitting into train and test
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    random_state=RANDOM_SEED,
)
classifier = LogisticRegression()
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # use the scaler fitted on the training data
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_pred_prob = classifier.predict_proba(X_test)

Evaluation#

As demonstrated in the evaluation tutorial, we define a MetricDict:

[22]:
metric_names = [
    "binary_accuracy",
    "binary_precision",
    "binary_recall",
    "binary_f1_score",
    "binary_roc_curve",
    "binary_auroc",
]
metrics = [
    create_metric(metric_name, experimental=True) for metric_name in metric_names
]
metric_collection = MetricDict(metrics)
metric_collection(y_test.values, np.float64(y_pred))
[22]:
{'BinaryAccuracy': array(0.84782606, dtype=float32),
 'BinaryPrecision': array(0.88461536, dtype=float32),
 'BinaryRecall': array(0.8518519, dtype=float32),
 'BinaryF1Score': array(0.8679245, dtype=float32),
 'BinaryROC': ROCCurve(fpr=array([0.        , 0.15789473, 1.        ], dtype=float32), tpr=array([0.       , 0.8518519, 1.       ], dtype=float32), thresholds=array([1., 1., 0.])),
 'BinaryAUROC': 0.8469786}

Data Slicing#

In addition to overall metrics, it might be useful to see how the model performs on certain subpopulations or subsets of the data. We can define these subsets using SliceSpec objects.

[23]:
spec_list = [
    {
        "Age": {
            "min_value": 30,
            "max_value": 50,
            "min_inclusive": True,
            "max_inclusive": False,
        },
    },
    {
        "Age": {
            "min_value": 50,
            "max_value": 70,
            "min_inclusive": True,
            "max_inclusive": False,
        },
    },
]
slice_spec = SliceSpec(spec_list)

Below, we combine the raw features of the test data with the predictions so that we can use them later for slice-specific evaluations.

[24]:
# Select the test rows from df using the test-set index so that the raw
# features stay aligned with the model predictions
df_test = df.loc[y_test.index].copy()
df_test["preds"] = y_pred
df_test["preds_prob"] = y_pred_prob[:, 1]
df_test.head()
[24]:
Age Sex ChestPainType RestingBP Cholesterol FastingBS RestingECG MaxHR ExerciseAngina Oldpeak ST_Slope outcome preds preds_prob
101 51 M ASY 130 179 0 Normal 100 N 0.0 Up 0 0 0.294409
23 44 M ATA 150 288 0 Normal 150 Y 3.0 Flat 1 1 0.902359
162 47 M ATA 160 263 0 Normal 174 N 0.0 Up 0 1 0.919186
112 47 M ASY 140 276 1 Normal 125 Y 0.0 Up 0 1 0.956736
165 46 M TA 140 272 1 Normal 175 N 2.0 Flat 1 1 0.915661

Age distribution in test data#

[25]:
fig = px.histogram(df_test, x="Age")
fig.update_layout(
    title="Age Distribution in Test Data",
    xaxis_title="Age",
    yaxis_title="Count",
    bargap=0.2,
)

fig.show()

Logging metrics and results to report#

Here, we gather the evaluations and add them to the report.

Each performance metric is added to the model card using the log_quantitative_analysis method, which takes the analysis type along with the metric name, value, description, slice, and pass/fail thresholds. The flattened results dictionary uses keys of the form slice_name/metric_name, for instance overall/BinaryAccuracy or Age:[30 - 50)/BinaryPrecision, which we split back into a slice and a metric name before logging.

We first need to process the evaluation results to get the metrics in the right format.

[26]:
from cyclops.evaluate import evaluator


# Create Dataset object
heart_failure_data = Dataset.from_pandas(df_test)

result = evaluator.evaluate(
    dataset=heart_failure_data,
    metrics=metric_collection,  # type: ignore[list-item]
    target_columns=target,
    prediction_columns="preds_prob",
    slice_spec=slice_spec,
)
Filter -> Age:[30 - 50): 100%|██████████| 184/184 [00:00<00:00, 15699.42 examples/s]
Filter -> Age:[50 - 70): 100%|██████████| 184/184 [00:00<00:00, 20109.23 examples/s]
Filter -> overall: 100%|██████████| 184/184 [00:00<00:00, 19587.12 examples/s]
[27]:
results_flat = flatten_results_dict(
    results=result, remove_metrics=["BinaryROC"], model_name="model_for_preds_prob"
)
[28]:
result
[28]:
{'model_for_preds_prob': {'Age:[30 - 50)': {'BinaryAccuracy': array(0.6909722, dtype=float32),
   'BinaryPrecision': array(0.6566265, dtype=float32),
   'BinaryRecall': array(0.77304965, dtype=float32),
   'BinaryF1Score': array(0.71009773, dtype=float32),
   'BinaryROC': ROCCurve(fpr=array([0.        , 0.08163265, 0.08843537, 0.0952381 , 0.10204082,
          0.10884354, 0.11564626, 0.12244898, 0.1292517 , 0.1292517 ,
          0.13605443, 0.14285715, 0.14285715, 0.14965986, 0.15646258,
          0.1632653 , 0.17006803, 0.17687075, 0.18367347, 0.1904762 ,
          0.1904762 , 0.19727892, 0.19727892, 0.19727892, 0.20408164,
          0.21088435, 0.21768707, 0.2244898 , 0.2244898 , 0.23129252,
          0.23809524, 0.24489796, 0.24489796, 0.25170067, 0.2585034 ,
          0.26530612, 0.26530612, 0.26530612, 0.27210885, 0.27210885,
          0.27891156, 0.2857143 , 0.292517  , 0.2993197 , 0.30612245,
          0.31292516, 0.31292516, 0.3197279 , 0.3197279 , 0.3265306 ,
          0.33333334, 0.34013605, 0.34013605, 0.3469388 , 0.3537415 ,
          0.3605442 , 0.3605442 , 0.3605442 , 0.36734694, 0.37414965,
          0.3809524 , 0.3809524 , 0.3809524 , 0.3877551 , 0.3877551 ,
          0.39455783, 0.39455783, 0.40136054, 0.40816328, 0.414966  ,
          0.4217687 , 0.42857143, 0.43537414, 0.44217688, 0.4489796 ,
          0.45578232, 0.45578232, 0.45578232, 0.45578232, 0.46258503,
          0.46258503, 0.46938777, 0.47619048, 0.4829932 , 0.4829932 ,
          0.4829932 , 0.4829932 , 0.48979592, 0.49659863, 0.49659863,
          0.50340134, 0.5102041 , 0.5102041 , 0.5102041 , 0.5170068 ,
          0.52380955, 0.53061223, 0.53061223, 0.53741497, 0.53741497,
          0.53741497, 0.5442177 , 0.5510204 , 0.5578231 , 0.56462586,
          0.56462586, 1.        ], dtype=float32), tpr=array([0.        , 0.6524823 , 0.6524823 , 0.6524823 , 0.6524823 ,
          0.6524823 , 0.6524823 , 0.6524823 , 0.6524823 , 0.65957445,
          0.65957445, 0.65957445, 0.6666667 , 0.6666667 , 0.6666667 ,
          0.6666667 , 0.6666667 , 0.6666667 , 0.6666667 , 0.6666667 ,
          0.67375886, 0.67375886, 0.68085104, 0.6879433 , 0.6879433 ,
          0.6879433 , 0.6879433 , 0.6879433 , 0.69503546, 0.69503546,
          0.69503546, 0.69503546, 0.70212764, 0.70212764, 0.70212764,
          0.70212764, 0.7092199 , 0.71631205, 0.71631205, 0.7234042 ,
          0.7234042 , 0.7234042 , 0.7234042 , 0.7234042 , 0.7234042 ,
          0.7234042 , 0.73049647, 0.73049647, 0.73758864, 0.73758864,
          0.73758864, 0.73758864, 0.7446808 , 0.7446808 , 0.7446808 ,
          0.7446808 , 0.75177306, 0.75886524, 0.75886524, 0.75886524,
          0.75886524, 0.7659575 , 0.77304965, 0.77304965, 0.78014183,
          0.78014183, 0.78723407, 0.78723407, 0.78723407, 0.78723407,
          0.78723407, 0.78723407, 0.78723407, 0.78723407, 0.78723407,
          0.78723407, 0.79432625, 0.8014184 , 0.80851066, 0.80851066,
          0.81560284, 0.81560284, 0.81560284, 0.81560284, 0.822695  ,
          0.82978725, 0.83687943, 0.83687943, 0.83687943, 0.8439716 ,
          0.8439716 , 0.8439716 , 0.85106385, 0.858156  , 0.858156  ,
          0.858156  , 0.858156  , 0.8652482 , 0.8652482 , 0.87234044,
          0.8794326 , 0.8794326 , 0.8794326 , 0.8794326 , 0.8794326 ,
          0.8865248 , 1.        ], dtype=float32), thresholds=array([1.        , 1.        , 0.99055155, 0.98553264, 0.98378654,
          0.98161451, 0.97903449, 0.97787758, 0.97715916, 0.97573124,
          0.97260949, 0.97084677, 0.96900874, 0.96781353, 0.96570988,
          0.96400777, 0.95769454, 0.95673573, 0.95461643, 0.95461279,
          0.95099137, 0.94315781, 0.94306519, 0.94085118, 0.92930551,
          0.92763502, 0.92646777, 0.92400205, 0.92191999, 0.92049903,
          0.91961918, 0.91918616, 0.91566149, 0.91490278, 0.90352207,
          0.90315826, 0.90235894, 0.89592149, 0.89040243, 0.88024573,
          0.87158573, 0.8582023 , 0.84095632, 0.83595224, 0.83200382,
          0.82378074, 0.80092719, 0.79129611, 0.78801017, 0.77667184,
          0.75644709, 0.74625719, 0.74248886, 0.73839282, 0.70653354,
          0.7000161 , 0.6779978 , 0.65145323, 0.64863186, 0.60919444,
          0.57905783, 0.57242123, 0.57094358, 0.51445578, 0.48115904,
          0.44860503, 0.44643442, 0.4375548 , 0.38096433, 0.3655647 ,
          0.32119606, 0.31924904, 0.30951544, 0.29418801, 0.24374524,
          0.19704714, 0.19494952, 0.16945487, 0.10595291, 0.09428765,
          0.08925029, 0.07191137, 0.06325165, 0.06176812, 0.05641887,
          0.05155103, 0.05020997, 0.04747036, 0.03843419, 0.03201721,
          0.03141963, 0.03081027, 0.02392401, 0.02296072, 0.01858694,
          0.01627082, 0.013353  , 0.01235111, 0.01088882, 0.0107178 ,
          0.00959438, 0.00711859, 0.00698969, 0.00696386, 0.00676452,
          0.00590441, 0.        ])),
   'BinaryAUROC': 0.79712456},
  'Age:[50 - 70)': {'BinaryAccuracy': array(0.4556962, dtype=float32),
   'BinaryPrecision': array(0.41463414, dtype=float32),
   'BinaryRecall': array(0.4722222, dtype=float32),
   'BinaryF1Score': array(0.44155845, dtype=float32),
   'BinaryROC': ROCCurve(fpr=array([0.        , 0.02325581, 0.02325581, 0.02325581, 0.04651163,
          0.06976745, 0.09302326, 0.11627907, 0.11627907, 0.11627907,
          0.13953489, 0.1627907 , 0.1627907 , 0.18604651, 0.20930232,
          0.23255815, 0.25581396, 0.25581396, 0.27906978, 0.27906978,
          0.27906978, 0.30232558, 0.3255814 , 0.3488372 , 0.37209302,
          0.39534885, 0.39534885, 0.39534885, 0.41860464, 0.44186047,
          0.44186047, 0.4651163 , 0.4651163 , 0.4883721 , 0.5116279 ,
          0.53488374, 0.53488374, 0.53488374, 0.53488374, 0.53488374,
          0.55813956, 0.55813956, 0.5813953 , 0.60465115, 0.627907  ,
          0.6511628 , 0.6511628 , 0.6744186 , 0.6744186 , 0.6744186 ,
          0.6744186 , 0.6744186 , 0.6744186 , 0.6976744 , 0.7209302 ,
          0.74418604, 0.76744187, 0.76744187, 0.7906977 , 0.81395346,
          0.8372093 , 0.8372093 , 0.8604651 , 0.8604651 , 0.8604651 ,
          0.8604651 , 0.88372093, 0.88372093, 0.88372093, 0.90697676,
          0.90697676, 0.90697676, 0.9302326 , 0.9302326 , 0.95348835,
          0.9767442 , 1.        , 1.        , 1.        , 1.        ],
         dtype=float32), tpr=array([0.        , 0.        , 0.02777778, 0.05555556, 0.05555556,
          0.05555556, 0.05555556, 0.05555556, 0.08333334, 0.11111111,
          0.11111111, 0.11111111, 0.1388889 , 0.1388889 , 0.1388889 ,
          0.1388889 , 0.1388889 , 0.16666667, 0.16666667, 0.19444445,
          0.22222222, 0.22222222, 0.22222222, 0.22222222, 0.22222222,
          0.22222222, 0.25      , 0.2777778 , 0.2777778 , 0.2777778 ,
          0.30555555, 0.30555555, 0.33333334, 0.33333334, 0.33333334,
          0.33333334, 0.3611111 , 0.3888889 , 0.41666666, 0.44444445,
          0.44444445, 0.4722222 , 0.4722222 , 0.4722222 , 0.4722222 ,
          0.4722222 , 0.5       , 0.5       , 0.5277778 , 0.5555556 ,
          0.5833333 , 0.6111111 , 0.6388889 , 0.6388889 , 0.6388889 ,
          0.6388889 , 0.6388889 , 0.6666667 , 0.6666667 , 0.6666667 ,
          0.6666667 , 0.6944444 , 0.6944444 , 0.7222222 , 0.75      ,
          0.7777778 , 0.7777778 , 0.8055556 , 0.8333333 , 0.8333333 ,
          0.8611111 , 0.8888889 , 0.8888889 , 0.9166667 , 0.9166667 ,
          0.9166667 , 0.9166667 , 0.9444444 , 0.9722222 , 1.        ],
         dtype=float32), thresholds=array([1.        , 0.97588322, 0.97569033, 0.97430171, 0.97304652,
          0.97042585, 0.96306885, 0.9596382 , 0.95691879, 0.95633134,
          0.94945337, 0.93148816, 0.929611  , 0.92580679, 0.92240937,
          0.92151732, 0.91454548, 0.91136782, 0.90766332, 0.90595869,
          0.90537645, 0.89723695, 0.89459344, 0.89322999, 0.88448385,
          0.87967601, 0.85465758, 0.85208601, 0.82521005, 0.81630441,
          0.81141893, 0.79815241, 0.78546783, 0.77412672, 0.76424213,
          0.73118534, 0.66402369, 0.65540529, 0.60894819, 0.60212636,
          0.59879945, 0.59222464, 0.44611837, 0.44487396, 0.4303768 ,
          0.29440919, 0.24955591, 0.18558111, 0.17072263, 0.17036235,
          0.16317956, 0.15861375, 0.10313674, 0.08868506, 0.08779198,
          0.0777735 , 0.07511247, 0.06025561, 0.05799049, 0.05374022,
          0.04909588, 0.04282243, 0.04079814, 0.0403769 , 0.03449109,
          0.03318607, 0.02983247, 0.02859545, 0.02845785, 0.02788098,
          0.02657758, 0.02448971, 0.02292413, 0.02233516, 0.02169754,
          0.01812187, 0.01429376, 0.00842294, 0.00705887, 0.0065113 ])),
   'BinaryAUROC': 0.40633073},
  'overall': {'BinaryAccuracy': array(0.4293478, dtype=float32),
   'BinaryPrecision': array(0.32692307, dtype=float32),
   'BinaryRecall': array(0.49275362, dtype=float32),
   'BinaryF1Score': array(0.39306358, dtype=float32),
   'BinaryROC': ROCCurve(fpr=array([0.        , 0.00869565, 0.0173913 , 0.02608696, 0.03478261,
          0.04347826, 0.05217391, 0.06086956, 0.06956521, 0.06956521,
          0.06956521, 0.06956521, 0.07826087, 0.08695652, 0.09565217,
          0.10434783, 0.10434783, 0.11304348, 0.12173913, 0.13043478,
          0.13913043, 0.14782609, 0.15652174, 0.15652174, 0.16521738,
          0.16521738, 0.17391305, 0.1826087 , 0.1826087 , 0.19130434,
          0.2       , 0.2       , 0.2       , 0.20869565, 0.2173913 ,
          0.2173913 , 0.22608696, 0.2347826 , 0.24347825, 0.2521739 ,
          0.26086956, 0.26956522, 0.26956522, 0.27826086, 0.28695652,
          0.29565218, 0.3043478 , 0.3043478 , 0.31304348, 0.32173914,
          0.32173914, 0.33043477, 0.33043477, 0.33043477, 0.33913043,
          0.3478261 , 0.3478261 , 0.35652173, 0.35652173, 0.3652174 ,
          0.37391305, 0.38260868, 0.39130434, 0.39130434, 0.4       ,
          0.40869564, 0.4173913 , 0.4173913 , 0.4173913 , 0.42608696,
          0.4347826 , 0.44347826, 0.45217392, 0.46086955, 0.4695652 ,
          0.4695652 , 0.4695652 , 0.47826087, 0.4869565 , 0.4869565 ,
          0.4869565 , 0.49565217, 0.5043478 , 0.51304346, 0.5217391 ,
          0.5304348 , 0.5304348 , 0.53913045, 0.5478261 , 0.5565217 ,
          0.5652174 , 0.5652174 , 0.5652174 , 0.5652174 , 0.5652174 ,
          0.57391304, 0.5826087 , 0.5826087 , 0.5826087 , 0.59130436,
          0.59130436, 0.6       , 0.6       , 0.6       , 0.6086956 ,
          0.6086956 , 0.6173913 , 0.6173913 , 0.62608695, 0.6347826 ,
          0.6434783 , 0.65217394, 0.66086954, 0.6695652 , 0.67826086,
          0.6869565 , 0.6956522 , 0.70434785, 0.71304345, 0.71304345,
          0.7217391 , 0.7304348 , 0.7304348 , 0.73913044, 0.73913044,
          0.73913044, 0.73913044, 0.73913044, 0.73913044, 0.73913044,
          0.73913044, 0.7478261 , 0.7478261 , 0.75652176, 0.76521736,
          0.773913  , 0.7826087 , 0.79130435, 0.8       , 0.8086957 ,
          0.8086957 , 0.8173913 , 0.8173913 , 0.82608694, 0.82608694,
          0.82608694, 0.8347826 , 0.84347826, 0.84347826, 0.8521739 ,
          0.8521739 , 0.8608696 , 0.8608696 , 0.8608696 , 0.8608696 ,
          0.8695652 , 0.87826085, 0.8869565 , 0.8869565 , 0.8869565 ,
          0.8956522 , 0.8956522 , 0.8956522 , 0.8956522 , 0.8956522 ,
          0.90434784, 0.90434784, 0.9130435 , 0.9217391 , 0.93043476,
          0.9391304 , 0.9478261 , 0.95652175, 0.95652175, 0.9652174 ,
          0.9652174 , 0.9652174 , 0.9652174 , 0.973913  , 0.973913  ,
          0.9826087 , 0.99130434, 1.        , 1.        , 1.        ],
         dtype=float32), tpr=array([0.        , 0.        , 0.        , 0.        , 0.        ,
          0.        , 0.        , 0.        , 0.        , 0.01449275,
          0.02898551, 0.04347826, 0.04347826, 0.04347826, 0.04347826,
          0.04347826, 0.05797102, 0.05797102, 0.05797102, 0.05797102,
          0.05797102, 0.05797102, 0.05797102, 0.07246377, 0.07246377,
          0.08695652, 0.08695652, 0.08695652, 0.10144927, 0.10144927,
          0.10144927, 0.11594203, 0.13043478, 0.13043478, 0.13043478,
          0.14492753, 0.14492753, 0.14492753, 0.14492753, 0.14492753,
          0.14492753, 0.14492753, 0.1594203 , 0.1594203 , 0.1594203 ,
          0.1594203 , 0.1594203 , 0.17391305, 0.17391305, 0.17391305,
          0.1884058 , 0.1884058 , 0.20289855, 0.2173913 , 0.2173913 ,
          0.2173913 , 0.23188406, 0.23188406, 0.24637681, 0.24637681,
          0.24637681, 0.24637681, 0.24637681, 0.26086956, 0.26086956,
          0.26086956, 0.26086956, 0.2753623 , 0.28985506, 0.28985506,
          0.28985506, 0.28985506, 0.28985506, 0.28985506, 0.28985506,
          0.3043478 , 0.3188406 , 0.3188406 , 0.3188406 , 0.33333334,
          0.3478261 , 0.3478261 , 0.3478261 , 0.3478261 , 0.3478261 ,
          0.3478261 , 0.36231884, 0.36231884, 0.36231884, 0.36231884,
          0.36231884, 0.3768116 , 0.39130434, 0.4057971 , 0.42028984,
          0.42028984, 0.42028984, 0.4347826 , 0.44927537, 0.44927537,
          0.46376812, 0.46376812, 0.47826087, 0.49275362, 0.49275362,
          0.5072464 , 0.5072464 , 0.5217391 , 0.5217391 , 0.5217391 ,
          0.5217391 , 0.5217391 , 0.5217391 , 0.5217391 , 0.5217391 ,
          0.5217391 , 0.5217391 , 0.5217391 , 0.5217391 , 0.5362319 ,
          0.5362319 , 0.5362319 , 0.5507246 , 0.5507246 , 0.5652174 ,
          0.5797101 , 0.5942029 , 0.6086956 , 0.6231884 , 0.6376812 ,
          0.65217394, 0.65217394, 0.6666667 , 0.6666667 , 0.6666667 ,
          0.6666667 , 0.6666667 , 0.6666667 , 0.6666667 , 0.6666667 ,
          0.68115944, 0.68115944, 0.6956522 , 0.6956522 , 0.71014494,
          0.7246377 , 0.7246377 , 0.7246377 , 0.73913044, 0.73913044,
          0.7536232 , 0.7536232 , 0.76811594, 0.7826087 , 0.79710144,
          0.79710144, 0.79710144, 0.79710144, 0.8115942 , 0.82608694,
          0.82608694, 0.8405797 , 0.85507244, 0.8695652 , 0.884058  ,
          0.884058  , 0.89855075, 0.89855075, 0.89855075, 0.89855075,
          0.89855075, 0.89855075, 0.89855075, 0.9130435 , 0.9130435 ,
          0.92753625, 0.942029  , 0.95652175, 0.95652175, 0.9710145 ,
          0.9710145 , 0.9710145 , 0.9710145 , 0.98550725, 1.        ],
         dtype=float32), thresholds=array([1.        , 0.99055155, 0.98553264, 0.98378654, 0.98161451,
          0.97903449, 0.97787758, 0.97715916, 0.97588322, 0.97573124,
          0.97569033, 0.97430171, 0.97304652, 0.97260949, 0.97084677,
          0.97042585, 0.96900874, 0.96781353, 0.96570988, 0.96400777,
          0.96306885, 0.9596382 , 0.95769454, 0.95691879, 0.95673573,
          0.95633134, 0.95461643, 0.95461279, 0.95099137, 0.94945337,
          0.94315781, 0.94306519, 0.94085118, 0.93899952, 0.93148816,
          0.929611  , 0.92930551, 0.92763502, 0.92646777, 0.92580679,
          0.92400205, 0.92240937, 0.92191999, 0.92151732, 0.92049903,
          0.91961918, 0.91918616, 0.91566149, 0.91490278, 0.91454548,
          0.91136782, 0.90766332, 0.90595869, 0.90537645, 0.90352207,
          0.90315826, 0.90235894, 0.89723695, 0.89592149, 0.89459344,
          0.89322999, 0.89040243, 0.88448385, 0.88024573, 0.87967601,
          0.87158573, 0.8582023 , 0.85465758, 0.85208601, 0.84095632,
          0.83595224, 0.83200382, 0.82521005, 0.82378074, 0.81630441,
          0.81141893, 0.80092719, 0.79815241, 0.79129611, 0.78801017,
          0.78546783, 0.77667184, 0.77412672, 0.76424213, 0.75644709,
          0.74625719, 0.74248886, 0.73839282, 0.73118534, 0.70653354,
          0.7000161 , 0.6779978 , 0.66402369, 0.65540529, 0.65145323,
          0.64863186, 0.60919444, 0.60894819, 0.60212636, 0.59879945,
          0.59222464, 0.57905783, 0.57242123, 0.57094358, 0.51445578,
          0.48115904, 0.44860503, 0.44643442, 0.44611837, 0.44487396,
          0.4375548 , 0.4303768 , 0.38096433, 0.3655647 , 0.32119606,
          0.31924904, 0.30951544, 0.29440919, 0.29418801, 0.24955591,
          0.24374524, 0.19704714, 0.19494952, 0.18558111, 0.17072263,
          0.17036235, 0.16945487, 0.16317956, 0.15861375, 0.10595291,
          0.10313674, 0.09428765, 0.08925029, 0.08868506, 0.08779198,
          0.0777735 , 0.07511247, 0.07191137, 0.06325165, 0.06176812,
          0.06025561, 0.05799049, 0.05641887, 0.05374022, 0.05155103,
          0.05020997, 0.04909588, 0.04747036, 0.04282243, 0.04079814,
          0.0403769 , 0.03843419, 0.03449109, 0.03318607, 0.03201721,
          0.03141963, 0.03081027, 0.02983247, 0.02859545, 0.02845785,
          0.02788098, 0.02657758, 0.02448971, 0.02392401, 0.02296072,
          0.02292413, 0.02233516, 0.02169754, 0.01858694, 0.01812187,
          0.01627082, 0.01429376, 0.013353  , 0.01235111, 0.01088882,
          0.0107178 , 0.00959438, 0.00842294, 0.00711859, 0.00705887,
          0.00698969, 0.00696386, 0.00676452, 0.0065113 , 0.00590441])),
   'BinaryAUROC': 0.39395085}}}
[29]:
results_flat
[29]:
{'Age:[30 - 50)/BinaryAccuracy': array(0.6909722, dtype=float32),
 'Age:[30 - 50)/BinaryPrecision': array(0.6566265, dtype=float32),
 'Age:[30 - 50)/BinaryRecall': array(0.77304965, dtype=float32),
 'Age:[30 - 50)/BinaryF1Score': array(0.71009773, dtype=float32),
 'Age:[30 - 50)/BinaryAUROC': 0.79712456,
 'Age:[50 - 70)/BinaryAccuracy': array(0.4556962, dtype=float32),
 'Age:[50 - 70)/BinaryPrecision': array(0.41463414, dtype=float32),
 'Age:[50 - 70)/BinaryRecall': array(0.4722222, dtype=float32),
 'Age:[50 - 70)/BinaryF1Score': array(0.44155845, dtype=float32),
 'Age:[50 - 70)/BinaryAUROC': 0.40633073,
 'overall/BinaryAccuracy': array(0.4293478, dtype=float32),
 'overall/BinaryPrecision': array(0.32692307, dtype=float32),
 'overall/BinaryRecall': array(0.49275362, dtype=float32),
 'overall/BinaryF1Score': array(0.39306358, dtype=float32),
 'overall/BinaryAUROC': 0.39395085}

With the results flattened, we can now log each metric to the report. The descriptions dictionary appears when you hover over a metric in the report, so feel free to adjust the descriptions as appropriate for your use case.

[30]:
for name, metric in results_flat.items():
    split, name = name.split("/")  # noqa: PLW2901
    descriptions = {
        "BinaryPrecision": "The proportion of predicted positive instances that are correctly predicted.",
        "BinaryRecall": "The proportion of actual positive instances that are correctly predicted. Also known as recall or true positive rate.",
        "BinaryAccuracy": "The proportion of all instances that are correctly predicted.",
        "BinaryAUROC": "The area under the receiver operating characteristic curve (AUROC) is a measure of the performance of a binary classification model.",
        "BinaryAveragePrecision": "The area under the precision-recall curve (AUPRC) is a measure of the performance of a binary classification model.",
        "BinaryF1Score": "The harmonic mean of precision and recall.",
    }
    report.log_quantitative_analysis(
        "performance",
        name=name,
        value=metric.tolist(),
        description=descriptions[name],
        metric_slice=split,
        pass_fail_thresholds=0.7,
        pass_fail_threshold_fns=lambda x, threshold: bool(x >= threshold),
    )

We can also use the ClassificationPlotter (as demonstrated in the evaluation example) to plot the performance metrics and add the figures to the model card using the log_plotly_figure method.

[31]:
plotter = ClassificationPlotter(task_type="binary", class_names=["0", "1"])
plotter.set_template("plotly_white")
[32]:
# extracting the ROC curves and AUROC results for all the slices
model_name = "model_for_preds_prob"
roc_curves = {
    slice_name: slice_results["BinaryROC"]
    for slice_name, slice_results in result[model_name].items()
}
aurocs = {
    slice_name: slice_results["BinaryAUROC"]
    for slice_name, slice_results in result[model_name].items()
}

# plotting the ROC curves for all the slices
roc_plot = plotter.roc_curve_comparison(roc_curves, aurocs=aurocs)
report.log_plotly_figure(
    fig=roc_plot,
    caption="ROC Curve for All Patients",
    section_name="quantitative analysis",
)
roc_plot.show()
[33]:
# Extracting the overall classification metric values.
overall_performance = {
    metric_name: metric_value
    for metric_name, metric_value in result[model_name]["overall"].items()
    if metric_name not in ["BinaryROC", "BinaryPrecisionRecallCurve"]
}
# Plotting the overall classification metric values.
overall_performance_plot = plotter.metrics_value(
    overall_performance,
    title="Overall Performance",
)
report.log_plotly_figure(
    fig=overall_performance_plot,
    caption="Overall Performance",
    section_name="quantitative analysis",
)
overall_performance_plot.show()
[34]:
# Extracting the metric values for all the slices.
slice_metrics = {
    slice_name: {
        metric_name: metric_value
        for metric_name, metric_value in slice_results.items()
        if metric_name not in ["BinaryROC", "BinaryPrecisionRecallCurve"]
    }
    for slice_name, slice_results in result[model_name].items()
}
# Plotting the metric values for all the slices.
slice_metrics_plot = plotter.metrics_comparison_bar(slice_metrics)
report.log_plotly_figure(
    fig=slice_metrics_plot,
    caption="Slice Metric Comparison",
    section_name="quantitative analysis",
)
slice_metrics_plot.show()

Report Generation#

ModelCard#

First, let’s document the model details section. A ModelCard has several sections, and each section includes multiple fields. Model details is one such section, and it has the following fields by default:

  • ``description``: A high-level description of the model and its usage for a general audience.

  • ``version``: The version of the model.

  • ``owners``: The individuals or organizations that own the model.

  • ``license``: The license under which the model is made available.

  • ``citation``: The citation for the model.

  • ``references``: Links to resources that are relevant to the model.

  • ``path``: The path to where the model is stored.

  • ``regulatory_requirements``: The regulatory requirements that are relevant to the model.

We can add additional fields to the model details section by passing a dictionary to the log_from_dict method and specifying the section name as model_details. You can also use the log_descriptor method to add a new field object with a description attribute to any section of the model card.
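
For example, a minimal sketch of log_descriptor, assuming it takes the new field’s name, a description, and the target section (the field name "limitations" is purely illustrative; check the API reference for the exact signature):

report.log_descriptor(
    name="limitations",
    description="The model has only been evaluated on a single public dataset.",
    section_name="model_details",
)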

[35]:
report.log_from_dict(
    data={
        "name": "Heart Failure Prediction Model",
        "description": "The model was trained on the Kaggle Heart Failure \
        Prediction Dataset to predict risk of heart failure.",
    },
    section_name="model_details",
)

report.log_version(
    version_str="0.0.1",
    date=str(date.today()),
    description="Initial Release",
)
report.log_owner(
    name="CyclOps Team",
    contact="vectorinstitute.github.io/cyclops/",
    email="cyclops@vectorinstitute.ai",
)
report.log_license(identifier="Apache-2.0")
report.log_reference(
    link="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html",  # noqa: E501
)

Considerations#

Next, let’s populate the considerations section, which includes the following fields by default:

  • ``users``: The intended users of the model.

  • ``use_cases``: The use cases for the model. These could be primary, downstream or out-of-scope use cases.

  • ``fairness_assessment``: A description of the benefits and harms of the model for different groups as well as the steps taken to mitigate the harms.

  • ``ethical_considerations``: The risks associated with using the model and the steps taken to mitigate them. This can be populated using the log_risk method.

[36]:
report.log_from_dict(
    data={
        "users": [
            {"description": "Hospitals"},
            {"description": "Clinicians"},
        ],
    },
    section_name="considerations",
)
report.log_user(description="ML Engineers")
report.log_use_case(
    description="Predicting risk of heart failure.",
    kind="primary",
)
report.log_use_case(
    description="Predicting risk of pathologies and conditions other\
    than heart failure.",
    kind="out-of-scope",
)
report.log_fairness_assessment(
    affected_group="sex, age",
    benefit="Improved health outcomes for patients.",
    harm="Biased predictions for patients in certain groups (e.g. older patients) \
        may lead to worse health outcomes.",
    mitigation_strategy="We will monitor the performance of the model on these groups \
        and retrain the model if the performance drops below a certain threshold.",
)
report.log_risk(
    risk="The model may be used to make decisions that affect the health of patients.",
    mitigation_strategy="The model should be continuously monitored for performance \
        and retrained if the performance drops below a certain threshold.",
)

Exporting report#

Once the model card is populated, you can generate the report using the export method. The report is generated as an HTML file, and a JSON file containing the model card data is generated alongside it. By default, the files are saved in a folder named cyclops_report in the current working directory. You can change the path by passing an output_dir argument when instantiating the ModelCardReport class.
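
For instance, a minimal sketch of pointing the report at a custom directory (the variable name and path below are illustrative):

report_with_custom_dir = ModelCardReport(output_dir="./my_reports")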

[37]:
np.random.seed(42)

synthetic_timestamps = pd.date_range(
    start="1/1/2020", periods=10, freq="D"
).values.astype(str)


report._model_card.overview = None
report_path = report.export(
    output_filename="heart_failure_report_periodic.html",
    synthetic_timestamp=synthetic_timestamps[0],
    last_n_evals=3,
)

shutil.copy(f"{report_path}", ".")
metric_save = None
for i in tqdm(range(len(synthetic_timestamps[1:]))):
    if i == 3:
        report._model_card.quantitative_analysis.performance_metrics.append(
            metric_save,
        )
    report._model_card.overview = None
    for metric in report._model_card.quantitative_analysis.performance_metrics:
        metric.value = np.clip(
            metric.value + np.random.normal(0, 0.1),
            0,
            1,
        )
        metric.tests[0].passed = bool(metric.value >= 0.7)
    if i == 2:
        metrics = []
        for metric in report._model_card.quantitative_analysis.performance_metrics:
            if metric.type == "BinaryAccuracy" and metric.slice == "Age:[30 - 50)":
                metric_save = copy.deepcopy(metric)
            else:
                metrics.append(metric)
        report._model_card.quantitative_analysis.performance_metrics = metrics
    report_path = report.export(
        output_filename="heart_failure_report_periodic.html",
        synthetic_timestamp=synthetic_timestamps[i + 1],
        last_n_evals=3,
    )
    shutil.copy(f"{report_path}", ".")
shutil.rmtree("./cyclops_report")
100%|██████████| 9/9 [00:07<00:00, 1.18it/s]
You’re now able to view the report.