Train SISSO#
import os
import psutil
import json
import pandas as pd
import numpy as np
import warnings
from pathlib import Path
from IPython.display import display, Markdown
from sissopp.postprocess.load_models import load_model
from sissopp import FeatureSpace, SISSORegressor, Inputs
from sissopp.py_interface import get_fs_solver
from mlproject.data.preprocessing import get_dataset
from mlproject.training.feature_selection import get_relevant_features, GAFeatureSelector
warnings.filterwarnings("ignore")
# Only targets where significant improvements are observed with Matminer+LOB descriptor set
target_names = ["max_pfc", "log_g_vrh",
                "log_k_vrh", "log_klat_300",
                "log_kp_300", "log_msd_all_300",
                "log_msd_all_600", "log_msd_mean_600"]
# Remember the starting directory: the training loops below chdir into
# per-target work directories and must return here between targets.
parent_dir = os.getcwd()
# Force CPU-only execution and silence TensorFlow info/warning log output.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
Provide the absolute path to the `data` directory of https://github.com/DigiMatChem/paper-ml-with-lobster-descriptors/tree/main/data (after cloning the repository locally) in the `data_parent_dir` variable below.
# Absolute path to the `data` directory of the locally cloned
# paper-ml-with-lobster-descriptors repository.
data_parent_dir = "absolute/path/to/paper-ml-with-lobster-descriptors/data/"
Approach 1: Using the top 20 descriptors ranked by feature-importance scores (quick and cheap)#
Provide the path to the feature-importance scores from the feature-reduction pipeline results, i.e., the aggregated all-relevant feature selection (ARFS) scores. These can be generated using the code provided here.
We use the top 20 descriptors as input to train the SISSO models.
# path to the output of the code linked above
# (directory containing the arfs_summary_<target>.json files)
feat_imp_dir = "absolute/path/to/arfs_descriptors"
# Path to the compiled SISSO++ executable.
sissopp_binary_path = "/path/to/compiled/sisso++"
# SISSO++ input parameters shared by all targets. `property_key` is left
# empty here and filled in per target inside the training loop below.
sissopp_inputs = {
    "data_file": "data.csv",          # input data written per target before fitting
    "property_key": "",               # set to the target column name per iteration
    "desc_dim": 3,                    # maximum descriptor dimension
    "n_sis_select": 100,              # features selected per SIS step
    "max_leaves": 4,
    "max_rung": 1,                    # Simply change value to 2 for rung 2 models
    "calc_type": "regression",
    "min_abs_feat_val": 1e-5,         # features outside this absolute-value
    "max_abs_feat_val": 1e8,          # range are discarded
    "n_residual": 10,
    "n_models_store": 1,              # only the best model is written to disk
    "n_rung_generate": 1,
    "n_rung_store": 0,
    "leave_out_frac": 0.0,            # no hold-out split: train on all rows
    "leave_out_inds": [],
    "verbose": False,
    # Unary/binary operators allowed when building candidate features.
    "opset": ["add", "sub", "abs_diff", "mult", "div", "inv", "abs", "exp", "log", "sq", "cb", "sqrt", "cbrt", "neg_exp"]
}
# Train one SISSO model per target on the 20 most important descriptors.
for target in target_names:
    # Read the aggregated ARFS importance-score summary for this target.
    arfs_summ = pd.read_json(f"{feat_imp_dir}/arfs_summary_{target}.json")
    # Names of the 20 descriptors with the highest mean importance score.
    # Kept as an ordered list (not a set) so the column order of data.csv
    # is deterministic across runs.
    top20 = arfs_summ.sort_values(by="mean", ascending=False).head(20).index.tolist()
    target_df, feat_df = get_dataset(feat_type="matminer_lob", target_name=target,
                                     data_parent_dir=data_parent_dir)
    model_input = sissopp_inputs.copy()  # per-target copy; shared template stays clean
    model_input["property_key"] = target
    model_rung = model_input["max_rung"]
    work_dir = Path(target) / f"rung_{model_rung}"
    work_dir.mkdir(parents=True, exist_ok=True)  # also creates <target>/ itself
    os.chdir(work_dir)
    try:
        # Save the input data file: top-20 feature columns plus the target column.
        pd.concat([feat_df.loc[:, top20], target_df], axis=1).to_csv("data.csv")
        with open("sisso.json", "w") as f:
            json.dump(model_input, f, indent=4)
        inputs_base = Inputs("sisso.json")
        feature_space = FeatureSpace(inputs_base)
        sisso = SISSORegressor(inputs_base, feature_space)
        sisso.fit()
    finally:
        # Always return to the starting directory — even if fitting fails —
        # so relative paths resolve correctly for the next target.
        os.chdir(parent_dir)
Load saved models to get the equations#
# Load the stored models and render each target's equation and coefficients.
for target in target_names:
    # Best rung-1, dimension-1 model written by the training loop above.
    m = load_model(train_file=f"{target}/rung_1/models/train_dim_1_model_0.dat")
    coef_values = [round(c, 3) for c in m.coefs[0]]
    # SISSO stores the descriptor coefficients first and the intercept last
    # (the original code read a_0 from index 0 and c_0 from index -1), so
    # building the string from all entries also handles higher-dimensional
    # models, e.g. train_dim_2_model_0.dat, without further edits.
    terms = [f"$a_{i}$: {c}" for i, c in enumerate(coef_values[:-1])]
    coef_string = "#### " + ", ".join([f"$c_0$: {coef_values[-1]}"] + terms)
    display(Markdown("#### " + target))
    # Raw string avoids the invalid "\_" escape warning on modern Python.
    display(Markdown("#### " + m.latex_str.replace(", ", r"\_").strip()))
    display(Markdown(coef_string))
    display(Markdown("-----"))
Approach 2: GA-assisted feature selection for SISSO#
⚠️ Caution: Long runtime
The following code snippet is computationally expensive and, in its current implementation, may take more than 24 hours to converge for a single target.
num_jobs = psutil.cpu_count(logical=False)  # This will use all physical cores on the system. Please reduce it as per needs
# Label used to name the per-target working directories below.
model_type = "ga_sisso"
# Path to the compiled SISSO++ executable invoked by the GA selector.
sissopp_binary_path = "/path/to/compiled/sisso++"
# SISSO++ input parameters for the GA-assisted runs; `property_key` is
# filled in per target inside the loop below.
sissopp_inputs = {
    "data_file": "data.csv",          # input data written per target before fitting
    "property_key": "",               # set to the target column name per iteration
    "desc_dim": 3,                    # maximum descriptor dimension
    "n_sis_select": 100,              # features selected per SIS step
    "max_leaves": 4,
    "max_rung": 2,                    # Descriptors for rung 2 models are optimized, if want rung 1, simply change this value to 1.
    "calc_type": "regression",
    "min_abs_feat_val": 1e-5,         # features outside this absolute-value
    "max_abs_feat_val": 1e8,          # range are discarded
    "n_residual": 10,
    "n_models_store": 1,              # only the best model is written to disk
    "n_rung_generate": 1,
    "n_rung_store": 0,
    "leave_out_frac": 0.0,            # no hold-out split: train on all rows
    "leave_out_inds": [],
    "verbose": False,
    # Unary/binary operators allowed when building candidate features.
    "opset": ["add", "sub", "abs_diff", "mult", "div", "inv", "abs", "exp", "log", "sq", "cb", "sqrt", "cbrt", "neg_exp"]
}
# GA-assisted feature selection + SISSO training, one run per target.
for target_name in target_names:
    for feat_type in ["matminer_lob"]:
        target, feat = get_dataset(feat_type=feat_type, target_name=target_name,
                                   data_parent_dir=data_parent_dir)
        run_dir = f"{model_type}_{target_name}_{feat_type}"
        os.makedirs(run_dir, exist_ok=True)
        os.chdir(run_dir)
        try:
            # Drop descriptor columns containing NaNs before selection.
            feat = feat.dropna(axis=1)
            y = target.values.flatten()
            # Stage 1: all-relevant feature selection (ARFS) pre-filter.
            pipe, X_train_fil = get_relevant_features(
                X_train=feat,
                y_train=y,
                grootcv_n_iter=50,
                **{"all_rel_feats__n_jobs": num_jobs})
            # Per-run copy so the shared module-level template is never mutated.
            model_input = sissopp_inputs.copy()
            model_input["property_key"] = target_name
            # Stage 2: differential-evolution search for the 20-descriptor
            # subset that minimizes the SISSO model's MAE.
            selector = GAFeatureSelector(
                X=X_train_fil,
                y=y,
                model=None,
                scoring="neg_mean_absolute_error",
                X_test=None,
                y_test=None,
                n_jobs=num_jobs,  # reuse the value computed above
                return_train_score=False,
                **{"num_features": 20,
                   "sissopp_binary_path": sissopp_binary_path,
                   "sissopp_inputs": model_input,
                   "mpi_tasks": num_jobs,
                   "population_size": 20,
                   "generations": 50}
            )
            selected_features = selector.run(strategy="de")
            # Persist how often each feature was picked across generations.
            with open("feature_usage_counts.json", "w") as f:
                json.dump(selector.feature_usage_counts, f)
            # Final fit on the GA-selected feature subset.
            X_train_final = X_train_fil.loc[:, selected_features]
            pd.concat([X_train_final, target], axis=1).to_csv("data.csv")
            with open("sisso.json", "w") as f:
                json.dump(model_input, f, indent=4)
            inputs = Inputs("sisso.json")
            _, model = get_fs_solver(inputs, allow_overwrite=False)
            model.fit()
        finally:
            # Return to the starting directory even if any stage fails, so
            # the next run's relative paths resolve correctly.
            os.chdir(parent_dir)