Hi, I would like to know if this piece of code could be converted to run out-of-core (without loading everything into memory). The data has 68 million rows, and training goes out of memory at around 250,000 rows with 30 GB of RAM. The hard disk is 73 GB. Would it be possible to train on all the data? Thanks. See the code below:
!pip install rdkit
!pip install duckdb
import duckdb
import numpy as np
import pandas as pd
# Paths to the competition data (Kaggle input is read-only).
train_path = '/kaggle/input/leash-predict-chemical-bindings/train.parquet'
test_path = '/kaggle/input/leash-predict-chemical-bindings/test.parquet'

# Rows sampled per class. DuckDB streams the parquet file, so only the
# sampled rows are ever materialized in RAM — raise this to train on more
# data, bounded by available memory downstream.
N_PER_CLASS = 200000

con = duckdb.connect()
try:
    # Balanced sample: N_PER_CLASS random negatives + N_PER_CLASS random
    # positives, concatenated.
    df = con.query(f"""(SELECT *
    FROM parquet_scan('{train_path}')
    WHERE binds = 0
    ORDER BY random()
    LIMIT {N_PER_CLASS})
    UNION ALL
    (SELECT *
    FROM parquet_scan('{train_path}')
    WHERE binds = 1
    ORDER BY random()
    LIMIT {N_PER_CLASS})""").df()
finally:
    # Close the connection even if the query raises; the original only
    # closed it on the success path.
    con.close()
df.head()
"""## Feature Preprocessing
Let's grab the SMILES for the fully assembled molecule (`molecule_smiles`) and generate ECFPs for it. We could choose a different radius or bit count, but 2 and 1024 are pretty standard.
"""
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import OneHotEncoder
# Parse each SMILES string into an RDKit Mol (parse failures yield None,
# which generate_ecfp below handles explicitly).
df['molecule'] = [Chem.MolFromSmiles(smiles) for smiles in df['molecule_smiles']]
# Generate ECFPs
def generate_ecfp(molecule, radius=2, bits=1024):
    """Return the Morgan/ECFP fingerprint of *molecule* as a list of bits.

    molecule: an RDKit Mol, or None (e.g. when SMILES parsing failed) —
        in that case None is returned unchanged.
    radius: Morgan fingerprint radius (2 ≈ ECFP4).
    bits: length of the bit vector.
    """
    if molecule is None:
        return None
    fingerprint = AllChem.GetMorganFingerprintAsBitVect(molecule, radius, nBits=bits)
    return list(fingerprint)
# Fingerprint every molecule; rows whose SMILES failed to parse get None here.
df['ecfp'] = [generate_ecfp(mol) for mol in df['molecule']]
"""## Train Model"""
# One-hot encode the protein_name (dense output so it can be hstacked).
onehot_encoder = OneHotEncoder(sparse_output=False)
protein_onehot = onehot_encoder.fit_transform(df['protein_name'].values.reshape(-1, 1))

# Drop rows whose SMILES failed to parse (their ecfp is None): the
# original list-concatenation would raise TypeError on a None fingerprint.
valid = df['ecfp'].notna()

# Stack fingerprints and protein one-hots into a single uint8 matrix.
# A uint8 ndarray stores one byte per bit slot — far less RAM than the
# original list-of-lists of Python int objects, which is what blows up
# memory as the sample size grows.
X = np.hstack([
    np.asarray(df.loc[valid, 'ecfp'].tolist(), dtype=np.uint8),
    protein_onehot[valid.to_numpy()].astype(np.uint8),
])
y = df.loc[valid, 'binds'].to_numpy()

# Split the data into train and test sets; stratify preserves the 50/50
# class balance of the deliberately balanced sample in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# Create and train the random forest model.
rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
rf_model.fit(X_train, y_train)

# Probability of the positive class on the held-out split.
y_pred_proba = rf_model.predict_proba(X_test)[:, 1]

# average_precision_score is the area under the precision-recall curve,
# i.e. the competition's mAP-style metric.
map_score = average_precision_score(y_test, y_pred_proba)
print(f"Mean Average Precision (mAP): {map_score:.2f}")
import os

# Score the test set chunk by chunk so the whole test file never has to
# fit in memory at once. NOTE(review): this reads test.csv although the
# parquet path (test_path) is defined above — confirm which file the
# dataset actually provides; the original comments said "parquet" while
# reading csv.
test_file = '/kaggle/input/leash-predict-chemical-bindings/test.csv'
output_file = 'submission.csv'  # appended to, one chunk at a time

# Start from a clean file: the loop appends, so a leftover submission.csv
# from a previous run would otherwise be duplicated (and its header check
# would suppress the header entirely).
if os.path.exists(output_file):
    os.remove(output_file)

for df_test in pd.read_csv(test_file, chunksize=100000):
    # Generate ECFPs for this chunk's molecules.
    df_test['molecule'] = df_test['molecule_smiles'].apply(Chem.MolFromSmiles)
    df_test['ecfp'] = df_test['molecule'].apply(generate_ecfp)
    # Unparsable SMILES yield ecfp=None; substitute an all-zero vector
    # (length must match generate_ecfp's bits=1024 default) so every test
    # id still receives a prediction row instead of crashing mid-run.
    df_test['ecfp'] = df_test['ecfp'].apply(
        lambda fp: fp if fp is not None else [0] * 1024
    )

    # One-hot encode protein_name with the encoder fitted on the train set.
    protein_onehot = onehot_encoder.transform(df_test['protein_name'].values.reshape(-1, 1))

    # Combine ECFPs and one-hot encoded protein_name. Named X_chunk so it
    # no longer clobbers the X_test hold-out split created during training.
    X_chunk = [ecfp + protein for ecfp, protein in zip(df_test['ecfp'].tolist(), protein_onehot.tolist())]

    # Probability of the positive class.
    probabilities = rf_model.predict_proba(X_chunk)[:, 1]

    # Append this chunk's predictions; the header is written only for the
    # first chunk (before the file exists).
    output_df = pd.DataFrame({'id': df_test['id'], 'binds': probabilities})
    output_df.to_csv(output_file, index=False, mode='a', header=not os.path.exists(output_file))
Thanks & Best Regards
Michael