Matplotlib
Figure & subplots, line/scatter/hist/bar/imshow/boxplot/violin/heatmap, twin axes, annotations, styles, and publication-ready export for ML visualizations.
Figure & Subplots
Line & Scatter Plots
ax.plot / ax.scatter
Training curves, decision boundaries, and feature relationships
▸
Syntax
Example
# Line plot — core kwargs for training curves
ax.plot(x, y,
        color='#4f9ef7', linewidth=2,
        linestyle='-',             # '--', '-.', ':', 'solid'
        marker='o', markersize=4,
        label='Train Loss', alpha=0.9)

# Scatter — core kwargs for class/value-coloured points
ax.scatter(x, y,
           c=colors,               # colour by class/value
           cmap='viridis',
           s=40,                   # marker size
           alpha=0.6, edgecolors='k',
           linewidths=0.5)
python
# Training vs validation loss curve
fig, ax = plt.subplots(figsize=(8, 4))
epochs = np.arange(1, len(history['loss']) + 1)
ax.plot(epochs, history['loss'], label='Train', color='#4f9ef7', lw=2)
ax.plot(epochs, history['val_loss'], label='Val', color='#f7734f', lw=2,
        linestyle='--')
ax.fill_between(epochs, history['loss'], history['val_loss'],
                alpha=0.1, color='#f7734f')  # gap shading
# Mark best epoch (argmin is 0-based; epochs start at 1, hence the +1)
best = np.argmin(history['val_loss'])
ax.axvline(best + 1, color='green', ls='--', lw=1, label=f'Best ({best+1})')
ax.legend(); ax.set_xlabel('Epoch'); ax.set_ylabel('Loss')
plt.savefig('loss_curve.png', dpi=150, bbox_inches='tight')

# Scatter: 2D PCA coloured by class (one scatter call per class → auto colours + legend)
fig, ax = plt.subplots(figsize=(7, 5))
for cls in np.unique(y):
    mask = y == cls
    ax.scatter(X_pca[mask, 0], X_pca[mask, 1],
               label=f'Class {cls}', alpha=0.6, s=30)
ax.legend(); ax.set_xlabel('PC1'); ax.set_ylabel('PC2')
Distribution Plots
ax.hist / ax.boxplot / ax.violinplot
Inspect feature distributions, outliers, and class-conditional
shapes
▸
Syntax
Example
# Histogram — density=True → PDF (area sums to 1)
ax.hist(x,
        bins=30, density=False,
        histtype='stepfilled',     # 'bar', 'step', 'stepfilled'
        alpha=0.7, edgecolor='k')

# Boxplot — data is a list of arrays or a 2D array
ax.boxplot(data,
           labels=['A', 'B'],      # NOTE: renamed to tick_labels in Matplotlib ≥3.9
           notch=True,             # confidence-interval notch
           showfliers=True,        # show outliers
           patch_artist=True)      # filled boxes (enables facecolor styling)

# Violin plot
ax.violinplot(dataset,
              showmedians=True, showextrema=True)
python
# Overlapping histograms by class
fig, ax = plt.subplots(figsize=(8, 4))
for cls, color in zip([0, 1], ['#4f9ef7', '#f7734f']):
    ax.hist(X[y == cls, 0], bins=40, density=True,
            alpha=0.5, color=color, label=f'Class {cls}')
ax.legend(); ax.set_xlabel('Feature 0')

# Boxplot per feature (check spread and outliers)
fig, ax = plt.subplots(figsize=(10, 4))
bp = ax.boxplot([X[:, i] for i in range(X.shape[1])],
                patch_artist=True, showfliers=True)
for patch in bp['boxes']:
    patch.set_facecolor('#4f9ef750')   # 8-digit hex: RGB + alpha channel
ax.set_xticklabels(feature_names, rotation=45)
ax.set_ylabel('Value')

# Violin plot — distribution shape per class
fig, ax = plt.subplots(figsize=(8, 4))
vp = ax.violinplot([X[y == 0, 0], X[y == 1, 0]],
                   showmedians=True, showextrema=True)
for body in vp['bodies']:
    body.set_alpha(0.7)
# Violin positions default to 1, 2, ... — label them explicitly
ax.set_xticks([1, 2]); ax.set_xticklabels(['Class 0', 'Class 1'])
Heatmaps & Images
ax.imshow → Confusion Matrix / Correlation Heatmap / Feature Maps
Visualize 2D matrices, images, and activation maps with colorbars
and annotations
▸
Syntax
Confusion Matrix
Correlation Heatmap
# imshow — core kwargs for matrices, images, and activation maps
im = ax.imshow(data,
               cmap='Blues',              # 'viridis', 'RdBu_r', 'coolwarm'
               vmin=None, vmax=None,      # clip colorscale
               aspect='auto',             # 'equal' for square pixels
               origin='upper',            # 'lower' for math coords
               interpolation='nearest')   # no blur for discrete matrices
fig.colorbar(im, ax=ax, shrink=0.8)       # add colorbar
python
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(y_true, y_pred, labels=None):
    """Plot a row-normalized confusion matrix annotated with counts and percents.

    Parameters
    ----------
    y_true, y_pred : array-like of shape (n_samples,)
        Ground-truth and predicted class labels.
    labels : sequence of str, optional
        Tick labels for the classes; defaults to integer positions.

    Returns
    -------
    matplotlib.figure.Figure
        The figure, also saved to 'cm.png'.
    """
    cm = confusion_matrix(y_true, y_pred)
    row_sums = cm.sum(axis=1, keepdims=True)
    # Normalize per true-class row; all-zero rows (class absent from y_true)
    # become 0 instead of NaN
    cm_norm = np.divide(cm.astype(float), row_sums,
                        out=np.zeros(cm.shape, dtype=float),
                        where=row_sums != 0)
    fig, ax = plt.subplots(figsize=(6, 5))
    im = ax.imshow(cm_norm, cmap='Blues',
                   vmin=0, vmax=1, interpolation='nearest')
    fig.colorbar(im, ax=ax, shrink=0.8)
    # Annotate each cell; switch to white text on dark (high-value) cells
    thresh = cm_norm.max() / 2
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, f'{cm[i,j]}\n({cm_norm[i,j]:.1%})',
                    ha='center', va='center', fontsize=9,
                    color='white' if cm_norm[i, j] > thresh else 'black')
    n = cm.shape[0]
    ax.set_xticks(np.arange(n)); ax.set_yticks(np.arange(n))
    if labels:
        ax.set_xticklabels(labels); ax.set_yticklabels(labels)
    ax.set_xlabel('Predicted'); ax.set_ylabel('True')
    ax.set_title('Confusion Matrix (Normalized)')
    plt.savefig('cm.png', dpi=150, bbox_inches='tight')
    return fig
python
import pandas as pd

def plot_correlation_heatmap(df, threshold=0.0):
    """Plot a lower-triangle feature-correlation heatmap with annotations.

    Parameters
    ----------
    df : pandas.DataFrame
        Numeric feature columns (non-numeric columns raise in df.corr()
        on recent pandas — TODO confirm target pandas version).
    threshold : float, default 0.0
        Only annotate cells whose absolute correlation meets this value.

    Returns
    -------
    matplotlib.figure.Figure
    """
    corr = df.corr().round(2)
    # Mask the redundant upper triangle (k=1 keeps the diagonal visible)
    mask = np.triu(np.ones_like(corr, dtype=bool), k=1)
    fig, ax = plt.subplots(figsize=(len(df.columns) * 0.8,
                                    len(df.columns) * 0.7))
    im = ax.imshow(corr, cmap='RdBu_r', vmin=-1, vmax=1)
    fig.colorbar(im, ax=ax)
    # Annotate unmasked cells above the threshold
    # (original looped over a range that referenced j before it was bound)
    for i in range(len(corr)):
        for j in range(len(corr)):
            if mask[i, j]:
                continue
            val = corr.iloc[i, j]
            if np.abs(val) >= threshold:
                ax.text(j, i, f'{val}', ha='center', va='center',
                        fontsize=8,
                        color='white' if np.abs(val) > 0.5 else 'black')
    ax.set_xticks(np.arange(len(corr)))
    ax.set_yticks(np.arange(len(corr)))
    ax.set_xticklabels(corr.columns, rotation=45, ha='right')
    ax.set_yticklabels(corr.columns)
    ax.set_title('Feature Correlation Matrix')
    return fig
Bar Plots & Feature Importance
ax.bar / ax.barh / ax.errorbar
Feature importance, class counts, and metric comparisons with
error bars
▸
Example
python
# Feature importance plot (sorted; top quintile highlighted in red)
importances = model.feature_importances_
sorted_idx = np.argsort(importances)
cutoff = np.percentile(importances, 80)  # hoisted: constant for the whole plot
colors = ['#e74c3c' if val >= cutoff else '#4f9ef7'
          for val in importances[sorted_idx]]
fig, ax = plt.subplots(figsize=(8, max(4, len(importances) // 2)))
ax.barh(np.array(feature_names)[sorted_idx],
        importances[sorted_idx], color=colors, edgecolor='k', lw=0.3)
ax.axvline(0, color='k', lw=0.5)
ax.set_xlabel('Importance')
ax.set_title('Feature Importances (top 20% highlighted)')

# Grouped bar: model comparison with std error bars
models = ['LR', 'RF', 'GBM', 'NN']
means = [0.82, 0.89, 0.91, 0.90]
stds = [0.02, 0.01, 0.01, 0.015]
fig, ax = plt.subplots(figsize=(7, 4))
x = np.arange(len(models))
bars = ax.bar(x, means, yerr=stds, capsize=5,
              color='#4f9ef7', alpha=0.8,
              error_kw=dict(elinewidth=1.5, ecolor='k'))
ax.set_xticks(x); ax.set_xticklabels(models)
ax.set_ylim(0.75, 0.95); ax.set_ylabel('ROC-AUC')
ax.set_title('Model Comparison (5-fold CV ± std)')  # mojibake 'ยฑ' repaired
ax.bar_label(bars, labels=[f'{m:.3f}' for m in means], padding=3)
Annotations & Twin Axes
ax.annotate / ax.axhline / ax.axvline / ax.twinx
Add reference lines, text annotations, and dual y-axes for
multi-metric plots
▸
Example
python
# Twin y-axis: loss and accuracy share one x-axis
fig, ax1 = plt.subplots(figsize=(9, 4))
ax2 = ax1.twinx()
epochs = np.arange(1, 51)

# Loss on the left axis (blue), accuracy on the right (red);
# solid = train, dashed = validation
ax1.plot(epochs, train_loss, 'b-', label='Train Loss', lw=2)
ax1.plot(epochs, val_loss, 'b--', label='Val Loss', lw=2)
ax2.plot(epochs, train_acc, 'r-', label='Train Acc', lw=2)
ax2.plot(epochs, val_acc, 'r--', label='Val Acc', lw=2)

# Colour each y-axis label and its ticks to match its curves
for axis, axis_name, axis_colour in ((ax1, 'Loss', 'blue'),
                                     (ax2, 'Accuracy', 'red')):
    axis.set_ylabel(axis_name, color=axis_colour)
    axis.tick_params(axis='y', labelcolor=axis_colour)

# Merge the handles from both axes into one legend
handles1, texts1 = ax1.get_legend_handles_labels()
handles2, texts2 = ax2.get_legend_handles_labels()
ax1.legend(handles1 + handles2, texts1 + texts2, loc='upper right')

# Call out the epoch with the lowest validation loss
best_ep = np.argmin(val_loss)
ax1.annotate(f'Best val loss\n{val_loss[best_ep]:.4f}',
             xy=(best_ep + 1, val_loss[best_ep]),
             xytext=(best_ep + 5, val_loss[best_ep] + 0.1),
             arrowprops=dict(arrowstyle='->', color='gray'),
             fontsize=9, color='gray')
plt.savefig('training_report.png', dpi=150, bbox_inches='tight')
Style, Themes & Export
plt.style / rcParams / savefig
Apply consistent themes and export publication-quality figures
▸
Example
python
# Apply a clean style globally
plt.style.use('seaborn-v0_8-whitegrid') # or 'ggplot','dark_background'
# Custom global defaults via rcParams
plt.rcParams.update({
'figure.facecolor': 'white',
'font.family': 'DejaVu Sans',
'font.size': 11,
'axes.titlesize': 13,
'axes.labelsize': 11,
'axes.spines.top': False,
'axes.spines.right':False,
'figure.dpi': 100,
'savefig.dpi': 150,
'savefig.bbox': 'tight',
})
# Save as PNG (raster) or PDF/SVG (vector)
fig.savefig('figure.png', dpi=150, bbox_inches='tight')
fig.savefig('figure.pdf', bbox_inches='tight') # for publications
# Context manager: temporary style for one block
with plt.style.context('dark_background'):
fig, ax = plt.subplots()
ax.plot(x, y, color='cyan')
plt.savefig('dark_plot.png')
# Style reverts here
# Available styles
print(plt.style.available)