Figure & Subplots

🖼️
plt.figure / plt.subplots / fig.add_gridspec
Create figures, axes grids, and complex layouts — the foundation of every plot
figure · subplots · gridspec
▾
Syntax
Example
Internals
import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec fig, ax = plt.subplots( nrows=1, ncols=1, figsize=(10, 6), # width, height in inches dpi=100, # dots per inch (use 150+ for reports) sharex=False, sharey=False, constrained_layout=True # replaces tight_layout (no overlaps) ) # GridSpec โ€” unequal subplot sizes fig = plt.figure(figsize=(12, 8)) gs = gridspec.GridSpec(nrows=2, ncols=2, height_ratios=[2, 1], hspace=0.4, wspace=0.3) ax_main = fig.add_subplot(gs[0, :]) # top row, spans all cols ax_left = fig.add_subplot(gs[1, 0]) ax_right = fig.add_subplot(gs[1, 1])
python
# ML dashboard: loss + accuracy + confusion matrix
fig, axes = plt.subplots(1, 3, figsize=(15, 4), constrained_layout=True)
ax_loss, ax_acc, ax_cm = axes

# Figure-level super-title above all three panels
fig.suptitle('Model Training Report', fontsize=16, fontweight='bold')

# Free the figure's memory when done (important inside loops)
plt.close(fig)          # or plt.close('all')

# Prefer the object-oriented API (ax.*) over plt.* in multi-subplot code
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
ax.set_title('Training')
ax.legend(loc='upper right', framealpha=0.9)

Figure vs Axes (Artist hierarchy):

  • Figure — top-level container (the whole canvas).
  • Axes — a single plot area within the figure. All drawing happens on Axes.
  • plt.* (pyplot API) operates on the "current" axes — unsafe in loops and notebooks. Always use the OO API (ax.*) for multi-subplot work.
  • constrained_layout=True automatically adjusts subplot params to prevent overlapping. Prefer it over fig.tight_layout().

Line & Scatter Plots

📉
ax.plot / ax.scatter
Training curves, decision boundaries, and feature relationships
plot · scatter · training curve
▾
Syntax
Example
ax.plot(x, y, color='#4f9ef7', linewidth=2, linestyle='-', # '--','-.',':', 'solid' marker='o', markersize=4, label='Train Loss', alpha=0.9 ) ax.scatter(x, y, c=colors, # colour by class/value cmap='viridis', s=40, # marker size alpha=0.6, edgecolors='k', linewidths=0.5 )
python
# Training vs validation loss curve
fig, ax = plt.subplots(figsize=(8, 4))
n_epochs = len(history['loss'])
epochs = np.arange(1, n_epochs + 1)
ax.plot(epochs, history['loss'], color='#4f9ef7', lw=2, label='Train')
ax.plot(epochs, history['val_loss'], color='#f7734f', lw=2,
        linestyle='--', label='Val')
# Shade the generalisation gap between the two curves
ax.fill_between(epochs, history['loss'], history['val_loss'],
                alpha=0.1, color='#f7734f')
# Mark the epoch with the lowest validation loss
best_idx = np.argmin(history['val_loss'])
ax.axvline(best_idx + 1, color='green', ls='--', lw=1,
           label=f'Best ({best_idx+1})')
ax.legend()
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
plt.savefig('loss_curve.png', dpi=150, bbox_inches='tight')

# Scatter: 2D PCA coloured by class
fig, ax = plt.subplots(figsize=(7, 5))
for cls in np.unique(y):
    in_class = (y == cls)
    ax.scatter(X_pca[in_class, 0], X_pca[in_class, 1],
               s=30, alpha=0.6, label=f'Class {cls}')
ax.legend()
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')

Distribution Plots

📊
ax.hist / ax.boxplot / ax.violinplot
Inspect feature distributions, outliers, and class-conditional shapes
histogram · boxplot · violin
▾
Syntax
Example
ax.hist(x, bins=30, density=False, # density=True โ†’ PDF (area=1) histtype='stepfilled', # 'bar','step','stepfilled' alpha=0.7, edgecolor='k') ax.boxplot(data, # list of arrays or 2D array labels=['A','B'], notch=True, # confidence interval notch showfliers=True, # show outliers patch_artist=True) # filled boxes ax.violinplot(dataset, showmedians=True, showextrema=True)
python
# Overlapping histograms by class
fig, ax = plt.subplots(figsize=(8, 4))
class_colors = {0: '#4f9ef7', 1: '#f7734f'}
for cls, color in class_colors.items():
    ax.hist(X[y == cls, 0], bins=40, density=True,
            alpha=0.5, color=color, label=f'Class {cls}')
ax.legend()
ax.set_xlabel('Feature 0')

# Boxplot per feature (check spread and outliers)
fig, ax = plt.subplots(figsize=(10, 4))
feature_columns = [X[:, i] for i in range(X.shape[1])]
bp = ax.boxplot(feature_columns, patch_artist=True, showfliers=True)
for box in bp['boxes']:
    box.set_facecolor('#4f9ef750')
ax.set_xticklabels(feature_names, rotation=45)
ax.set_ylabel('Value')

# Violin plot — distribution shape per class
fig, ax = plt.subplots(figsize=(8, 4))
per_class = [X[y == 0, 0], X[y == 1, 0]]
vp = ax.violinplot(per_class, showmedians=True, showextrema=True)
for body in vp['bodies']:
    body.set_alpha(0.7)
ax.set_xticks([1, 2])
ax.set_xticklabels(['Class 0', 'Class 1'])

Heatmaps & Images

🌡️
ax.imshow — Confusion Matrix / Correlation Heatmap / Feature Maps
Visualize 2D matrices, images, and activation maps with colorbars and annotations
imshow · colorbar · confusion matrix · heatmap
▾
Syntax
Confusion Matrix
Correlation Heatmap
im = ax.imshow(data, cmap='Blues', # 'viridis','RdBu_r','coolwarm' vmin=None, vmax=None, # clip colorscale aspect='auto', # 'equal' for square pixels origin='upper', # 'lower' for math coords interpolation='nearest' # no blur for discrete matrices ) fig.colorbar(im, ax=ax, shrink=0.8) # add colorbar
python
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(y_true, y_pred, labels=None):
    """Plot a row-normalized confusion matrix annotated with counts and percentages.

    Parameters
    ----------
    y_true, y_pred : array-like
        Ground-truth and predicted class labels.
    labels : sequence of str, optional
        Tick labels for the classes; integer positions are used when omitted.

    Returns
    -------
    matplotlib.figure.Figure
        The figure (also written to 'cm.png' as a side effect).
    """
    cm = confusion_matrix(y_true, y_pred)
    # Guard against empty rows (a class absent from y_true): the plain
    # division produced 0/0 -> NaN cells; clamping the denominator to 1
    # yields 0.0 for such rows instead.
    row_sums = cm.sum(axis=1, keepdims=True)
    cm_norm = cm.astype(float) / np.maximum(row_sums, 1)

    fig, ax = plt.subplots(figsize=(6, 5))
    im = ax.imshow(cm_norm, cmap='Blues',
                   vmin=0, vmax=1, interpolation='nearest')
    fig.colorbar(im, ax=ax, shrink=0.8)

    # Annotate each cell with raw count and row percentage; flip text
    # colour to white on dark (high-value) cells for readability.
    thresh = cm_norm.max() / 2
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, f'{cm[i,j]}\n({cm_norm[i,j]:.1%})',
                   ha='center', va='center', fontsize=9,
                   color='white' if cm_norm[i,j] > thresh else 'black')

    n = cm.shape[0]
    ax.set_xticks(np.arange(n)); ax.set_yticks(np.arange(n))
    # `is not None` rather than truthiness: a numpy array of labels raises
    # "truth value is ambiguous" under `if labels:`.
    if labels is not None:
        ax.set_xticklabels(labels); ax.set_yticklabels(labels)
    ax.set_xlabel('Predicted'); ax.set_ylabel('True')
    ax.set_title('Confusion Matrix (Normalized)')
    plt.savefig('cm.png', dpi=150, bbox_inches='tight')
    return fig
python
import pandas as pd

def plot_correlation_heatmap(df, threshold=0.0):
    """Plot a feature-correlation heatmap, annotating only the lower triangle.

    Parameters
    ----------
    df : pandas.DataFrame
        Data whose pairwise column correlations are plotted.
    threshold : float, optional
        Only annotate cells with |corr| >= threshold (default 0.0 = all).

    Returns
    -------
    matplotlib.figure.Figure
        The heatmap figure.
    """
    corr = df.corr().round(2)
    # Boolean mask of the strict upper triangle (k=1 keeps the diagonal visible)
    mask = np.triu(np.ones_like(corr, dtype=bool), k=1)

    fig, ax = plt.subplots(figsize=(len(df.columns) * 0.8,
                                     len(df.columns) * 0.7))
    im = ax.imshow(corr, cmap='RdBu_r', vmin=-1, vmax=1)
    fig.colorbar(im, ax=ax)

    # Annotate lower triangle + diagonal only. The original inner loop,
    # `range(len(corr) if not mask[i,j] else 0)`, referenced `j` before it
    # was bound (NameError on first iteration); use an explicit skip instead.
    for i in range(len(corr)):
        for j in range(len(corr)):
            if mask[i, j]:
                continue
            val = corr.iloc[i, j]
            if np.abs(val) >= threshold:
                ax.text(j, i, f'{val}', ha='center', va='center',
                       fontsize=8,
                       color='white' if np.abs(val) > 0.5 else 'black')

    ax.set_xticks(np.arange(len(corr)))
    ax.set_yticks(np.arange(len(corr)))
    ax.set_xticklabels(corr.columns, rotation=45, ha='right')
    ax.set_yticklabels(corr.columns)
    ax.set_title('Feature Correlation Matrix')
    return fig

Bar Plots & Feature Importance

📋
ax.bar / ax.barh / ax.errorbar
Feature importance, class counts, and metric comparisons with error bars
bar · feature importance · error bars
▾
Example
python
# Feature importance plot (sorted)
importances = model.feature_importances_
sorted_idx = np.argsort(importances)
# Hoist the loop invariant: the original recomputed np.percentile once per
# feature inside the comprehension.
top_cut = np.percentile(importances, 80)
colors = ['#e74c3c' if imp >= top_cut else '#4f9ef7'
          for imp in importances[sorted_idx]]

fig, ax = plt.subplots(figsize=(8, max(4, len(importances)//2)))
ax.barh(np.array(feature_names)[sorted_idx],
        importances[sorted_idx], color=colors, edgecolor='k', lw=0.3)
ax.axvline(0, color='k', lw=0.5)   # baseline at zero importance
ax.set_xlabel('Importance')
ax.set_title('Feature Importances (top 20% highlighted)')

# Grouped bar: model comparison with std error
models = ['LR', 'RF', 'GBM', 'NN']
means  = [0.82, 0.89, 0.91, 0.90]
stds   = [0.02, 0.01, 0.01, 0.015]

fig, ax = plt.subplots(figsize=(7, 4))
x = np.arange(len(models))
bars = ax.bar(x, means, yerr=stds, capsize=5,
              color='#4f9ef7', alpha=0.8,
              error_kw=dict(elinewidth=1.5, ecolor='k'))
ax.set_xticks(x); ax.set_xticklabels(models)
ax.set_ylim(0.75, 0.95); ax.set_ylabel('ROC-AUC')
ax.set_title('Model Comparison (5-fold CV ± std)')
# Print each bar's mean value just above it
ax.bar_label(bars, labels=[f'{m:.3f}' for m in means], padding=3)

Annotations & Twin Axes

โœ๏ธ
ax.annotate / ax.axhline / ax.axvline / ax.twinx
Add reference lines, text annotations, and dual y-axes for multi-metric plots
annotatetwinxreference lines
โ–พ
Example
python
# Twin y-axis: loss and accuracy on same plot
fig, ax_loss = plt.subplots(figsize=(9, 4))
ax_acc = ax_loss.twinx()

epochs = np.arange(1, 51)
ax_loss.plot(epochs, train_loss, 'b-', lw=2, label='Train Loss')
ax_loss.plot(epochs, val_loss, 'b--', lw=2, label='Val Loss')
ax_acc.plot(epochs, train_acc, 'r-', lw=2, label='Train Acc')
ax_acc.plot(epochs, val_acc, 'r--', lw=2, label='Val Acc')

# Colour each y-axis to match its own curves
ax_loss.set_ylabel('Loss', color='blue')
ax_acc.set_ylabel('Accuracy', color='red')
ax_loss.tick_params(axis='y', labelcolor='blue')
ax_acc.tick_params(axis='y', labelcolor='red')

# Merge the legends of both axes into a single box
handles_l, labels_l = ax_loss.get_legend_handles_labels()
handles_a, labels_a = ax_acc.get_legend_handles_labels()
ax_loss.legend(handles_l + handles_a, labels_l + labels_a, loc='upper right')

# Point an arrow at the best validation loss
best_ep = np.argmin(val_loss)
ax_loss.annotate(f'Best val loss\n{val_loss[best_ep]:.4f}',
                 xy=(best_ep + 1, val_loss[best_ep]),
                 xytext=(best_ep + 5, val_loss[best_ep] + 0.1),
                 arrowprops=dict(arrowstyle='->', color='gray'),
                 fontsize=9, color='gray')

plt.savefig('training_report.png', dpi=150, bbox_inches='tight')

Style, Themes & Export

🎨
plt.style / rcParams / savefig
Apply consistent themes and export publication-quality figures
style · rcParams · export
▾
Example
python
# Apply a clean style globally ('ggplot' and 'dark_background' also work)
plt.style.use('seaborn-v0_8-whitegrid')

# Custom global defaults via rcParams
plt.rcParams.update({
    'figure.facecolor':  'white',
    'font.family':       'DejaVu Sans',
    'font.size':         11,
    'axes.titlesize':    13,
    'axes.labelsize':    11,
    'axes.spines.top':   False,
    'axes.spines.right': False,
    'figure.dpi':        100,
    'savefig.dpi':       150,
    'savefig.bbox':      'tight',
})

# Save as PNG (raster) or PDF/SVG (vector)
fig.savefig('figure.png', dpi=150, bbox_inches='tight')
fig.savefig('figure.pdf', bbox_inches='tight')   # vector output for publications

# Context manager: apply a style to one block only
with plt.style.context('dark_background'):
    fig, ax = plt.subplots()
    ax.plot(x, y, color='cyan')
    plt.savefig('dark_plot.png')
# Style reverts here

# List every style shipped with this matplotlib install
print(plt.style.available)