Skip to content

LSTM

A 2-layer Long Short-Term Memory neural network for sequence-based pitch prediction.

Overview

  • Type: Sequence
  • Library: PyTorch
  • Registry name: lstm
  • Class: LSTMModel
  • Network: PitchPredictor

Architecture

Input (batch, window_size, n_features)
    → LSTM (2 layers, hidden_size=64, dropout=0.3)
    → Take last hidden state
    → Fully connected → num_classes

Configuration

# configs/models/lstm.yaml
model_type: lstm
hidden_size: 64
num_layers: 2
dropout: 0.3
epochs: 20
learning_rate: 0.001
batch_size: 256
Parameter Default Description
hidden_size 64 LSTM hidden dimension
num_layers 2 Number of stacked LSTM layers
dropout 0.3 Dropout rate
epochs 20 Maximum training epochs
learning_rate 0.001 Adam optimizer learning rate
batch_size 256 Training batch size
patience 5 Early stopping patience

Usage

from pitch_sequencing import get_model

model = get_model("lstm", {
    "hidden_size": 64,
    "num_layers": 2,
    "epochs": 20,
    "batch_size": 256
})

# X_train shape: (n_samples, window_size, n_features)
model.fit(X_train, y_train, X_val=X_val, y_val=y_val)

predictions = model.predict(X_test)
probabilities = model.predict_proba(X_test)

API Reference

pitch_sequencing.models.lstm.LSTMModel

Bases: BaseModel

LSTM wrapper implementing BaseModel interface.

Source code in src/pitch_sequencing/models/lstm.py
class LSTMModel(BaseModel):
    """LSTM wrapper implementing the BaseModel interface.

    Wraps the ``PitchPredictor`` LSTM network behind the project's
    fit / predict / predict_proba contract. Inputs to ``fit`` and the
    predict methods are sequence arrays of shape
    (n_samples, window_size, n_features).
    """

    def __init__(self, config=None):
        """Initialize hyperparameters from *config* (a dict; all keys optional).

        Recognized keys and defaults: hidden_size=64, num_layers=2,
        dropout=0.3, epochs=20, learning_rate=0.001, batch_size=256.
        """
        config = config or {}
        self.hidden_size = config.get("hidden_size", 64)
        self.num_layers = config.get("num_layers", 2)
        self.dropout = config.get("dropout", 0.3)
        self.epochs = config.get("epochs", 20)
        self.lr = config.get("learning_rate", 0.001)
        self.batch_size = config.get("batch_size", 256)
        self._model = None  # built lazily in fit(), once input_size is known
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._num_classes = None  # inferred from y_train in fit()
        self._history = None  # training history returned by train_torch_model

    @property
    def name(self) -> str:
        return "LSTM"

    @property
    def model_type(self) -> str:
        return "sequence"

    def fit(self, X_train, y_train, X_val=None, y_val=None, **kwargs):
        """Train the network.

        X_train shape: (n_samples, window_size, n_features).
        If no (X_val, y_val) pair is supplied, the last 20% of the
        training data is held out for validation and excluded from the
        training loader. (Previously the full X_train was still used for
        training, so the validation split leaked into training and the
        early-stopping/validation metrics were optimistic.)

        Returns self, so calls can be chained.
        """
        input_size = X_train.shape[2]
        self._num_classes = len(np.unique(y_train))

        self._model = PitchPredictor(
            input_size=input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            num_classes=self._num_classes,
            dropout=self.dropout,
        )

        if X_val is not None and y_val is not None:
            train_ds = PitchSequenceDataset(X_train, y_train)
            val_ds = PitchSequenceDataset(X_val, y_val)
        else:
            # Hold out the last 20% of training data as validation and
            # train only on the first 80%, so validation is untouched.
            split = int(len(X_train) * 0.8)
            train_ds = PitchSequenceDataset(X_train[:split], y_train[:split])
            val_ds = PitchSequenceDataset(X_train[split:], y_train[split:])

        train_loader = DataLoader(train_ds, batch_size=self.batch_size, shuffle=True)
        val_loader = DataLoader(val_ds, batch_size=self.batch_size, shuffle=False)

        self._history = train_torch_model(
            self._model, train_loader, val_loader,
            epochs=self.epochs, lr=self.lr, device=self._device,
        )
        return self

    def _check_fitted(self):
        """Raise a clear error if predict is called before fit."""
        if self._model is None:
            raise RuntimeError(
                "LSTMModel is not fitted; call fit() before predict/predict_proba."
            )

    def _inference_loader(self, X):
        """Build a non-shuffling DataLoader over X with dummy labels."""
        # PitchSequenceDataset requires labels; zeros are placeholders only.
        ds = PitchSequenceDataset(X, np.zeros(len(X), dtype=np.int64))
        return DataLoader(ds, batch_size=self.batch_size, shuffle=False)

    def predict(self, X) -> np.ndarray:
        """Return predicted class indices for sequences X."""
        self._check_fitted()
        preds, _ = predict_torch_model(self._model, self._inference_loader(X), self._device)
        return preds

    def predict_proba(self, X) -> np.ndarray:
        """Return class probabilities for sequences X."""
        self._check_fitted()
        _, probs = predict_torch_model(self._model, self._inference_loader(X), self._device)
        return probs

    def get_params(self) -> dict:
        """Return the model's hyperparameters (mirrors __init__ config keys)."""
        return {
            "hidden_size": self.hidden_size,
            "num_layers": self.num_layers,
            "dropout": self.dropout,
            "epochs": self.epochs,
            "learning_rate": self.lr,
            "batch_size": self.batch_size,
        }