Transformer

A Transformer encoder network using self-attention for pitch sequence prediction.

Overview

  • Type: Sequence
  • Library: PyTorch
  • Registry name: transformer
  • Class: TransformerModel
  • Network: PitchTransformer

Architecture

Input (batch, window_size, n_features)
    → Linear embedding → d_model dimensions
    → Sinusoidal positional encoding
    → TransformerEncoder (2 layers, 4 attention heads)
    → Mean pooling over sequence
    → Fully connected → num_classes
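
For orientation, here is a minimal PyTorch sketch of the flow above. It is an illustrative re-creation, not the library's PitchTransformer (layer names and the maximum sequence length of 512 are assumptions); the actual network lives in src/pitch_sequencing/models/transformer.py.

import math
import torch
import torch.nn as nn

class PitchTransformerSketch(nn.Module):
    """Illustrative re-creation of the architecture above, not the library class."""

    def __init__(self, n_features, num_classes, d_model=64, nhead=4,
                 num_layers=2, dim_feedforward=128, dropout=0.3, max_len=512):
        super().__init__()
        self.embedding = nn.Linear(n_features, d_model)

        # Fixed sinusoidal positional encoding, precomputed up to max_len.
        position = torch.arange(max_len).unsqueeze(1).float()
        div_term = torch.exp(torch.arange(0, d_model, 2).float()
                             * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(0))  # (1, max_len, d_model)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward,
            dropout=dropout, batch_first=True,
        )
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(d_model, num_classes)

    def forward(self, x):                      # x: (batch, window_size, n_features)
        x = self.embedding(x)                  # -> (batch, window_size, d_model)
        x = x + self.pe[:, : x.size(1)]        # add positional information
        x = self.encoder(x)                    # self-attention over the window
        x = x.mean(dim=1)                      # mean pooling over the sequence
        return self.fc(x)                      # -> (batch, num_classes) logits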

Configuration

# configs/models/transformer.yaml
model_type: transformer
d_model: 64
nhead: 4
num_layers: 2
dropout: 0.3
epochs: 20
learning_rate: 0.001
batch_size: 256

Parameter       Default   Description
d_model         64        Embedding dimension
nhead           4         Number of attention heads
num_layers      2         Number of Transformer encoder layers
dropout         0.3       Dropout rate
epochs          20        Maximum training epochs
learning_rate   0.001     Adam optimizer learning rate
batch_size      256       Training batch size
patience        5         Early stopping patience
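
The YAML file can also drive model construction directly. A small sketch, assuming PyYAML is installed and that the remaining keys map one-to-one onto the config dict shown under Usage below:

import yaml
from pitch_sequencing import get_model

with open("configs/models/transformer.yaml") as f:
    config = yaml.safe_load(f)

# model_type selects the registry entry; the remaining keys become the config dict.
model = get_model(config.pop("model_type"), config)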

Usage

from pitch_sequencing import get_model

model = get_model("transformer", {
    "d_model": 64,
    "nhead": 4,
    "num_layers": 2,
    "epochs": 20
})

model.fit(X_train, y_train, X_val=X_val, y_val=y_val)

predictions = model.predict(X_test)
probabilities = model.predict_proba(X_test)
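
fit expects 3-D sequence inputs of shape (n_samples, window_size, n_features) with integer class labels; the number of classes is inferred from y_train. A quick smoke test with random data (window and feature sizes here are hypothetical):

import numpy as np
from pitch_sequencing import get_model

# Hypothetical sizes: 1,000 windows of 10 pitches, 12 features per pitch, 6 pitch types.
X = np.random.rand(1000, 10, 12).astype(np.float32)
y = np.random.randint(0, 6, size=1000)

model = get_model("transformer", {"epochs": 1})
model.fit(X, y)

print(model.predict(X[:5]))          # predicted class indices for the first five windows
print(model.predict_proba(X[:5]))    # per-class probabilities, one row per window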

API Reference

pitch_sequencing.models.transformer.TransformerModel

Bases: BaseModel

Transformer wrapper implementing BaseModel interface.

Source code in src/pitch_sequencing/models/transformer.py
class TransformerModel(BaseModel):
    """Transformer wrapper implementing BaseModel interface."""

    def __init__(self, config=None):
        config = config or {}
        self.d_model = config.get("d_model", 64)
        self.nhead = config.get("nhead", 4)
        self.num_layers = config.get("num_layers", 2)
        self.dim_feedforward = config.get("dim_feedforward", 128)
        self.dropout = config.get("dropout", 0.2)
        self.epochs = config.get("epochs", 30)
        self.lr = config.get("learning_rate", 0.0005)
        self.batch_size = config.get("batch_size", 256)
        self._model = None
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._history = None

    @property
    def name(self) -> str:
        return "Transformer"

    @property
    def model_type(self) -> str:
        return "sequence"

    def fit(self, X_train, y_train, X_val=None, y_val=None, **kwargs):
        input_features = X_train.shape[2]
        num_classes = len(np.unique(y_train))

        self._model = PitchTransformer(
            input_features=input_features,
            num_classes=num_classes,
            d_model=self.d_model,
            nhead=self.nhead,
            num_layers=self.num_layers,
            dim_feedforward=self.dim_feedforward,
            dropout=self.dropout,
        )

        train_ds = PitchSequenceDataset(X_train, y_train)
        train_loader = DataLoader(train_ds, batch_size=self.batch_size, shuffle=True)

        if X_val is not None and y_val is not None:
            val_ds = PitchSequenceDataset(X_val, y_val)
        else:
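            # No validation data provided: reuse the last 20% of the training windows for validation.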
            split = int(len(X_train) * 0.8)
            val_ds = PitchSequenceDataset(X_train[split:], y_train[split:])
        val_loader = DataLoader(val_ds, batch_size=self.batch_size, shuffle=False)

        self._history = train_torch_model(
            self._model, train_loader, val_loader,
            epochs=self.epochs, lr=self.lr, device=self._device,
        )

    def predict(self, X) -> np.ndarray:
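        # PitchSequenceDataset requires labels, so pass zero placeholders;
        # only the features are used at inference time.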
        ds = PitchSequenceDataset(X, np.zeros(len(X), dtype=np.int64))
        loader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
        preds, _ = predict_torch_model(self._model, loader, self._device)
        return preds

    def predict_proba(self, X) -> np.ndarray:
        ds = PitchSequenceDataset(X, np.zeros(len(X), dtype=np.int64))
        loader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
        _, probs = predict_torch_model(self._model, loader, self._device)
        return probs

    def get_params(self) -> dict:
        return {
            "d_model": self.d_model,
            "nhead": self.nhead,
            "num_layers": self.num_layers,
            "dim_feedforward": self.dim_feedforward,
            "dropout": self.dropout,
            "epochs": self.epochs,
            "learning_rate": self.lr,
        }