Skip to content

Transformers

formed.integrations.transformers.analyzers

Text analyzers using pretrained transformers tokenizers.

This module provides text analysis tools that leverage pretrained tokenizers from the Hugging Face transformers library to tokenize text into surface forms.

Available Classes
  • PretrainedAnalyzer: Analyzer using pretrained transformer tokenizers (the PretrainedTransformerAnalyzer dataclass documented below)

Examples:

>>> from formed.integrations.transformers.analyzers import PretrainedAnalyzer
>>>
>>> # Initialize with model name
>>> analyzer = PretrainedAnalyzer("bert-base-uncased")
>>> result = analyzer("Hello world!")
>>> print(result.surfaces)
['hello', 'world', '!']

PretrainedTransformerAnalyzer dataclass

PretrainedTransformerAnalyzer(tokenizer)

Text analyzer using pretrained transformer tokenizers.

This analyzer uses tokenizers from the Hugging Face transformers library to split text into tokens (surface forms). It provides a simple interface for text tokenization that is compatible with the formed ML pipeline.

PARAMETER DESCRIPTION
tokenizer

Either a tokenizer name/path string or a PreTrainedTokenizerBase instance. If a string, the tokenizer will be loaded using AutoTokenizer.

TYPE: str | PathLike | PreTrainedTokenizerBase

Examples:

>>> # Initialize with model name
>>> analyzer = PretrainedAnalyzer("bert-base-uncased")
>>> result = analyzer("Hello, world!")
>>> print(result.surfaces)
['hello', ',', 'world', '!']
>>>
>>> # Initialize with tokenizer instance
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("roberta-base")
>>> analyzer = PretrainedAnalyzer(tokenizer)
>>> result = analyzer("Machine learning is great!")
>>> print(result.surfaces)
['Machine', 'Ġlearning', 'Ġis', 'Ġgreat', '!']
Note

Tokenizers are cached using LRU cache by the load_pretrained_tokenizer utility. The returned AnalyzedText only contains surface forms; other fields like postags are None.

tokenizer instance-attribute

tokenizer

formed.integrations.transformers.training

MlflowTrainerCallback

MlflowTrainerCallback()

Bases: TrainerCallback

Source code in src/formed/integrations/transformers/training.py
16
17
18
19
def __init__(self) -> None:
    """Initialize the callback with no MLflow logger attached.

    The logger is resolved lazily in ``on_train_begin`` so the callback
    can be constructed outside of an active workflow context.
    """
    # Local import: keeps the mlflow integration optional at module-import time.
    from formed.integrations.mlflow.workflow import MlflowLogger

    self._mlflow_logger: MlflowLogger | None = None

on_train_begin

on_train_begin(args, state, control, **kwargs)
Source code in src/formed/integrations/transformers/training.py
21
22
23
24
25
26
27
28
29
30
31
32
33
def on_train_begin(
    self,
    args: TrainingArguments,
    state: TrainerState,
    control: TrainerControl,
    **kwargs: Any,
) -> None:
    """Resolve the MLflow logger from the current workflow context.

    Warns through the step logger when no MLflow logger is available;
    subsequent ``on_log`` calls then return without logging.
    """
    from formed.integrations.mlflow.workflow import use_mlflow_logger

    step_logger = use_step_logger(__name__)
    mlflow_logger = use_mlflow_logger()
    self._mlflow_logger = mlflow_logger
    if mlflow_logger is None:
        step_logger.warning("MLflow logger is not available. Skipping logging.")

on_log

on_log(args, state, control, logs, model=None, **kwargs)
Source code in src/formed/integrations/transformers/training.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
def on_log(  # type: ignore[override]
    self,
    args: TrainingArguments,
    state: TrainerState,
    control: TrainerControl,
    logs: Mapping[str, Any],
    model: torch.nn.Module | None = None,
    **kwargs: Any,
) -> None:
    """Forward numeric log entries to MLflow as metrics.

    Runs only on the world-zero process and only when a logger was
    resolved in ``on_train_begin``. Entries that are neither numbers nor
    single-element tensors are skipped with a warning.
    """
    mlflow_logger = self._mlflow_logger
    if mlflow_logger is None:
        return

    step_logger = use_step_logger(__name__)

    if not state.is_world_process_zero:
        return

    for key, value in logs.items():
        metric: int | float
        if isinstance(value, (int, float)):
            metric = value
        elif isinstance(value, torch.Tensor) and value.numel() == 1:
            # Unwrap a scalar tensor into a plain Python number.
            metric = value.item()
        else:
            step_logger.warning(
                f'Trainer is attempting to log a value of "{value}" of type {type(value)} for key "{key}" as a metric. '
                "MLflow's log_metric() only accepts float and int types so we dropped this attribute."
            )
            continue

        mlflow_logger.log_metric(key, metric, step=state.global_step)

formed.integrations.transformers.utils

load_pretrained_transformer cached

load_pretrained_transformer(
    model_name_or_path,
    auto_class=None,
    submodule=None,
    **kwargs,
)
Source code in src/formed/integrations/transformers/utils.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
@lru_cache(maxsize=8)
def load_pretrained_transformer(
    model_name_or_path: str | PathLike,
    auto_class: str | type[_BaseAutoModelClass] | None = None,
    submodule: str | None = None,
    **kwargs,
) -> PreTrainedModel:
    """Load (and LRU-cache) a pretrained transformer model.

    Args:
        model_name_or_path: Model identifier or path; resolved through
            ``minato.cached_path`` when possible.
        auto_class: Auto model class used for loading. May be a
            ``"module:ClassName"`` string; defaults to ``AutoModel``.
        submodule: Optional attribute path extracted from the loaded
            model via ``xgetattr``.
        **kwargs: Extra arguments forwarded to ``from_pretrained``.

    Returns:
        The loaded model (or the extracted submodule).
    """
    if isinstance(auto_class, str):
        assert ":" in auto_class, "auto_class string must be in 'module:ClassName' format"
        module_name, _, class_name = auto_class.rpartition(":")
        auto_class = getattr(importlib.import_module(module_name), class_name)
    elif auto_class is None:
        auto_class = AutoModel

    assert isinstance(auto_class, type) and issubclass(auto_class, _BaseAutoModelClass), (
        "auto_class must be a subclass of transformers._BaseAutoModelClass"
    )

    # Resolve remote/cacheable paths; fall back to the raw value when
    # minato cannot locate the resource.
    with suppress(FileNotFoundError):
        model_name_or_path = minato.cached_path(model_name_or_path)

    model = auto_class.from_pretrained(str(model_name_or_path), **kwargs)
    return xgetattr(model, submodule) if submodule else model

load_pretrained_tokenizer cached

load_pretrained_tokenizer(model_name_or_path, **kwargs)
Source code in src/formed/integrations/transformers/utils.py
41
42
43
44
45
46
47
48
@lru_cache(maxsize=8)
def load_pretrained_tokenizer(
    model_name_or_path: str | PathLike,
    **kwargs,
) -> PreTrainedTokenizerBase:
    """Load (and LRU-cache) a pretrained tokenizer via ``AutoTokenizer``.

    Args:
        model_name_or_path: Tokenizer identifier or path; resolved
            through ``minato.cached_path`` when possible.
        **kwargs: Extra arguments forwarded to ``from_pretrained``.

    Returns:
        The loaded tokenizer.
    """
    try:
        resolved = minato.cached_path(model_name_or_path)
    except FileNotFoundError:
        # Not resolvable by minato; let transformers handle the raw value.
        resolved = model_name_or_path
    return AutoTokenizer.from_pretrained(str(resolved), **kwargs)

formed.integrations.transformers.workflow

Workflow steps for Hugging Face Transformers integration.

This module provides workflow steps for loading, tokenizing, training, and converting transformer models using the Hugging Face Transformers library.

Available Steps
  • transformers::tokenize: Tokenize a dataset using a pre-trained tokenizer.
  • transformers::load_model: Load a pre-trained transformer model.
  • transformers::load_tokenizer: Load a pre-trained tokenizer.
  • transformers::train_model: Train a transformer model using the Hugging Face Trainer.
  • transformers::convert_tokenizer: Convert a transformer tokenizer to a formed Tokenizer (requires ml integration).

DataCollator module-attribute

DataCollator = Callable

PretrainedModelT module-attribute

PretrainedModelT = TypeVar(
    "PretrainedModelT", bound=PreTrainedModel
)

TransformersPretrainedModelFormat

Bases: Generic[PretrainedModelT], Format[PretrainedModelT]

identifier property

identifier

Get the unique identifier for this format.

RETURNS DESCRIPTION
str

Format identifier string.

write

write(artifact, directory)
Source code in src/formed/integrations/transformers/workflow.py
47
48
49
50
51
52
53
54
def write(self, artifact: PretrainedModelT, directory: Path) -> None:
    """Save the model weights plus a metadata record of its class.

    The recorded module/class pair lets ``read`` re-import the exact
    model class later.
    """
    artifact.save_pretrained(str(directory / "model"))
    record = {
        "module": artifact.__class__.__module__,
        "class": artifact.__class__.__name__,
    }
    (directory / "metadata.json").write_text(json.dumps(record, ensure_ascii=False))

read

read(directory)
Source code in src/formed/integrations/transformers/workflow.py
56
57
58
59
60
61
62
63
64
65
66
def read(self, directory: Path) -> PretrainedModelT:
    """Reconstruct the model previously saved by ``write``.

    Raises:
        ValueError: If the recorded class is not a ``PreTrainedModel``
            subclass.
    """
    record = json.loads((directory / "metadata.json").read_text())
    class_name = record["class"]
    model_class = getattr(importlib.import_module(record["module"]), class_name)
    if not issubclass(model_class, PreTrainedModel):
        raise ValueError(f"Class {class_name} is not a subclass of PreTrainedModel")
    loaded = model_class.from_pretrained(str(directory / "model"))
    return cast(PretrainedModelT, loaded)

is_default_of classmethod

is_default_of(obj)

Check if this format is the default for the given object type.

PARAMETER DESCRIPTION
obj

Object to check.

TYPE: Any

RETURNS DESCRIPTION
bool

True if this format should be used by default for this type.

Source code in src/formed/workflow/format.py
101
102
103
104
105
106
107
108
109
110
111
112
@classmethod
def is_default_of(cls, obj: Any) -> bool:
    """Report whether this format is the default choice for ``obj``.

    Args:
        obj: Object to check.

    Returns:
        True if this format should be used by default for this type;
        this base implementation always answers False.

    """
    # Base formats never volunteer as the default.
    return False

tokenize_dataset

tokenize_dataset(
    dataset,
    tokenizer,
    text_column="text",
    padding=False,
    truncation=False,
    return_special_tokens_mask=False,
    max_length=None,
)

Tokenize a dataset using a pre-trained tokenizer.

This step applies tokenization to a text column in the dataset, removing the original text column and adding tokenized features.

PARAMETER DESCRIPTION
dataset

Dataset or DatasetDict to tokenize.

TYPE: Dataset | DatasetDict

tokenizer

Tokenizer identifier, path, or instance.

TYPE: str | PathLike | PreTrainedTokenizerBase

text_column

Name of the text column to tokenize.

TYPE: str DEFAULT: 'text'

padding

Padding strategy.

TYPE: bool | Literal['max_length', 'longest', 'do_not_pad'] DEFAULT: False

truncation

Truncation strategy.

TYPE: bool | Literal['only_first', 'only_second', 'longest_first', 'do_not_truncate'] DEFAULT: False

return_special_tokens_mask

Whether to return special tokens mask.

TYPE: bool DEFAULT: False

max_length

Maximum sequence length.

TYPE: int | None DEFAULT: None

RETURNS DESCRIPTION
Dataset | DatasetDict

Tokenized dataset with the text column removed.

Source code in src/formed/integrations/transformers/workflow.py
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
@step("transformers::tokenize", format=DatasetFormat())
def tokenize_dataset(
    dataset: datasets.Dataset | datasets.DatasetDict,
    tokenizer: str | PathLike | PreTrainedTokenizerBase,
    text_column: str = "text",
    padding: bool | Literal["max_length", "longest", "do_not_pad"] = False,
    truncation: bool | Literal["only_first", "only_second", "longest_first", "do_not_truncate"] = False,
    return_special_tokens_mask: bool = False,
    max_length: int | None = None,
) -> datasets.Dataset | datasets.DatasetDict:
    """Tokenize a dataset using a pre-trained tokenizer.

    This step applies tokenization to a text column in the dataset,
    removing the original text column and adding tokenized features.

    Args:
        dataset: Dataset or DatasetDict to tokenize.
        tokenizer: Tokenizer identifier, path, or instance.
        text_column: Name of the text column to tokenize.
        padding: Padding strategy.
        truncation: Truncation strategy.
        return_special_tokens_mask: Whether to return special tokens mask.
        max_length: Maximum sequence length.

    Returns:
        Tokenized dataset with the text column removed.
    """
    if not isinstance(tokenizer, PreTrainedTokenizerBase):
        tokenizer = load_pretrained_tokenizer(tokenizer)

    def tokenize_function(examples: Mapping[str, Any]) -> Any:
        return tokenizer(
            examples[text_column],
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_special_tokens_mask=return_special_tokens_mask,
        )

    return dataset.map(
        tokenize_function,
        batched=True,
        remove_columns=[text_column],
    )

load_pretrained_model

load_pretrained_model(
    model_name_or_path,
    auto_class=AutoModel,
    submodule=None,
    **kwargs,
)

Load a pre-trained transformer model.

PARAMETER DESCRIPTION
model_name_or_path

Model identifier or path to model directory.

TYPE: str | PathLike

auto_class

Auto model class to use for loading (name or class).

TYPE: str | type[_BaseAutoModelClass] DEFAULT: AutoModel

submodule

Optional submodule to extract from the model.

TYPE: str | None DEFAULT: None

**kwargs

Additional arguments to pass to the model constructor.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
PreTrainedModel

Loaded pre-trained transformer model.

Source code in src/formed/integrations/transformers/workflow.py
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
@step("transformers::load_model", cacheable=False)
def load_pretrained_model(
    model_name_or_path: str | PathLike,
    auto_class: str | type[_BaseAutoModelClass] = transformers.AutoModel,
    submodule: str | None = None,
    **kwargs: Any,
) -> transformers.PreTrainedModel:
    """Load a pre-trained transformer model.

    Args:
        model_name_or_path: Model identifier or path to model directory.
        auto_class: Auto model class to use for loading (name or class).
        submodule: Optional submodule to extract from the model.
        **kwargs: Additional arguments to pass to the model constructor.

    Returns:
        Loaded pre-trained transformer model.
    """
    if isinstance(auto_class, str):
        auto_class = getattr(transformers, auto_class)
    assert isinstance(auto_class, type) and issubclass(auto_class, _BaseAutoModelClass)
    return load_pretrained_transformer.__wrapped__(
        model_name_or_path=model_name_or_path,
        auto_class=auto_class,
        submodule=submodule,
        **kwargs,
    )

load_pretrained_tokenizer_step

load_pretrained_tokenizer_step(
    pretrained_model_name_or_path, **kwargs
)

Load a pre-trained tokenizer.

PARAMETER DESCRIPTION
pretrained_model_name_or_path

Model identifier or path to model directory.

TYPE: str | PathLike

**kwargs

Additional arguments to pass to the tokenizer constructor.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
PreTrainedTokenizerBase

Loaded pre-trained tokenizer.

Source code in src/formed/integrations/transformers/workflow.py
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
@step("transformers::load_tokenizer", cacheable=False)
def load_pretrained_tokenizer_step(
    pretrained_model_name_or_path: str | PathLike,
    **kwargs: Any,
) -> PreTrainedTokenizerBase:
    """Load a pre-trained tokenizer.

    Args:
        pretrained_model_name_or_path: Model identifier or path to model directory.
        **kwargs: Additional arguments to pass to the tokenizer constructor.

    Returns:
        Loaded pre-trained tokenizer.
    """
    return load_pretrained_tokenizer(pretrained_model_name_or_path, **kwargs)

train_transformer_model

train_transformer_model(
    model,
    args,
    data_collator=None,
    dataset=None,
    processing_class=None,
    model_init=None,
    compute_loss_func=None,
    compute_metrics=None,
    callbacks=None,
    optimizers=(None, None),
    optimizer_cls_and_kwargs=None,
    preprocess_logits_for_metrics=None,
    train_dataset_key="train",
    eval_dataset_key="validation",
)

Train a transformer model using the Hugging Face Trainer.

This step trains a transformer model on the provided datasets using the Hugging Face Trainer API.

PARAMETER DESCRIPTION
model

Pre-trained model to train.

TYPE: PreTrainedModel

args

Training arguments configuration.

TYPE: Lazy[TrainingArguments]

data_collator

Optional data collator for batching.

TYPE: DataCollator | None DEFAULT: None

dataset

Training/validation datasets.

TYPE: None | (Dataset | DatasetDict | Mapping[str, Dataset | DatasetDict]) DEFAULT: None

processing_class

Optional processing class (tokenizer, processor, etc.).

TYPE: None | (PreTrainedTokenizerBase | BaseImageProcessor | FeatureExtractionMixin | ProcessorMixin) DEFAULT: None

model_init

Optional model initialization function.

TYPE: Callable[[], PreTrainedModel] | None DEFAULT: None

compute_loss_func

Optional custom loss computation function.

TYPE: Callable | None DEFAULT: None

compute_metrics

Optional metrics computation function.

TYPE: Callable[[EvalPrediction], dict] | None DEFAULT: None

callbacks

Optional training callbacks.

TYPE: list[TrainerCallback] | None DEFAULT: None

optimizers

Optional optimizer and learning rate scheduler.

TYPE: tuple[Lazy[Optimizer] | None, Lazy[LambdaLR] | None] DEFAULT: (None, None)

optimizer_cls_and_kwargs

Optional optimizer class and keyword arguments.

TYPE: tuple[type[Optimizer], dict[str, Any]] | None DEFAULT: None

preprocess_logits_for_metrics

Optional logits preprocessing function.

TYPE: Callable[[Tensor, Tensor], Tensor] | None DEFAULT: None

train_dataset_key

Key for training dataset split.

TYPE: str DEFAULT: 'train'

eval_dataset_key

Key for evaluation dataset split.

TYPE: str DEFAULT: 'validation'

RETURNS DESCRIPTION
PreTrainedModel

Trained transformer model.

Source code in src/formed/integrations/transformers/workflow.py
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
@step("transformers::train_model", format=TransformersPretrainedModelFormat())
def train_transformer_model(
    model: PreTrainedModel,
    args: Lazy[TrainingArguments],
    data_collator: DataCollator | None = None,  # pyright: ignore[reportInvalidTypeForm]
    dataset: None
    | (datasets.Dataset | datasets.DatasetDict | Mapping[str, datasets.Dataset | datasets.DatasetDict]) = None,
    processing_class: None
    | (PreTrainedTokenizerBase | BaseImageProcessor | FeatureExtractionMixin | ProcessorMixin) = None,
    model_init: Callable[[], PreTrainedModel] | None = None,
    compute_loss_func: Callable | None = None,
    compute_metrics: Callable[[EvalPrediction], dict] | None = None,
    callbacks: list[TrainerCallback] | None = None,
    optimizers: tuple[
        Lazy[torch.optim.Optimizer] | None,
        Lazy[torch.optim.lr_scheduler.LambdaLR] | None,
    ] = (None, None),
    optimizer_cls_and_kwargs: tuple[type[torch.optim.Optimizer], dict[str, Any]] | None = None,
    preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
    train_dataset_key: str = "train",
    eval_dataset_key: str = "validation",
) -> PreTrainedModel:
    """Train a transformer model using the Hugging Face Trainer.

    This step trains a transformer model on the provided datasets using
    the Hugging Face Trainer API.

    Args:
        model: Pre-trained model to train.
        args: Training arguments configuration.
        data_collator: Optional data collator for batching.
        dataset: Training/validation datasets.
        processing_class: Optional processing class (tokenizer, processor, etc.).
        model_init: Optional model initialization function.
        compute_loss_func: Optional custom loss computation function.
        compute_metrics: Optional metrics computation function.
        callbacks: Optional training callbacks.
        optimizers: Optional optimizer and learning rate scheduler.
        optimizer_cls_and_kwargs: Optional optimizer class and keyword arguments.
        preprocess_logits_for_metrics: Optional logits preprocessing function.
        train_dataset_key: Key for training dataset split.
        eval_dataset_key: Key for evaluation dataset split.

    Returns:
        Trained transformer model.
    """
    workdir = use_step_workdir()

    args_ = args.construct(output_dir=str(workdir))

    train_dataset: datasets.Dataset | datasets.DatasetDict | None = None
    eval_dataset: datasets.Dataset | datasets.DatasetDict | None = None
    if isinstance(dataset, datasets.Dataset):
        train_dataset = dataset
        eval_dataset = None
    else:
        train_dataset = dataset.get(train_dataset_key) if dataset and args_.do_train else None
        eval_dataset = dataset.get(eval_dataset_key) if dataset and args_.do_eval else None

    lazy_optimizer, lazy_lr_scheduler = optimizers
    optimizer = lazy_optimizer.construct(params=model.parameters()) if lazy_optimizer else None
    lr_scheduler = lazy_lr_scheduler.construct(optimizer=optimizer) if lazy_lr_scheduler else None

    trainer = transformers.Trainer(
        model=model,
        args=args_,
        data_collator=data_collator,  # pyright: ignore[reportArgumentType]
        train_dataset=train_dataset,  # pyright: ignore[reportArgumentType]
        eval_dataset=eval_dataset,  # pyright: ignore[reportArgumentType]
        processing_class=processing_class,
        model_init=model_init,
        compute_loss_func=compute_loss_func,
        compute_metrics=compute_metrics,
        callbacks=callbacks,
        optimizers=optimizer_cls_and_kwargs or (optimizer, lr_scheduler),  # type: ignore[reportArgumentType]
        preprocess_logits_for_metrics=preprocess_logits_for_metrics,
    )

    trainer.train()

    return model

convert_tokenizer

convert_tokenizer(
    tokenizer,
    pad_token=VALUE,
    unk_token=VALUE,
    bos_token=VALUE,
    eos_token=VALUE,
    freeze=True,
    accessor=None,
    characters=None,
    text_vector=None,
    token_vectors=None,
)

Convert a transformer tokenizer to a formed Tokenizer.

This step converts a Hugging Face tokenizer into a formed Tokenizer with specified special tokens.

PARAMETER DESCRIPTION
tokenizer

Tokenizer identifier, path, or instance.

TYPE: str | PathLike | PreTrainedTokenizerBase

pad_token

Padding token (uses tokenizer default if not specified).

TYPE: str | None | NotSpecified DEFAULT: VALUE

unk_token

Unknown token (uses tokenizer default if not specified).

TYPE: str | None | NotSpecified DEFAULT: VALUE

bos_token

Beginning-of-sequence token (uses tokenizer default if not specified).

TYPE: str | None | NotSpecified DEFAULT: VALUE

eos_token

End-of-sequence token (uses tokenizer default if not specified).

TYPE: str | None | NotSpecified DEFAULT: VALUE

freeze

Whether to freeze the vocabulary.

TYPE: bool DEFAULT: True

accessor

Optional accessor for token extraction.

TYPE: str | Callable | None DEFAULT: None

RETURNS DESCRIPTION
Tokenizer

Converted formed Tokenizer.

RAISES DESCRIPTION
AssertionError

If pad_token is not specified and not available in the tokenizer.

Source code in src/formed/integrations/transformers/workflow.py
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
@step("transformers::convert_tokenizer", format="json")
def convert_tokenizer(
    tokenizer: str | PathLike | PreTrainedTokenizerBase,
    pad_token: str | None | NotSpecified = NotSpecified.VALUE,
    unk_token: str | None | NotSpecified = NotSpecified.VALUE,
    bos_token: str | None | NotSpecified = NotSpecified.VALUE,
    eos_token: str | None | NotSpecified = NotSpecified.VALUE,
    freeze: bool = True,
    accessor: str | Callable | None = None,
    characters: TokenCharactersIndexer | None = None,
    text_vector: TensorTransform | None = None,
    token_vectors: TensorSequenceTransform | None = None,
) -> Tokenizer:
    """Convert a transformer tokenizer to a formed Tokenizer.

    This step converts a Hugging Face tokenizer into a formed Tokenizer
    with specified special tokens.

    Args:
        tokenizer: Tokenizer identifier, path, or instance.
        pad_token: Padding token (uses tokenizer default if not specified).
        unk_token: Unknown token (uses tokenizer default if not specified).
        bos_token: Beginning-of-sequence token (uses tokenizer default if not specified).
        eos_token: End-of-sequence token (uses tokenizer default if not specified).
        freeze: Whether to freeze the vocabulary.
        accessor: Optional accessor for token extraction.

    Returns:
        Converted formed Tokenizer.

    Raises:
        AssertionError: If pad_token is not specified and not available in the tokenizer.
    """
    given_tokenizer = tokenizer

    if isinstance(tokenizer, (str, PathLike)):
        tokenizer = load_pretrained_tokenizer(tokenizer)

    def get_token(given: str | None | NotSpecified, default: Any) -> str | None:
        if not isinstance(given, NotSpecified):
            return given
        if isinstance(default, str):
            return default
        return None

    vocab = tokenizer.get_vocab().copy()
    pad_token = get_token(pad_token, tokenizer.pad_token)
    unk_token = get_token(unk_token, tokenizer.unk_token)
    bos_token = get_token(bos_token, tokenizer.bos_token)
    eos_token = get_token(eos_token, tokenizer.eos_token)

    assert isinstance(pad_token, str), "pad_token must be specified or available in the tokenizer"

    surface_indexer = TokenSequenceIndexer(
        vocab=vocab,
        pad_token=pad_token,
        unk_token=unk_token,
        bos_token=bos_token,
        eos_token=eos_token,
        freeze=freeze,
    )
    analyzer = PretrainedTransformerAnalyzer(given_tokenizer)
    return Tokenizer(
        surfaces=surface_indexer,
        characters=characters,
        text_vector=text_vector,
        token_vectors=token_vectors,
        analyzer=Param.cast(analyzer),
        accessor=accessor,
    )