# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
from collections.abc import Callable
from contextlib import nullcontext
from copy import copy
from typing import Any, Literal, TYPE_CHECKING
import torch
from tensordict import NestedKey, set_list_to_stack, TensorDictBase, unravel_key
from tensordict.utils import _zip_strict, is_seq_of_nested_key, logger as torchrl_logger
from torch.nn.utils.rnn import pad_sequence
from torchrl.data import Composite, Unbounded
from torchrl.data.tensor_specs import DEVICE_TYPING
from torchrl.envs import EnvBase, Transform
from torchrl.envs.transforms.ray_service import _RayServiceMetaClass, RayTransform
from torchrl.envs.transforms.transforms import Compose
from torchrl.envs.transforms.utils import _set_missing_tolerance
from torchrl.modules.llm.policies.common import LLMWrapperBase
if TYPE_CHECKING:
import transformers
class RayKLRewardTransform(RayTransform):
"""A Ray-based implementation of :class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`.
This class creates a Ray remote actor from KLRewardTransform that can be shared across multiple workers.
All method calls are delegated to the remote actor, ensuring that multiple environments can
share the same KL computation resources.
To avoid serialization issues with large models, this class supports model factories
that create models on the remote actor rather than passing full models through Ray channels.
Args:
ref_model (LLMWrapperBase, optional): the reference model. Prefer using a model factory instead
to avoid serialization issues.
Keyword Args:
ref_model_factory (Callable[[], LLMWrapperBase], optional): A callable that returns a reference model.
This allows for explicit resource control and avoids serialization issues.
num_cpus (int, optional): Number of CPUs to allocate to the Ray actor. Defaults to 1.
num_gpus (int, optional): Number of GPUs to allocate to the Ray actor. Defaults to 0.
device (torch.device, optional): Device to use on the remote Ray actor for tensor operations.
The local Ray transform will handle CPU serialization and device restoration automatically.
Defaults to None.
actor_name (str, optional): Name of the Ray actor to use. If provided, the actor will be reused if it already exists.
**kwargs: Additional keyword arguments to pass to KLRewardTransform.
Note:
When using model factories, the corresponding model argument (ref_model) should be None.
Model factories are preferred for large models to avoid serialization overhead.
Examples:
>>> # Option 1: Using model factory for explicit resource control
>>> def create_ref_model():
... return TransformersWrapper(ref_model, tokenizer=tokenizer, generate=False, return_log_probs=True)
>>> transform = RayKLRewardTransform(
... ref_model=None,
... ref_model_factory=create_ref_model,
... num_gpus=1,
... device=torch.device("cuda")
... )
>>> # Option 2: Pass model directly (Ray handles serialization)
>>> transform = RayKLRewardTransform(ref_model=ref_model, device=torch.device("cuda"))
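        >>> # Attaching the shared transform to an environment (a hedged sketch; ``env`` is
        >>> # assumed to be an existing LLM environment such as a ChatEnv instance):
        >>> # from torchrl.envs import TransformedEnv
        >>> # env = TransformedEnv(env, transform)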
"""
def __init__(
self,
ref_model: LLMWrapperBase | None = None,
*,
ref_model_factory: Callable[[], LLMWrapperBase] | None = None,
num_cpus: int | None = None,
num_gpus: int = 0,
device: DEVICE_TYPING | None = None,
actor_name: str | None = None,
**kwargs,
):
# Validate arguments: model and factory should not both be provided
if ref_model is not None and ref_model_factory is not None:
raise ValueError(
"Cannot provide both 'ref_model' and 'ref_model_factory'. Choose one."
)
if ref_model is None and ref_model_factory is None:
raise ValueError(
"Must provide exactly one of 'ref_model' or 'ref_model_factory'."
)
# Store creation parameters for actor creation
self._ref_model = ref_model
self._ref_model_factory = ref_model_factory
self._creation_kwargs = kwargs
# Store device separately for passing to remote actor
self._remote_device = device
# Default num_cpus
if num_cpus is None:
num_cpus = 1
# Call parent constructor without device (Ray transform handles CPU/device mapping)
super().__init__(
num_cpus=num_cpus,
num_gpus=num_gpus,
device=None, # Don't store device locally
actor_name=actor_name,
**kwargs,
)
def _create_actor(self, **kwargs):
"""Create the remote KLRewardTransform actor."""
# Create the remote KLRewardTransform with resource specifications
RemoteKLRewardTransform = self._ray.remote(
num_cpus=self._num_cpus, num_gpus=self._num_gpus
)(KLRewardTransform)
if self._actor_name is not None:
RemoteKLRewardTransform = RemoteKLRewardTransform.options(
name=self._actor_name
)
# Determine how to create model on the remote actor
ref_model_arg = self._ref_model
# If we have factory, we'll pass it and set model to None
creation_kwargs = self._creation_kwargs.copy()
if self._ref_model_factory is not None:
creation_kwargs["ref_model_factory"] = self._ref_model_factory
ref_model_arg = None
# Pass device to the remote actor
if self._remote_device is not None:
creation_kwargs["device"] = self._remote_device
# Create the shared actor
actor = RemoteKLRewardTransform.remote(
ref_model=ref_model_arg, **creation_kwargs
)
return actor
def __repr__(self):
"""String representation."""
try:
if hasattr(self, "_actor") and self._actor is not None:
return self._ray.get(self._actor.__repr__.remote())
else:
return "RayKLRewardTransform(actor=None)"
except Exception:
return f"RayKLRewardTransform(actor={getattr(self, '_actor', 'None')})"
class RetrieveLogProb(Transform):
"""A transform to retrieve log-probabilities from a model for KL divergence computation.
This transform computes log-probabilities from a reference model, which can then be used
to compute KL divergence with another model's log-probabilities. It's designed to work
with the :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` and :class:`~torchrl.envs.llm.transforms.kl.KLComputation` transforms.
Args:
model (LLMWrapperBase): the model to use to compute the log-probs.
Keyword Args:
log_probs_full_key (NestedKey): the key where the log-probs are stored.
If not provided, the key will be retrieved from the model's `log_probs_key` attribute
(i.e., `(model.log_probs_key, "full")`).
assistant_only (bool): whether to zero out the log-probs of the non-assistant tokens (i.e., steps of history
where the role is not `"assistant"`). Defaults to `True`.
.. note:: When `assistant_only=True`, the model must have `input_mode='history'` to properly identify
assistant tokens. For other input modes (`"text"` or `"tokens"`), set `assistant_only=False`.
This ensures users are conscious of the limitation that assistant token identification requires
structured conversation history.
tokenizer_kwargs (dict): the keyword arguments to pass to the tokenizer to be used to apply the chat template to the history when `assistant_only` is `True`.
            To control the tokenization in the model, pass the tokenizer kwargs to the model constructor.
Defaults to `{"return_assistant_tokens_mask": True, "tokenize": True, "return_dict": True, "padding": False, "add_generation_prompt": False}`.
        tokenizer (transformers.AutoTokenizer): the tokenizer to be used to tokenize the input and compute the assistant mask. If not provided, the tokenizer will be inferred from the `model`.
detach (bool): whether to exclude the log-probs from the gradient computation. Defaults to `True`.
device (torch.device): the device to use for tensor creation. Defaults to `None`.
padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`.
Examples:
>>> from torchrl.data.llm import History
>>> from torchrl.modules.llm import TransformersWrapper
>>> from torchrl.modules.llm.policies import ChatHistory
>>> from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM
>>> from tensordict import TensorDict, set_list_to_stack
>>> import torch
>>>
>>> # Set up list to stack for History
>>> set_list_to_stack(True).set()
>>>
>>> # Create chat data
>>> chats = [
... [
... {"role": "system", "content": "You are a helpful assistant."},
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing well, thank you!"},
... ],
... [
... {"role": "system", "content": "You are a helpful assistant."},
... {"role": "user", "content": "What's the weather like?"},
... {"role": "assistant", "content": "I can't check the weather for you."},
... ],
... ]
>>> history = History.from_chats(chats)
>>> print(f"Created history with shape: {history.shape}")
Created history with shape: torch.Size([2, 3])
>>>
>>> # Setup tokenizer and model
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
>>> tokenizer.pad_token = tokenizer.eos_token
>>> model = OPTForCausalLM(OPTConfig()).eval()
>>>
>>> # Create reference model
>>> ref_model = TransformersWrapper(
... model,
... tokenizer=tokenizer,
... input_mode="history",
... generate=False,
... return_log_probs=True,
... pad_output=True,
... )
>>>
>>> # Create the RetrieveLogProb transform
>>> transform = RetrieveLogProb(
... ref_model,
... assistant_only=True,
... tokenizer=tokenizer,
... )
>>>
>>> # Prepare data using ChatHistory
>>> chat_history = ChatHistory(full=history)
>>> data = TensorDict(history=chat_history, batch_size=(2,))
>>>
>>> # Apply the transform to get reference log probabilities
>>> result = transform(data)
>>> log_probs_key = (ref_model.log_probs_key, "full")
>>> ref_log_probs = result.get(log_probs_key)
>>> print(f"Log-probs shape: {ref_log_probs.shape}")
Log-probs shape: torch.Size([2, 26])
.. note::
By default, the log-probabilities are stored as a list of tensors (one per sample, with variable length).
Use `as_padded_tensor=True` in `.get()` to obtain a batchable tensor (with padding).
The reference log probabilities are computed only for assistant tokens when `assistant_only=True`.
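        For instance, the padded form mentioned above can be obtained with
        ``result.get(log_probs_key, as_padded_tensor=True)`` (a hedged sketch; with
        ``pad_output=True`` as in the example above, the entry is already a padded tensor).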
**Input Mode Compatibility:**
- When `assistant_only=True` (default), the model must have `input_mode='history'` to properly identify assistant tokens.
- When `assistant_only=False`, the transform works with any input mode (`"history"`, `"text"`, or `"tokens"`).
- This design ensures users are conscious of the limitation that assistant token identification requires structured conversation history.
.. seealso::
:class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`: A higher-level transform that combines two `RetrieveLogProb` instances with `KLComputation`.
:class:`~torchrl.envs.llm.transforms.kl.KLComputation`: A transform that computes KL divergence between two log-prob tensors.
:class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead).
"""
def __init__(
self,
model: LLMWrapperBase,
*,
log_probs_full_key: NestedKey | None = None,
assistant_only: bool = True,
tokenizer_kwargs: dict | None = None,
detach: bool = True,
device: torch.device | None = None,
tokenizer: transformers.AutoTokenizer | None = None,
padding_side: str = "left",
):
# Set up keys
if log_probs_full_key is None:
log_probs_full_key = (model.log_probs_key, "full")
elif (
not isinstance(log_probs_full_key, tuple)
or log_probs_full_key[-1] != "full"
):
warnings.warn(
f"The log_probs_full_key {log_probs_full_key} is not a tuple or does not end with 'full'. "
"This may cause issues with the KL computation. "
"Please use a tuple with the log_probs_key and 'full' as the last element."
)
self.log_probs_full_key = log_probs_full_key
# Set up input/output keys
in_keys = list(model.in_keys)
out_keys = [self.log_probs_full_key]
super().__init__(in_keys=in_keys, out_keys=out_keys)
# Store model and configuration
self.model = model
self.assistant_only = assistant_only
self.detach = detach
self.device = device
self.tokenizer = tokenizer
self.padding_side = padding_side
# Set up tokenizer kwargs
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
tokenizer_kwargs.setdefault("return_assistant_tokens_mask", True)
tokenizer_kwargs.setdefault("tokenize", True)
tokenizer_kwargs.setdefault("return_dict", True)
tokenizer_kwargs.setdefault("padding", False)
tokenizer_kwargs.setdefault("add_generation_prompt", False)
self.tokenizer_kwargs = tokenizer_kwargs
# Validate model configuration (after setting assistant_only)
self._validate_model_config(model)
def _validate_model_config(self, model: LLMWrapperBase):
"""Validate model configuration."""
if not getattr(model, "return_log_probs", True):
raise ValueError(
"The model must have `return_log_probs=True` to use the `RetrieveLogProb` transform."
)
if getattr(model, "generate", True):
raise ValueError(
"The model must have `generate=False` to use the `RetrieveLogProb` transform."
)
# Check input mode compatibility with assistant_only
input_mode = getattr(model, "input_mode", "history")
if self.assistant_only and input_mode != "history":
raise ValueError(
f"The model must have `input_mode='history'` when `assistant_only=True`. "
f"Current input_mode is '{input_mode}'. "
f"To use input_mode '{input_mode}', set `assistant_only=False`."
)
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
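        # If the input carries a "next" sub-tensordict, write the log-probs there
        # (step semantics); otherwise operate on, and return, the tensordict itself.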
next_td = tensordict.get("next")
next_is_none = False
if next_td is None:
next_is_none = True
next_td = tensordict
output = self._step(tensordict, next_td)
if next_is_none:
return output
return tensordict.set("next", output)
def _mask_assistant_tokens(
self, td: TensorDictBase, lp_key: NestedKey
) -> torch.Tensor:
"""Mask log-probs to only include assistant tokens.
Args:
td: TensorDict containing the data
lp_key: Key for log-probs in the TensorDict
Returns:
Masked log-probs tensor
"""
with torch.device(self.device) if self.device is not None else nullcontext():
# Get assistant mask
assistant_masks = td.get(("masks", "all_assistant_mask"), as_list=True) # type: ignore[misc]
log_probs = td.get(lp_key, as_list=True) # type: ignore[misc]
log_probs = [
torch.masked_fill(lp, ~mask, 0.0)
for lp, mask in _zip_strict(log_probs, assistant_masks)
]
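            # Re-assemble per-sample log-probs: a padded dense tensor if the model pads its
            # outputs, otherwise a nested tensor using the model's layout.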
if self.model.pad_output:
log_probs = pad_sequence(
log_probs,
batch_first=True,
padding_value=0.0,
padding_side=self.padding_side,
)
else:
log_probs = torch.nested.as_nested_tensor(
log_probs, layout=self.model.layout
)
return log_probs
@set_list_to_stack(True)
def _step(
self, tensordict: TensorDictBase, next_tensordict: TensorDictBase
) -> TensorDictBase:
# Compute log-probs using the model
# Use tensordict since we want to process the "full" entry
ref_td = self.model(tensordict.copy())
tmp_log_probs_key = (self.model.log_probs_key, "full")
# Apply assistant masking if requested
if self.assistant_only:
log_probs = self._mask_assistant_tokens(ref_td, tmp_log_probs_key)
ref_td.set(tmp_log_probs_key, log_probs)
# Rename and store the log-probs
if tmp_log_probs_key != self.log_probs_full_key:
ref_td.rename_key_(tmp_log_probs_key, self.log_probs_full_key)
next_tensordict.update(ref_td, keys_to_update=(self.log_probs_full_key,))
return next_tensordict
class RayRetrieveKL(RayTransform):
"""A Ray-based implementation of :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`.
This class creates a Ray remote actor from RetrieveKL that can be shared across multiple workers.
All method calls are delegated to the remote actor, ensuring that multiple environments can
share the same KL computation resources.
To avoid serialization issues with large models, this class supports model factories
that create models on the remote actor rather than passing full models through Ray channels.
Args:
gen_model (LLMWrapperBase | Literal["from_collector"]): the generation model, or "from_collector" for lazy initialization.
Prefer using a model factory instead to avoid serialization issues.
ref_model (LLMWrapperBase | None): the reference model. Prefer using a model factory instead
to avoid serialization issues.
Keyword Args:
gen_model_factory (Callable[[], LLMWrapperBase], optional): A callable that returns a generation model.
This allows for explicit resource control and avoids serialization issues.
ref_model_factory (Callable[[], LLMWrapperBase], optional): A callable that returns a reference model.
This allows for explicit resource control and avoids serialization issues.
num_cpus (int, optional): Number of CPUs to allocate to the Ray actor. Defaults to 1.
num_gpus (int, optional): Number of GPUs to allocate to the Ray actor. Defaults to 0.
device (torch.device, optional): Device to use on the remote Ray actor for tensor operations.
The local Ray transform will handle CPU serialization and device restoration automatically.
Defaults to None.
actor_name (str, optional): Name of the Ray actor to use. If provided, the actor will be reused if it already exists.
**kwargs: Additional keyword arguments to pass to RetrieveKL.
Note:
When using model factories, the corresponding model arguments (gen_model, ref_model) should be None.
Model factories are preferred for large models to avoid serialization overhead.
Examples:
>>> # Option 1: Using model factories for explicit resource control
>>> def create_gen_model():
... return TransformersWrapper(model, tokenizer=tokenizer, generate=False, return_log_probs=True)
>>> def create_ref_model():
... return TransformersWrapper(ref_model, tokenizer=tokenizer, generate=False, return_log_probs=True)
>>> transform = RayRetrieveKL(
... gen_model=None, ref_model=None,
... gen_model_factory=create_gen_model,
... ref_model_factory=create_ref_model,
... num_gpus=1,
... device=torch.device("cuda")
... )
>>> # Option 2: Pass models directly (Ray handles serialization)
>>> transform = RayRetrieveKL(gen_model=gen_model, ref_model=ref_model, device=torch.device("cuda"))
"""
def __init__(
self,
gen_model: LLMWrapperBase | Literal["from_collector"] | None = "from_collector",
ref_model: LLMWrapperBase | None = None,
*,
gen_model_factory: Callable[[], LLMWrapperBase] | None = None,
ref_model_factory: Callable[[], LLMWrapperBase] | None = None,
num_cpus: int | None = None,
num_gpus: int = 0,
device: DEVICE_TYPING | None = None,
actor_name: str | None = None,
**kwargs,
):
# Validate arguments: models and factories should not both be provided
if gen_model is not None and gen_model_factory is not None:
raise ValueError(
"Cannot provide both 'gen_model' and 'gen_model_factory'. Choose one."
)
if ref_model is not None and ref_model_factory is not None:
raise ValueError(
"Cannot provide both 'ref_model' and 'ref_model_factory'. Choose one."
)
# Store creation parameters for actor creation
self._gen_model = gen_model
self._ref_model = ref_model
self._gen_model_factory = gen_model_factory
self._ref_model_factory = ref_model_factory
self._creation_kwargs = kwargs
# Store device separately for passing to remote actor
self._remote_device = device
# Default num_cpus
if num_cpus is None:
num_cpus = 1
# Call parent constructor without device (Ray transform handles CPU/device mapping)
super().__init__(
num_cpus=num_cpus,
num_gpus=num_gpus,
device=None, # Don't store device locally
actor_name=actor_name,
**kwargs,
)
def _create_actor(self, **kwargs):
"""Create the remote RetrieveKL actor."""
# Create the remote RetrieveKL with resource specifications
RemoteRetrieveKL = self._ray.remote(
num_cpus=self._num_cpus, num_gpus=self._num_gpus
)(RetrieveKL)
if self._actor_name is not None:
RemoteRetrieveKL = RemoteRetrieveKL.options(name=self._actor_name)
# Determine how to create models on the remote actor
gen_model_arg = self._gen_model
ref_model_arg = self._ref_model
# If we have factories, we'll pass them and set models to None
creation_kwargs = self._creation_kwargs.copy()
if self._gen_model_factory is not None:
creation_kwargs["gen_model_factory"] = self._gen_model_factory
gen_model_arg = None
if self._ref_model_factory is not None:
creation_kwargs["ref_model_factory"] = self._ref_model_factory
ref_model_arg = None
# Pass device to the remote actor
if self._remote_device is not None:
creation_kwargs["device"] = self._remote_device
# Create the shared actor
actor = RemoteRetrieveKL.remote(
gen_model=gen_model_arg, ref_model=ref_model_arg, **creation_kwargs
)
return actor
def __repr__(self):
"""String representation."""
try:
if hasattr(self, "_actor") and self._actor is not None:
return self._ray.get(self._actor.__repr__.remote())
else:
return "RayRetrieveKL(actor=None)"
except Exception:
return f"RayRetrieveKL(actor={getattr(self, '_actor', 'None')})"
class RetrieveKL(Compose, metaclass=_RayServiceMetaClass):
"""A transform to retrieve the KL divergence between two models' log-probabilities.
This transform combines two :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb` instances
with a :class:`~torchrl.envs.llm.transforms.kl.KLComputation` to compute KL divergence
between a generation model and a reference model.
.. note::
Both gen_model and ref_model must use the same pad_output value (True or False), otherwise KL computation will fail.
Args:
gen_model (LLMWrapperBase): the generation model, wrapped in such a way that it does not generate but computes the log-probs.
In cases where the transform is used within a :class:`~torchrl.collectors.llm.LLMCollector` run on a remote worker, the
policy may not be available ahead of time. In this case, the `gen_model` can be set to `"from_collector"` (default) to retrieve the
policy from the collector. See :meth:`~torchrl.modules.llm.policies.LLMWrapperBase.get_new_version` for more details
about generating a new version of the policy to gather the log-probs.
ref_model (LLMWrapperBase): the reference model, wrapped in such a way that it does not generate but computes the log-probs.
Keyword Args:
gen_model_factory (Callable[[], LLMWrapperBase], optional): A callable that returns a generation model.
This allows for explicit resource control and avoids serialization issues when using Ray.
ref_model_factory (Callable[[], LLMWrapperBase], optional): A callable that returns a reference model.
This allows for explicit resource control and avoids serialization issues when using Ray.
assistant_only (bool): whether to only retrieve the log-probs of the assistant tokens (i.e., steps of history
where the role is `"assistant"`). Defaults to `True`.
.. note:: When `assistant_only=True`, both models must have `input_mode='history'` to properly identify assistant tokens.
For other input modes (`"text"` or `"tokens"`), set `assistant_only=False`.
This ensures users are conscious of the limitation that assistant token identification requires structured conversation history.
        gen_log_probs_full_key (NestedKey): the key where the log-probs of the generation model are stored. Defaults to `("log_probs", "full")`.
        ref_log_probs_full_key (NestedKey): the key where the log-probs of the reference model are stored. Defaults to `("ref_log_probs", "full")`.
history_key (str): the key where the history is stored. Defaults to `"history"`.
tokenizer_kwargs (dict): the keyword arguments to pass to the tokenizer to be used to apply the chat template to the history when `assistant_only` is `True`.
            To control the tokenization in the models, pass the tokenizer kwargs to the model constructors.
Defaults to `{"return_assistant_tokens_mask": True, "tokenize": True, "return_tensors": "pt", "padding": True, "add_generation_prompt": False}`.
detach (bool): whether to exclude the log-probs from the gradient computation. Defaults to `True`.
        device (torch.device): the device to cast the tensors to. This is not the device of the specs, but the device
            onto which the tensors will be moved. This makes it possible to keep the model on a different device
            than the incoming data. When using the Ray service, this device is used on the remote actor.
Defaults to `None`.
        tokenizer (transformers.AutoTokenizer): the tokenizer to be used to tokenize the input and compute the assistant mask. If not provided, the tokenizer will be inferred from the models.
padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`.
kl_key (NestedKey): the key where the KL divergence is stored. Defaults to `"kl_penalty"`.
        add_to_reward (bool): whether to add the KL term to the reward as a penalty (the reward is decreased by `coeff * kl`). Defaults to `True`.
        coeff (float): the coefficient applied to the KL term before it is subtracted from the reward. Defaults to `1.0`.
use_ray_service (bool, optional): if ``True``, returns a :class:`RayRetrieveKL` instance instead,
which creates a Ray actor for shared KL computation across multiple environments.
Defaults to ``False``.
actor_name (str, optional): the name of the Ray actor to use. Defaults to `None`.
**kwargs: additional arguments to pass to the `RetrieveLogProb` transform.
Examples:
>>> from torchrl.data.llm import History
>>> from torchrl.modules.llm import TransformersWrapper
>>> from torchrl.modules.llm.policies import ChatHistory
>>> from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM
>>> from tensordict import TensorDict, set_list_to_stack
>>> import torch
>>>
>>> # Set up list to stack for History
>>> set_list_to_stack(True).set()
>>>
>>> # Create chat data
>>> chats = [
... [
... {"role": "system", "content": "You are a helpful assistant."},
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing well, thank you!"},
... ],
... [
... {"role": "system", "content": "You are a helpful assistant."},
... {"role": "user", "content": "What's the weather like?"},
... {"role": "assistant", "content": "I can't check the weather for you."},
... ],
... ]
>>> history = History.from_chats(chats)
>>> print(f"Created history with shape: {history.shape}")
Created history with shape: torch.Size([2, 3])
>>>
>>> # Setup tokenizer and model
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
>>> tokenizer.pad_token = tokenizer.eos_token
>>> model = OPTForCausalLM(OPTConfig()).eval()
>>>
>>> # Create generation and reference models
>>> gen_model = TransformersWrapper(
... model,
... tokenizer=tokenizer,
... input_mode="history",
... generate=False,
... return_log_probs=True,
... pad_output=True,
... log_probs_key="gen_log_probs",
... )
>>> ref_model = TransformersWrapper(
... model,
... tokenizer=tokenizer,
... input_mode="history",
... generate=False,
... return_log_probs=True,
... pad_output=True,
... log_probs_key="ref_log_probs",
... )
>>>
>>> # Create RetrieveKL transform
>>> transform = RetrieveKL(
... gen_model=gen_model,
... ref_model=ref_model,
... assistant_only=True,
... tokenizer=tokenizer,
... )
>>>
>>> # Prepare data with next tensordict using ChatHistory
>>> chat_history = ChatHistory(full=history)
>>> next_td = TensorDict(history=chat_history, batch_size=(2,))
>>> data = TensorDict(history=chat_history, next=next_td, batch_size=(2,))
>>>
>>> # Apply transform
>>> result = transform(data)
>>> kl = result["next"].get("kl_penalty")
>>> print(f"KL shape: {kl.shape}")
KL shape: torch.Size([2, 26])
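        >>> # Hedged sketch of lazy initialization inside a collector: `env` and `policy`
        >>> # are assumed to exist, and the generation model is taken from the collector's policy.
        >>> # from torchrl.envs import TransformedEnv
        >>> # transform = RetrieveKL("from_collector", ref_model=ref_model)
        >>> # env = TransformedEnv(env, transform)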
Note:
**Input Mode Compatibility:**
- When `assistant_only=True`, both models must have `input_mode='history'` to properly identify assistant tokens.
- When `assistant_only=False`, the transform works with any input mode (`"history"`, `"text"`, or `"tokens"`).
- This design ensures users are conscious of the limitation that assistant token identification requires structured conversation history.
.. seealso::
:class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb`: The base transform for retrieving log-probabilities from a single model.
:class:`~torchrl.envs.llm.transforms.kl.KLComputation`: The transform that computes KL divergence between two log-prob tensors.
:class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead).
"""
_RayServiceClass = RayRetrieveKL
def __init__(
self,
gen_model: LLMWrapperBase | Literal["from_collector"] = "from_collector",
ref_model: LLMWrapperBase | None = None,
*,
gen_model_factory: Callable[[], LLMWrapperBase] | None = None,
ref_model_factory: Callable[[], LLMWrapperBase] | None = None,
assistant_only: bool = True,
history_key: str = "history",
tokenizer_kwargs: dict[str, Any] | None = None,
detach: bool = True,
device: torch.device | None = None,
tokenizer: transformers.AutoTokenizer | None = None,
padding_side: str = "left",
gen_log_probs_full_key: NestedKey = ("log_probs", "full"),
ref_log_probs_full_key: NestedKey = ("ref_log_probs", "full"),
kl_key: NestedKey = "kl_penalty",
add_to_reward: bool = True,
coeff: float = 1.0,
use_ray_service: bool = False,
**kwargs,
):
# Handle model factories - create models if factories are provided
if gen_model_factory is not None:
if gen_model is not None and gen_model != "from_collector":
raise ValueError(
"Cannot provide both 'gen_model' and 'gen_model_factory'. Choose one."
)
gen_model = gen_model_factory()
if ref_model_factory is not None:
if ref_model is not None:
raise ValueError(
"Cannot provide both 'ref_model' and 'ref_model_factory'. Choose one."
)
ref_model = ref_model_factory()
if isinstance(gen_model, str) and gen_model == "from_collector":
# Lazy init
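            # Defer building the sub-transforms until the collector (and hence its policy)
            # is available; `_init_deferred` completes the setup on first use.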
self._initialized = False
self._init_params = {
"ref_model": ref_model,
"gen_model_factory": gen_model_factory,
"ref_model_factory": ref_model_factory,
"assistant_only": assistant_only,
"history_key": history_key,
"tokenizer_kwargs": tokenizer_kwargs,
"detach": detach,
"device": device,
"tokenizer": tokenizer,
"gen_log_probs_full_key": gen_log_probs_full_key,
"ref_log_probs_full_key": ref_log_probs_full_key,
"kl_key": kl_key,
"add_to_reward": add_to_reward,
"coeff": coeff,
"padding_side": padding_side,
**kwargs,
}
super().__init__()
return
self._initialized = True
# Check pad_output consistency if both models are provided
if hasattr(gen_model, "pad_output") and hasattr(ref_model, "pad_output"):
if gen_model.pad_output != ref_model.pad_output:
raise ValueError(
f"pad_output mismatch: gen_model.pad_output={gen_model.pad_output}, "
f"ref_model.pad_output={ref_model.pad_output}. "
"Both models must use the same padding strategy for KL computation."
)
if not getattr(gen_model, "return_log_probs", True):
raise ValueError(
"The generation model must have `return_log_probs=True` to use the `RetrieveKL` transform."
)
elif getattr(gen_model, "generate", False):
raise ValueError(
"The generation model must have `generate=False` to use the `RetrieveKL` transform."
)
if not getattr(ref_model, "return_log_probs", True):
raise ValueError(
"The reference model must have `return_log_probs=True` to use the `RetrieveKL` transform."
)
elif getattr(ref_model, "generate", False):
raise ValueError(
"The reference model must have `generate=False` to use the `RetrieveKL` transform."
)
if getattr(gen_model, "log_probs_key", "gen_log_probs") == getattr(
ref_model, "log_probs_key", "log_probs"
):
raise ValueError(
"The generation and reference models must have different `log_prob_key` values to use the `RetrieveKL` transform."
)
if gen_model is None:
raise ValueError("gen_model cannot be None when not using 'from_collector'")
if ref_model is None:
raise ValueError("ref_model cannot be None")
t1 = RetrieveLogProb(
gen_model,
log_probs_full_key=gen_log_probs_full_key,
assistant_only=assistant_only,
tokenizer_kwargs=tokenizer_kwargs,
detach=detach,
device=device,
tokenizer=tokenizer,
padding_side=padding_side,
**kwargs,
)
t2 = RetrieveLogProb(
ref_model,
log_probs_full_key=ref_log_probs_full_key,
assistant_only=assistant_only,
tokenizer_kwargs=tokenizer_kwargs,
detach=detach,
device=device,
tokenizer=tokenizer,
padding_side=padding_side,
**kwargs,
)
t3 = KLComputation(
gen_log_probs_full_key=gen_log_probs_full_key,
ref_log_probs_full_key=ref_log_probs_full_key,
kl_key=kl_key,
add_to_reward=add_to_reward,
coeff=coeff,
)
super().__init__(t1, t2, t3)
def _init_deferred(self):
torchrl_logger.info("Initializing RetrieveKL transform")
container = self.container
if container is None:
            # also log, since this error is sometimes hidden inside an AttributeError
torchrl_logger.warning(
"The container is not set. Please set the container before calling this method."
)
raise ValueError(
"The container is not set. Please set the container before calling this method."
)
container.empty_cache()
self.empty_cache()
collector = self.collector
if collector is None:
            # also log, since this error is sometimes hidden inside an AttributeError
torchrl_logger.warning(
"The collector is not set. Please set the collector before calling this method."
)
raise ValueError(
"The collector is not set. Please set the collector before calling this method."
)
ref_model = self._init_params["ref_model"]
pad_output = getattr(ref_model, "pad_output", None)
gen_log_probs_full_key = self._init_params["gen_log_probs_full_key"]
if (
not isinstance(gen_log_probs_full_key, tuple)
or gen_log_probs_full_key[-1] != "full"
):
raise ValueError(
f"The gen_log_probs_full_key {gen_log_probs_full_key} is not a tuple or does not end with 'full'. "
"This may cause issues with the KL computation. "
"Please use a tuple with the log_probs_key and 'full' as the last element."
)
log_probs_key = gen_log_probs_full_key[:-1]
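        # Build a log-prob-only copy of the collector's policy: no generation, log-probs written
        # under the derived key, and input mode/padding matching the reference model.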
gen_model = collector.policy.get_new_version(
generate=False,
return_log_probs=True,
log_probs_key=log_probs_key,
input_mode=ref_model.input_mode,
input_key=(ref_model.input_mode, "full"),
pad_output=pad_output, # Pass pad_output from ref_model
)
# Create the transforms manually instead of calling __init__
t1 = RetrieveLogProb(
gen_model,
log_probs_full_key=gen_log_probs_full_key,
assistant_only=self._init_params["assistant_only"],
tokenizer_kwargs=self._init_params["tokenizer_kwargs"],
detach=self._init_params["detach"],
device=self._init_params["device"],
tokenizer=self._init_params["tokenizer"],
padding_side=self._init_params["padding_side"],
)
ref_log_probs_full_key = self._init_params["ref_log_probs_full_key"]
if (
not isinstance(ref_log_probs_full_key, tuple)
or ref_log_probs_full_key[-1] != "full"
):
raise ValueError(
f"The ref_log_probs_full_key {ref_log_probs_full_key} is not a tuple or does not end with 'full'. "
"This may cause issues with the KL computation. "
"Please use a tuple with the log_probs_key and 'full' as the last element."
)
t2 = RetrieveLogProb(
ref_model,
log_probs_full_key=ref_log_probs_full_key,
assistant_only=self._init_params["assistant_only"],
tokenizer_kwargs=self._init_params["tokenizer_kwargs"],
detach=self._init_params["detach"],
device=self._init_params["device"],
tokenizer=self._init_params["tokenizer"],
padding_side=self._init_params["padding_side"],
)
t3 = KLComputation(
gen_log_probs_full_key=gen_log_probs_full_key,
ref_log_probs_full_key=ref_log_probs_full_key,
kl_key=self._init_params["kl_key"],
add_to_reward=self._init_params["add_to_reward"],
coeff=self._init_params["coeff"],
)
# Replace the transforms in the Compose
self.transforms.extend([t1, t2, t3])
del self._init_params
self._initialized = True
torchrl_logger.info("Successfully initialized")
def _step(
self, tensordict: TensorDictBase, next_tensordict: TensorDictBase
) -> TensorDictBase:
if not self._initialized:
self._init_deferred()
return super()._step(tensordict, next_tensordict)
def _reset(
self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase
) -> TensorDictBase:
if not self._initialized:
self._init_deferred()
return super()._reset(tensordict, tensordict_reset)
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
if not self._initialized:
self._init_deferred()
return super().forward(tensordict)
def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase:
if not self._initialized:
self._init_deferred()
return super()._inv_call(tensordict)
class KLComputation(Transform):
"""A transform to compute KL divergence between two log-prob tensors and optionally add it to the reward.
This transform computes KL divergence between generation and reference log-probabilities
and can optionally subtract it from the reward (for KL penalty). It's designed to work
with the :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb` and :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` transforms.
.. note::
Both input log-prob tensors must use the same padding strategy (pad_output) for correct KL computation.
Args:
gen_log_probs_full_key (NestedKey): the key where the generation model log-probs are stored.
Defaults to `("gen_log_probs", "full")`.
ref_log_probs_full_key (NestedKey): the key where the reference model log-probs are stored.
Defaults to `("ref_log_probs", "full")`.
kl_key (NestedKey): the key where the KL divergence is stored. Defaults to `"kl_penalty"`.
        add_to_reward (bool): whether to add the KL term to the reward as a penalty (the reward is decreased by `coeff * kl`). Defaults to `True`.
        coeff (float): the coefficient applied to the KL term before it is subtracted from the reward. Defaults to `1.0`.
padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`.
Examples:
>>> from tensordict import TensorDict
>>> import torch
>>>
>>> # Create sample log-probs
>>> gen_log_probs = torch.randn(2, 10) # 2 samples, 10 tokens each
>>> ref_log_probs = torch.randn(2, 10)
>>>
>>> # Create data with next tensordict
>>> next_td = TensorDict(
... {
... ("gen_log_probs", "full"): gen_log_probs,
... ("ref_log_probs", "full"): ref_log_probs,
... "reward": torch.randn(2, 10, 1),
... },
... batch_size=(2,)
... )
>>> data = TensorDict(next=next_td, batch_size=(2,))
>>>
>>> # Create KLComputation transform
>>> kl_transform = KLComputation(
        ...     gen_log_probs_full_key=("gen_log_probs", "full"),
        ...     ref_log_probs_full_key=("ref_log_probs", "full"),
        ...     kl_key="kl_penalty",
        ...     add_to_reward=True,
        ...     coeff=1.0,
... )
>>>
>>> # Apply transform
>>> result = kl_transform(data)
>>> kl = result["next"].get("kl_penalty")
>>> print(f"KL shape: {kl.shape}")
KL shape: torch.Size([2, 10])
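    The per-token estimate is ``kl_t = gen_log_prob_t - ref_log_prob_t``; when ``add_to_reward=True``,
    the reward is updated as ``reward <- reward - coeff * kl`` (broadcast over the trailing reward dimension).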
.. seealso::
:class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb`: The base transform for retrieving log-probabilities from a single model.
:class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`: A higher-level transform that combines two `RetrieveLogProb` instances with `KLComputation`.
:class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead).
"""
def __init__(
self,
gen_log_probs_full_key: NestedKey = ("log_probs", "full"),
ref_log_probs_full_key: NestedKey = ("ref_log_probs", "full"),
*,
kl_key: NestedKey = "kl_penalty",
add_to_reward: bool = True,
coeff: float = 1.0,
padding_side: str = "left",
):
in_keys = [gen_log_probs_full_key, ref_log_probs_full_key]
if add_to_reward:
in_keys.append("reward")
out_keys = [kl_key]
if add_to_reward:
out_keys.append("reward")
super().__init__(in_keys=in_keys, out_keys=out_keys)
self.gen_log_probs_full_key = gen_log_probs_full_key
self.ref_log_probs_full_key = ref_log_probs_full_key
self.kl_key = kl_key
self.add_to_reward = add_to_reward
self.coeff = coeff
self.padding_side = padding_side
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
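        # Write the KL (and updated reward) into the "next" sub-tensordict when present
        # (step semantics); otherwise operate on the tensordict itself.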
next_td = tensordict.get("next")
has_next_td = True
if next_td is None:
next_td = tensordict
has_next_td = False
next_td = self._step(tensordict, next_td)
if has_next_td:
return tensordict.set("next", next_td)
return next_td
def _step(
self, tensordict: TensorDictBase, next_tensordict: TensorDictBase
) -> TensorDictBase:
# Get log-probs
gen_log_probs = next_tensordict.get(self.gen_log_probs_full_key, as_list=True) # type: ignore[misc]
ref_log_probs = next_tensordict.get(self.ref_log_probs_full_key, as_list=True) # type: ignore[misc]
if gen_log_probs is None or ref_log_probs is None:
raise ValueError(
f"Log-probs not found. Expected keys: {self.gen_log_probs_key}, {self.ref_log_probs_key}"
)
        # Sanity-check that both lists have matching batch size and per-sample shapes
if len(gen_log_probs) != len(ref_log_probs):
raise ValueError(
f"Batch size mismatch: gen_log_probs has {len(gen_log_probs)} samples, ref_log_probs has {len(ref_log_probs)} samples"
)
# Check individual sequence lengths
for i, (gen_lp, ref_lp) in enumerate(_zip_strict(gen_log_probs, ref_log_probs)):
if gen_lp.shape != ref_lp.shape:
raise ValueError(
f"Sample {i} has different shapes: gen_log_probs[{i}].shape={gen_lp.shape}, ref_log_probs[{i}].shape={ref_lp.shape}"
)
# Compute KL divergence: KL(p||q) = E_p[log p - log q]
# Here gen_log_probs = log p, ref_log_probs = log q
kl = [
gen_lp - ref_lp
for gen_lp, ref_lp in _zip_strict(gen_log_probs, ref_log_probs)
]
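        # Sequences may have different lengths, so store the per-sample KL as a nested tensor.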
kl = torch.nested.as_nested_tensor(kl, layout=torch.strided)
next_tensordict.set(self.kl_key, kl)
# Add to reward if requested
if self.add_to_reward:
reward = next_tensordict.get("reward", as_list=True) # type: ignore[misc]
if reward is not None:
if isinstance(reward, list):
if reward[0].ndim != kl[0].ndim + 1:
raise ValueError(
f"The rewards have shape {reward[0].shape} but the kl has shape {kl[0].shape}. "
f"The rewards should have one more dimension than the KL."
)
reward = [
r - self.coeff * k.unsqueeze(-1)
for r, k in _zip_strict(reward, kl)
]
next_tensordict.set(
"reward",
torch.nested.as_nested_tensor(reward, layout=torch.strided),
)
else:
if reward.ndim != kl.ndim + 1:
raise ValueError(
f"The rewards have shape {reward.shape} but the kl has shape {kl.shape}. "
f"The rewards should have one more dimension than the KL."
)
reward = reward - self.coeff * kl.unsqueeze(-1)
next_tensordict.set("reward", reward)
return next_tensordict