vllm.v1.worker.gpu.states

NO_LORA_ID module-attribute

NO_LORA_ID = 0

_NP_INT64_MAX module-attribute

_NP_INT64_MAX = iinfo(int64).max

_NP_INT64_MIN module-attribute

_NP_INT64_MIN = iinfo(int64).min
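
These are the NumPy int64 limits; add_request uses them to draw a per-request random seed when SamplingParams.seed is None. A minimal sketch of the same draw:

import numpy as np

_NP_INT64_MIN = np.iinfo(np.int64).min
_NP_INT64_MAX = np.iinfo(np.int64).max

# Same call as in add_request: a uniform 64-bit seed for this request.
seed = np.random.randint(_NP_INT64_MIN, _NP_INT64_MAX)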

ExtraData dataclass

Source code in vllm/v1/worker/gpu/states.py
@dataclass
class ExtraData:
    lora_request: LoRARequest | None
    in_progress_prompt_logprobs: list[LogprobsTensors] = field(default_factory=list)
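
ExtraData holds the per-request state that does not fit into the fixed-size arrays of RequestState: the originating LoRARequest (if any) and prompt logprobs that are still in progress. A minimal construction sketch:

# A request without a LoRA adapter; the logprobs list starts empty.
extra = ExtraData(lora_request=None)
assert extra.in_progress_prompt_logprobs == []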

in_progress_prompt_logprobs class-attribute instance-attribute

in_progress_prompt_logprobs: list[LogprobsTensors] = field(
    default_factory=list
)

lora_request instance-attribute

lora_request: LoRARequest | None

__init__

__init__(
    lora_request: LoRARequest | None,
    in_progress_prompt_logprobs: list[
        LogprobsTensors
    ] = list(),
) -> None

Param

Source code in vllm/v1/worker/gpu/states.py
class Param:
    def __init__(
        self,
        size: int,
        dtype: torch.dtype,
        device: torch.device,
        pin_memory: bool,
    ):
        self.buffer = CpuGpuBuffer(
            size,
            dtype=dtype,
            device=device,
            pin_memory=pin_memory,
        )
        self.np = np.zeros_like(self.buffer.np)

    def copy_np_to_gpu(self, x: np.ndarray) -> torch.Tensor:
        n = x.shape[0]
        self.buffer.np[:n] = x
        return self.buffer.copy_to_gpu(n)
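
Param pairs a CpuGpuBuffer (the staging buffer with pinned host memory and a GPU tensor) with a separate scratch NumPy array of the same shape. Per-request values live in the scratch array; copy_np_to_gpu stages the first n gathered values into the buffer and copies only that slice to the GPU. A minimal sketch, assuming a CUDA device; the size and indices are illustrative:

import numpy as np
import torch

# One float32 slot per request slot.
temperature = Param(8, dtype=torch.float32, device=torch.device("cuda"), pin_memory=True)

# Gather the values for the three requests scheduled in this step, then
# copy only that slice to the GPU (this mirrors make_sampling_metadata).
idx_mapping_np = np.array([2, 0, 5])
gathered = temperature.np[idx_mapping_np]
gpu_values = temperature.copy_np_to_gpu(gathered)  # shape (3,) on the GPU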

buffer instance-attribute

buffer = CpuGpuBuffer(
    size, dtype=dtype, device=device, pin_memory=pin_memory
)

np instance-attribute

np = zeros_like(buffer.np)

__init__

__init__(
    size: int,
    dtype: dtype,
    device: device,
    pin_memory: bool,
)
Source code in vllm/v1/worker/gpu/states.py
def __init__(
    self,
    size: int,
    dtype: torch.dtype,
    device: torch.device,
    pin_memory: bool,
):
    self.buffer = CpuGpuBuffer(
        size,
        dtype=dtype,
        device=device,
        pin_memory=pin_memory,
    )
    self.np = np.zeros_like(self.buffer.np)

copy_np_to_gpu

copy_np_to_gpu(x: ndarray) -> Tensor
Source code in vllm/v1/worker/gpu/states.py
def copy_np_to_gpu(self, x: np.ndarray) -> torch.Tensor:
    n = x.shape[0]
    self.buffer.np[:n] = x
    return self.buffer.copy_to_gpu(n)

RequestState

Source code in vllm/v1/worker/gpu/states.py
class RequestState:
    def __init__(
        self,
        max_num_reqs: int,
        max_model_len: int,
        max_num_batched_tokens: int,
        num_speculative_steps: int,
        vocab_size: int,
        device: torch.device,
        pin_memory: bool,
    ):
        self.max_num_reqs = max_num_reqs
        self.max_model_len = max_model_len
        self.max_num_batched_tokens = max_num_batched_tokens
        self.num_speculative_steps = num_speculative_steps
        self.vocab_size = vocab_size
        self.device = device
        self.pin_memory = pin_memory

        self.req_id_to_index: dict[str, int] = {}
        self.index_to_req_id: dict[int, str] = {}
        self.free_indices = list(range(max_num_reqs))
        self.extra_data: dict[str, ExtraData] = {}

        self.prompt_len = np.zeros(self.max_num_reqs, dtype=np.int32)
        # NOTE(woosuk): This tensor can be extremely large (e.g., several GBs)
        # depending on the configured max_num_reqs and max_model_len.
        self.prefill_token_ids = UvaBuffer(
            self.max_num_reqs, self.max_model_len, dtype=torch.int32
        )
        # NOTE(woosuk): We don't use UVA for prefill_len because its GPU view
        # can be used outside of update_states and prepare_inputs.
        # Without async barrier, using UVA can cause race conditions.
        self.prefill_len = self._make_buffer(self.max_num_reqs, dtype=torch.int32)
        # Number of computed tokens.
        self.num_computed_prefill_tokens = np.zeros(self.max_num_reqs, dtype=np.int32)
        self.num_computed_tokens = torch.zeros(
            self.max_num_reqs, dtype=torch.int32, device=device
        )

        # Last sampled tokens.
        self.last_sampled_tokens = torch.zeros(
            self.max_num_reqs,
            1,
            dtype=torch.int64,
            device=device,
        )

        # Draft tokens.
        self.draft_tokens = torch.zeros(
            self.max_num_reqs,
            self.num_speculative_steps,
            dtype=torch.int64,
            device=device,
        )
        self.next_prefill_tokens = torch.zeros(
            self.max_num_reqs, dtype=torch.int32, device=device
        )

        # LoRA.
        self.lora_ids = np.zeros(self.max_num_reqs, dtype=np.int32)
        self.lora_ids.fill(NO_LORA_ID)

        # Sampling parameters.
        self.temperature = self._make_param(self.max_num_reqs, torch.float32)
        self.top_p = self._make_param(self.max_num_reqs, torch.float32)
        self.top_k = self._make_param(self.max_num_reqs, torch.int32)
        self.repetition_penalty = self._make_param(self.max_num_reqs, torch.float32)
        self.frequency_penalty = self._make_param(self.max_num_reqs, torch.float32)
        self.presence_penalty = self._make_param(self.max_num_reqs, torch.float32)
        self.seeds = self._make_param(self.max_num_reqs, torch.int64)

        self.num_logprobs = np.empty(self.max_num_reqs, dtype=np.int32)
        # -1 means no logprobs are requested.
        self.num_logprobs.fill(-1)
        self.needs_prompt_logprobs = np.zeros(self.max_num_reqs, dtype=bool)

        # Statistics for penalties.
        # TODO(woosuk): These tensors are rarely used but can be extremely large.
        # Optimize the memory usage.
        self.prompt_bin_counts = torch.zeros(
            self.max_num_reqs, self.vocab_size, dtype=torch.int32, device=self.device
        )
        self.output_bin_counts = torch.zeros(
            self.max_num_reqs, self.vocab_size, dtype=torch.int32, device=self.device
        )

    def _make_param(self, size: int, dtype: torch.dtype) -> "Param":
        return Param(size, dtype=dtype, device=self.device, pin_memory=self.pin_memory)

    def _make_buffer(self, size: int, dtype: torch.dtype) -> CpuGpuBuffer:
        return CpuGpuBuffer(
            size, dtype=dtype, device=self.device, pin_memory=self.pin_memory
        )

    @property
    def num_reqs(self) -> int:
        return len(self.req_id_to_index)

    def add_request(
        self,
        req_id: str,
        prompt_len: int,
        prefill_token_ids: list[int],
        num_computed_tokens: int,
        sampling_params: SamplingParams,
        lora_request: LoRARequest | None,
    ) -> None:
        assert len(self.free_indices) > 0, "No free indices"
        req_idx = self.free_indices.pop()
        self.req_id_to_index[req_id] = req_idx
        self.index_to_req_id[req_idx] = req_id
        self.extra_data[req_id] = ExtraData(lora_request)

        self.prompt_len[req_idx] = prompt_len
        prefill_len = len(prefill_token_ids)
        assert prefill_len >= prompt_len, (
            f"prefill_len {prefill_len} < prompt_len {prompt_len}"
        )
        self.prefill_len.np[req_idx] = prefill_len
        self.prefill_token_ids.np[req_idx, :prefill_len] = prefill_token_ids

        self.num_computed_prefill_tokens[req_idx] = num_computed_tokens
        # FIXME(woosuk): This triggers a GPU operation whenever adding a new request.
        # Optimize this.
        self.num_computed_tokens[req_idx] = num_computed_tokens

        if lora_request is not None:
            self.lora_ids[req_idx] = lora_request.lora_int_id
        else:
            self.lora_ids[req_idx] = NO_LORA_ID

        self.temperature.np[req_idx] = sampling_params.temperature
        self.top_p.np[req_idx] = sampling_params.top_p
        if 0 < sampling_params.top_k < self.vocab_size:
            top_k = sampling_params.top_k
        else:
            top_k = self.vocab_size
        self.top_k.np[req_idx] = top_k
        self.repetition_penalty.np[req_idx] = sampling_params.repetition_penalty
        self.frequency_penalty.np[req_idx] = sampling_params.frequency_penalty
        self.presence_penalty.np[req_idx] = sampling_params.presence_penalty

        if use_penalty(sampling_params):
            bincount(
                self.prefill_token_ids.gpu[req_idx],
                prefill_len,
                prompt_len,
                self.prompt_bin_counts[req_idx],
                self.output_bin_counts[req_idx],
            )

        if sampling_params.seed is not None:
            seed = sampling_params.seed
        else:
            seed = np.random.randint(_NP_INT64_MIN, _NP_INT64_MAX)
        self.seeds.np[req_idx] = seed

        if sampling_params.logprobs is not None:
            num_logprobs = sampling_params.logprobs
        else:
            num_logprobs = -1
        self.num_logprobs[req_idx] = num_logprobs

        # For now, only support prompt logprobs for the prompt tokens.
        needs_prompt_logprobs = sampling_params.prompt_logprobs is not None
        self.needs_prompt_logprobs[req_idx] = needs_prompt_logprobs

    def remove_request(self, req_id: str) -> None:
        self.extra_data.pop(req_id, None)
        req_idx = self.req_id_to_index.pop(req_id, None)
        if req_idx is None:
            # Request not found.
            return
        self.index_to_req_id.pop(req_idx, None)
        self.free_indices.append(req_idx)

    def make_sampling_metadata(
        self,
        idx_mapping: torch.Tensor,
        idx_mapping_np: np.ndarray,
        pos: torch.Tensor,
    ) -> SamplingMetadata:
        temperature = self.temperature.np[idx_mapping_np]
        temperature = self.temperature.copy_np_to_gpu(temperature)

        top_p = self.top_p.np[idx_mapping_np]
        no_top_p = np.all(top_p == 1.0)
        top_p = self.top_p.copy_np_to_gpu(top_p) if not no_top_p else None

        top_k = self.top_k.np[idx_mapping_np]
        no_top_k = np.all(top_k == self.vocab_size)
        top_k = self.top_k.copy_np_to_gpu(top_k) if not no_top_k else None

        rep_penalty = self.repetition_penalty.np[idx_mapping_np]
        rep_penalty = self.repetition_penalty.copy_np_to_gpu(rep_penalty)
        freq_penalty = self.frequency_penalty.np[idx_mapping_np]
        freq_penalty = self.frequency_penalty.copy_np_to_gpu(freq_penalty)
        pres_penalty = self.presence_penalty.np[idx_mapping_np]
        pres_penalty = self.presence_penalty.copy_np_to_gpu(pres_penalty)

        seeds = self.seeds.np[idx_mapping_np]
        seeds = self.seeds.copy_np_to_gpu(seeds)

        num_logprobs = self.num_logprobs[idx_mapping_np]
        max_num_logprobs: int | None = int(np.max(num_logprobs))
        if max_num_logprobs == -1:
            max_num_logprobs = None

        return SamplingMetadata(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            repetition_penalty=rep_penalty,
            frequency_penalty=freq_penalty,
            presence_penalty=pres_penalty,
            seeds=seeds,
            pos=pos,
            max_num_logprobs=max_num_logprobs,
            idx_mapping=idx_mapping,
            prompt_bin_counts=self.prompt_bin_counts,
            output_bin_counts=self.output_bin_counts,
        )

    def make_lora_inputs(
        self,
        req_ids: list[str],
        idx_mapping: np.ndarray,
        num_scheduled_tokens: np.ndarray,
    ) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]:
        lora_ids = self.lora_ids[idx_mapping]
        prompt_lora_mapping = tuple(lora_ids)
        token_lora_mapping = tuple(lora_ids.repeat(num_scheduled_tokens))

        active_lora_requests: set[LoRARequest] = set()
        for req_id in req_ids:
            lora_request = self.extra_data[req_id].lora_request
            if lora_request is not None:
                active_lora_requests.add(lora_request)
        return prompt_lora_mapping, token_lora_mapping, active_lora_requests
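
RequestState owns a fixed pool of max_num_reqs slots; req_id_to_index maps live request ids to slot indices and free_indices holds the unused slots. A minimal construction sketch, assuming a CUDA device with UVA support (prefill_token_ids is a UvaBuffer); the sizes are illustrative:

import torch

states = RequestState(
    max_num_reqs=4,
    max_model_len=2048,
    max_num_batched_tokens=8192,
    num_speculative_steps=0,
    vocab_size=32000,
    device=torch.device("cuda"),
    pin_memory=True,
)
assert states.num_reqs == 0
assert len(states.free_indices) == states.max_num_reqs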

device instance-attribute

device = device

draft_tokens instance-attribute

draft_tokens = zeros(
    max_num_reqs,
    num_speculative_steps,
    dtype=int64,
    device=device,
)

extra_data instance-attribute

extra_data: dict[str, ExtraData] = {}

free_indices instance-attribute

free_indices = list(range(max_num_reqs))

frequency_penalty instance-attribute

frequency_penalty = _make_param(max_num_reqs, float32)

index_to_req_id instance-attribute

index_to_req_id: dict[int, str] = {}

last_sampled_tokens instance-attribute

last_sampled_tokens = zeros(
    max_num_reqs, 1, dtype=int64, device=device
)

lora_ids instance-attribute

lora_ids = zeros(max_num_reqs, dtype=int32)

max_model_len instance-attribute

max_model_len = max_model_len

max_num_batched_tokens instance-attribute

max_num_batched_tokens = max_num_batched_tokens

max_num_reqs instance-attribute

max_num_reqs = max_num_reqs

needs_prompt_logprobs instance-attribute

needs_prompt_logprobs = zeros(max_num_reqs, dtype=bool)

next_prefill_tokens instance-attribute

next_prefill_tokens = zeros(
    max_num_reqs, dtype=int32, device=device
)

num_computed_prefill_tokens instance-attribute

num_computed_prefill_tokens = zeros(
    max_num_reqs, dtype=int32
)

num_computed_tokens instance-attribute

num_computed_tokens = zeros(
    max_num_reqs, dtype=int32, device=device
)

num_logprobs instance-attribute

num_logprobs = empty(max_num_reqs, dtype=int32)

num_reqs property

num_reqs: int

num_speculative_steps instance-attribute

num_speculative_steps = num_speculative_steps

output_bin_counts instance-attribute

output_bin_counts = zeros(
    max_num_reqs, vocab_size, dtype=int32, device=device
)

pin_memory instance-attribute

pin_memory = pin_memory

prefill_len instance-attribute

prefill_len = _make_buffer(max_num_reqs, dtype=int32)

prefill_token_ids instance-attribute

prefill_token_ids = UvaBuffer(
    max_num_reqs, max_model_len, dtype=int32
)

presence_penalty instance-attribute

presence_penalty = _make_param(max_num_reqs, float32)

prompt_bin_counts instance-attribute

prompt_bin_counts = zeros(
    max_num_reqs, vocab_size, dtype=int32, device=device
)

prompt_len instance-attribute

prompt_len = zeros(max_num_reqs, dtype=int32)

repetition_penalty instance-attribute

repetition_penalty = _make_param(max_num_reqs, float32)

req_id_to_index instance-attribute

req_id_to_index: dict[str, int] = {}

seeds instance-attribute

seeds = _make_param(max_num_reqs, int64)

temperature instance-attribute

temperature = _make_param(max_num_reqs, float32)

top_k instance-attribute

top_k = _make_param(max_num_reqs, int32)

top_p instance-attribute

top_p = _make_param(max_num_reqs, float32)

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    max_num_reqs: int,
    max_model_len: int,
    max_num_batched_tokens: int,
    num_speculative_steps: int,
    vocab_size: int,
    device: device,
    pin_memory: bool,
)
Source code in vllm/v1/worker/gpu/states.py
def __init__(
    self,
    max_num_reqs: int,
    max_model_len: int,
    max_num_batched_tokens: int,
    num_speculative_steps: int,
    vocab_size: int,
    device: torch.device,
    pin_memory: bool,
):
    self.max_num_reqs = max_num_reqs
    self.max_model_len = max_model_len
    self.max_num_batched_tokens = max_num_batched_tokens
    self.num_speculative_steps = num_speculative_steps
    self.vocab_size = vocab_size
    self.device = device
    self.pin_memory = pin_memory

    self.req_id_to_index: dict[str, int] = {}
    self.index_to_req_id: dict[int, str] = {}
    self.free_indices = list(range(max_num_reqs))
    self.extra_data: dict[str, ExtraData] = {}

    self.prompt_len = np.zeros(self.max_num_reqs, dtype=np.int32)
    # NOTE(woosuk): This tensor can be extremely large (e.g., several GBs)
    # depending on the configured max_num_reqs and max_model_len.
    self.prefill_token_ids = UvaBuffer(
        self.max_num_reqs, self.max_model_len, dtype=torch.int32
    )
    # NOTE(woosuk): We don't use UVA for prefill_len because its GPU view
    # can be used outside of update_states and prepare_inputs.
    # Without async barrier, using UVA can cause race conditions.
    self.prefill_len = self._make_buffer(self.max_num_reqs, dtype=torch.int32)
    # Number of computed tokens.
    self.num_computed_prefill_tokens = np.zeros(self.max_num_reqs, dtype=np.int32)
    self.num_computed_tokens = torch.zeros(
        self.max_num_reqs, dtype=torch.int32, device=device
    )

    # Last sampled tokens.
    self.last_sampled_tokens = torch.zeros(
        self.max_num_reqs,
        1,
        dtype=torch.int64,
        device=device,
    )

    # Draft tokens.
    self.draft_tokens = torch.zeros(
        self.max_num_reqs,
        self.num_speculative_steps,
        dtype=torch.int64,
        device=device,
    )
    self.next_prefill_tokens = torch.zeros(
        self.max_num_reqs, dtype=torch.int32, device=device
    )

    # LoRA.
    self.lora_ids = np.zeros(self.max_num_reqs, dtype=np.int32)
    self.lora_ids.fill(NO_LORA_ID)

    # Sampling parameters.
    self.temperature = self._make_param(self.max_num_reqs, torch.float32)
    self.top_p = self._make_param(self.max_num_reqs, torch.float32)
    self.top_k = self._make_param(self.max_num_reqs, torch.int32)
    self.repetition_penalty = self._make_param(self.max_num_reqs, torch.float32)
    self.frequency_penalty = self._make_param(self.max_num_reqs, torch.float32)
    self.presence_penalty = self._make_param(self.max_num_reqs, torch.float32)
    self.seeds = self._make_param(self.max_num_reqs, torch.int64)

    self.num_logprobs = np.empty(self.max_num_reqs, dtype=np.int32)
    # -1 means no logprobs are requested.
    self.num_logprobs.fill(-1)
    self.needs_prompt_logprobs = np.zeros(self.max_num_reqs, dtype=bool)

    # Statistics for penalties.
    # TODO(woosuk): These tensors are rarely used but can be extremely large.
    # Optimize the memory usage.
    self.prompt_bin_counts = torch.zeros(
        self.max_num_reqs, self.vocab_size, dtype=torch.int32, device=self.device
    )
    self.output_bin_counts = torch.zeros(
        self.max_num_reqs, self.vocab_size, dtype=torch.int32, device=self.device
    )

_make_buffer

_make_buffer(size: int, dtype: dtype) -> CpuGpuBuffer
Source code in vllm/v1/worker/gpu/states.py
def _make_buffer(self, size: int, dtype: torch.dtype) -> CpuGpuBuffer:
    return CpuGpuBuffer(
        size, dtype=dtype, device=self.device, pin_memory=self.pin_memory
    )

_make_param

_make_param(size: int, dtype: dtype) -> Param
Source code in vllm/v1/worker/gpu/states.py
def _make_param(self, size: int, dtype: torch.dtype) -> "Param":
    return Param(size, dtype=dtype, device=self.device, pin_memory=self.pin_memory)

add_request

add_request(
    req_id: str,
    prompt_len: int,
    prefill_token_ids: list[int],
    num_computed_tokens: int,
    sampling_params: SamplingParams,
    lora_request: LoRARequest | None,
) -> None
Source code in vllm/v1/worker/gpu/states.py
def add_request(
    self,
    req_id: str,
    prompt_len: int,
    prefill_token_ids: list[int],
    num_computed_tokens: int,
    sampling_params: SamplingParams,
    lora_request: LoRARequest | None,
) -> None:
    assert len(self.free_indices) > 0, "No free indices"
    req_idx = self.free_indices.pop()
    self.req_id_to_index[req_id] = req_idx
    self.index_to_req_id[req_idx] = req_id
    self.extra_data[req_id] = ExtraData(lora_request)

    self.prompt_len[req_idx] = prompt_len
    prefill_len = len(prefill_token_ids)
    assert prefill_len >= prompt_len, (
        f"prefill_len {prefill_len} < prompt_len {prompt_len}"
    )
    self.prefill_len.np[req_idx] = prefill_len
    self.prefill_token_ids.np[req_idx, :prefill_len] = prefill_token_ids

    self.num_computed_prefill_tokens[req_idx] = num_computed_tokens
    # FIXME(woosuk): This triggers a GPU operation whenever adding a new request.
    # Optimize this.
    self.num_computed_tokens[req_idx] = num_computed_tokens

    if lora_request is not None:
        self.lora_ids[req_idx] = lora_request.lora_int_id
    else:
        self.lora_ids[req_idx] = NO_LORA_ID

    self.temperature.np[req_idx] = sampling_params.temperature
    self.top_p.np[req_idx] = sampling_params.top_p
    if 0 < sampling_params.top_k < self.vocab_size:
        top_k = sampling_params.top_k
    else:
        top_k = self.vocab_size
    self.top_k.np[req_idx] = top_k
    self.repetition_penalty.np[req_idx] = sampling_params.repetition_penalty
    self.frequency_penalty.np[req_idx] = sampling_params.frequency_penalty
    self.presence_penalty.np[req_idx] = sampling_params.presence_penalty

    if use_penalty(sampling_params):
        bincount(
            self.prefill_token_ids.gpu[req_idx],
            prefill_len,
            prompt_len,
            self.prompt_bin_counts[req_idx],
            self.output_bin_counts[req_idx],
        )

    if sampling_params.seed is not None:
        seed = sampling_params.seed
    else:
        seed = np.random.randint(_NP_INT64_MIN, _NP_INT64_MAX)
    self.seeds.np[req_idx] = seed

    if sampling_params.logprobs is not None:
        num_logprobs = sampling_params.logprobs
    else:
        num_logprobs = -1
    self.num_logprobs[req_idx] = num_logprobs

    # For now, only support prompt logprobs for the prompt tokens.
    needs_prompt_logprobs = sampling_params.prompt_logprobs is not None
    self.needs_prompt_logprobs[req_idx] = needs_prompt_logprobs
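
A minimal usage sketch, continuing the RequestState instance from the earlier sketch; the request id, tokens, and sampling parameters are illustrative. With default penalties no bin counts are computed, and with seed unset a random 64-bit seed is drawn:

from vllm import SamplingParams

prompt = [1, 2, 3, 4]
states.add_request(
    req_id="req-0",
    prompt_len=len(prompt),
    prefill_token_ids=prompt,  # prompt tokens (plus any resumed output tokens)
    num_computed_tokens=0,
    sampling_params=SamplingParams(temperature=0.0),
    lora_request=None,
)
assert states.req_id_to_index["req-0"] in range(states.max_num_reqs)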

make_lora_inputs

make_lora_inputs(
    req_ids: list[str],
    idx_mapping: ndarray,
    num_scheduled_tokens: ndarray,
) -> tuple[
    tuple[int, ...], tuple[int, ...], set[LoRARequest]
]
Source code in vllm/v1/worker/gpu/states.py
def make_lora_inputs(
    self,
    req_ids: list[str],
    idx_mapping: np.ndarray,
    num_scheduled_tokens: np.ndarray,
) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]:
    lora_ids = self.lora_ids[idx_mapping]
    prompt_lora_mapping = tuple(lora_ids)
    token_lora_mapping = tuple(lora_ids.repeat(num_scheduled_tokens))

    active_lora_requests: set[LoRARequest] = set()
    for req_id in req_ids:
        lora_request = self.extra_data[req_id].lora_request
        if lora_request is not None:
            active_lora_requests.add(lora_request)
    return prompt_lora_mapping, token_lora_mapping, active_lora_requests
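
A minimal usage sketch, assuming two requests ("req-0" and "req-1") have been added as above, both without a LoRA adapter, and scheduled for 4 and 1 tokens in this step. prompt_lora_mapping has one LoRA id per request, token_lora_mapping repeats each id per scheduled token, and the returned set contains the distinct active LoRARequest objects (empty here):

import numpy as np

req_ids = ["req-0", "req-1"]
idx_mapping = np.array([states.req_id_to_index[r] for r in req_ids])
num_scheduled_tokens = np.array([4, 1])

prompt_lora, token_lora, active_loras = states.make_lora_inputs(
    req_ids, idx_mapping, num_scheduled_tokens
)
assert len(prompt_lora) == 2
assert len(token_lora) == num_scheduled_tokens.sum()
assert active_loras == set()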

make_sampling_metadata

make_sampling_metadata(
    idx_mapping: Tensor,
    idx_mapping_np: ndarray,
    pos: Tensor,
) -> SamplingMetadata
Source code in vllm/v1/worker/gpu/states.py
def make_sampling_metadata(
    self,
    idx_mapping: torch.Tensor,
    idx_mapping_np: np.ndarray,
    pos: torch.Tensor,
) -> SamplingMetadata:
    temperature = self.temperature.np[idx_mapping_np]
    temperature = self.temperature.copy_np_to_gpu(temperature)

    top_p = self.top_p.np[idx_mapping_np]
    no_top_p = np.all(top_p == 1.0)
    top_p = self.top_p.copy_np_to_gpu(top_p) if not no_top_p else None

    top_k = self.top_k.np[idx_mapping_np]
    no_top_k = np.all(top_k == self.vocab_size)
    top_k = self.top_k.copy_np_to_gpu(top_k) if not no_top_k else None

    rep_penalty = self.repetition_penalty.np[idx_mapping_np]
    rep_penalty = self.repetition_penalty.copy_np_to_gpu(rep_penalty)
    freq_penalty = self.frequency_penalty.np[idx_mapping_np]
    freq_penalty = self.frequency_penalty.copy_np_to_gpu(freq_penalty)
    pres_penalty = self.presence_penalty.np[idx_mapping_np]
    pres_penalty = self.presence_penalty.copy_np_to_gpu(pres_penalty)

    seeds = self.seeds.np[idx_mapping_np]
    seeds = self.seeds.copy_np_to_gpu(seeds)

    num_logprobs = self.num_logprobs[idx_mapping_np]
    max_num_logprobs: int | None = int(np.max(num_logprobs))
    if max_num_logprobs == -1:
        max_num_logprobs = None

    return SamplingMetadata(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=rep_penalty,
        frequency_penalty=freq_penalty,
        presence_penalty=pres_penalty,
        seeds=seeds,
        pos=pos,
        max_num_logprobs=max_num_logprobs,
        idx_mapping=idx_mapping,
        prompt_bin_counts=self.prompt_bin_counts,
        output_bin_counts=self.output_bin_counts,
    )
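
A minimal usage sketch for the same two requests, assuming both were added with default sampling parameters. idx_mapping must be passed both as a GPU tensor and as the matching NumPy array; pos is forwarded into SamplingMetadata unchanged and is shown here only as a placeholder tensor. Because both requests use the default top_p and top_k, those fields come back as None:

import numpy as np
import torch

idx_mapping_np = np.array([states.req_id_to_index[r] for r in ["req-0", "req-1"]])
idx_mapping = torch.as_tensor(idx_mapping_np, device=states.device)
pos = torch.zeros(2, dtype=torch.int64, device=states.device)  # placeholder positions

metadata = states.make_sampling_metadata(idx_mapping, idx_mapping_np, pos)
assert metadata.top_p is None and metadata.top_k is None
assert metadata.max_num_logprobs is None  # neither request asked for logprobs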

remove_request

remove_request(req_id: str) -> None
Source code in vllm/v1/worker/gpu/states.py
def remove_request(self, req_id: str) -> None:
    self.extra_data.pop(req_id, None)
    req_idx = self.req_id_to_index.pop(req_id, None)
    if req_idx is None:
        # Request not found.
        return
    self.index_to_req_id.pop(req_idx, None)
    self.free_indices.append(req_idx)
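
Removing a request releases its slot back to free_indices so a later add_request can reuse it; removing an unknown id is a silent no-op. Continuing the sketch above:

states.remove_request("req-0")
assert "req-0" not in states.req_id_to_index
states.remove_request("does-not-exist")  # silently ignored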

UvaBuffer

Source code in vllm/v1/worker/gpu/states.py
class UvaBuffer:
    def __init__(self, *size: int | torch.SymInt, dtype: torch.dtype):
        assert is_uva_available()
        self.cpu = torch.zeros(*size, dtype=dtype, device="cpu", pin_memory=True)
        self.np = self.cpu.numpy()
        self.gpu = get_cuda_view_from_cpu_tensor(self.cpu)
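
UvaBuffer allocates one pinned CPU tensor and exposes three views of the same memory: the CPU tensor, a NumPy view, and a CUDA view obtained through unified virtual addressing, so host-side writes are visible to GPU kernels without an explicit copy. A minimal sketch, assuming a host where is_uva_available() returns True; the shape is illustrative:

import torch

buf = UvaBuffer(4, 16, dtype=torch.int32)
buf.np[0, :3] = [7, 8, 9]   # write through the host-side NumPy view
row = buf.gpu[0, :3]        # CUDA view of the same bytes; no copy is issued here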

cpu instance-attribute

cpu = zeros(
    *size, dtype=dtype, device="cpu", pin_memory=True
)

gpu instance-attribute

gpu = get_cuda_view_from_cpu_tensor(cpu)

np instance-attribute

np = cpu.numpy()

__init__

__init__(*size: int | SymInt, dtype: dtype)
Source code in vllm/v1/worker/gpu/states.py
def __init__(self, *size: int | torch.SymInt, dtype: torch.dtype):
    assert is_uva_available()
    self.cpu = torch.zeros(*size, dtype=dtype, device="cpu", pin_memory=True)
    self.np = self.cpu.numpy()
    self.gpu = get_cuda_view_from_cpu_tensor(self.cpu)

use_penalty

use_penalty(sampling_params: SamplingParams) -> bool
Source code in vllm/v1/worker/gpu/states.py
def use_penalty(sampling_params: SamplingParams) -> bool:
    return (
        sampling_params.repetition_penalty != 1.0
        or sampling_params.frequency_penalty != 0.0
        or sampling_params.presence_penalty != 0.0
    )
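
use_penalty gates the penalty bookkeeping in add_request: only requests with a non-default repetition, frequency, or presence penalty trigger the bincount update of prompt_bin_counts and output_bin_counts. A quick check, assuming vLLM's SamplingParams defaults (repetition_penalty=1.0, frequency_penalty=0.0, presence_penalty=0.0):

from vllm import SamplingParams

assert not use_penalty(SamplingParams())
assert use_penalty(SamplingParams(presence_penalty=0.5))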