# File: text-generation-inference-main/clients/python/text_generation/__init__.py __version__ = '0.7.0' DEPRECATION_WARNING = '`text_generation` clients are deprecated and will be removed in the near future. Please use the `InferenceClient` from the `huggingface_hub` package instead.' from text_generation.client import Client, AsyncClient from text_generation.inference_api import InferenceAPIClient, InferenceAPIAsyncClient __all__ = ['Client', 'AsyncClient', 'InferenceAPIClient', 'InferenceAPIAsyncClient'] # File: text-generation-inference-main/clients/python/text_generation/client.py import json import requests import warnings from aiohttp import ClientSession, ClientTimeout from pydantic import ValidationError from typing import Dict, Optional, List, AsyncIterator, Iterator, Union from text_generation import DEPRECATION_WARNING from text_generation.types import StreamResponse, Response, Request, Parameters, Grammar, CompletionRequest, Completion, CompletionComplete, ChatRequest, ChatCompletionChunk, ChatComplete, Message, Tool from text_generation.errors import parse_error warnings.simplefilter('always', DeprecationWarning) class Client: def __init__(self, base_url: str, headers: Optional[Dict[str, str]]=None, cookies: Optional[Dict[str, str]]=None, timeout: int=10): warnings.warn(DEPRECATION_WARNING, DeprecationWarning) self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = timeout def completion(self, prompt: str, frequency_penalty: Optional[float]=None, max_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, seed: Optional[int]=None, stream: bool=False, temperature: Optional[float]=None, top_p: Optional[float]=None, stop: Optional[List[str]]=None): request = CompletionRequest(model='tgi', prompt=prompt, frequency_penalty=frequency_penalty, max_tokens=max_tokens, repetition_penalty=repetition_penalty, seed=seed, stream=stream, temperature=temperature, top_p=top_p, stop=stop) if not stream: resp = requests.post(f'{self.base_url}/v1/completions', json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return Completion(**payload) else: return self._completion_stream_response(request) def _completion_stream_response(self, request): resp = requests.post(f'{self.base_url}/v1/completions', json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True) for byte_payload in resp.iter_lines(): if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): json_payload = json.loads(payload.lstrip('data:').rstrip('\n')) try: response = CompletionComplete(**json_payload) yield response except ValidationError: raise parse_error(resp.status_code, json_payload) def chat(self, messages: List[Message], repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, top_logprobs: Optional[int]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, stream: bool=False, seed: Optional[int]=None, temperature: Optional[float]=None, top_p: Optional[float]=None, tools: Optional[List[Tool]]=None, tool_prompt: Optional[str]=None, tool_choice: Optional[str]=None, stop: Optional[List[str]]=None): request = ChatRequest(model='tgi', messages=messages, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty,
logit_bias=logit_bias, logprobs=logprobs, top_logprobs=top_logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, stream=stream, seed=seed, temperature=temperature, top_p=top_p, tools=tools, tool_prompt=tool_prompt, tool_choice=tool_choice, stop=stop) if not stream: resp = requests.post(f'{self.base_url}/v1/chat/completions', json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return ChatComplete(**payload) else: return self._chat_stream_response(request) def _chat_stream_response(self, request): resp = requests.post(f'{self.base_url}/v1/chat/completions', json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True) for byte_payload in resp.iter_lines(): if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): json_payload = json.loads(payload.lstrip('data:').rstrip('\n')) try: response = ChatCompletionChunk(**json_payload) yield response except ValidationError: raise parse_error(resp.status_code, json_payload) def generate(self, prompt: str, do_sample: bool=False, max_new_tokens: int=20, best_of: Optional[int]=None, repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, return_full_text: bool=False, seed: Optional[int]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: bool=False, decoder_input_details: bool=False, top_n_tokens: Optional[int]=None, grammar: Optional[Grammar]=None) -> Response: parameters = Parameters(best_of=best_of, details=True, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, decoder_input_details=decoder_input_details, top_n_tokens=top_n_tokens, grammar=grammar) request = Request(inputs=prompt, stream=False, parameters=parameters) resp = requests.post(self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return Response(**payload[0]) def generate_stream(self, prompt: str, do_sample: bool=False, max_new_tokens: int=20, repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, return_full_text: bool=False, seed: Optional[int]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: bool=False, top_n_tokens: Optional[int]=None, grammar: Optional[Grammar]=None) -> Iterator[StreamResponse]: parameters = Parameters(best_of=None, details=True, decoder_input_details=False, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar)
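# ---------------------------------------------------------------------------
# Illustrative usage sketch for the synchronous `Client` methods defined in
# this file. It is not part of client.py itself; the base URL is a placeholder
# and assumes a text-generation-inference server is already running there.
# ---------------------------------------------------------------------------
from text_generation import Client
from text_generation.types import Message

client = Client('http://127.0.0.1:8080')  # placeholder endpoint

# Single-shot generation: returns a `Response` with `generated_text` and `details`.
print(client.generate('What is Deep Learning?', max_new_tokens=32).generated_text)

# Token-by-token streaming: yields `StreamResponse` objects.
text = ''
for stream_response in client.generate_stream('What is Deep Learning?', max_new_tokens=32):
    if not stream_response.token.special:
        text += stream_response.token.text
print(text)

# OpenAI-style chat endpoint: returns a `ChatComplete` when `stream=False`.
chat = client.chat(messages=[Message(role='user', content='Say hello.')], max_tokens=16)
print(chat.choices[0].message.content)
# ---------------------------------------------------------------------------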
request = Request(inputs=prompt, stream=True, parameters=parameters) resp = requests.post(self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True) if resp.status_code != 200: raise parse_error(resp.status_code, resp.json()) for byte_payload in resp.iter_lines(): if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): json_payload = json.loads(payload.lstrip('data:').rstrip('/n')) try: response = StreamResponse(**json_payload) except ValidationError: raise parse_error(resp.status_code, json_payload) yield response class AsyncClient: def __init__(self, base_url: str, headers: Optional[Dict[str, str]]=None, cookies: Optional[Dict[str, str]]=None, timeout: int=10): warnings.warn(DEPRECATION_WARNING, DeprecationWarning) self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = ClientTimeout(timeout) async def completion(self, prompt: str, frequency_penalty: Optional[float]=None, max_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, seed: Optional[int]=None, stream: bool=False, temperature: Optional[float]=None, top_p: Optional[float]=None, stop: Optional[List[str]]=None) -> Union[Completion, AsyncIterator[CompletionComplete]]: request = CompletionRequest(model='tgi', prompt=prompt, frequency_penalty=frequency_penalty, max_tokens=max_tokens, repetition_penalty=repetition_penalty, seed=seed, stream=stream, temperature=temperature, top_p=top_p, stop=stop) if not stream: return await self._completion_single_response(request) else: return self._completion_stream_response(request) async def _completion_single_response(self, request): async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(f'{self.base_url}/v1/completions', json=request.dict()) as resp: payload = await resp.json() if resp.status != 200: raise parse_error(resp.status, payload) return Completion(**payload) async def _completion_stream_response(self, request): async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(f'{self.base_url}/v1/completions', json=request.dict()) as resp: async for byte_payload in resp.content: if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): json_payload = json.loads(payload.lstrip('data:').rstrip('\n')) try: response = CompletionComplete(**json_payload) yield response except ValidationError: raise parse_error(resp.status, json_payload) async def chat(self, messages: List[Message], repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, top_logprobs: Optional[int]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, stream: bool=False, seed: Optional[int]=None, temperature: Optional[float]=None, top_p: Optional[float]=None, tools: Optional[List[Tool]]=None, tool_prompt: Optional[str]=None, tool_choice: Optional[str]=None, stop: Optional[List[str]]=None) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]: request = ChatRequest(model='tgi', messages=messages, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, logprobs=logprobs, top_logprobs=top_logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, stream=stream, seed=seed, temperature=temperature, 
top_p=top_p, tools=tools, tool_prompt=tool_prompt, tool_choice=tool_choice, stop=stop) if not stream: return await self._chat_single_response(request) else: return self._chat_stream_response(request) async def _chat_single_response(self, request): async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(f'{self.base_url}/v1/chat/completions', json=request.dict()) as resp: payload = await resp.json() if resp.status != 200: raise parse_error(resp.status, payload) return ChatComplete(**payload) async def _chat_stream_response(self, request): async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(f'{self.base_url}/v1/chat/completions', json=request.dict()) as resp: async for byte_payload in resp.content: if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): payload_data = payload.lstrip('data:').rstrip('\n').removeprefix(' ') if payload_data == '[DONE]': break json_payload = json.loads(payload_data) try: response = ChatCompletionChunk(**json_payload) yield response except ValidationError: raise parse_error(resp.status, json_payload) async def generate(self, prompt: str, do_sample: bool=False, max_new_tokens: int=20, best_of: Optional[int]=None, repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, return_full_text: bool=False, seed: Optional[int]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: bool=False, decoder_input_details: bool=False, top_n_tokens: Optional[int]=None, grammar: Optional[Grammar]=None) -> Response: parameters = Parameters(best_of=best_of, details=True, decoder_input_details=decoder_input_details, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar) request = Request(inputs=prompt, stream=False, parameters=parameters) async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(self.base_url, json=request.dict()) as resp: payload = await resp.json() if resp.status != 200: raise parse_error(resp.status, payload) return Response(**payload[0]) async def generate_stream(self, prompt: str, do_sample: bool=False, max_new_tokens: int=20, repetition_penalty: Optional[float]=None, frequency_penalty: Optional[float]=None, return_full_text: bool=False, seed: Optional[int]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: bool=False, top_n_tokens: Optional[int]=None, grammar: Optional[Grammar]=None) -> AsyncIterator[StreamResponse]: parameters = Parameters(best_of=None, details=True, decoder_input_details=False, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, frequency_penalty=frequency_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], 
temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar) request = Request(inputs=prompt, stream=True, parameters=parameters) async with ClientSession(headers=self.headers, cookies=self.cookies, timeout=self.timeout) as session: async with session.post(self.base_url, json=request.dict()) as resp: if resp.status != 200: raise parse_error(resp.status, await resp.json()) async for byte_payload in resp.content: if byte_payload == b'\n': continue payload = byte_payload.decode('utf-8') if payload.startswith('data:'): json_payload = json.loads(payload.lstrip('data:').rstrip('/n')) try: response = StreamResponse(**json_payload) except ValidationError: raise parse_error(resp.status, json_payload) yield response # File: text-generation-inference-main/clients/python/text_generation/errors.py from typing import Dict class ValidationError(Exception): def __init__(self, message: str): super().__init__(message) class GenerationError(Exception): def __init__(self, message: str): super().__init__(message) class OverloadedError(Exception): def __init__(self, message: str): super().__init__(message) class IncompleteGenerationError(Exception): def __init__(self, message: str): super().__init__(message) class BadRequestError(Exception): def __init__(self, message: str): super().__init__(message) class ShardNotReadyError(Exception): def __init__(self, message: str): super().__init__(message) class ShardTimeoutError(Exception): def __init__(self, message: str): super().__init__(message) class NotFoundError(Exception): def __init__(self, message: str): super().__init__(message) class RateLimitExceededError(Exception): def __init__(self, message: str): super().__init__(message) class NotSupportedError(Exception): def __init__(self, model_id: str): message = f'Model `{model_id}` is not available for inference with this client. \nUse `huggingface_hub.inference_api.InferenceApi` instead.' 
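# ---------------------------------------------------------------------------
# Sketch of the error mapping implemented by `parse_error` (defined just
# below): the HTTP status code and the server's JSON error payload are
# translated into one of the exception classes above. The payload dicts here
# are illustrative examples, not captured server output.
# ---------------------------------------------------------------------------
from text_generation.errors import parse_error, OverloadedError, RateLimitExceededError, BadRequestError

# A recognised 'error_type' takes precedence over the status code.
assert isinstance(parse_error(500, {'error': 'model is overloaded', 'error_type': 'overloaded'}), OverloadedError)
# Otherwise the status code decides: 400 -> BadRequestError, 429 -> RateLimitExceededError, ...
assert isinstance(parse_error(400, {'error': 'bad request'}), BadRequestError)
assert isinstance(parse_error(429, {'error': 'too many requests'}), RateLimitExceededError)
# ---------------------------------------------------------------------------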
super(NotSupportedError, self).__init__(message) class UnknownError(Exception): def __init__(self, message: str): super().__init__(message) def parse_error(status_code: int, payload: Dict[str, str]) -> Exception: message = payload['error'] if 'error_type' in payload: error_type = payload['error_type'] if error_type == 'generation': return GenerationError(message) if error_type == 'incomplete_generation': return IncompleteGenerationError(message) if error_type == 'overloaded': return OverloadedError(message) if error_type == 'validation': return ValidationError(message) if status_code == 400: return BadRequestError(message) if status_code == 403 or status_code == 424: return ShardNotReadyError(message) if status_code == 504: return ShardTimeoutError(message) if status_code == 404: return NotFoundError(message) if status_code == 429: return RateLimitExceededError(message) return UnknownError(message) # File: text-generation-inference-main/clients/python/text_generation/inference_api.py import os import requests from typing import Dict, Optional, List from huggingface_hub.utils import build_hf_headers from text_generation import Client, AsyncClient, __version__ from text_generation.types import DeployedModel from text_generation.errors import NotSupportedError, parse_error INFERENCE_ENDPOINT = os.environ.get('HF_INFERENCE_ENDPOINT', 'https://api-inference.huggingface.co') def deployed_models(headers: Optional[Dict]=None) -> List[DeployedModel]: resp = requests.get('https://api-inference.huggingface.co/framework/text-generation-inference', headers=headers, timeout=5) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) models = [DeployedModel(**raw_deployed_model) for raw_deployed_model in payload] return models def check_model_support(repo_id: str, headers: Optional[Dict]=None) -> bool: resp = requests.get(f'https://api-inference.huggingface.co/status/{repo_id}', headers=headers, timeout=5) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) framework = payload['framework'] supported = framework == 'text-generation-inference' return supported class InferenceAPIClient(Client): def __init__(self, repo_id: str, token: Optional[str]=None, timeout: int=10): headers = build_hf_headers(token=token, library_name='text-generation', library_version=__version__) if not check_model_support(repo_id, headers): raise NotSupportedError(repo_id) base_url = f'{INFERENCE_ENDPOINT}/models/{repo_id}' super(InferenceAPIClient, self).__init__(base_url, headers=headers, timeout=timeout) class InferenceAPIAsyncClient(AsyncClient): def __init__(self, repo_id: str, token: Optional[str]=None, timeout: int=10): headers = build_hf_headers(token=token, library_name='text-generation', library_version=__version__) if not check_model_support(repo_id, headers): raise NotSupportedError(repo_id) base_url = f'{INFERENCE_ENDPOINT}/models/{repo_id}' super(InferenceAPIAsyncClient, self).__init__(base_url, headers=headers, timeout=timeout) # File: text-generation-inference-main/clients/python/text_generation/types.py from enum import Enum from pydantic import BaseModel, field_validator, ConfigDict from typing import Optional, List, Union, Any from text_generation.errors import ValidationError class GrammarType(str, Enum): Json = 'json' Regex = 'regex' class Grammar(BaseModel): type: GrammarType value: Union[str, dict] class ToolCall(BaseModel): id: int type: str function: dict class Message(BaseModel): role: str content: Optional[str] = None name: 
Optional[str] = None tool_calls: Optional[Any] = None class Tool(BaseModel): type: str function: dict class Function(BaseModel): name: Optional[str] arguments: str class ChoiceDeltaToolCall(BaseModel): index: int id: str type: str function: Function class ChoiceDelta(BaseModel): role: str content: Optional[str] = None tool_calls: Optional[ChoiceDeltaToolCall] = None class Choice(BaseModel): index: int delta: ChoiceDelta logprobs: Optional[dict] = None finish_reason: Optional[str] = None class CompletionRequest(BaseModel): model: str prompt: str repetition_penalty: Optional[float] = None frequency_penalty: Optional[float] = None max_tokens: Optional[int] = None stream: bool = False seed: Optional[int] = None temperature: Optional[float] = None top_p: Optional[float] = None stop: Optional[List[str]] = None class CompletionComplete(BaseModel): index: int text: str logprobs: Optional[Any] finish_reason: str class Completion(BaseModel): id: str object: str created: int model: str system_fingerprint: str choices: List[CompletionComplete] class ChatRequest(BaseModel): model: str messages: List[Message] repetition_penalty: Optional[float] = None frequency_penalty: Optional[float] = None logit_bias: Optional[List[float]] = None logprobs: Optional[bool] = None top_logprobs: Optional[int] = None max_tokens: Optional[int] = None n: Optional[int] = None presence_penalty: Optional[float] = None stream: bool = False seed: Optional[int] = None temperature: Optional[float] = None top_p: Optional[float] = None tools: Optional[List[Tool]] = None tool_prompt: Optional[str] = None tool_choice: Optional[str] = None stop: Optional[List[str]] = None class ChatCompletionComplete(BaseModel): index: int message: Message logprobs: Optional[Any] finish_reason: str usage: Optional[Any] = None class ChatComplete(BaseModel): id: str object: str created: int model: str system_fingerprint: str choices: List[ChatCompletionComplete] usage: Any class ChatCompletionChunk(BaseModel): id: str object: str created: int model: str system_fingerprint: str choices: List[Choice] class Parameters(BaseModel): do_sample: bool = False max_new_tokens: int = 20 repetition_penalty: Optional[float] = None frequency_penalty: Optional[float] = None return_full_text: bool = False stop: List[str] = [] seed: Optional[int] = None temperature: Optional[float] = None top_k: Optional[int] = None top_p: Optional[float] = None truncate: Optional[int] = None typical_p: Optional[float] = None best_of: Optional[int] = None watermark: bool = False details: bool = False decoder_input_details: bool = False top_n_tokens: Optional[int] = None grammar: Optional[Grammar] = None @field_validator('best_of') def valid_best_of(cls, field_value, values): if field_value is not None: if field_value <= 0: raise ValidationError('`best_of` must be strictly positive') if field_value > 1 and values.data['seed'] is not None: raise ValidationError('`seed` must not be set when `best_of` is > 1') sampling = values.data['do_sample'] | (values.data['temperature'] is not None) | (values.data['top_k'] is not None) | (values.data['top_p'] is not None) | (values.data['typical_p'] is not None) if field_value > 1 and (not sampling): raise ValidationError('you must use sampling when `best_of` is > 1') return field_value @field_validator('repetition_penalty') def valid_repetition_penalty(cls, v): if v is not None and v <= 0: raise ValidationError('`repetition_penalty` must be strictly positive') return v @field_validator('frequency_penalty') def valid_frequency_penalty(cls, v): if v is 
not None and v <= 0: raise ValidationError('`frequency_penalty` must be strictly positive') return v @field_validator('seed') def valid_seed(cls, v): if v is not None and v < 0: raise ValidationError('`seed` must be positive') return v @field_validator('temperature') def valid_temp(cls, v): if v is not None and v <= 0: raise ValidationError('`temperature` must be strictly positive') return v @field_validator('top_k') def valid_top_k(cls, v): if v is not None and v <= 0: raise ValidationError('`top_k` must be strictly positive') return v @field_validator('top_p') def valid_top_p(cls, v): if v is not None and (v <= 0 or v >= 1.0): raise ValidationError('`top_p` must be > 0.0 and < 1.0') return v @field_validator('truncate') def valid_truncate(cls, v): if v is not None and v <= 0: raise ValidationError('`truncate` must be strictly positive') return v @field_validator('typical_p') def valid_typical_p(cls, v): if v is not None and (v <= 0 or v >= 1.0): raise ValidationError('`typical_p` must be > 0.0 and < 1.0') return v @field_validator('top_n_tokens') def valid_top_n_tokens(cls, v): if v is not None and v <= 0: raise ValidationError('`top_n_tokens` must be strictly positive') return v @field_validator('grammar') def valid_grammar(cls, v): if v is not None: if v.type == GrammarType.Regex and (not v.value): raise ValidationError('`value` cannot be empty for `regex` grammar') if v.type == GrammarType.Json and (not v.value): raise ValidationError('`value` cannot be empty for `json` grammar') return v class Request(BaseModel): inputs: str parameters: Optional[Parameters] = None stream: bool = False @field_validator('inputs') def valid_input(cls, v): if not v: raise ValidationError('`inputs` cannot be empty') return v @field_validator('stream') def valid_best_of_stream(cls, field_value, values): parameters = values.data['parameters'] if parameters is not None and parameters.best_of is not None and (parameters.best_of > 1) and field_value: raise ValidationError('`best_of` != 1 is not supported when `stream` == True') return field_value class InputToken(BaseModel): id: int text: str logprob: Optional[float] = None class Token(BaseModel): id: int text: str logprob: Optional[float] = None special: bool class FinishReason(str, Enum): Length = 'length' EndOfSequenceToken = 'eos_token' StopSequence = 'stop_sequence' class BestOfSequence(BaseModel): generated_text: str finish_reason: FinishReason generated_tokens: int seed: Optional[int] = None prefill: List[InputToken] tokens: List[Token] top_tokens: Optional[List[List[Token]]] = None class Details(BaseModel): finish_reason: FinishReason generated_tokens: int seed: Optional[int] = None prefill: List[InputToken] tokens: List[Token] top_tokens: Optional[List[List[Token]]] = None best_of_sequences: Optional[List[BestOfSequence]] = None class Response(BaseModel): generated_text: str details: Details class StreamDetails(BaseModel): finish_reason: FinishReason generated_tokens: int seed: Optional[int] = None class StreamResponse(BaseModel): token: Token top_tokens: Optional[List[Token]] = None generated_text: Optional[str] = None details: Optional[StreamDetails] = None class DeployedModel(BaseModel): model_config = ConfigDict(protected_namespaces=()) model_id: str sha: str # File: text-generation-inference-main/server/text_generation_server/adapters/config.py from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Dict, Set, Tuple import torch from text_generation_server.adapters.weights import AdapterWeights @dataclass class 
ModuleMap: module_name: str module_weights: Dict[str, Tuple[torch.Tensor, str]] @dataclass class AdapterConfig(ABC): base_model_name_or_path: str @abstractmethod def map_weights_for_model(self, adapter_weights: Dict[int, AdapterWeights], weight_names: Tuple[str]) -> Tuple[ModuleMap, Set[str]]: pass # File: text-generation-inference-main/server/text_generation_server/adapters/lora.py from collections import defaultdict from dataclasses import dataclass from typing import Dict, List, Optional, Set, Tuple, Type, Union import torch from peft import LoraConfig as _LoraConfig from torch.distributed import ProcessGroup from text_generation_server.adapters.config import AdapterConfig, ModuleMap from text_generation_server.adapters.weights import AdapterBatchMetadata, AdapterWeights, BatchAdapterWeights from text_generation_server.utils.sgmv import BGMV_MAX_RANK, MAX_RANK_CUSTOM, get_tmp_tensors, orient_for_rank, pad_rank, use_cutlass_shrink def get_start_stop_idxs_for_rank(offset, size, rank, world_size): block_size = size // world_size start = offset + rank * block_size stop = offset + (rank + 1) * block_size return (start, stop) def shard_on_dim(t: torch.Tensor, dim: int, process_group: torch.distributed.ProcessGroup): world_size = process_group.size() rank = process_group.rank() size = t.shape[dim] (start, stop) = get_start_stop_idxs_for_rank(0, size, rank, world_size) if dim == 0: tensor = t[start:stop] elif dim == 1: tensor = t[:, start:stop] else: raise NotImplementedError("Let's make that generic when needed") return tensor def shard_lora_weights(weights_a: List[torch.Tensor], weights_b: List[torch.Tensor], split_dim: int, process_group: ProcessGroup) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: weights_a = [shard_on_dim(w, dim=split_dim, process_group=process_group) for w in weights_a] weights_b = [shard_on_dim(w, dim=1, process_group=process_group) for w in weights_b] return (weights_a, weights_b) @dataclass class LoraConfig(AdapterConfig): r: int target_modules: Optional[Union[List[str], str]] fan_in_fan_out: bool lora_alpha: int use_rslora: bool def map_weights_for_model(self, adapter_weights: Dict[int, AdapterWeights], weight_names: Tuple[str]) -> Tuple[ModuleMap, Set[str]]: adapter_weight_names = set() module_map = {} for weight_name in weight_names: lora_a_name = f'base_model.model.{weight_name}.lora_A.weight' lora_b_name = f'base_model.model.{weight_name}.lora_B.weight' if lora_a_name not in adapter_weights or lora_b_name not in adapter_weights: continue module_map[weight_name] = {'lora_A': (adapter_weights[lora_a_name], lora_a_name), 'lora_B': (adapter_weights[lora_b_name], lora_b_name)} adapter_weight_names.add(lora_a_name) adapter_weight_names.add(lora_b_name) return (module_map, adapter_weight_names) @classmethod def load(cls, adapter_id: str, api_token: str) -> 'LoraConfig': hf_config = _LoraConfig.from_pretrained(adapter_id, token=api_token) return cls(base_model_name_or_path=hf_config.base_model_name_or_path, r=hf_config.r, target_modules=hf_config.target_modules, fan_in_fan_out=hf_config.fan_in_fan_out, lora_alpha=hf_config.lora_alpha, use_rslora=hf_config.use_rslora if hasattr(hf_config, 'use_rslora') else False) class LoraWeights(AdapterWeights): def __init__(self, weights_a: List[torch.Tensor], weights_b: List[torch.Tensor], adapter_config: LoraConfig): self.lora_a_r = weights_a[0].size(1) if len(weights_a) > 0 else 1 self.lora_b_r = weights_b[0].size(0) if len(weights_a) > 0 else 1 self._use_cutlass_shrink = use_cutlass_shrink(self.lora_a_r) self._is_transposed = 
False weights_a = [orient_for_rank(w, w.size(1)).contiguous() for w in weights_a] self._weights_a = torch.stack(weights_a) self._weights_b = torch.stack(weights_b) self.adapter_config = adapter_config @property def weights_a(self) -> torch.Tensor: if self._is_transposed: self._transpose_weights() return self._weights_a @property def weights_b(self) -> torch.Tensor: if self._is_transposed: self._transpose_weights() return self._weights_b @property def weights_a_t(self) -> torch.Tensor: if not self._is_transposed: self._transpose_weights() return self._weights_a @property def weights_b_t(self) -> torch.Tensor: if not self._is_transposed: self._transpose_weights() return self._weights_b def _transpose_weights(self): if self._use_cutlass_shrink: self._weights_a = self._weights_a.transpose(1, 2).contiguous() self._weights_b = self._weights_b.transpose(1, 2).contiguous() self._is_transposed = not self._is_transposed @classmethod def get_batch_types(cls) -> List[Type[BatchAdapterWeights]]: return [BatchLoraWeights] @classmethod def prepare_weights(cls, config: LoraConfig, module_map: Dict[str, Dict], layer_type: str, unused_weight_names: Set[str], nlayers: int, dtype: torch.dtype, world_size: int, process_group: ProcessGroup, target_to_layer: Dict[str, Tuple[str, torch.Tensor]]) -> Optional[AdapterWeights]: lora_a_list = [None] * nlayers lora_b_list = [None] * nlayers for layer_id in range(nlayers): key = (layer_id, layer_type) (weight_name, layer) = target_to_layer[key] base_weight = layer.base_layer.linear.weight base_device = base_weight.device if weight_name not in module_map: return None (lora_a, lora_a_name) = module_map[weight_name]['lora_A'] lora_a = lora_a.to(base_device, dtype) (lora_b, lora_b_name) = module_map[weight_name]['lora_B'] lora_b = lora_b.to(base_device, dtype) scale = get_scaling_factor(config.lora_alpha, config.r, uses_rslora=config.use_rslora) unused_weight_names.discard(lora_a_name) unused_weight_names.discard(lora_b_name) lora_a_list[layer_id] = lora_a.transpose(0, 1) lora_b_list[layer_id] = lora_b.transpose(0, 1) * scale lora_a_list = [pad_rank(w, dim=1, world_size=world_size) for w in lora_a_list] lora_b_list = [pad_rank(w, dim=0, world_size=world_size) for w in lora_b_list] if lora_a_list: padded_rank = lora_a_list[0].size(1) config.r = padded_rank return LoraWeights(*shard_lora_weights(weights_a=lora_a_list, weights_b=lora_b_list, split_dim=0 if layer_type in {'o_proj', 'down_proj', 'lm_head'} else 1, process_group=process_group), config) @dataclass class RankSegments: rank: int lora_a_ptr: torch.Tensor lora_b_ptr: torch.Tensor tmp_shrink: torch.Tensor tmp_expand: torch.Tensor segment_starts: torch.Tensor segment_ends: torch.Tensor indices: torch.Tensor @dataclass class BatchLoraWeights(BatchAdapterWeights): lora_a: Dict[int, torch.Tensor] lora_b: Dict[int, torch.Tensor] adapter_index_configs: Dict[int, LoraConfig] rank_data: Dict[int, RankSegments] use_sgmv: bool def has_adapter(self, adapter_index: int) -> bool: return adapter_index in self.adapter_index_configs def can_vectorize(self, pg: ProcessGroup) -> bool: return all((rank_data.rank // pg.size() <= MAX_RANK_CUSTOM for rank_data in self.rank_data.values())) @classmethod def load(self, adapter_weights: Dict[int, AdapterWeights], meta: AdapterBatchMetadata, prefill: bool, prefill_head_indices: Optional[torch.Tensor]) -> Optional['BatchLoraWeights']: adapter_weights = {k: _convert_lora(v) for (k, v) in adapter_weights.items()} adapter_weights = {k: v for (k, v) in adapter_weights.items() if isinstance(v, 
LoraWeights)} if not adapter_weights: return None first_weights = next(iter(adapter_weights.values())) device = first_weights.weights_a.device segment_indices = meta.segment_indices lora_a = {idx: adapter_weights[idx].weights_a for idx in segment_indices if idx in adapter_weights} lora_b = {idx: adapter_weights[idx].weights_b for idx in segment_indices if idx in adapter_weights} max_rank = max((adapter_weights[idx].lora_a_r for idx in segment_indices if idx in adapter_weights), default=0) if prefill or max_rank > BGMV_MAX_RANK: use_sgmv = True lora_a_ptr = torch.tensor([adapter_weights[idx].weights_a.data_ptr() if idx in adapter_weights else 0 for idx in segment_indices], dtype=torch.int64, device=device) lora_b_ptr = torch.tensor([adapter_weights[idx].weights_b.data_ptr() if idx in adapter_weights else 0 for idx in segment_indices], dtype=torch.int64, device=device) else: use_sgmv = False lora_a_ptr = torch.tensor([adapter_weights[idx].weights_a_t.data_ptr() if idx in adapter_weights else 0 for idx in segment_indices], dtype=torch.int64, device=device) lora_b_ptr = torch.tensor([adapter_weights[idx].weights_b_t.data_ptr() if idx in adapter_weights else 0 for idx in segment_indices], dtype=torch.int64, device=device) adapter_index_configs = {idx: adapter_weights[idx].adapter_config for idx in segment_indices if idx in adapter_weights} adapter_to_segment = {v: k for (k, v) in enumerate(segment_indices)} rank_indices = defaultdict(list) for (segment_idx, adapter_idx) in enumerate(segment_indices): if adapter_idx not in adapter_weights: continue rank_indices[adapter_weights[adapter_idx].lora_a_r].append(segment_idx) if prefill_head_indices is not None: (j, prefill_head_segment_starts, prefill_head_segment_ends) = (1, [0], [0]) for head_index in prefill_head_indices: if head_index < meta.adapter_segments[j]: prefill_head_segment_ends[-1] += 1 else: prefill_head_segment_starts.append(prefill_head_segment_ends[-1]) prefill_head_segment_ends.append(prefill_head_segment_ends[-1] + 1) j += 1 rank_data = {} for (rank, indices) in rank_indices.items(): tmp_shrink = None tmp_expand = None segment_starts = None segment_ends = None batch_indices = None if use_sgmv: lora_a_ptr_indices = lora_a_ptr[indices] (tmp_shrink, tmp_expand) = get_tmp_tensors(lora_a_ptr_indices.size(0), rank, device) segment_starts = meta.adapter_segments[indices] segment_ends = meta.adapter_segments[[i + 1 for i in indices]] if prefill_head_indices is not None: for (i, segment_index) in enumerate(indices): segment_starts[i] = prefill_head_segment_starts[segment_index] segment_ends[i] = prefill_head_segment_ends[segment_index] else: rank_indices = set(indices) batch_indices = [adapter_to_segment[idx] for idx in meta.adapter_indices.tolist()] batch_indices = [idx if idx in rank_indices else -1 for idx in batch_indices] batch_indices = torch.tensor(batch_indices, dtype=torch.int64, device=device) rank_data[rank] = RankSegments(rank=rank, tmp_shrink=tmp_shrink, tmp_expand=tmp_expand, lora_a_ptr=lora_a_ptr[indices], lora_b_ptr=lora_b_ptr[indices], segment_starts=segment_starts, segment_ends=segment_ends, indices=batch_indices) return BatchLoraWeights(lora_a=lora_a, lora_b=lora_b, adapter_index_configs=adapter_index_configs, rank_data=rank_data, use_sgmv=use_sgmv) def get_scaling_factor(lora_alpha: int, r: int, uses_rslora: bool=False) -> float: if uses_rslora: return lora_alpha / r ** 0.5 return lora_alpha / r def _convert_lora(v: AdapterWeights) -> AdapterWeights: if hasattr(v, 'lora_weights'): return v.lora_weights return v # File: 
text-generation-inference-main/server/text_generation_server/adapters/weights.py from abc import ABC, abstractclassmethod from collections import defaultdict from dataclasses import dataclass from typing import Dict, List, Optional, Set, Type import torch @dataclass class AdapterBatchMetadata: adapter_indices: torch.Tensor adapter_set: Set[int] adapter_segments: torch.Tensor segment_indices: List[int] class AdapterWeights(ABC): @abstractclassmethod def get_batch_types(cls) -> List[Type['BatchAdapterWeights']]: pass @property def speculative_tokens(self) -> int: return 0 class BatchAdapterWeights(ABC): @abstractclassmethod def has_adapter(self, adapter_index: int) -> bool: pass @abstractclassmethod def load(cls, adapter_weights: Dict[int, AdapterWeights], meta: 'AdapterBatchMetadata', prefill: bool, prefill_head_indices: torch.Tensor) -> Optional['BatchAdapterWeights']: pass class LayerAdapterWeights: def __init__(self): self.adapter_weights: Dict[int, AdapterWeights] = {} def add_adapter(self, adapter_idx: int, weights: AdapterWeights): self.adapter_weights[adapter_idx] = weights def remove_adapter(self, adapter_idx: int): if adapter_idx not in self.adapter_weights: return del self.adapter_weights[adapter_idx] def is_empty(self) -> bool: return len(self.adapter_weights) == 0 def get_data(self, meta: AdapterBatchMetadata, prefill: bool, prefill_head_indices: Optional[torch.Tensor]) -> Dict[str, BatchAdapterWeights]: adapter_batch_types: Dict[Type[BatchAdapterWeights], Dict[int, AdapterWeights]] = defaultdict(dict) for (adapter_index, adapter_weights) in self.adapter_weights.items(): for batch_type in adapter_weights.get_batch_types(): adapter_batch_types[batch_type][adapter_index] = adapter_weights batch_data = {} for (batch_type, adapter_weights) in adapter_batch_types.items(): batched_weights = batch_type.load(adapter_weights, meta, prefill, prefill_head_indices) if batched_weights is not None: batch_data = batched_weights return batch_data @dataclass class AdapterBatchData: meta: AdapterBatchMetadata data: Dict[str, Dict[str, BatchAdapterWeights]] prefill: bool @staticmethod def from_meta(meta: AdapterBatchMetadata, weights: Dict[str, LayerAdapterWeights], prefill: bool, prefill_head_indices: Optional[torch.Tensor]) -> 'AdapterBatchData': data = {} for (k, v) in weights.items(): if v.is_empty(): continue data[k] = v.get_data(meta, prefill, prefill_head_indices if k == 'lm_head' else None) return AdapterBatchData(meta=meta, data=data, prefill=prefill) def ranks(self) -> Set[int]: ranks = set() for lora_data in self.data.values(): if lora_data is None: continue for rank_data in lora_data.rank_data.values(): ranks.add(rank_data.rank) return ranks def layer_names(self) -> Set[str]: return set(self.data.keys()) def adapter_keys(self) -> Set[str]: adapter_keys = set() for layer_data in self.data.values(): adapter_keys.update(layer_data.keys()) return adapter_keys @property def max_rank(self) -> int: ranks = self.ranks() return max(ranks) if len(ranks) > 0 else 0 # File: text-generation-inference-main/server/text_generation_server/cache.py import torch from typing import Dict, Optional, TypeVar from text_generation_server.models.types import Batch B = TypeVar('B', bound=Batch) class Cache: def __init__(self): self.cache: Dict[int, B] = {} def pop(self, batch_id: int) -> Optional[B]: return self.cache.pop(batch_id, None) def set(self, entry: B): if entry is not None: self.cache[entry.batch_id] = entry def delete(self, batch_id: int): batch = self.pop(batch_id) if batch is not None: del batch if 
torch.cuda.is_available(): torch.cuda.empty_cache() def clear(self): keys = list(self.cache.keys()) for k in keys: self.delete(k) def __len__(self): return len(self.cache.keys()) # File: text-generation-inference-main/server/text_generation_server/cli.py import os import sys import typer from pathlib import Path from loguru import logger from typing import Optional from enum import Enum from huggingface_hub import hf_hub_download from text_generation_server.utils.adapter import parse_lora_adapters app = typer.Typer() class Quantization(str, Enum): bitsandbytes = 'bitsandbytes' bitsandbytes_nf4 = 'bitsandbytes-nf4' bitsandbytes_fp4 = 'bitsandbytes-fp4' gptq = 'gptq' awq = 'awq' eetq = 'eetq' exl2 = 'exl2' fp8 = 'fp8' marlin = 'marlin' class Dtype(str, Enum): float16 = 'float16' bloat16 = 'bfloat16' @app.command() def serve(model_id: str, revision: Optional[str]=None, sharded: bool=False, quantize: Optional[Quantization]=None, speculate: Optional[int]=None, dtype: Optional[Dtype]=None, trust_remote_code: bool=False, uds_path: Path='/tmp/text-generation-server', logger_level: str='INFO', json_output: bool=False, otlp_endpoint: Optional[str]=None, otlp_service_name: str='text-generation-inference.server', max_input_tokens: Optional[int]=None): if sharded: assert os.getenv('RANK', None) is not None, 'RANK must be set when sharded is True' assert os.getenv('WORLD_SIZE', None) is not None, 'WORLD_SIZE must be set when sharded is True' assert os.getenv('MASTER_ADDR', None) is not None, 'MASTER_ADDR must be set when sharded is True' assert os.getenv('MASTER_PORT', None) is not None, 'MASTER_PORT must be set when sharded is True' logger.remove() logger.add(sys.stdout, format='{message}', filter='text_generation_server', level=logger_level, serialize=json_output, backtrace=True, diagnose=False) from text_generation_server import server from text_generation_server.tracing import setup_tracing if otlp_endpoint is not None: setup_tracing(otlp_service_name=otlp_service_name, otlp_endpoint=otlp_endpoint) lora_adapters = parse_lora_adapters(os.getenv('LORA_ADAPTERS')) if lora_adapters: logger.warning('LoRA adapters enabled (experimental feature).') if 'CUDA_GRAPHS' in os.environ: logger.warning('LoRA adapters incompatible with CUDA Graphs. Disabling CUDA Graphs.') global CUDA_GRAPHS CUDA_GRAPHS = None quantize = None if quantize is None else quantize.value dtype = None if dtype is None else dtype.value if dtype is not None and quantize not in {None, 'bitsandbytes', 'bitsandbytes-nf4', 'bitsandbytes-fp4'}: raise RuntimeError('Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model.') server.serve(model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, trust_remote_code, uds_path, max_input_tokens) @app.command() def download_weights(model_id: str, revision: Optional[str]=None, extension: str='.safetensors', auto_convert: bool=True, logger_level: str='INFO', json_output: bool=False, trust_remote_code: bool=False, merge_lora: bool=False): logger.remove() logger.add(sys.stdout, format='{message}', filter='text_generation_server', level=logger_level, serialize=json_output, backtrace=True, diagnose=False) from text_generation_server import utils try: utils.weight_files(model_id, revision, extension) logger.info('Files are already present on the host. 
Skipping download.') return except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError): pass is_local_model = Path(model_id).exists() and Path(model_id).is_dir() or os.getenv('WEIGHTS_CACHE_OVERRIDE', None) is not None if not is_local_model: if merge_lora: try: hf_hub_download(model_id, revision=revision, filename='adapter_config.json') utils.download_and_unload_peft(model_id, revision, trust_remote_code=trust_remote_code) is_local_model = True utils.weight_files(model_id, revision, extension) return except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass else: try: utils.peft.download_peft(model_id, revision, trust_remote_code=trust_remote_code) except Exception: pass try: import json config = hf_hub_download(model_id, revision=revision, filename='config.json') with open(config, 'r') as f: config = json.load(f) base_model_id = config.get('base_model_name_or_path', None) if base_model_id and base_model_id != model_id: try: logger.info(f'Downloading parent model {base_model_id}') download_weights(model_id=base_model_id, revision='main', extension=extension, auto_convert=auto_convert, logger_level=logger_level, json_output=json_output, trust_remote_code=trust_remote_code) except Exception: pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass try: filenames = utils.weight_hub_files(model_id, revision, extension) utils.download_weights(filenames, model_id, revision) return except utils.EntryNotFoundError as e: if not extension == '.safetensors' or not auto_convert: raise e elif (Path(model_id) / 'adapter_config.json').exists(): try: utils.download_and_unload_peft(model_id, revision, trust_remote_code=trust_remote_code) utils.weight_files(model_id, revision, extension) return except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass elif (Path(model_id) / 'config.json').exists(): try: import json config = Path(model_id) / 'config.json' with open(config, 'r') as f: config = json.load(f) base_model_id = config.get('base_model_name_or_path', None) if base_model_id: try: logger.info(f'Downloading parent model {base_model_id}') download_weights(model_id=base_model_id, revision='main', extension=extension, auto_convert=auto_convert, logger_level=logger_level, json_output=json_output, trust_remote_code=trust_remote_code) except Exception: pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass try: try: local_pt_files = utils.weight_files(model_id, revision, '.bin') except Exception: local_pt_files = utils.weight_files(model_id, revision, '.pt') except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): if extension == '.safetensors': logger.warning(f'No safetensors weights found for model {model_id} at revision {revision}. Downloading PyTorch weights.') pt_filenames = utils.weight_hub_files(model_id, revision, '.bin') local_pt_files = utils.download_weights(pt_filenames, model_id, revision) if auto_convert: if not trust_remote_code: logger.warning('🚨🚨BREAKING CHANGE in 2.0🚨🚨: Safetensors conversion is disabled without `--trust-remote-code` because Pickle files are unsafe and can essentially contain remote code execution!Please check for more information here: https://huggingface.co/docs/text-generation-inference/basic_tutorials/safety') logger.warning(f'No safetensors weights found for model {model_id} at revision {revision}. 
Converting PyTorch weights to safetensors.') local_st_files = [p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files] try: import transformers import json if is_local_model: config_filename = os.path.join(model_id, 'config.json') else: config_filename = hf_hub_download(model_id, revision=revision, filename='config.json') with open(config_filename, 'r') as f: config = json.load(f) architecture = config['architectures'][0] class_ = getattr(transformers, architecture) discard_names = getattr(class_, '_tied_weights_keys', []) except Exception: discard_names = [] utils.convert_files(local_pt_files, local_st_files, discard_names) @app.command() def quantize(model_id: str, output_dir: str, revision: Optional[str]=None, logger_level: str='INFO', json_output: bool=False, trust_remote_code: bool=False, upload_to_model_id: Optional[str]=None, percdamp: float=0.01, act_order: bool=False, groupsize: int=128): if revision is None: revision = 'main' download_weights(model_id=model_id, revision=revision, logger_level=logger_level, json_output=json_output) from text_generation_server.layers.gptq.quantize import quantize quantize(model_id=model_id, bits=4, groupsize=groupsize, output_dir=output_dir, revision=revision, trust_remote_code=trust_remote_code, upload_to_model_id=upload_to_model_id, percdamp=percdamp, act_order=act_order, sym=True) if __name__ == '__main__': app() # File: text-generation-inference-main/server/text_generation_server/interceptor.py import torch import grpc from google.rpc import status_pb2, code_pb2 from grpc_status import rpc_status from grpc_interceptor.server import AsyncServerInterceptor from loguru import logger from typing import Callable, Any class ExceptionInterceptor(AsyncServerInterceptor): async def intercept(self, method: Callable, request_or_iterator: Any, context: grpc.ServicerContext, method_name: str) -> Any: try: response = method(request_or_iterator, context) return await response except Exception as err: method_name = method_name.split('/')[-1] logger.exception(f'Method {method_name} encountered an error.') if isinstance(err, RuntimeError): exit(1) if torch.cuda.is_available(): torch.cuda.empty_cache() await context.abort_with_status(rpc_status.to_status(status_pb2.Status(code=code_pb2.INTERNAL, message=str(err)))) # File: text-generation-inference-main/server/text_generation_server/layers/__init__.py from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear, TensorParallelRowLinear, TensorParallelEmbedding from text_generation_server.layers.linear import get_linear, FastLinear from text_generation_server.layers.speculative import SpeculativeHead from text_generation_server.layers.layernorm import load_layer_norm from text_generation_server.layers.conv import load_conv2d from text_generation_server.layers.lora import LoraLinear, TensorParallelMultiAdapterLinear, TensorParallelAdapterRowLinear __all__ = ['get_linear', 'FastLinear', 'TensorParallelColumnLinear', 'TensorParallelRowLinear', 'TensorParallelEmbedding', 'SpeculativeHead', 'LoraLinear', 'TensorParallelMultiAdapterLinear', 'TensorParallelAdapterRowLinear', 'load_layer_norm', 'load_conv2d'] # File: text-generation-inference-main/server/text_generation_server/layers/attention/__init__.py from text_generation_server.utils.import_utils import SYSTEM import os from .common import Seqlen if os.getenv('USE_FLASH_ATTENTION', '').lower() == 'false': raise ImportError('`USE_FLASH_ATTENTION` is false.') if SYSTEM == 'cuda': from .cuda import attention, 
paged_attention, reshape_and_cache, SUPPORTS_WINDOWING elif SYSTEM == 'rocm': from .rocm import attention, paged_attention, reshape_and_cache, SUPPORTS_WINDOWING elif SYSTEM == 'ipex': from .ipex import attention, paged_attention, reshape_and_cache, SUPPORTS_WINDOWING else: raise ImportError(f"System {SYSTEM} doesn't support flash/paged attention") __all__ = ['attention', 'paged_attention', 'reshape_and_cache', 'SUPPORTS_WINDOWING', 'Seqlen'] # File: text-generation-inference-main/server/text_generation_server/layers/attention/common.py from dataclasses import dataclass from text_generation_server.models.globals import ATTENTION import torch from typing import Optional if ATTENTION in {'flashinfer', 'flashdecoding'}: @dataclass class Seqlen: input_lengths: torch.Tensor prefix_lengths: torch.Tensor cu_seqlen_q: Optional[torch.Tensor] cu_seqlen_k: Optional[torch.Tensor] max_q: int max_k: int def __init__(self, input_lengths, prefix_lengths, cu_seqlen_q=None, max_q=None, max_k=None): self.input_lengths = input_lengths self.prefix_lengths = prefix_lengths device = self.input_lengths.device shape = self.input_lengths.shape if cu_seqlen_q is None: cu_seqlen_q = torch.arange(shape[0] + 1, device=device, dtype=torch.int32) max_q = 1 else: assert max_q is not None assert max_k is not None cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32) total = self.input_lengths + self.prefix_lengths torch.cumsum(total, -1, out=cu_seqlen_k[1:]) self.cu_seqlen_q = cu_seqlen_q self.cu_seqlen_k = cu_seqlen_k self.max_q = max_q self.max_k = max_k def clamp(self, max): return self else: @dataclass class Seqlen: input_lengths: torch.Tensor prefix_lengths: torch.Tensor cu_seqlen_q: torch.Tensor max_q: int max_k: int def clamp(self, max): raise NotImplementedError('Not implemented seqlen for paged') return Seqlen(torch.clamp(self.input_lengths, max=max)) # File: text-generation-inference-main/server/text_generation_server/layers/attention/cuda.py import torch from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.models.globals import ATTENTION, BLOCK_SIZE from text_generation_server.layers.attention import Seqlen from typing import Optional (major, minor) = torch.cuda.get_device_capability() is_sm75 = major == 7 and minor == 5 _PARTITION_SIZE = 512 try: from vllm._C import cache_ops except Exception as e: raise ImportError(f'Could not import vllm paged attention. Make sure your installation is correct. 
Complete error: {e}') def reshape_and_cache(key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor): if ATTENTION in {'flashdecoding', 'flashinfer'}: shape = key_cache.shape key_cache.view(-1, shape[-2], shape[-1])[slots] = key value_cache.view(-1, shape[-2], shape[-1])[slots] = value else: cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slots, 'auto', 1.0) def paged_attention(query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, seqlen: Seqlen, max_s: int, softcap: Optional[float]=None): block_size = BLOCK_SIZE (num_seqs, num_heads, head_size) = query.shape max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE if ATTENTION == 'flashinfer': from text_generation_server.layers.attention.flashinfer import decode_state return decode_state.get().forward(query.contiguous(), paged_kv_cache=(key_cache, value_cache), logits_soft_cap=softcap, sm_scale=softmax_scale) elif ATTENTION == 'flashdecoding': max_q = 1 max_k = max_s import flash_attn_2_cuda if softcap is None: softcap = 0.0 out = flash_attn_2_cuda.varlen_fwd(query, key_cache, value_cache, None, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, None, block_tables, None, max_q, max_k, 0.0, softmax_scale, False, True, -1, -1, softcap, False, None) return out[0] else: if softcap is not None: raise RuntimeError("Paged attention doesn't support softcapping") input_lengths = seqlen.input_lengths from vllm._C import ops out = torch.empty_like(query) use_v1 = max_s <= 8192 and (max_num_partitions == 1 or num_seqs * num_heads > 512) if use_v1: ops.paged_attention_v1(out, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, 'auto', 1.0) else: assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty(size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=out.dtype, device=out.device) exp_sums = torch.empty(size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=out.device) max_logits = torch.empty_like(exp_sums) ops.paged_attention_v2(out, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, 'auto', 1.0) return out try: is_ampere_or_newer = major >= 8 and minor >= 0 if not is_ampere_or_newer: raise ImportError('FlashAttention only supports Ampere GPUs or newer.') import flash_attn_2_cuda V2 = True except ImportError: try: import flash_attn_cuda V2 = False except ImportError as e: if major >= 8: architecture_suffix = f'-{SYSTEM}' raise ImportError(f'Flash Attention V2 is not installed.\nUse the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`') elif is_sm75: raise ImportError('Flash Attention is not installed.\nUse the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) or install flash attention with `cd server && make install install-flash-attention`') from e else: raise ImportError(f'GPU with CUDA capability {major} {minor} is not supported') from e SUPPORTS_WINDOWING = V2 if ATTENTION == 'flashinfer': def attention(q: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale, window_size_left=-1, causal=True, softcap=0.0): from 
text_generation_server.layers.attention.flashinfer import prefill_with_paged_kv_state return prefill_with_paged_kv_state.get().forward(q.contiguous(), causal=causal, paged_kv_cache=(key_cache, value_cache), logits_soft_cap=softcap, sm_scale=softmax_scale, window_left=window_size_left) elif V2: def attention(q, key_cache: torch.Tensor, value_cache: torch.Tensor, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale, window_size_left=-1, causal=True, softcap=0.0): out = torch.empty_like(q) if window_size_left <= 0 and window_size_left != -1: raise ValueError('`window_size_left` must be > 0 or -1') return flash_attn_2_cuda.varlen_fwd(q, key_cache, value_cache, out, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, None, block_tables, None, seqlen.max_q, seqlen.max_k, 0.0, softmax_scale, False, causal, window_size_left, 0, softcap, False, None)[0] else: def attention(q, k, v, key_cache: torch.Tensor, value_cache: torch.Tensor, cu_seqlens, max_s, softmax_scale, window_size_left=-1, causal=None, softcap=None): if window_size_left != -1: raise NotImplementedError('window_size_left is only available with flash attn v2') if softcap is not None: raise NotImplementedError('softcap is only available with flash attn v2') if k.shape[1] != q.shape[1]: if k.shape[1] == 1: k = k.expand(-1, q.shape[1], -1) else: original_shape = k.shape k = k.unsqueeze(2).expand(-1, -1, q.shape[1] // k.shape[1], -1).reshape(original_shape[0], -1, original_shape[2]) if v.shape[1] != q.shape[1]: if v.shape[1] == 1: v = v.expand(-1, q.shape[1], -1) else: original_shape = v.shape v = v.unsqueeze(2).expand(-1, -1, q.shape[1] // v.shape[1], -1).reshape(original_shape[0], -1, original_shape[2]) out = torch.empty_like(q) flash_attn_cuda.fwd(q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, True, False, 0, None) return out # File: text-generation-inference-main/server/text_generation_server/layers/attention/flash_attn_triton.py """""" import torch import triton import triton.language as tl torch_dtype: tl.constexpr = torch.float16 @triton.jit def cdiv_fn(x, y): return (x + y - 1) // y @triton.jit def max_fn(x, y): return tl.math.max(x, y) @triton.jit def dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride): ms = tl.arange(0, m) ns = tl.arange(0, n) return philox_offset + ms[:, None] * stride + ns[None, :] @triton.jit def dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride): rng_offsets = dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride).to(tl.uint32) return tl.rand(philox_seed, rng_offsets) @triton.jit def dropout_mask(philox_seed, philox_offset, dropout_p, m, n, stride): rng_output = dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride) rng_keep = rng_output > dropout_p return rng_keep @triton.jit def load_fn(block_ptr, first, second, pad): if first and second: tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad) elif first: tensor = tl.load(block_ptr, boundary_check=(0,), padding_option=pad) elif second: tensor = tl.load(block_ptr, boundary_check=(1,), padding_option=pad) else: tensor = tl.load(block_ptr) return tensor @triton.jit def _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, actual_seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, block_min, block_max, offs_n_causal, masked_blocks, n_extra_tokens, bias_ptr, IS_CAUSAL: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, OFFS_M: tl.constexpr, OFFS_N: tl.constexpr, PRE_LOAD_V: 
tl.constexpr, MASK_STEPS: tl.constexpr, ENABLE_DROPOUT: tl.constexpr, RETURN_ENCODED_SOFTMAX: tl.constexpr, PADDED_HEAD: tl.constexpr): for start_n in range(block_min, block_max, BLOCK_N): k = load_fn(K_block_ptr, PADDED_HEAD, MASK_STEPS and n_extra_tokens != 0, 'zero') if PRE_LOAD_V: v = load_fn(V_block_ptr, MASK_STEPS and n_extra_tokens != 0, PADDED_HEAD, 'zero') qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) if MASK_STEPS: if start_n + BLOCK_N == block_max and n_extra_tokens != 0: boundary_m = tl.full([BLOCK_M], actual_seqlen_k, dtype=tl.int32) size_n = start_n + OFFS_N[None, :] mask = size_n < boundary_m[:, None] qk = tl.where(mask, qk, float('-inf')) if IS_CAUSAL: causal_boundary = start_n + offs_n_causal causal_mask = OFFS_M[:, None] >= causal_boundary[None, :] qk = tl.where(causal_mask, qk, float('-inf')) qk += tl.dot(q, k) if bias_ptr is not None: bias = load_fn(bias_ptr, False, MASK_STEPS and n_extra_tokens != 0, 'zero') qk += bias * 1.44269504089 m_ij = tl.maximum(m_i, tl.max(qk, 1)) qk = qk - m_ij[:, None] p = tl.math.exp2(qk) l_ij = tl.sum(p, 1) if ENABLE_DROPOUT: philox_offset = batch_philox_offset + start_m * BLOCK_M * actual_seqlen_k + start_n - BLOCK_N keep = dropout_mask(philox_seed, philox_offset, dropout_p, BLOCK_M, BLOCK_N, actual_seqlen_k) if RETURN_ENCODED_SOFTMAX: tl.store(encoded_softmax_block_ptr, tl.where(keep, p, -p).to(encoded_softmax_block_ptr.type.element_ty)) p = tl.where(keep, p, 0.0) elif RETURN_ENCODED_SOFTMAX: tl.store(encoded_softmax_block_ptr, p.to(encoded_softmax_block_ptr.type.element_ty)) alpha = tl.math.exp2(m_i - m_ij) acc = acc * alpha[:, None] if not PRE_LOAD_V: v = load_fn(V_block_ptr, MASK_STEPS and n_extra_tokens != 0, PADDED_HEAD, 'zero') l_i = l_i * alpha + l_ij m_i = m_ij acc += tl.dot(p.to(V_block_ptr.type.element_ty), v) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) if bias_ptr is not None: bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N)) if RETURN_ENCODED_SOFTMAX: encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr, (0, BLOCK_N)) return (acc, l_i, m_i) @triton.autotune(configs=[triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': True}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4)], key=['IS_CAUSAL', 'dropout_p', 'BLOCK_DMODEL']) @triton.jit def attn_fwd(Q, K, V, bias, sm_scale, L, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, cu_seqlens_q, cu_seqlens_k, dropout_p, philox_seed, 
philox_offset_base, encoded_softmax, HQ: tl.constexpr, HK: tl.constexpr, ACTUAL_BLOCK_DMODEL: tl.constexpr, MAX_SEQLENS_Q: tl.constexpr, MAX_SEQLENS_K: tl.constexpr, VARLEN: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, PRE_LOAD_V: tl.constexpr, BIAS_TYPE: tl.constexpr, ENABLE_DROPOUT: tl.constexpr, RETURN_ENCODED_SOFTMAX: tl.constexpr): start_m = tl.program_id(0) off_h_q = tl.program_id(1) off_z = tl.program_id(2) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) if VARLEN: cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z) cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1) seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start if start_m * BLOCK_M > seqlen_q: return cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z) cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1) seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start else: cu_seqlens_q_start = 0 cu_seqlens_k_start = 0 seqlen_q = MAX_SEQLENS_Q seqlen_k = MAX_SEQLENS_K n_blocks = cdiv_fn(seqlen_k, BLOCK_N) if IS_CAUSAL: n_blocks_seqlen = cdiv_fn((start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N) n_blocks = min(n_blocks, n_blocks_seqlen) if n_blocks <= 0: o_offset = off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(seqlen_q, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty) return GROUP_SIZE: tl.constexpr = HQ // HK if GROUP_SIZE != 1: off_h_k = off_h_q // GROUP_SIZE else: off_h_k = off_h_q n_extra_tokens = 0 if seqlen_k < BLOCK_N: n_extra_tokens = BLOCK_N - seqlen_k elif seqlen_k % BLOCK_N: n_extra_tokens = seqlen_k % BLOCK_N PADDED_HEAD: tl.constexpr = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL q_offset = off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) k_offset = off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn K_block_ptr = tl.make_block_ptr(base=K + k_offset, shape=(ACTUAL_BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn), offsets=(0, 0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) v_offset = off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk V_block_ptr = tl.make_block_ptr(base=V + v_offset, shape=(seqlen_k, ACTUAL_BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) if BIAS_TYPE != 0: bias_ptr = tl.make_block_ptr(base=bias + off_h_q * stride_bh, shape=(seqlen_q, seqlen_k), strides=(stride_bm, stride_bn), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0)) else: bias_ptr = None if ENABLE_DROPOUT: batch_philox_offset = philox_offset_base + (off_z * HQ + off_h_q) * seqlen_q * seqlen_k else: batch_philox_offset = 0 if RETURN_ENCODED_SOFTMAX: encoded_softmax_block_ptr = tl.make_block_ptr(base=encoded_softmax + off_h_q * seqlen_q * seqlen_k, shape=(seqlen_q, seqlen_k), strides=(seqlen_k, 1), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0)) else: encoded_softmax_block_ptr = 0 m_i = tl.full([BLOCK_M], float('-inf'), dtype=tl.float32) l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], 
dtype=tl.float32) qk_scale = sm_scale * 1.44269504089 q = load_fn(Q_block_ptr, True, PADDED_HEAD, 'zero') q = (q * qk_scale).to(Q_block_ptr.type.element_ty) padded_block_k = n_extra_tokens != 0 is_modulo_mn = not padded_block_k and seqlen_q % BLOCK_M == 0 if IS_CAUSAL: masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn) else: masked_blocks = padded_block_k masked_blocks = min(masked_blocks, n_blocks) n_full_blocks = n_blocks - masked_blocks block_min = 0 block_max = n_blocks * BLOCK_N if n_full_blocks > 0: block_max = (n_blocks - masked_blocks) * BLOCK_N (acc, l_i, m_i) = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, block_min, block_max, 0, 0, 0, bias_ptr, False, BLOCK_M, BLOCK_DMODEL, BLOCK_N, offs_m, offs_n, PRE_LOAD_V, False, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX, PADDED_HEAD) block_min = block_max block_max = n_blocks * BLOCK_N tl.debug_barrier() if masked_blocks > 0: offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0 K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0)) if bias_ptr is not None: bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N)) if RETURN_ENCODED_SOFTMAX: encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr, (0, n_full_blocks)) (acc, l_i, m_i) = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, block_min, block_max, offs_n_causal, masked_blocks, n_extra_tokens, bias_ptr, IS_CAUSAL, BLOCK_M, BLOCK_DMODEL, BLOCK_N, offs_m, offs_n, PRE_LOAD_V, True, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX, PADDED_HEAD) acc = acc / l_i[:, None] if ENABLE_DROPOUT: acc = acc / (1 - dropout_p) end_m_idx = (start_m + 1) * BLOCK_M start_m_idx = start_m * BLOCK_M causal_start_idx = seqlen_q - seqlen_k acc = acc.to(Out.type.element_ty) if IS_CAUSAL: if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx: out_mask_boundary = tl.full((BLOCK_DMODEL,), causal_start_idx, dtype=tl.int32) mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M) out_ptrs_mask = mask_m_offsets[:, None] >= out_mask_boundary[None, :] z = 0.0 acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty)) o_offset = off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) tl.store(O_block_ptr, acc, boundary_check=(0, 1)) def check_args(q, k, v, o, varlen=True, max_seqlens=None, cu_seqlens_q=None, cu_seqlens_k=None): assert q.dim() == k.dim() and q.dim() == v.dim() if varlen: assert q.dim() == 3 (total_q, nheads_q, head_size) = q.shape (total_k, nheads_k, _) = k.shape assert cu_seqlens_q is not None assert cu_seqlens_k is not None assert len(cu_seqlens_q) == len(cu_seqlens_k) else: assert q.dim() == 4 (batch, nheads_q, seqlen_q, head_size) = q.shape (_, nheads_k, seqlen_k, _) = k.shape assert max_seqlens > 0 assert k.shape == v.shape assert q.shape[-1] == k.shape[-1] and q.shape[-1] == v.shape[-1] assert q.dtype == k.dtype and q.dtype == v.dtype assert head_size <= 128 assert o.shape == q.shape assert nheads_q % nheads_k == 0 class _attention(torch.autograd.Function): @staticmethod def forward(ctx, q, k, v, o, cu_seqlens_q, cu_seqlens_k, max_seqlens_q, max_seqlens_k, 
causal=False, sm_scale=1.0, bias=None): if o is None: o = torch.empty_like(q, dtype=v.dtype) check_args(q, k, v, o, varlen=True, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k) if True: (total_q, nheads_q, head_size) = q.shape (total_k, nheads_k, _) = k.shape batch = len(cu_seqlens_q) - 1 q_strides = (0, q.stride(1), q.stride(0), q.stride(2)) k_strides = (0, k.stride(1), k.stride(0), k.stride(2)) v_strides = (0, v.stride(1), v.stride(0), v.stride(2)) o_strides = (0, o.stride(1), o.stride(0), o.stride(2)) else: (batch, seqlen_q, nheads_q, head_size) = q.shape (_, seqlen_k, nheads_k, _) = k.shape q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3)) k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3)) v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3)) o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3)) padded_d_model = 1 << (head_size - 1).bit_length() padded_d_model = max(padded_d_model, 16) def grid(META): return (triton.cdiv(max_seqlens_q, META['BLOCK_M']), nheads_q, batch) encoded_softmax = None philox_seed = 114514 philox_offset = 1919810 if bias is not None: bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2), bias.stride(3)) else: bias_strides = (0, 0, 0, 0) attn_fwd[grid](q, k, v, bias, sm_scale, None, o, *q_strides, *k_strides, *v_strides, *o_strides, *bias_strides, cu_seqlens_q, cu_seqlens_k, dropout_p=0.0, philox_seed=philox_seed, philox_offset_base=philox_offset, encoded_softmax=encoded_softmax, HQ=nheads_q, HK=nheads_k, ACTUAL_BLOCK_DMODEL=head_size, MAX_SEQLENS_Q=max_seqlens_q, MAX_SEQLENS_K=max_seqlens_k, IS_CAUSAL=causal, VARLEN=True, BLOCK_DMODEL=padded_d_model, BIAS_TYPE=0 if bias is None else 1, ENABLE_DROPOUT=False, RETURN_ENCODED_SOFTMAX=False) ctx.grid = grid ctx.sm_scale = sm_scale ctx.BLOCK_DMODEL = head_size ctx.causal = causal ctx.dropout_p = 0.0 ctx.philox_seed = philox_seed ctx.philox_offset = philox_offset ctx.encoded_softmax = encoded_softmax ctx.return_encoded_softmax = False return (o, encoded_softmax) triton_attention = _attention.apply # File: text-generation-inference-main/server/text_generation_server/layers/attention/flashinfer.py from typing import Optional from contextvars import ContextVar from contextlib import contextmanager import flashinfer import torch prefill_state: ContextVar[flashinfer.BatchPrefillWithRaggedKVCacheWrapper] = ContextVar('prefill_state') prefill_with_paged_kv_state: ContextVar[flashinfer.BatchPrefillWithPagedKVCacheWrapper] = ContextVar('prefill_with_paged_kv_state') decode_state: ContextVar[flashinfer.BatchDecodeWithPagedKVCacheWrapper] = ContextVar('decode_state') workspace: Optional[torch.Tensor] = None def get_workspace(device): global workspace if workspace is None: workspace = torch.empty(128 * 1024 * 1024, dtype=torch.uint8, device=device) return workspace def create_prefill_with_paged_kv_state(*, device: torch.device): workspace_buffer = get_workspace(device) return flashinfer.BatchPrefillWithPagedKVCacheWrapper(workspace_buffer, kv_layout='NHD', use_cuda_graph=False) @contextmanager def use_prefill_with_paged_kv_state(*, state: flashinfer.BatchPrefillWithPagedKVCacheWrapper, block_tables: torch.Tensor, cu_seqlens: torch.Tensor, input_lengths: torch.Tensor, num_heads: int, num_kv_heads: int, head_size: int, page_size: int, query_dtype: str='float16'): indptr = torch.zeros(input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32) torch.add(input_lengths, page_size - 1, out=indptr[1:]) indptr[1:].div_(page_size, rounding_mode='floor') 
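# Note: the surrounding in-place ops build the paged-KV indptr. At this point indptr[1:]
# holds ceil(input_length / page_size) per sequence, computed with the usual
# (n + page_size - 1) // page_size trick; the cumsum_ that follows turns those page counts
# into cumulative page offsets, and last_page_len below recovers
# ((input_length - 1) % page_size) + 1, the number of tokens in each sequence's last page.
# Illustrative values (not from the source): page_size=16, input_lengths=[5, 40]
# -> pages per sequence [1, 3] -> indptr [0, 1, 4], last_page_len [5, 8].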
indptr[1:].cumsum_(-1) if page_size == 1: last_page_len = torch.ones(input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device) else: last_page_len = torch.empty(input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device) torch.sub(input_lengths, 1, out=last_page_len) last_page_len.remainder_(page_size) last_page_len += 1 token = prefill_with_paged_kv_state.set(state) try: state.begin_forward(qo_indptr=cu_seqlens, paged_kv_indptr=indptr, paged_kv_indices=block_tables, paged_kv_last_page_len=last_page_len, num_qo_heads=num_heads, num_kv_heads=num_kv_heads, head_dim=head_size, q_data_type=query_dtype, page_size=page_size) yield finally: state.end_forward() if token is not None: prefill_with_paged_kv_state.reset(token) def create_prefill_state(*, device: torch.device): workspace_buffer = get_workspace(device) return flashinfer.BatchPrefillWithRaggedKVCacheWrapper(workspace_buffer, kv_layout='NHD', use_cuda_graph=False) @contextmanager def use_prefill_state(*, state: flashinfer.BatchPrefillWithRaggedKVCacheWrapper, cu_seqlens: torch.Tensor, num_heads: int, num_kv_heads: int, head_size: int, query_dtype: str='float16'): token = prefill_state.set(state) try: state.begin_forward(qo_indptr=cu_seqlens, kv_indptr=cu_seqlens, num_qo_heads=num_heads, num_kv_heads=num_kv_heads, head_dim=head_size, q_data_type=query_dtype) yield finally: state.end_forward() if token is not None: prefill_state.reset(token) def create_decode_state(*, device: torch.device, num_heads: int, num_kv_heads: int): workspace_buffer = get_workspace(device) return flashinfer.BatchDecodeWithPagedKVCacheWrapper(workspace_buffer, kv_layout='NHD', use_cuda_graph=False, use_tensor_cores=num_heads // num_kv_heads > 4) def create_decode_state_cuda_graphs(*, device: torch.device, block_tables: torch.Tensor, block_tables_ptr: torch.Tensor, last_page_len: torch.Tensor, num_heads: int, num_kv_heads: int): workspace_buffer = get_workspace(device) return flashinfer.BatchDecodeWithPagedKVCacheWrapper(workspace_buffer, kv_layout='NHD', use_cuda_graph=True, paged_kv_indices_buffer=block_tables, paged_kv_indptr_buffer=block_tables_ptr, paged_kv_last_page_len_buffer=last_page_len, use_tensor_cores=num_heads // num_kv_heads > 4) @contextmanager def use_decode_state(*, state: flashinfer.BatchDecodeWithPagedKVCacheWrapper, input_lengths: torch.Tensor, block_tables: torch.Tensor, num_heads: int, num_kv_heads: int, head_size: int, page_size: int, query_dtype: str='float16'): indptr = torch.zeros(input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32) torch.add(input_lengths, page_size - 1, out=indptr[1:]) indptr[1:].div_(page_size, rounding_mode='floor') indptr[1:].cumsum_(-1) last_page_len = torch.empty(input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device) torch.sub(input_lengths, 1, out=last_page_len) last_page_len.remainder_(page_size) last_page_len += 1 token = decode_state.set(state) try: state.begin_forward(indptr=indptr, indices=block_tables, last_page_len=last_page_len, num_qo_heads=num_heads, num_kv_heads=num_kv_heads, head_dim=head_size, page_size=page_size, q_data_type=query_dtype) yield finally: state.end_forward() if token is not None: decode_state.reset(token) # File: text-generation-inference-main/server/text_generation_server/layers/attention/ipex.py import intel_extension_for_pytorch as ipex import torch from text_generation_server.models.flash_causal_lm import BLOCK_SIZE from text_generation_server.layers.attention import Seqlen from typing import Optional SUPPORTS_WINDOWING 
= False def attention(q: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale, window_size_left=-1, causal=True, softcap: Optional[float]=None): out = torch.empty_like(q) ipex.llm.functional.varlen_attention(q.contiguous() if q.device.type == 'xpu' else q, key_cache.contiguous() if key_cache.device.type == 'xpu' else key_cache, value_cache.contiguous() if value_cache.device.type == 'xpu' else value_cache, out, seqlen.cu_seqlen_q, seqlen.cu_seqlen_q, seqlen.max_q, seqlen.max_q, 0.0, softmax_scale, False, causal, False, None) return out def reshape_and_cache(key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor): ipex.llm.modules.PagedAttention.reshape_and_cache(key, value, key_cache, value_cache, slots) def paged_attention(query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, seqlen: Seqlen, max_s: int, softcap: Optional[float]=None): out = torch.empty_like(query) ipex.llm.modules.PagedAttention.single_query_cached_kv_attention(out, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, seqlen.input_lengths, BLOCK_SIZE, max_s, None) return out # File: text-generation-inference-main/server/text_generation_server/layers/attention/rocm.py import os import torch from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.models.globals import ATTENTION from text_generation_server.layers.attention import Seqlen from text_generation_server.utils.log import log_master from loguru import logger (major, minor) = torch.cuda.get_device_capability() is_sm75 = major == 7 and minor == 5 _PARTITION_SIZE = 512 use_triton = os.getenv('ROCM_USE_FLASH_ATTN_V2_TRITON', '').lower() in {'true', '1'} ENGINE = 'triton' if use_triton else 'ck' try: from vllm._C import cache_ops except Exception as e: raise ImportError(f'Could not import vllm paged attention. Make sure your installation is correct. 
Complete error: {e}') def reshape_and_cache(key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor): if ATTENTION == 'flashdecoding': shape = key_cache.shape key_cache.view(-1, shape[-2], shape[-1])[slots] = key value_cache.view(-1, shape[-2], shape[-1])[slots] = value else: cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slots, 'auto', 1.0) def paged_attention(query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, input_lengths: Seqlen, max_s: int): block_size = value_cache.shape[3] (num_seqs, num_heads, head_size) = query.shape max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE input_lengths = input_lengths.input_lengths out = torch.empty_like(query) from vllm._C import ops use_v1 = max_s <= 8192 and (max_num_partitions == 1 or num_seqs * num_heads > 512) if use_v1: ops.paged_attention_v1(out, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, 'auto', 1.0) else: assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty(size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=out.dtype, device=out.device) exp_sums = torch.empty(size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=out.device) max_logits = torch.empty_like(exp_sums) ops.paged_attention_v2(out, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, 'auto', 1.0) return out if ENGINE != 'triton': try: import flash_attn_2_cuda log_master(logger.info, 'ROCm: using Flash Attention 2 Composable Kernel implementation.') except ImportError as e: if major >= 8: architecture_suffix = f'-{SYSTEM}' raise ImportError(f'Flash Attention V2 is not installed.\nUse the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`') elif is_sm75: raise ImportError('Flash Attention is not installed.\nUse the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) or install flash attention with `cd server && make install install-flash-attention`') from e else: for idx in range(torch.cuda.device_count()): name = torch.cuda.get_device_name(idx) if 'MI210' not in name and 'MI250' not in name: raise ImportError(f'AMD GPU {torch.cuda.get_device_name(idx)} does not support flash-attention') raise ImportError(f'AMD GPU with ROCm capability {major} {minor} is not supported') from e SUPPORTS_WINDOWING = False if ENGINE == 'ck': def attention(q, k, v, cu_seqlens, max_s, softmax_scale, window_size_left=-1, causal=True): if window_size_left <= 0 and window_size_left != -1: raise ValueError('`window_size_left` must be > 0 or -1') out = torch.empty_like(q) return flash_attn_2_cuda.varlen_fwd(q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, causal, False, None) elif ENGINE == 'triton': from .flash_attn_triton import triton_attention def attention(q, k, v, cu_seqlens, max_s, softmax_scale, window_size_left=-1, causal=True): out = torch.empty_like(q) (output, _) = triton_attention(q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, causal, softmax_scale) return output else: raise RuntimeError(f'Unknown attention engine {ENGINE}') # File: 
text-generation-inference-main/server/text_generation_server/layers/awq/conversion_utils.py import torch from typing import List AWQ_PACK_ORDER = [0, 2, 4, 6, 1, 3, 5, 7] REVERSE_AWQ_PACK_ORDER = [0, 4, 1, 5, 2, 6, 3, 7] def pack(imatrix: torch.Tensor, direction: str='column'): shifts = torch.arange(0, 32, 4, dtype=torch.int32, device=imatrix.device) imatrix = imatrix.to(torch.int8) & 15 if direction == 'column': imatrix = imatrix.view(-1, imatrix.shape[1] // (32 // 4), 32 // 4) qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, None, :]).sum(dim=-1) elif direction == 'row': imatrix = imatrix.view(imatrix.shape[0] // (32 // 4), 32 // 4, -1) qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, :, None]).sum(dim=1) qmatrix = qmatrix.to(torch.int32) return qmatrix def unpack(qmatrix: torch.Tensor, direction: str='column'): shifts = torch.arange(0, 32, 4, device=qmatrix.device) if direction == 'column': imatrix = torch.bitwise_right_shift(qmatrix[:, :, None], shifts[None, None, :]).view(qmatrix.shape[0], -1) elif direction == 'row': imatrix = torch.bitwise_right_shift(qmatrix[:, None, :], shifts[None, :, None]).view(-1, qmatrix.shape[-1]) imatrix = imatrix.to(torch.int8) & 15 return imatrix def apply_order(imatrix: torch.Tensor, direction: str='column', order: List[int]=AWQ_PACK_ORDER): if direction == 'column': imatrix = imatrix.view(-1, 32 // 4)[:, order].view(imatrix.shape) elif direction == 'row': imatrix = imatrix.view(32 // 4, -1)[order, :].view(imatrix.shape) return imatrix def fast_awq_to_gptq(qweight, qzeros): izeros = unpack(qzeros, direction='column') iweights = unpack(qweight, direction='column') izeros = apply_order(izeros, direction='column', order=REVERSE_AWQ_PACK_ORDER) iweights = apply_order(iweights, direction='column', order=REVERSE_AWQ_PACK_ORDER) izeros = izeros - 1 qzeros = pack(izeros, direction='column') qweight = pack(iweights, direction='row') return (qweight, qzeros) # File: text-generation-inference-main/server/text_generation_server/layers/awq/quantize/qmodule.py from typing import Optional import torch import torch.nn as nn import awq_inference_engine class WQLinear(nn.Module): def __init__(self, w_bit, group_size, qweight, qzeros, scales, bias: Optional[torch.Tensor]): super().__init__() if w_bit not in [4]: raise NotImplementedError('Only 4-bit are supported for now.') self.in_features = qweight.shape[0] self.out_features = qweight.shape[1] * 32 // w_bit self.w_bit = w_bit self.group_size = group_size if group_size != -1 else self.in_features assert self.in_features % self.group_size == 0 assert self.out_features % (32 // self.w_bit) == 0 self.qweight = qweight self.qzeros = qzeros self.scales = scales self.bias = bias @torch.no_grad() def forward(self, x): out_shape = x.shape[:-1] + (self.out_features,) out = awq_inference_engine.gemm_forward_cuda(x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, 8) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape) # File: text-generation-inference-main/server/text_generation_server/layers/bnb.py from dataclasses import dataclass import bitsandbytes as bnb import torch from bitsandbytes.nn import Int8Params, Params4bit from text_generation_server.utils.weights import UnquantizedWeight @dataclass class BNBWeight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear8bitLt(self.weight, bias, has_fp16_weights=False, threshold=6.0) class Linear8bitLt(torch.nn.Module): def __init__(self, weight, bias, has_fp16_weights=True, 
memory_efficient_backward=False, threshold=0.0, index=None): super().__init__() assert not memory_efficient_backward, 'memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0' self.state = bnb.MatmulLtState() self.index = index self.state.threshold = threshold self.state.has_fp16_weights = has_fp16_weights self.state.memory_efficient_backward = memory_efficient_backward if threshold > 0.0 and (not has_fp16_weights): self.state.use_pool = True self.weight = Int8Params(weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) self.weight.cuda(weight.device) self.bias = bias def init_8bit_state(self): self.state.CB = self.weight.CB self.state.SCB = self.weight.SCB self.weight.CB = None self.weight.SCB = None def forward(self, x: torch.Tensor): self.state.is_training = self.training if self.weight.CB is not None: self.init_8bit_state() if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) if not self.state.has_fp16_weights: if self.state.CB is not None and self.state.CxB is not None: del self.state.CB self.weight.data = self.state.CxB return out @dataclass class BNBFP4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type='fp4') @dataclass class BNBNF4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type='nf4') class Linear4bit(torch.nn.Module): def __init__(self, weight, bias, quant_type): super().__init__() self.weight = Params4bit(weight.data, requires_grad=False, compress_statistics=True, quant_type=quant_type) self.compute_dtype = None self.weight.cuda(weight.device) self.bias = bias def forward(self, x: torch.Tensor): if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) if getattr(self.weight, 'quant_state', None) is None: print('FP4 quantization state not initialized. 
Please call .cuda() or .to(device) on the LinearFP4 layer first.') inp_dtype = x.dtype if self.compute_dtype is not None: x = x.to(self.compute_dtype) bias = None if self.bias is None else self.bias.to(self.compute_dtype) out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state) out = out.to(inp_dtype) return out # File: text-generation-inference-main/server/text_generation_server/layers/conv.py from accelerate import init_empty_weights import torch @classmethod def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, stride): weight = weights.get_tensor(f'{prefix}.weight') bias = weights.get_tensor(f'{prefix}.bias') with init_empty_weights(): conv2d = cls(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride) conv2d.weight = torch.nn.Parameter(weight) conv2d.bias = torch.nn.Parameter(bias) return conv2d @classmethod def load_conv2d_no_bias(cls, prefix, weights, in_channels, out_channels, kernel_size, stride): weight = weights.get_tensor(f'{prefix}.weight') with init_empty_weights(): conv2d = cls(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride) conv2d.weight = torch.nn.Parameter(weight) conv2d.bias = None return conv2d torch.nn.Conv2d.load = load_conv2d torch.nn.Conv2d.load_no_bias = load_conv2d_no_bias # File: text-generation-inference-main/server/text_generation_server/layers/eetq.py from dataclasses import dataclass import torch from EETQ import quant_weights, w8_a16_gemm from text_generation_server.utils.weights import UnquantizedWeight @dataclass class EETQWeight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): try: from text_generation_server.layers.eetq import EETQLinear return EETQLinear(self.weight, bias) except ImportError: raise ImportError('Please install EETQ from https://github.com/NetEase-FuXi/EETQ') class EETQLinear(torch.nn.Module): def __init__(self, weight, bias) -> None: super().__init__() device = weight.device if weight.dtype != torch.float16: weight = weight.to(dtype=torch.float16) weight = torch.t(weight).contiguous().cpu() (weight, scale) = quant_weights(weight, torch.int8, False) self.weight = weight.cuda(device) self.scale = scale.cuda(device) self.bias = bias.cuda(device) if bias is not None else None def forward(self, input: torch.Tensor) -> torch.Tensor: output = w8_a16_gemm(input, self.weight, self.scale) output = output + self.bias if self.bias is not None else output return output # File: text-generation-inference-main/server/text_generation_server/layers/exl2.py from dataclasses import dataclass from typing import List, Union import torch from text_generation_server.utils.weights import Weight, Weights, WeightsLoader @dataclass class Exl2Weight(Weight): q_weight: torch.Tensor q_scale: torch.Tensor q_invperm: torch.Tensor q_scale_max: torch.Tensor q_groups: torch.Tensor def __post_init__(self): self.q_scale_max /= 256 self.q_invperm = self.q_invperm.short() @property def device(self) -> torch.device: return self.q_weight.device def get_linear(self, bias: torch.Tensor): from text_generation_server.layers.gptq import ExllamaQuantLinear return ExllamaQuantLinear(self, bias) class Exl2WeightsLoader(WeightsLoader): def get_weights(self, weights: 'Weights', prefix: str): try: q_weight = weights.get_tensor(f'{prefix}.q_weight') except RuntimeError: raise RuntimeError('Cannot load `exl2`-quantized weight, make sure the model is already quantized.') q_scale = weights.get_tensor(f'{prefix}.q_scale') 
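# Note: the remaining exl2 tensors loaded below complete the packed representation.
# Roughly: q_invperm is the inverse permutation used when the weight was quantized with
# activation reordering (cast to int16 in Exl2Weight.__post_init__), q_scale_max appears to
# hold per-group scale maxima (it is divided by 256 in __post_init__), and q_groups carries
# the per-group metadata consumed by make_group_map/ext_make_q_matrix in exllamav2.py.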
q_invperm = weights.get_tensor(f'{prefix}.q_invperm') q_scale_max = weights.get_tensor(f'{prefix}.q_scale_max') q_groups = weights.get_tensor(f'{prefix}.q_groups') return Exl2Weight(q_weight=q_weight, q_scale=q_scale, q_invperm=q_invperm, q_scale_max=q_scale_max, q_groups=q_groups) def get_weights_col_packed(self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]]): raise RuntimeError('Column-packed weights are not supported for exl') def get_weights_col(self, weights: Weights, prefix: str): return self.get_weights(weights, prefix) def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): raise ValueError('get_multi_weights_col is not supported for exl2') def get_weights_row(self, weights: Weights, prefix: str): return self.get_weights(weights, prefix) # File: text-generation-inference-main/server/text_generation_server/layers/fp8.py import torch from dataclasses import dataclass from typing import Optional, Union, List from loguru import logger from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.weights import Weight, WeightsLoader, UnquantizedWeight, Weights from text_generation_server.utils.log import log_master, log_once import importlib.util FBGEMM_MM_AVAILABLE = False FBGEMM_DYN_AVAILABLE = False def is_fbgemm_gpu_available(): try: return importlib.util.find_spec('fbgemm_gpu.experimental.gen_ai') is not None except ModuleNotFoundError: return False if is_fbgemm_gpu_available(): if SYSTEM == 'cuda': (major, _) = torch.cuda.get_device_capability() FBGEMM_MM_AVAILABLE = major == 9 FBGEMM_DYN_AVAILABLE = major >= 8 else: log_master(logger.warning, 'FBGEMM fp8 kernels are not installed.') def get_fp8_linear() -> torch.nn.Module: if SYSTEM == 'cuda': (major, _) = torch.cuda.get_device_capability() if major == 8: from text_generation_server.layers.marlin import GPTQMarlinFP8Linear return GPTQMarlinFP8Linear return Fp8Linear def fp8_quantize(weight, scale_upper_bound=None, qdtype=torch.float8_e4m3fn, scalar=False): if FBGEMM_DYN_AVAILABLE and (not scalar): (qweight, scale) = torch.ops.fbgemm.quantize_fp8_per_row(weight, bs=None, scale_ub=scale_upper_bound, output_dtype=qdtype) return (qweight, scale) finfo = torch.finfo(qdtype) scale = finfo.max / weight.abs().max().clamp(min=1e-12, max=scale_upper_bound) qweight = (weight * scale).clamp(min=finfo.min, max=finfo.max) qweight = qweight.to(qdtype) scale = scale.float().reciprocal() return (qweight, scale) class HybridFP8UnquantLoader(WeightsLoader): def __init__(self, activation_scale_ub: Optional[float], to_fp8: bool): self.activation_scale_ub = activation_scale_ub self.to_fp8 = to_fp8 def get_weights(self, weights: 'Weights', prefix: str): w = weights.get_tensor(f'{prefix}.weight') if w.dtype == torch.float8_e4m3fn: scale = weights.get_tensor(f'{prefix}.weight_scale', to_dtype=False).reshape(-1) return Fp8Weight(weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_col_packed(self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]]): w = weights.get_packed_sharded(f'{prefix}.weight', dim=0, block_sizes=block_sizes) if w.dtype == torch.float8_e4m3fn: scale = weights.get_packed_sharded(f'{prefix}.weight_scale', dim=0, block_sizes=block_sizes, to_dtype=False).reshape(-1) return Fp8Weight(weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype) if self.to_fp8: 
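# Note: this branch is reached when the checkpoint tensor is not already float8_e4m3fn;
# returning Fp8Weight without a weight_scale defers quantization to
# Fp8Linear.from_unquant / fp8_quantize (see Fp8Weight.get_linear below).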
return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_multi_weights_col(self, weights: 'Weights', prefixes: List[str], dim: int): w = [weights.get_sharded(f'{p}.weight', dim=0, to_device=False) for p in prefixes] w = torch.cat(w, dim=dim).to(weights.device) if w.dtype == torch.float8_e4m3fn: scale = [weights.get_sharded(f'{p}.weight_scale', dim=0, to_dtype=False) for p in prefixes] scale = torch.cat(scale, dim=0).reshape(-1) return Fp8Weight(weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_row(self, weights: 'Weights', prefix: str): w = weights.get_sharded(f'{prefix}.weight', dim=1) if w.dtype == torch.float8_e4m3fn: scale = weights.get_tensor(f'{prefix}.weight_scale', to_dtype=False).reshape(-1) return Fp8Weight(weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) @dataclass class Fp8Weight(Weight): weight: torch.Tensor dtype: torch.dtype weight_scale: Optional[torch.Tensor] = None activation_scale_ub: Optional[float] = None def get_linear(self, bias: torch.Tensor): if self.weight_scale is None: return get_fp8_linear().from_unquant(self.weight, bias, self.dtype) return get_fp8_linear().from_fp8(self.weight, self.weight_scale, self.activation_scale_ub, bias, self.dtype) class Fp8Linear(torch.nn.Module): def __init__(self, qweight, scale, scale_upper_bound, bias, dtype) -> None: super().__init__() if FBGEMM_MM_AVAILABLE: log_once(logger.info, 'Using FBGEMM fp8 optimized kernels') self.dtype = dtype self.qweight = qweight self.scale = scale self.scale_upper_bound = torch.tensor([scale_upper_bound], dtype=torch.float32, device=qweight.device) if scale_upper_bound is not None else None self.bias = bias if bias is not None else None @classmethod def from_unquant(cls, weight, bias, dtype): (qweight, scale) = fp8_quantize(weight, scalar=not FBGEMM_MM_AVAILABLE) return cls(qweight=qweight, scale=scale, scale_upper_bound=None, bias=bias, dtype=dtype) @classmethod def from_fp8(cls, weight, scale, input_scale, bias, dtype): return cls(qweight=weight, scale=scale, scale_upper_bound=input_scale, bias=bias, dtype=dtype) def forward(self, input: torch.Tensor) -> torch.Tensor: if FBGEMM_MM_AVAILABLE: (qinput, scale) = fp8_quantize(input, scale_upper_bound=self.scale_upper_bound) y = torch.ops.fbgemm.f8f8bf16_rowwise(qinput, self.qweight, scale, self.scale, use_fast_accum=True, bias=self.bias) return y.to(self.dtype) (qinput, scale) = fp8_quantize(input, scalar=True) (output, _) = torch._scaled_mm(qinput, self.qweight.t(), out_dtype=self.dtype, scale_a=scale, scale_b=self.scale, bias=self.bias) return output # File: text-generation-inference-main/server/text_generation_server/layers/gptq/__init__.py import os from dataclasses import dataclass from typing import List, Optional, Union import torch from loguru import logger from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.log import log_once from text_generation_server.utils.weights import Weight, Weights, WeightsLoader @dataclass class GPTQWeight(Weight): qweight: torch.Tensor qzeros: torch.Tensor scales: torch.Tensor g_idx: Optional[torch.Tensor] bits: int groupsize: int use_awq_kernel: bool use_exllama: bool def __post_init__(self): if self.scales.dtype == torch.float: self.scales = 
self.scales.half() @property def device(self) -> torch.device: return self.qweight.device def get_linear(self, bias: torch.Tensor): if self.use_awq_kernel: if SYSTEM == 'rocm': raise NotImplementedError("AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead to use Exllama/GPTQ kernels for AWQ inference.") try: from text_generation_server.layers.awq.quantize.qmodule import WQLinear return WQLinear(w_bit=self.bits, group_size=self.groupsize, qweight=self.qweight, qzeros=self.qzeros, scales=self.scales, bias=bias) except ImportError: raise NotImplementedError('You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly') elif self.use_exllama: try: from text_generation_server.layers.gptq import ExllamaQuantLinear except ImportError: raise NotImplementedError('Exllama gptq kernels are not installed. Install them `cd server/exllama_kernels && python setup.py install && cd ../exllamav2_kernels && python setup.py install`') return ExllamaQuantLinear(self, bias) else: from text_generation_server.layers.gptq.quant_linear import QuantLinear return QuantLinear(self.qweight, self.qzeros, self.scales, self.g_idx, bias, self.bits, self.groupsize) class GPTQWeightsLoader(WeightsLoader): def __init__(self, *, bits: int, desc_act: bool, groupsize: int, quant_method: str, quantize: str, sym: bool): self.bits = bits self.desc_act = desc_act self.groupsize = groupsize self.quant_method = quant_method self.quantize = quantize self.sym = sym def get_weights(self, weights: Weights, prefix: str): self._get_gptq_params(weights) use_exllama = True if self.bits != 4: use_exllama = False if self.desc_act: log_once(logger.warning, 'Disabling exllama because desc_act=True') use_exllama = False try: qweight = weights.get_tensor(f'{prefix}.qweight') except RuntimeError: raise RuntimeError('Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`') if self.quantize == 'gptq' and self.quant_method == 'gptq': g_idx = weights.get_tensor(f'{prefix}.g_idx') else: g_idx = None from text_generation_server.layers.gptq import HAS_EXLLAMA, CAN_EXLLAMA, GPTQWeight if use_exllama: if not HAS_EXLLAMA: if CAN_EXLLAMA: log_once(logger.warning, 'Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True') use_exllama = False else: log_once(logger.info, f'Using exllama kernels v{HAS_EXLLAMA}') qzeros = weights.get_tensor(f'{prefix}.qzeros') scales = weights.get_tensor(f'{prefix}.scales') if use_exllama and g_idx is not None: g_idx = g_idx - g_idx[0] if self.quantize == 'gptq' and self.quant_method == 'awq': log_once(logger.info, 'Converting AWQ model to Exllama/GPTQ packing format.') from text_generation_server.layers.awq.conversion_utils import fast_awq_to_gptq (qweight, qzeros) = fast_awq_to_gptq(qweight, qzeros) if use_exllama: g_idx = None else: g_idx = (torch.arange(qweight.shape[0] * (32 // self.bits), device=qweight.device) // self.groupsize).to(dtype=torch.int32) return GPTQWeight(qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, bits=self.bits, groupsize=self.groupsize, use_exllama=use_exllama) def get_weights_col_packed(self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]]): try: qweight = weights.get_packed_sharded(f'{prefix}.qweight', dim=1, block_sizes=block_sizes) except RuntimeError: raise 
RuntimeError(f'Cannot load `{self.quantize}` weight, make sure the model is already quantized.') scales = weights.get_packed_sharded(f'{prefix}.scales', dim=1, block_sizes=block_sizes) scales = scales.to(dtype=weights.dtype) self._get_gptq_params(weights) qzeros = weights.get_packed_sharded(f'{prefix}.qzeros', dim=1, block_sizes=block_sizes) if self.quantize == 'gptq' and self.quant_method == 'gptq': g_idx = weights.get_tensor(f'{prefix}.g_idx') elif self.quantize == 'gptq' and self.quant_method == 'awq': log_once(logger.info, 'Converting AWQ model to Exllama/GPTQ packing format.') from text_generation_server.layers.awq.conversion_utils import fast_awq_to_gptq (qweight, qzeros) = fast_awq_to_gptq(qweight, qzeros) g_idx = (torch.arange(qweight.shape[0] * (32 // self.bits), device=qweight.device) // self.groupsize).to(dtype=torch.int32) else: g_idx = None return GPTQWeight(qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, bits=self.bits, groupsize=self.groupsize, use_awq_kernel=self.quantize == 'awq', use_exllama=False) def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): try: qweight = torch.cat([weights.get_sharded(f'{p}.qweight', dim=1) for p in prefixes], dim=1) except RuntimeError: raise RuntimeError(f'Cannot load `{self.quantize}` weight, make sure the model is already quantized') scales = torch.cat([weights.get_sharded(f'{p}.scales', dim=1) for p in prefixes], dim=1) self._get_gptq_params(weights) qzeros = torch.cat([weights.get_sharded(f'{p}.qzeros', dim=1) for p in prefixes], dim=1) from text_generation_server.layers.gptq import HAS_EXLLAMA use_exllama = self.bits == 4 and HAS_EXLLAMA and (self.quantize == 'gptq') and (not self.desc_act) if self.quantize == 'gptq' and self.quant_method == 'gptq': w = [weights.get_tensor(f'{p}.g_idx') for p in prefixes] for w2 in w[1:]: torch.testing.assert_close(w2, w[0]) g_idx = w[0] elif self.quantize == 'gptq' and self.quant_method == 'awq': log_once(logger.info, 'Converting AWQ model to Exllama/GPTQ packing format.') from text_generation_server.layers.awq.conversion_utils import fast_awq_to_gptq (qweight, qzeros) = fast_awq_to_gptq(qweight, qzeros) if use_exllama: g_idx = None else: g_idx = (torch.arange(qweight.shape[0] * (32 // self.bits), device=qweight.device) // self.groupsize).to(dtype=torch.int32) else: g_idx = None return GPTQWeight(qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, bits=self.bits, groupsize=self.groupsize, use_awq_kernel=self.quantize == 'awq', use_exllama=use_exllama) def get_weights_row(self, weights: Weights, prefix: str): self._get_gptq_params(weights) use_exllama = True if self.bits != 4: use_exllama = False if self.desc_act: log_once(logger.warning, 'Disabling exllama because desc_act=True') use_exllama = False try: qweight = weights.get_sharded(f'{prefix}.qweight', dim=0) except RuntimeError: raise RuntimeError('Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`') if self.quantize == 'gptq' and self.quant_method == 'gptq': g_idx = weights.get_sharded(f'{prefix}.g_idx', dim=0) else: g_idx = None if weights.process_group.size() > 1: if g_idx is not None: if not torch.equal(g_idx.cpu(), torch.tensor([i // self.groupsize for i in range(g_idx.shape[0])], dtype=torch.int32)) and (not (g_idx == 0).all()): use_exllama = False from text_generation_server.layers.gptq import CAN_EXLLAMA, HAS_EXLLAMA, GPTQWeight if use_exllama: if not HAS_EXLLAMA: if CAN_EXLLAMA: 
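# Note: CAN_EXLLAMA (defined at module level below) means the hardware could run the
# exllama kernels (CUDA compute capability >= 8 or ROCm), while HAS_EXLLAMA records whether
# the kernels actually imported ('1' or '2', False otherwise, or forced off via
# DISABLE_EXLLAMA=True); the warning below fires when the hardware qualifies but the
# kernels are missing.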
log_once(logger.warning, 'Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True') use_exllama = False else: log_once(logger.info, f'Using exllama kernels v{HAS_EXLLAMA}') if use_exllama and self.groupsize != -1: qzeros = weights.get_sharded(f'{prefix}.qzeros', dim=0) scales = weights.get_sharded(f'{prefix}.scales', dim=0) else: qzeros = weights.get_tensor(f'{prefix}.qzeros') scales = weights.get_tensor(f'{prefix}.scales') if use_exllama and g_idx is not None: g_idx = g_idx - g_idx[0] if self.quantize == 'gptq' and self.quant_method == 'awq': log_once(logger.info, 'Converting AWQ model to Exllama/GPTQ packing format.') from text_generation_server.layers.awq.conversion_utils import fast_awq_to_gptq (qweight, qzeros) = fast_awq_to_gptq(qweight, qzeros) if use_exllama: g_idx = None else: g_idx = (torch.arange(qweight.shape[0] * (32 // self.bits), device=qweight.device) // self.groupsize).to(dtype=torch.int32) return GPTQWeight(qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, bits=self.bits, groupsize=self.groupsize, use_awq_kernel=self.quantize == 'awq', use_exllama=use_exllama) def _get_gptq_params(self, weights: Weights): if weights._has_tensor('gptq_bits') and weights._has_tensor('gptq_groupsize'): self.bits = weights.get_tensor('gptq_bits').item() self.groupsize = weights.get_tensor('gptq_groupsize').item() self.desc_act = False self.sym = weights.get_tensor('gptq_sym').item() if weights._has_tensor('gptq_sym') else False self.quant_method = 'gptq' try: (major, _minor) = torch.cuda.get_device_capability() except Exception: major = 1 HAS_EXLLAMA = False CAN_EXLLAMA = major >= 8 or SYSTEM == 'rocm' V2 = os.getenv('EXLLAMA_VERSION', '2') == '2' if os.getenv('DISABLE_EXLLAMA') == 'True': HAS_EXLLAMA = False elif CAN_EXLLAMA: try: if V2: from text_generation_server.layers.gptq.exllamav2 import QuantLinear as ExllamaQuantLinear, create_exllama_buffers, set_device HAS_EXLLAMA = '2' else: from text_generation_server.layers.gptq.exllama import Ex4bitLinear as ExllamaQuantLinear, create_exllama_buffers, set_device HAS_EXLLAMA = '1' except ImportError: pass # File: text-generation-inference-main/server/text_generation_server/layers/gptq/custom_autotune.py """""" import builtins import math import time from typing import Dict import triton class Autotuner(triton.KernelInterface): def __init__(self, fn, arg_names, configs, key, reset_to_zero, prune_configs_by: Dict=None, nearest_power_of_two: bool=False): if not configs: self.configs = [triton.Config({}, num_warps=4, num_stages=2)] else: self.configs = configs self.key_idx = [arg_names.index(k) for k in key] self.nearest_power_of_two = nearest_power_of_two self.cache = {} self.hook = lambda args: 0 if reset_to_zero is not None: self.reset_idx = [arg_names.index(k) for k in reset_to_zero] def _hook(args): for i in self.reset_idx: args[i].zero_() self.hook = _hook self.arg_names = arg_names if prune_configs_by: (perf_model, top_k) = (prune_configs_by['perf_model'], prune_configs_by['top_k']) if 'early_config_prune' in prune_configs_by: early_config_prune = prune_configs_by['early_config_prune'] else: (perf_model, top_k, early_config_prune) = (None, None, None) (self.perf_model, self.configs_top_k) = (perf_model, top_k) self.early_config_prune = early_config_prune self.fn = fn def _bench(self, *args, config, **meta): conflicts = meta.keys() & config.kwargs.keys() if conflicts: raise ValueError(f"Conflicting meta-parameters: {', '.join(conflicts)}. 
Make sure that you don't re-define auto-tuned symbols.") current = dict(meta, **config.kwargs) def kernel_call(): if config.pre_hook: config.pre_hook(self.nargs) self.hook(args) self.fn.run(*args, num_warps=config.num_warps, num_stages=config.num_stages, **current) try: return triton.testing.do_bench(kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40) except triton.OutOfResources: return [float('inf'), float('inf'), float('inf')] def run(self, *args, **kwargs): self.nargs = dict(zip(self.arg_names, args)) if len(self.configs) > 1: key = tuple((args[i] for i in self.key_idx)) if self.nearest_power_of_two: key = tuple([2 ** int(math.log2(x) + 0.5) for x in key]) if key not in self.cache: pruned_configs = self.prune_configs(kwargs) bench_start = time.time() timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs} bench_end = time.time() self.bench_time = bench_end - bench_start self.cache[key] = builtins.min(timings, key=timings.get) self.hook(args) self.configs_timings = timings config = self.cache[key] else: config = self.configs[0] self.best_config = config if config.pre_hook is not None: config.pre_hook(self.nargs) return self.fn.run(*args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs) def prune_configs(self, kwargs): pruned_configs = self.configs if self.early_config_prune: pruned_configs = self.early_config_prune(self.configs, self.nargs) if self.perf_model: top_k = self.configs_top_k if isinstance(top_k, float) and top_k <= 1.0: top_k = int(len(self.configs) * top_k) if len(pruned_configs) > top_k: est_timing = {config: self.perf_model(**self.nargs, **kwargs, **config.kwargs, num_stages=config.num_stages, num_warps=config.num_warps) for config in pruned_configs} pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[:top_k] return pruned_configs def warmup(self, *args, **kwargs): self.nargs = dict(zip(self.arg_names, args)) for config in self.prune_configs(kwargs): self.fn.warmup(*args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs) self.nargs = None def autotune(configs, key, prune_configs_by=None, reset_to_zero=None, nearest_power_of_two=False): def decorator(fn): return Autotuner(fn, fn.arg_names, configs, key, reset_to_zero, prune_configs_by, nearest_power_of_two) return decorator def matmul248_kernel_config_pruner(configs, nargs): m = max(2 ** int(math.ceil(math.log2(nargs['M']))), 16) n = max(2 ** int(math.ceil(math.log2(nargs['N']))), 16) k = max(2 ** int(math.ceil(math.log2(nargs['K']))), 16) used = set() for config in configs: block_size_m = min(m, config.kwargs['BLOCK_SIZE_M']) block_size_n = min(n, config.kwargs['BLOCK_SIZE_N']) block_size_k = min(k, config.kwargs['BLOCK_SIZE_K']) group_size_m = config.kwargs['GROUP_SIZE_M'] if (block_size_m, block_size_n, block_size_k, group_size_m, config.num_stages, config.num_warps) in used: continue used.add((block_size_m, block_size_n, block_size_k, group_size_m, config.num_stages, config.num_warps)) yield triton.Config({'BLOCK_SIZE_M': block_size_m, 'BLOCK_SIZE_N': block_size_n, 'BLOCK_SIZE_K': block_size_k, 'GROUP_SIZE_M': group_size_m}, num_stages=config.num_stages, num_warps=config.num_warps) # File: text-generation-inference-main/server/text_generation_server/layers/gptq/exllama.py from text_generation_server.layers.gptq import GPTQWeight import torch from exllama_kernels import make_q4, q4_matmul, prepare_buffers, set_tuning_params none_tensor = torch.empty((1, 1), device='meta') def ext_make_q4(qweight, 
qzeros, scales, g_idx, device): return make_q4(qweight, qzeros, scales, g_idx if g_idx is not None else none_tensor, device) def ext_q4_matmul(x, q4, q4_width): outshape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.float16, device=x.device) q4_matmul(x, q4, output) return output.view(outshape) MAX_DQ = 1 MAX_INNER = 1 ACT_ORDER = False DEVICE = None TEMP_STATE = None TEMP_DQ = None def set_device(device): global DEVICE DEVICE = device def create_exllama_buffers(max_total_tokens: int): global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE, TEMP_STATE, TEMP_DQ assert DEVICE is not None, 'call set_device first' if not ACT_ORDER: max_total_tokens = 1 temp_state = torch.zeros((max_total_tokens, MAX_INNER), dtype=torch.float16, device=DEVICE) temp_dq = torch.zeros((1, MAX_DQ), dtype=torch.float16, device=DEVICE) prepare_buffers(DEVICE, temp_state, temp_dq) matmul_recons_thd = 8 matmul_fused_remap = False matmul_no_half2 = False set_tuning_params(matmul_recons_thd, matmul_fused_remap, matmul_no_half2) (TEMP_STATE, TEMP_DQ) = (temp_state, temp_dq) class Ex4bitLinear(torch.nn.Module): def __init__(self, weight: GPTQWeight, bias): super().__init__() global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE assert weight.bits == 4 self.device = weight.qweight.device self.qweight = weight.qweight self.qzeros = weight.qzeros self.scales = weight.scales self.g_idx = weight.g_idx.cpu() if weight.g_idx is not None else None self.bias = bias if bias is not None else None if self.g_idx is not None and ((self.g_idx == 0).all() or torch.equal(weight.g_idx.cpu(), torch.tensor([i // weight.groupsize for i in range(weight.g_idx.shape[0])], dtype=torch.int32))): self.empty_g_idx = True self.g_idx = None assert self.device.type == 'cuda' assert self.device.index is not None self.q4 = ext_make_q4(self.qweight, self.qzeros, self.scales, self.g_idx, self.device.index) self.height = weight.qweight.shape[0] * 8 self.width = weight.qweight.shape[1] self.groupsize = None if self.qzeros.shape[0] > 1: self.groupsize = self.qweight.shape[0] * 8 // self.qzeros.shape[0] if self.groupsize is not None: assert weight.groupsize == self.groupsize if self.g_idx is not None: if self.groupsize is None: raise ValueError('Found group index but no groupsize. 
This configuration is not supported.') self.act_order = True else: self.act_order = False DEVICE = self.qweight.device MAX_DQ = max(MAX_DQ, self.qweight.numel() * 8) if self.act_order: MAX_INNER = max(MAX_INNER, self.height, self.width) ACT_ORDER = True def forward(self, x): out = ext_q4_matmul(x, self.q4, self.width) if self.bias is not None: out.add_(self.bias) return out # File: text-generation-inference-main/server/text_generation_server/layers/gptq/exllamav2.py from dataclasses import dataclass from typing import Optional import torch import torch.nn as nn from loguru import logger from text_generation_server.layers.exl2 import Exl2Weight from text_generation_server.layers.gptq import GPTQWeight from text_generation_server.utils.log import log_master try: from exllamav2.ext import exllamav2_ext make_q_matrix = exllamav2_ext.make_q_matrix gemm_half_q_half = exllamav2_ext.gemm_half_q_half except ImportError: log_master(logger.warning, 'exllamav2_kernels not installed.') raise none_tensor = torch.empty((1, 1), device='meta') @dataclass class _ExtraTensors: q_group_map: Optional[torch.Tensor] = None q_invperm: Optional[torch.Tensor] = None q_perm: Optional[torch.Tensor] = None def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): output_shape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device) gemm_half_q_half(x, q_handle, output, force_cuda) return output.view(output_shape) def make_group_map(q_groups: torch.Tensor, num_qrows: int): gr = q_groups.tolist() group_map = [] num_groups = len(gr) // 2 for i in range(num_groups): bits = gr[i * 2] if i < num_groups - 1: qrows = gr[i * 2 + 3] - gr[i * 2 + 1] else: qrows = num_qrows - gr[i * 2 + 1] rows = qrows * 32 // bits for j in range(rows): group_map += [i] group_map += [rows - j] return torch.tensor(group_map, dtype=torch.short, device=q_groups.device) def ext_make_q_matrix(w: Exl2Weight | GPTQWeight, extra: _ExtraTensors, temp_dq, key: Optional[str]=None): max_dq_rows = 0 if isinstance(w, Exl2Weight): extra.q_group_map = make_group_map(w.q_groups, w.q_weight.shape[0]) extra.q_perm = torch.argsort(w.q_invperm).short() return make_q_matrix(w.q_weight, extra.q_perm, w.q_invperm, w.q_scale, w.q_scale_max, w.q_groups, extra.q_group_map, none_tensor, none_tensor, none_tensor, none_tensor, temp_dq, max_dq_rows) elif isinstance(w, GPTQWeight): if w.scales.dtype == torch.float: w.scales = w.scales.half() if w.g_idx is not None and (not (w.g_idx == 0).all().item()): extra.q_perm = torch.empty((w.qweight.shape[0] * 8,), dtype=torch.short, device=w.qweight.device) extra.q_invperm = torch.empty_like(extra.q_perm) return make_q_matrix(w.qweight, extra.q_perm, extra.q_invperm, none_tensor, none_tensor, none_tensor, none_tensor, w.qzeros, w.scales, w.g_idx.cpu(), none_tensor, temp_dq, max_dq_rows) else: return make_q_matrix(w.qweight, none_tensor, none_tensor, none_tensor, none_tensor, none_tensor, none_tensor, w.qzeros, w.scales, none_tensor, none_tensor, temp_dq, max_dq_rows) else: raise RuntimeError('Cannot create handle') DEVICE = None LAYERS = [] def set_device(device): global DEVICE DEVICE = device def create_exllama_buffers(max_total_tokens: int): global LAYERS, DEVICE if len(LAYERS) == 0: return scratch_bytes = max((layer.scratch_space_fixed(max_input_len=max_total_tokens, max_batch_size=1) for layer in LAYERS)) temp_dq = ExLlamaV2DeviceTensors(DEVICE, scratch_bytes) for layer in LAYERS: layer.post_init(temp_dq) class QuantLinear(nn.Module): QUANT_TYPE = 'exllamav2' '' def __init__(self, 
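# --- Editor's illustrative sketch (not part of the original sources) ---
# Expected initialization order shared by exllama.py and exllamav2.py above: pick a device,
# build the QuantLinear layers (each v2 layer registers itself in LAYERS), then size the
# scratch buffers once all layers exist. `gptq_weights` and `max_total_tokens` are
# placeholders supplied by the caller; the weights must be 4-bit GPTQWeight instances on CUDA.
import torch
from text_generation_server.layers.gptq.exllamav2 import (
    QuantLinear, create_exllama_buffers, set_device)

def _init_exllama_layers(gptq_weights, max_total_tokens: int):
    # Hypothetical helper mirroring what the server does after loading quantized weights.
    set_device(torch.device('cuda:0'))
    layers = [QuantLinear(w, bias=None) for w in gptq_weights]  # each appends itself to LAYERS
    create_exllama_buffers(max_total_tokens)  # allocates scratch space and calls post_init()
    return layers
# --- End of editor's sketch ---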
weight: Exl2Weight | GPTQWeight, bias: torch.Tensor): super().__init__() self.q_handle = None self.q_tensors = weight self.extra_tensors = _ExtraTensors() if isinstance(weight, Exl2Weight): self.infeatures = weight.q_invperm.shape[0] self.outfeatures = weight.q_weight.shape[1] elif isinstance(weight, GPTQWeight): if weight.bits != 4: raise ValueError(f'Exllamav2 kernel supports only bits=4, requested bits={weight.bits}. Something is wrong in the model initialization.') self.infeatures = weight.qweight.shape[0] // weight.bits * 32 self.outfeatures = weight.qweight.shape[1] self.padding = -self.outfeatures % 32 self.outfeatures = self.outfeatures + self.padding self.device = weight.device self.bias = bias if bias is not None else None global LAYERS LAYERS.append(self) def post_init(self, temp_dq): device = self.q_tensors.device assert device.type == 'cuda' assert device.index is not None temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size()) self.temp_dq = temp_dq self.q_handle = ext_make_q_matrix(self.q_tensors, self.extra_tensors, temp_dq) def forward(self, x, force_cuda=False): output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda) if self.bias is not None: output.add_(self.bias) return output def temp_dq_size(self): return self.infeatures * self.outfeatures * 2 + 128 def temp_fwd_size(self, max_input_len, max_batch_size): return self.outfeatures * max_input_len * max_batch_size * 4 + 128 def scratch_space_fixed(self, max_input_len, max_batch_size): return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size) class ExLlamaV2DeviceTensors: device_idx: int scratch_bytes: int scratch_idx: int scratch: torch.tensor = None def __init__(self, device, scratch_bytes): self.device = device self.scratch_bytes = scratch_bytes def prepare(self): self.scratch = torch.empty((self.scratch_bytes // 2,), dtype=torch.half, device=self.device) def get_scratch_slice(self, size_bytes): if self.scratch is None: self.prepare() size_bytes = (size_bytes + 127) // 128 * 128 size_half = size_bytes // 2 scratch_slice = self.scratch.narrow(0, 0, size_half) return scratch_slice # File: text-generation-inference-main/server/text_generation_server/layers/gptq/quant_linear.py import math import numpy as np import torch import torch.nn as nn from torch.cuda.amp import custom_fwd import triton import triton.language as tl from . 
import custom_autotune @custom_autotune.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=2, num_warps=8), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=2, num_warps=4)], key=['M', 'N', 'K'], nearest_power_of_two=True, prune_configs_by={'early_config_prune': custom_autotune.matmul248_kernel_config_pruner, 'perf_model': None, 'top_k': None}) @triton.jit def matmul_248_kernel(a_ptr, b_ptr, c_ptr, scales_ptr, zeros_ptr, g_ptr, M, N, K, bits, maxq, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, stride_scales, stride_zeros, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): infearure_per_bits = 32 // bits pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_k = tl.cdiv(K, BLOCK_SIZE_K) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % group_size_m pid_n = pid % num_pid_in_group // group_size_m offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) a_mask = offs_am[:, None] < M b_ptrs = b_ptr + (offs_k[:, None] // infearure_per_bits * stride_bk + offs_bn[None, :] * stride_bn) g_ptrs = g_ptr + offs_k scales_ptrs = scales_ptr + offs_bn[None, :] zeros_ptrs = zeros_ptr + offs_bn[None, :] // infearure_per_bits shifter = offs_k % infearure_per_bits * bits zeros_shifter = offs_bn % infearure_per_bits * bits accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, num_pid_k): g_idx = tl.load(g_ptrs) scales = tl.load(scales_ptrs + g_idx[:, None] * stride_scales) zeros = tl.load(zeros_ptrs + g_idx[:, None] * stride_zeros) zeros = zeros >> zeros_shifter[None, :] & maxq zeros = zeros + 1 & maxq a = tl.load(a_ptrs, mask=a_mask, other=0.0) b = tl.load(b_ptrs) b = b >> shifter[:, None] & maxq b = (b - zeros) * scales accumulator += tl.dot(a, b) a_ptrs += BLOCK_SIZE_K b_ptrs += BLOCK_SIZE_K // infearure_per_bits * stride_bk g_ptrs += BLOCK_SIZE_K c_ptrs = c_ptr + stride_cm * offs_am[:, None] + stride_cn * offs_bn[None, :] c_mask = (offs_am[:, None] < M) & (offs_bn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) def matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq): with torch.cuda.device(input.device): output = torch.empty((input.shape[0], qweight.shape[1]), device=input.device, dtype=torch.float16) def grid(META): return 
(triton.cdiv(input.shape[0], META['BLOCK_SIZE_M']) * triton.cdiv(qweight.shape[1], META['BLOCK_SIZE_N']),) matmul_248_kernel[grid](input, qweight, output, scales, qzeros, g_idx, input.shape[0], qweight.shape[1], input.shape[1], bits, maxq, input.stride(0), input.stride(1), qweight.stride(0), qweight.stride(1), output.stride(0), output.stride(1), scales.stride(0), qzeros.stride(0)) return output class QuantLinearFunction(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward(ctx, input, qweight, scales, qzeros, g_idx, bits, maxq): output = matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq) return output class QuantLinear(nn.Module): def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize): super().__init__() self.register_buffer('qweight', qweight) self.register_buffer('qzeros', qzeros) self.register_buffer('scales', scales) self.register_buffer('g_idx', g_idx) if bias is not None: self.register_buffer('bias', bias) else: self.bias = None if bits not in [2, 4, 8]: raise NotImplementedError('Only 2,4,8 bits are supported.') self.bits = bits self.maxq = 2 ** self.bits - 1 self.groupsize = groupsize self.outfeatures = qweight.shape[1] self.infeatures = qweight.shape[0] * 32 // bits @classmethod def new(cls, bits, groupsize, infeatures, outfeatures, bias): if bits not in [2, 4, 8]: raise NotImplementedError('Only 2,4,8 bits are supported.') qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32) qzeros = torch.zeros((math.ceil(infeatures / groupsize), outfeatures // 32 * bits), dtype=torch.int32) scales = torch.zeros((math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16) g_idx = torch.tensor([i // groupsize for i in range(infeatures)], dtype=torch.int32) if bias: bias = torch.zeros(outfeatures, dtype=torch.float16) else: bias = None return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize) def pack(self, linear, scales, zeros, g_idx=None): self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx scales = scales.t().contiguous() zeros = zeros.t().contiguous() scale_zeros = zeros * scales self.scales = scales.clone().half() if linear.bias is not None: self.bias = linear.bias.clone().half() intweight = [] for idx in range(self.infeatures): intweight.append(torch.round((linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]]) / self.scales[self.g_idx[idx]]).to(torch.int)[:, None]) intweight = torch.cat(intweight, dim=1) intweight = intweight.t().contiguous() intweight = intweight.numpy().astype(np.uint32) qweight = np.zeros((intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32) i = 0 row = 0 while row < qweight.shape[0]: if self.bits in [2, 4, 8]: for j in range(i, i + 32 // self.bits): qweight[row] |= intweight[j] << self.bits * (j - i) i += 32 // self.bits row += 1 else: raise NotImplementedError('Only 2,4,8 bits are supported.') qweight = qweight.astype(np.int32) self.qweight = torch.from_numpy(qweight) zeros -= 1 zeros = zeros.numpy().astype(np.uint32) qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 32 * self.bits), dtype=np.uint32) i = 0 col = 0 while col < qzeros.shape[1]: if self.bits in [2, 4, 8]: for j in range(i, i + 32 // self.bits): qzeros[:, col] |= zeros[:, j] << self.bits * (j - i) i += 32 // self.bits col += 1 else: raise NotImplementedError('Only 2,4,8 bits are supported.') qzeros = qzeros.astype(np.int32) self.qzeros = torch.from_numpy(qzeros) def forward(self, x): out_shape = x.shape[:-1] + (self.outfeatures,) out = 
QuantLinearFunction.apply(x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, self.g_idx, self.bits, self.maxq) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape) # File: text-generation-inference-main/server/text_generation_server/layers/gptq/quantize.py import time import torch.nn as nn import math import json import os import torch import transformers from texttable import Texttable from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer from huggingface_hub import HfApi from accelerate import init_empty_weights from text_generation_server.utils import initialize_torch_distributed, Weights from text_generation_server.utils.hub import weight_files from text_generation_server.layers.gptq.quant_linear import QuantLinear from loguru import logger from typing import Optional from text_generation_server.layers.gptq.utils import torch_snr_error from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight DEV = torch.device('cuda:0') class Quantizer(nn.Module): def __init__(self, shape=1): super(Quantizer, self).__init__() self.register_buffer('maxq', torch.tensor(0)) self.register_buffer('scale', torch.zeros(shape)) self.register_buffer('zero', torch.zeros(shape)) def configure(self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8, trits=False): self.maxq = torch.tensor(2 ** bits - 1) self.perchannel = perchannel self.sym = sym self.mse = mse self.norm = norm self.grid = grid self.maxshrink = maxshrink if trits: self.maxq = torch.tensor(-1) self.scale = torch.zeros_like(self.scale) def _quantize(self, x, scale, zero, maxq): if maxq < 0: return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) return scale * (q - zero) def find_params(self, x, weight=False): dev = x.device self.maxq = self.maxq.to(dev) shape = x.shape if self.perchannel: if weight: x = x.flatten(1) else: if len(shape) == 4: x = x.permute([1, 0, 2, 3]) x = x.flatten(1) if len(shape) == 3: x = x.reshape((-1, shape[-1])).t() if len(shape) == 2: x = x.t() else: x = x.flatten().unsqueeze(0) tmp = torch.zeros(x.shape[0], device=dev) xmin = torch.minimum(x.min(1)[0], tmp) xmax = torch.maximum(x.max(1)[0], tmp) if self.sym: xmax = torch.maximum(torch.abs(xmin), xmax) tmp = xmin < 0 if torch.any(tmp): xmin[tmp] = -xmax[tmp] tmp = (xmin == 0) & (xmax == 0) xmin[tmp] = -1 xmax[tmp] = +1 if self.maxq < 0: self.scale = xmax self.zero = xmin else: self.scale = (xmax - xmin) / self.maxq if self.sym: self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) else: self.zero = torch.round(-xmin / self.scale) if self.mse: best = torch.full([x.shape[0]], float('inf'), device=dev) for i in range(int(self.maxshrink * self.grid)): p = 1 - i / self.grid xmin1 = p * xmin xmax1 = p * xmax scale1 = (xmax1 - xmin1) / self.maxq zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero q = self._quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq) q -= x q.abs_() q.pow_(self.norm) err = torch.sum(q, 1) tmp = err < best if torch.any(tmp): best[tmp] = err[tmp] self.scale[tmp] = scale1[tmp] self.zero[tmp] = zero1[tmp] if not self.perchannel: if weight: tmp = shape[0] else: tmp = shape[1] if len(shape) != 3 else shape[2] self.scale = self.scale.repeat(tmp) self.zero = self.zero.repeat(tmp) if weight: shape = [-1] + [1] * (len(shape) - 1) self.scale = self.scale.reshape(shape) self.zero = self.zero.reshape(shape) return if len(shape) == 4: 
self.scale = self.scale.reshape((1, -1, 1, 1)) self.zero = self.zero.reshape((1, -1, 1, 1)) if len(shape) == 3: self.scale = self.scale.reshape((1, 1, -1)) self.zero = self.zero.reshape((1, 1, -1)) if len(shape) == 2: self.scale = self.scale.unsqueeze(0) self.zero = self.zero.unsqueeze(0) def quantize(self, x): if self.ready(): return self._quantize(x, self.scale, self.zero, self.maxq) return x def enabled(self): return self.maxq > 0 def ready(self): return torch.all(self.scale != 0) class GPTQ: def __init__(self, layer, observe=False): self.layer = layer self.dev = self.layer.weight.device W = layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() self.rows = W.shape[0] self.columns = W.shape[1] self.H = torch.zeros((self.columns, self.columns), device=self.dev) self.nsamples = 0 self.quantizer = Quantizer() self.observe = observe def add_batch(self, inp, out): if self.observe: self.inp1 = inp self.out1 = out else: self.inp1 = None self.out1 = None if len(inp.shape) == 2: inp = inp.unsqueeze(0) tmp = inp.shape[0] if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D): if len(inp.shape) == 3: inp = inp.reshape((-1, inp.shape[-1])) inp = inp.t() if isinstance(self.layer, nn.Conv2d): unfold = nn.Unfold(self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride) inp = unfold(inp) inp = inp.permute([1, 0, 2]) inp = inp.flatten(1) self.H *= self.nsamples / (self.nsamples + tmp) self.nsamples += tmp inp = math.sqrt(2 / self.nsamples) * inp.float() self.H += inp.matmul(inp.t()) def print_loss(self, name, q_weight, weight_error, timecost): table = Texttable() length = 28 name = name + ' ' * (length - len(name)) if len(name) <= length else name[:length] table.header(['name', 'weight_error', 'fp_inp_SNR', 'q_inp_SNR', 'time']) self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype) if self.inp1 is not None: quantizer = Quantizer() quantizer.configure(8, perchannel=False, sym=True, mse=False) quantizer.find_params(self.inp1) q_in = quantizer.quantize(self.inp1).type(torch.float16) q_out = self.layer(q_in) q_SNR = torch_snr_error(q_out, self.out1).item() fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item() else: q_SNR = '-' fp_SNR = '-' table.add_row([name, weight_error, fp_SNR, q_SNR, timecost]) print(table.draw().split('\n')[-2]) def fasterquant(self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name=''): self.layer.to(self.dev) W = self.layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() W = W.float() tick = time.time() if not self.quantizer.ready(): self.quantizer.find_params(W, weight=True) H = self.H if not self.observe: del self.H dead = torch.diag(H) == 0 H[dead, dead] = 1 W[:, dead] = 0 if act_order: perm = torch.argsort(torch.diag(H), descending=True) W = W[:, perm] H = H[perm][:, perm] Losses = torch.zeros_like(W) Q = torch.zeros_like(W) damp = percdamp * torch.mean(torch.diag(H)) diag = torch.arange(self.columns, device=self.dev) H[diag, diag] += damp H = torch.linalg.cholesky(H) H = torch.cholesky_inverse(H) try: H = torch.linalg.cholesky(H, upper=True) except Exception: H = torch.linalg.cholesky(H + 1e-05 * torch.eye(H.shape[0]).to(H.device), upper=True) Hinv = H g_idx = [] scale = [] zero = [] now_idx = 1 for i1 in range(0, self.columns, blocksize): i2 = min(i1 + 
blocksize, self.columns) count = i2 - i1 W1 = W[:, i1:i2].clone() Q1 = torch.zeros_like(W1) Err1 = torch.zeros_like(W1) Losses1 = torch.zeros_like(W1) Hinv1 = Hinv[i1:i2, i1:i2] for i in range(count): w = W1[:, i] d = Hinv1[i, i] if groupsize != -1: if (i1 + i) % groupsize == 0: self.quantizer.find_params(W[:, i1 + i:i1 + i + groupsize], weight=True) if (i1 + i) // groupsize - now_idx == -1: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) now_idx += 1 q = self.quantizer.quantize(w.unsqueeze(1)).flatten() Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d ** 2 err1 = (w - q) / d W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) Err1[:, i] = err1 Q[:, i1:i2] = Q1 Losses[:, i1:i2] = Losses1 / 2 W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) torch.cuda.synchronize() error = torch.sum(Losses).item() groupsize = groupsize if groupsize != -1 else self.columns g_idx = [i // groupsize for i in range(self.columns)] g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device) if act_order: invperm = torch.argsort(perm) Q = Q[:, invperm] g_idx = g_idx[invperm] if isinstance(self.layer, transformers.Conv1D): Q = Q.t() self.print_loss(name=name, q_weight=Q, weight_error=error, timecost=time.time() - tick) if scale == []: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) scale = torch.cat(scale, dim=1) zero = torch.cat(zero, dim=1) return (scale, zero, g_idx, error) def free(self): self.inp1 = None self.out1 = None self.H = None self.Losses = None self.Trace = None torch.cuda.empty_cache() def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train') testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test') try: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=trust_remote_code) except Exception: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=trust_remote_code) trainenc = tokenizer('\n\n'.join(traindata['text']), return_tensors='pt') testenc = tokenizer('\n\n'.join(testdata['text']), return_tensors='pt') import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return (trainloader, testenc) def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train') valdata = load_dataset('ptb_text_only', 'penn_treebank', split='validation') try: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=trust_remote_code) except Exception: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=trust_remote_code) trainenc = tokenizer('\n\n'.join(traindata['sentence']), return_tensors='pt') testenc = tokenizer('\n\n'.join(valdata['sentence']), return_tensors='pt') import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return (trainloader, testenc) def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset('allenai/c4', 'allenai--c4', data_files={'train': 
'en/c4-train.00000-of-01024.json.gz'}, split='train', use_auth_token=False) valdata = load_dataset('allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation', use_auth_token=False) try: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=trust_remote_code) except Exception: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=trust_remote_code) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]['text'], return_tensors='pt') if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) import random random.seed(0) valenc = [] for _ in range(256): while True: i = random.randint(0, len(valdata) - 1) tmp = tokenizer(valdata[i]['text'], return_tensors='pt') if tmp.input_ids.shape[1] >= seqlen: break i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1) j = i + seqlen valenc.append(tmp.input_ids[:, i:j]) valenc = torch.hstack(valenc) class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return (trainloader, valenc) def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train') testdata = load_dataset('ptb_text_only', 'penn_treebank', split='test') try: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=trust_remote_code) except Exception: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=trust_remote_code) trainenc = tokenizer(' '.join(traindata['sentence']), return_tensors='pt') testenc = tokenizer(' '.join(testdata['sentence']), return_tensors='pt') import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return (trainloader, testenc) def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset('allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train') valdata = load_dataset('allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation') try: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=trust_remote_code) except Exception: tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=trust_remote_code) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]['text'], return_tensors='pt') if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) valenc = tokenizer(' '.join(valdata[:1100]['text']), return_tensors='pt') valenc = valenc.input_ids[:, :256 * seqlen] class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = 
TokenizerWrapper(valenc) return (trainloader, valenc) def get_loaders(name, nsamples=128, seed=0, seqlen=2048, model_id='', trust_remote_code=False): if 'wikitext2' in name: return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code) if 'ptb' in name: if 'new' in name: return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code) if 'c4' in name: if 'new' in name: return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code) def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=''): if isinstance(module, layers) and 'lm_head' not in name: return {name: module} res = {} for (name1, child) in module.named_children(): res.update(find_layers(child, layers=layers, name=name + '.' + name1 if name != '' else name1)) return res @torch.no_grad() def sequential(model, dataloader, dev, nsamples, bits, groupsize, *, hooks, percdamp=0.01, sym: bool=False, act_order: bool=False): print('Starting ...') use_cache = model.config.use_cache model.config.use_cache = False try: layers = model.model.layers prefix = 'model.layers' except Exception: layers = model.transformer.h prefix = 'transformer.h' dtype = next(iter(model.parameters())).dtype inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev) cache = {'i': 0} extra = {} class Catcher(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, inp, **kwargs): inps[cache['i']] = inp cache['i'] += 1 extra.update(kwargs.copy()) raise ValueError layers[0] = Catcher(layers[0]) for batch in dataloader: try: model(batch[0].cuda()) except ValueError: pass layers[0] = layers[0].module torch.cuda.empty_cache() for hook in hooks: hook.remove() outs = torch.zeros_like(inps) extra = {k: v.to(dev) if isinstance(v, torch.Tensor) else v for (k, v) in extra.items()} print('Ready.') quantizers = {} for i in range(len(layers)): print(f'Quantizing layer {i + 1}/{len(layers)}..') print('+------------------+--------------+------------+-----------+-------+') print('| name | weight_error | fp_inp_SNR | q_inp_SNR | time |') print('+==================+==============+============+===========+=======+') layer = layers[i] layer.load() full = find_layers(layer) sequential = [list(full.keys())] for names in sequential: subset = {n: full[n] for n in names} gptq = {} for name in subset: gptq[name] = GPTQ(subset[name]) gptq[name].quantizer.configure(bits, perchannel=True, sym=sym, mse=False) pass def add_batch(name): nonlocal gptq def tmp(_, inp, out): gptq[name].add_batch(inp[0].data, out.data) return tmp handles = [] for name in subset: handles.append(subset[name].register_forward_hook(add_batch(name))) for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] for h in handles: h.remove() for name in subset: (scale, zero, g_idx, error) = gptq[name].fasterquant(percdamp=percdamp, groupsize=groupsize, act_order=act_order, name=name) quantizers[f'{prefix}.{i}.{name}'] = (gptq[name].quantizer.cpu(), scale.cpu(), zero.cpu(), g_idx.cpu(), bits, groupsize) gptq[name].free() for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] layer.unload() del layer del gptq torch.cuda.empty_cache() (inps, outs) = (outs, inps) print('+------------------+--------------+------------+-----------+-------+') print('\n') model.config.use_cache = use_cache return quantizers def make_quant_linear(module, names, bits, groupsize, 
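# --- Editor's illustrative sketch (not part of the original sources) ---
# The calibration-and-packing flow implemented by get_loaders/sequential above and pack()
# just below, condensed into one hypothetical helper. It mirrors the quantize() entry point
# later in this file; `model`, `model_id`, and `hooks` are placeholders prepared exactly as
# quantize() prepares them (meta-initialized model with model.seqlen set, plus load hooks).
import torch
from text_generation_server.layers.gptq.quantize import get_loaders, sequential, pack

def _gptq_quantize_sketch(model, model_id: str, hooks, bits: int = 4, groupsize: int = 128):
    dataloader, _testloader = get_loaders(
        'wikitext2', nsamples=128, seed=0, seqlen=model.seqlen,
        model_id=model_id, trust_remote_code=False)
    quantizers = sequential(
        model, dataloader, torch.device('cuda:0'), 128, bits, groupsize,
        hooks=hooks, percdamp=0.01, sym=False, act_order=False)
    return pack(model, quantizers, bits, groupsize)
# --- End of editor's sketch ---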
name=''): if isinstance(module, QuantLinear): return for attr in dir(module): tmp = getattr(module, attr) name1 = name + '.' + attr if name != '' else attr if name1 in names: delattr(module, attr) setattr(module, attr, QuantLinear.new(bits, groupsize, tmp.in_features, tmp.out_features, tmp.bias is not None)) for (name1, child) in module.named_children(): make_quant_linear(child, names, bits, groupsize, name + '.' + name1 if name != '' else name1) def pack(model, quantizers, bits, groupsize): layers = find_layers(model) layers = {n: layers[n] for n in quantizers} make_quant_linear(model, quantizers, bits, groupsize) qlayers = find_layers(model, (QuantLinear,)) print('Packing ...') for name in qlayers: print(name) (quantizers[name], scale, zero, g_idx, _, _) = quantizers[name] qlayers[name].pack(layers[name], scale, zero, g_idx) print('Done.') return model def setdeepattr(module, full_name, tensor): current = module tokens = full_name.split('.') for token in tokens[:-1]: current = getattr(current, token) setattr(current, tokens[-1], tensor) def getdeepattr(module, full_name): current = module tokens = full_name.split('.') for token in tokens: current = getattr(current, token) return current def load_weights_pre_hook(module_name, weights, recursive=False): def inner(module, args): print(f'Pre hook {module_name}') local_params = {} for (k, v) in module.named_parameters(): if not recursive and k.count('.') != 1: continue local_params[k] = v for (k, v) in module.named_buffers(): if not recursive and k.count('.') != 1: continue local_params[k] = v for local_param in local_params: current_tensor = getdeepattr(module, local_param) if current_tensor.device == torch.device('meta'): if module_name: tensor_name = f'{module_name}.{local_param}' else: tensor_name = local_param tensor = weights.get_tensor(tensor_name) setdeepattr(module, local_param, nn.Parameter(tensor)) else: tensor = current_tensor.to(device=torch.device('cuda:0')) if current_tensor.requires_grad: tensor = nn.Parameter(tensor) setdeepattr(module, local_param, tensor) return inner def load_weights_post_hook(module_name, weights, recursive=False): def inner(module, args, output): print(f'Post hook {module_name}') local_params = {} for (k, v) in module.named_parameters(): if not recursive and k.count('.') != 1: continue local_params[k] = v for (k, v) in module.named_buffers(): if not recursive and k.count('.') != 1: continue local_params[k] = v for local_param in local_params: current_tensor = getdeepattr(module, local_param) setdeepattr(module, local_param, nn.Parameter(current_tensor.to(device=torch.device('cpu')))) return output return inner def quantize(model_id: str, bits: int, groupsize: int, output_dir: str, revision: str, trust_remote_code: bool, upload_to_model_id: Optional[str], percdamp: float, act_order: bool, sym: bool): print('loading model') config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code) with init_empty_weights(): model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code) model = model.eval() print('LOADED model') files = weight_files(model_id, revision, extension='.safetensors') (process_group, _, _) = initialize_torch_distributed() weights = Weights(files, device=torch.device('cuda:0'), dtype=torch.float16, process_group=process_group, aliases={'embed_tokens.weight': ['lm_head.weight']}, weights_loader=DefaultWeightsLoader(UnquantizedWeight)) hooks = [] for (name, module) in model.named_modules(): def load(module, name): def _load(): 
load_weights_pre_hook(name, weights, recursive=True)(module, None) return _load def unload(module, name): def _unload(): load_weights_post_hook(name, weights, recursive=True)(module, None, None) return _unload module.load = load(module, name) module.unload = unload(module, name) hooks.append(module.register_forward_pre_hook(load_weights_pre_hook(name, weights))) hooks.append(module.register_forward_hook(load_weights_post_hook(name, weights))) model.seqlen = 2048 dataset = 'wikitext2' nsamples = 128 seed = None (dataloader, testloader) = get_loaders(dataset, nsamples=nsamples, seed=seed, model_id=model_id, seqlen=model.seqlen, trust_remote_code=trust_remote_code) tick = time.time() quantizers = sequential(model, dataloader, DEV, nsamples, bits, groupsize, percdamp=percdamp, act_order=act_order, hooks=hooks, sym=sym) print(time.time() - tick) pack(model, quantizers, bits, groupsize) from safetensors.torch import save_file from transformers.modeling_utils import shard_checkpoint state_dict = model.state_dict() state_dict = {k: v.cpu().contiguous() for (k, v) in state_dict.items()} max_shard_size = '10GB' (shards, index) = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name='model.safetensors') os.makedirs(output_dir, exist_ok=True) for (shard_file, shard) in shards.items(): save_file(shard, os.path.join(output_dir, shard_file), metadata={'format': 'pt', 'quantized': 'gptq', 'origin': 'text-generation-inference'}) if index is None: path_to_weights = os.path.join(output_dir, 'model.safetensors') logger.info(f'Model weights saved in {path_to_weights}') else: save_index_file = 'model.safetensors.index.json' save_index_file = os.path.join(output_dir, save_index_file) with open(save_index_file, 'w', encoding='utf-8') as f: content = json.dumps(index, indent=2, sort_keys=True) + '\n' f.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split into {len(shards)} checkpoint shards. You can find where each parameter has been saved in the index located at {save_index_file}.') config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code) config.quantization_config = {'bits': bits, 'group_size': groupsize, 'damp_percent': percdamp, 'desc_act': act_order, 'static_groups': False, 'sym': sym, 'quant_method': 'gptq'} config.save_pretrained(output_dir) logger.info('Saved config') logger.info('Saving tokenizer') tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=trust_remote_code) tokenizer.save_pretrained(output_dir) logger.info('Saved tokenizer') if upload_to_model_id: api = HfApi() api.upload_folder(folder_path=output_dir, repo_id=upload_to_model_id, repo_type='model') # File: text-generation-inference-main/server/text_generation_server/layers/gptq/utils.py import torch def torch_snr_error(y_pred: torch.Tensor, y_real: torch.Tensor, reduction: str='mean') -> torch.Tensor: if y_pred.shape != y_real.shape: raise ValueError(f'Cannot compute SNR loss for tensors with different shapes. 
({y_pred.shape} and {y_real.shape})') reduction = str(reduction).lower() if y_pred.ndim == 1: y_pred = y_pred.unsqueeze(0) y_real = y_real.unsqueeze(0) y_pred = y_pred.flatten(start_dim=1) y_real = y_real.flatten(start_dim=1) noise_power = torch.pow(y_pred - y_real, 2).sum(dim=-1) signal_power = torch.pow(y_real, 2).sum(dim=-1) snr = noise_power / (signal_power + 1e-07) if reduction == 'mean': return torch.mean(snr) elif reduction == 'sum': return torch.sum(snr) elif reduction == 'none': return snr else: raise ValueError('Unsupported reduction method.') # File: text-generation-inference-main/server/text_generation_server/layers/layernorm.py import torch from torch import nn from accelerate import init_empty_weights from text_generation_server.utils.import_utils import SYSTEM @classmethod def load_layer_norm(cls, prefix, weights, eps): weight = weights.get_tensor(f'{prefix}.weight') bias = weights.get_tensor(f'{prefix}.bias') with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = torch.nn.Parameter(bias) return ln @classmethod def load_layer_norm_no_bias(cls, prefix, weights, eps): weight = weights.get_tensor(f'{prefix}.weight') with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = None return ln torch.nn.LayerNorm.load = load_layer_norm torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias if SYSTEM == 'cuda': import dropout_layer_norm class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual residual = hidden_states return (super(FastLayerNorm, self).forward(hidden_states), residual) else: (normed_hidden_states, residual, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, residual, self.weight, self.bias, None, None, None, None, 0.0, self.eps, 1.0, 0, None, False, False) if residual is None: residual = hidden_states return (normed_hidden_states, residual) elif SYSTEM == 'rocm': from vllm._C import ops class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states return (super().forward(hidden_states), residual) elif SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): out = ipex.llm.functional.add_layer_norm(residual, hidden_states, self.weight, self.bias, self.eps, residual is not None) return (out, residual if residual is not None else hidden_states) class FastRMSNorm(nn.Module): def __init__(self, weight: torch.Tensor, eps: float): super().__init__() self.weight = nn.Parameter(weight) self.variance_epsilon = eps @classmethod def load(cls, prefix, weights, eps=1e-06): weight = weights.get_tensor(f'{prefix}.weight') return cls(weight, eps) def forward(self, hidden_states, residual=None): if SYSTEM == 'ipex': out = ipex.llm.functional.add_rms_norm(residual, hidden_states, self.weight, None, self.variance_epsilon, residual is not None) return (out, residual if residual is not None else hidden_states) elif hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = 
hidden_states.to(self.weight.dtype) return (self.weight * hidden_states, residual) elif SYSTEM == 'cuda': (normed_hidden_states, res, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, residual, self.weight, None, None, None, None, None, 0.0, self.variance_epsilon, 1.0, 0, None, False, True) if res is None: res = hidden_states return (normed_hidden_states, res) elif SYSTEM == 'rocm': if residual is not None: hidden_states += residual residual = hidden_states out = torch.empty_like(hidden_states) ops.rms_norm(out, hidden_states, self.weight.data, self.variance_epsilon) return (out, residual) else: raise ValueError('Your system does not seem to be supported. Please check your installation or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.') # File: text-generation-inference-main/server/text_generation_server/layers/linear.py import torch from text_generation_server.utils.import_utils import SYSTEM from torch.nn import functional as F if SYSTEM == 'rocm': try: from vllm import _custom_C except Exception as e: raise ImportError(f'Could not load `vllm._custom_C`. Full error: {e}') class FastLinear(torch.nn.Module): def __init__(self, weight, bias) -> None: super().__init__() self.weight = torch.nn.Parameter(weight, requires_grad=False) if bias is not None: self.bias = torch.nn.Parameter(bias, requires_grad=False) else: self.bias = None @classmethod def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_tensor(f'{prefix}.weight') if bias: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None return cls(weight, bias) def forward(self, input: torch.Tensor) -> torch.Tensor: return F.linear(input, self.weight, self.bias) class FastLinearROCm(torch.nn.Module): def __init__(self, weight, bias) -> None: super().__init__() self.weight = torch.nn.Parameter(weight) if bias is not None: self.bias = torch.nn.Parameter(bias) else: self.bias = None @classmethod def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_tensor(f'{prefix}.weight') if bias: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None return cls(weight, bias) def forward(self, inp: torch.Tensor) -> torch.Tensor: weight = self.weight bias = self.bias if SYSTEM == 'rocm' and inp.numel() // inp.shape[-1] == 1: batched = False inp_shape = inp.shape if inp.dim() == 3: inp = inp.view(-1, inp_shape[-1]) batched = True (m, k) = (weight.shape[0], inp_shape[1]) out = torch.empty(inp_shape[0], weight.shape[0], dtype=inp.dtype, device='cuda') if k == 8192 and (m == 1280 or m == 7168) or (k == 3584 and m == 8192): _custom_C.LLMM1(weight, inp, out, 8) elif k <= 8192 and k % 8 == 0 and (m % 4 == 0): _custom_C.LLMM1(weight, inp, out, 4) else: out = F.linear(inp, weight) if batched: out = out.view(*inp_shape[:-1], out.shape[-1]) if bias is not None: out = out + bias return out return F.linear(inp, self.weight, self.bias) def get_linear(weight, bias): if isinstance(weight, torch.Tensor): if SYSTEM == 'rocm': return FastLinearROCm(weight, bias) else: return FastLinear(weight, bias) return weight.get_linear(bias) # File: text-generation-inference-main/server/text_generation_server/layers/lora.py from typing import TYPE_CHECKING, Optional, List import torch import torch.distributed from torch import nn from torch.distributed import ProcessGroup from text_generation_server.utils.sgmv import add_lora_a_bgmv, add_lora_b_bgmv, has_sgmv, lora_a_sgmv_cutlass, lora_b_sgmv_cutlass, orient_for_rank if TYPE_CHECKING: from text_generation_server.adapters 
import AdapterBatchData from text_generation_server.adapters.lora import BatchLoraWeights class LoraLinear(nn.Module): def __init__(self, base_layer: nn.Module, layer_id: int, process_group: ProcessGroup): super().__init__() self.base_layer = base_layer self.layer_id = layer_id self.process_group = process_group def forward_layer_type(self, result: torch.Tensor, input: torch.Tensor, adapter_data: 'AdapterBatchData', layer_type: str, start_idx: int, end_idx: int) -> torch.Tensor: if adapter_data is None: return result data: Optional['BatchLoraWeights'] = adapter_data.data.get(layer_type) if has_sgmv() and data is not None and data.can_vectorize(self.process_group): if end_idx - start_idx != result.shape[1]: proj = torch.zeros_like(result[:, start_idx:end_idx]) else: proj = result for (r, rank_segments) in data.rank_data.items(): lora_a_ptr = rank_segments.lora_a_ptr lora_b_ptr = rank_segments.lora_b_ptr if lora_a_ptr is None or lora_b_ptr is None: raise ValueError('LoRA data is missing') if data.use_sgmv: v = lora_a_sgmv_cutlass(input, rank_segments.tmp_shrink, lora_a_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id, r) if self.process_group.size() > 1: v = self.collect_lora_a(v) lora_b_sgmv_cutlass(proj, v, rank_segments.tmp_expand, lora_b_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id) else: v = torch.zeros((input.size(0), r), dtype=input.dtype, device=input.device) add_lora_a_bgmv(v, input, lora_a_ptr, rank_segments.indices, self.layer_id) if self.process_group.size() > 1: v = self.collect_lora_a(v) add_lora_b_bgmv(proj, v, lora_b_ptr, rank_segments.indices, self.layer_id) if end_idx - start_idx != result.shape[1]: result[:, start_idx:end_idx] += proj else: for adapter_index in adapter_data.meta.adapter_set: if data is not None and data.has_adapter(adapter_index): adapter_mask = (adapter_data.meta.adapter_indices == adapter_index).to(input.dtype).view(-1, 1) layer_result = self.forward_lora(input, data, adapter_index, adapter_mask) result[:, start_idx:end_idx] += layer_result return result def forward_lora(self, input: torch.Tensor, data: 'BatchLoraWeights', adapter_index: int, adapter_mask: torch.Tensor) -> torch.Tensor: lora_a = data.lora_a[adapter_index][self.layer_id, :, :] lora_b = data.lora_b[adapter_index][self.layer_id, :, :] lora_a = orient_for_rank(lora_a, lora_b.size(0)) a_out = input @ lora_a if self.process_group.size() > 1: a_out = self.collect_lora_a(a_out) result = a_out @ lora_b * adapter_mask return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: raise NotImplementedError('Implemented in subclasses') class TensorParallelMultiAdapterLinear(LoraLinear): def __init__(self, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup): super().__init__(base_layer, layer_id, process_group) self.layer_names = layer_names self.sizes = sizes @classmethod def load(cls, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup): return TensorParallelMultiAdapterLinear(base_layer, layer_id, layer_names, sizes, process_group) def forward(self, input: torch.Tensor, adapter_data: 'AdapterBatchData') -> torch.Tensor: result = self.base_layer(input) if self.layer_names is None: return result prev_shape = result.shape is_3d = len(input.shape) >= 3 if is_3d: input = input.reshape(-1, input.shape[-1]) result = result.reshape(-1, result.shape[-1]) offset = 0 for (i, layer_name) in enumerate(self.layer_names): 
start_idx = offset // self.process_group.size() if self.sizes is not None: offset += self.sizes[i] end_idx = offset // self.process_group.size() else: end_idx = result.shape[1] result = self.forward_layer_type(result, input, adapter_data, layer_name, start_idx, end_idx) if is_3d: result = result.reshape(prev_shape) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: gathered_tensors = [torch.empty_like(a_out) for _ in range(self.process_group.size())] torch.distributed.all_gather(gathered_tensors, a_out) return torch.cat(gathered_tensors, dim=1) class TensorParallelAdapterRowLinear(LoraLinear): def __init__(self, base_layer, layer_id, layer_name, process_group): super().__init__(base_layer, layer_id, process_group) self.layer_name = layer_name @classmethod def load(cls, base_layer, layer_id, layer_name, process_group): return cls(base_layer, layer_id, layer_name, process_group) def forward(self, input: torch.Tensor, adapter_data: 'AdapterBatchData') -> torch.Tensor: result = self.base_layer(input) if self.layer_name is None: return result stride = result.shape[-1] // self.process_group.size() start_idx = self.process_group.rank() * stride end_idx = (self.process_group.rank() + 1) * stride self.forward_layer_type(result, input, adapter_data, self.layer_name, start_idx, end_idx) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: torch.distributed.all_reduce(a_out, group=self.process_group) return a_out # File: text-generation-inference-main/server/text_generation_server/layers/marlin/__init__.py from text_generation_server.layers.marlin.fp8 import GPTQMarlinFP8Linear from text_generation_server.layers.marlin.gptq import GPTQMarlinWeightsLoader, can_use_gptq_marlin, repack_gptq_for_marlin from text_generation_server.layers.marlin.marlin import MarlinWeightsLoader __all__ = ['GPTQMarlinFP8Linear', 'GPTQMarlinWeightsLoader', 'MarlinWeightsLoader', 'can_use_gptq_marlin', 'repack_gptq_for_marlin'] # File: text-generation-inference-main/server/text_generation_server/layers/marlin/fp8.py from typing import Optional import torch import torch.nn as nn from loguru import logger from text_generation_server.layers.fp8 import fp8_quantize from text_generation_server.layers.marlin.gptq import _check_valid_shape from text_generation_server.layers.marlin.util import _check_marlin_kernels, permute_scales from text_generation_server.utils.log import log_once try: import marlin_kernels except ImportError: marlin_kernels = None MARLIN_TILE_SIZE = 16 class GPTQMarlinFP8Linear(nn.Module): def __init__(self, qweight: torch.Tensor, scales: torch.Tensor, bias: Optional[torch.Tensor]) -> None: super().__init__() _check_marlin_kernels() assert marlin_kernels is not None log_once(logger.info, 'GPU does not support FP8, using Marlin FP8 kernel') scales = scales.unsqueeze(0) if scales.shape[1] == 1: (out_features, in_features) = qweight.shape scales = scales.repeat(1, out_features) (qweight, scales) = repack_fp8_for_marlin(qweight, scales) in_features = qweight.shape[0] * MARLIN_TILE_SIZE out_features = scales.shape[1] _check_valid_shape(in_features=in_features, out_features=out_features) self.qweight = qweight self.scales = scales self.bias = bias if bias is not None else None self.workspace = torch.zeros(out_features // 64 * 16, dtype=torch.int, device=qweight.device) @classmethod def from_unquant(cls, weight, bias, dtype): (qweight, scales) = fp8_quantize(weight) return cls(qweight=qweight, scales=scales.to(dtype), bias=bias) @classmethod def from_fp8(cls, weight, 
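# --- Editor's illustrative sketch (not part of the original sources) ---
# Quantizing an ordinary fp16 weight into the Marlin FP8 path via from_unquant() above.
# Requires a CUDA GPU with the marlin_kernels extension installed; the shapes below are
# arbitrary placeholders (the weight is laid out as (out_features, in_features)).
import torch
from text_generation_server.layers.marlin.fp8 import GPTQMarlinFP8Linear

weight = torch.randn(4096, 4096, dtype=torch.float16, device='cuda')
fp8_linear = GPTQMarlinFP8Linear.from_unquant(weight, bias=None, dtype=torch.float16)
y = fp8_linear(torch.randn(8, 4096, dtype=torch.float16, device='cuda'))  # -> (8, 4096)
# --- End of editor's sketch ---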
scale, _input_scale, bias, dtype): return cls(qweight=weight, scales=scale.to(dtype), bias=bias) def forward(self, A: torch.Tensor) -> torch.Tensor: assert marlin_kernels is not None A_flat = A.view(-1, A.shape[-1]) C = marlin_kernels.fp8_marlin_gemm(A_flat, self.qweight, self.scales, self.workspace, 8, A_flat.shape[0], self.scales.shape[1], A_flat.shape[1]) C = C.reshape(A.shape[:-1] + (self.scales.shape[1],)) if self.bias is not None: C += self.bias return C def pack_fp8_as_int32(fp8_tensor: torch.Tensor) -> torch.Tensor: assert fp8_tensor.dtype == torch.float8_e4m3fn if fp8_tensor.shape[0] % 4 != 0: raise ValueError(f'Leading tensor dimension is not divisible by 4: {fp8_tensor.shape[0]}') reshaped = fp8_tensor.reshape(-1, 4, *fp8_tensor.shape[1:]) byte_tensor = reshaped.view(torch.uint8) packed = torch.zeros(fp8_tensor.shape[0] // 4, fp8_tensor.shape[1], dtype=torch.int32, device=fp8_tensor.device) for i in range(4): packed.bitwise_or_(byte_tensor[:, i].to(torch.int32) << i * 8) return packed def repack_fp8_for_marlin(weight: torch.Tensor, scales: torch.Tensor): (out_features, in_features) = weight.shape qweight = pack_fp8_as_int32(weight.t()) perm = torch.empty(0, dtype=torch.int, device=qweight.device) repacked = marlin_kernels.gptq_marlin_repack(qweight, perm, in_features, out_features, 8) scales = permute_scales(scales) return (repacked, scales) # File: text-generation-inference-main/server/text_generation_server/layers/marlin/gptq.py from dataclasses import dataclass from typing import List, Optional, Union import numpy import torch import torch.nn as nn from loguru import logger from text_generation_server.layers.marlin.util import _check_marlin_kernels, marlin_zero_points, permute_scales, unpack_cols from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.log import log_once from text_generation_server.utils.weights import Weight, Weights, WeightsLoader try: import marlin_kernels except ImportError: marlin_kernels = None try: (major, _minor) = torch.cuda.get_device_capability() has_sm_8_0 = major >= 8 except Exception: has_sm_8_0 = False GPTQ_MARLIN_BITS = [4, 8] GPTQ_MARLIN_GROUP_SIZES = [-1, 32, 64, 128] MARLIN_TILE_SIZE = 16 def can_use_gptq_marlin(*, bits: int, groupsize: int, quant_method: str, quantize: str, sym: bool) -> bool: return SYSTEM == 'cuda' and marlin_kernels is not None and has_sm_8_0 and (quantize in {'awq', 'gptq'}) and (quant_method in {'awq', 'gptq'}) and (bits in GPTQ_MARLIN_BITS) and (groupsize in GPTQ_MARLIN_GROUP_SIZES) and (sym or quant_method == 'awq') class GPTQMarlinWeightsLoader(WeightsLoader): def __init__(self, *, bits: int, desc_act: bool, groupsize: int, quant_method: str, quantize: str, sym: bool): self.bits = bits self.desc_act = desc_act self.groupsize = groupsize self.quant_method = quant_method self.quantize = quantize self.sym = sym def get_weights(self, weights: Weights, prefix: str): log_once(logger.info, 'Using GPTQ-Marlin kernels') try: qweight = weights.get_tensor(f'{prefix}.qweight') except RuntimeError: raise RuntimeError(f'Cannot load `{self.quantize}` weight for GPTQ -> Marlin repacking, make sure the model is already quantized') if not self.sym: qzeros = weights.get_tensor(f'{prefix}.qzeros') else: qzeros = None if self.quant_method == 'awq': g_idx = None else: g_idx = weights.get_tensor(f'{prefix}.g_idx') scales = weights.get_tensor(f'{prefix}.scales') return repack_gptq_for_marlin(qweight=qweight, scales=scales, qzeros=qzeros, g_idx=g_idx, bits=self.bits, desc_act=self.desc_act, 
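# --- Editor's illustrative sketch (not part of the original sources) ---
# How a caller would gate on Marlin support before constructing the loader defined above.
# All argument values are examples; the real decision is driven by the model's
# quantization config.
from text_generation_server.layers.marlin.gptq import (
    GPTQMarlinWeightsLoader, can_use_gptq_marlin)

if can_use_gptq_marlin(bits=4, groupsize=128, quant_method='gptq', quantize='gptq', sym=True):
    weights_loader = GPTQMarlinWeightsLoader(
        bits=4, desc_act=False, groupsize=128,
        quant_method='gptq', quantize='gptq', sym=True)
# --- End of editor's sketch ---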
groupsize=self.groupsize, quant_method=self.quant_method, sym=self.sym, sharded_infeatures=False) def get_weights_col_packed(self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]]): try: qweight = weights.get_packed_sharded(f'{prefix}.qweight', dim=1, block_sizes=block_sizes) except RuntimeError: raise RuntimeError(f'Cannot load `{self.quantize}` weight, make sure the model is already quantized.') scales = weights.get_packed_sharded(f'{prefix}.scales', dim=1, block_sizes=block_sizes) scales = scales.to(dtype=weights.dtype) if not self.sym: qzeros = weights.get_packed_sharded(f'{prefix}.qzeros', dim=1, block_sizes=block_sizes) else: qzeros = None if self.quant_method == 'awq': g_idx = None else: g_idx = weights.get_tensor(f'{prefix}.g_idx') return repack_gptq_for_marlin(qweight=qweight, scales=scales, qzeros=qzeros, g_idx=g_idx, bits=self.bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method=self.quant_method, sym=self.sym, sharded_infeatures=False) def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): try: qweight = torch.cat([weights.get_sharded(f'{p}.qweight', dim=1) for p in prefixes], dim=1) except RuntimeError: raise RuntimeError(f'Cannot load `{self.quantize}` weight, make sure the model is already quantized') scales = torch.cat([weights.get_sharded(f'{p}.scales', dim=1) for p in prefixes], dim=1) if not self.sym: qzeros = torch.cat([weights.get_sharded(f'{p}.qzeros', dim=1) for p in prefixes], dim=1) else: qzeros = None if self.quant_method == 'awq': g_idx = None else: w = [weights.get_tensor(f'{p}.g_idx') for p in prefixes] for w2 in w[1:]: torch.testing.assert_close(w2, w[0]) g_idx = w[0] return repack_gptq_for_marlin(qweight=qweight, scales=scales, qzeros=qzeros, g_idx=g_idx, bits=self.bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method=self.quant_method, sym=self.sym, sharded_infeatures=False) def get_weights_row(self, weights: Weights, prefix: str): log_once(logger.info, 'Using GPTQ-Marlin kernels') try: qweight = weights.get_sharded(f'{prefix}.qweight', dim=0) except RuntimeError: raise RuntimeError(f'Cannot load `{self.quantize}` weight for GPTQ -> Marlin repacking, make sure the model is already quantized') if not self.sym: if self.desc_act or self.groupsize == -1: qzeros = weights.get_tensor(f'{prefix}.qzeros') else: qzeros = weights.get_sharded(f'{prefix}.qzeros', dim=0) else: qzeros = None if self.quant_method == 'awq': g_idx = None else: g_idx = weights.get_sharded(f'{prefix}.g_idx', dim=0) if self.desc_act or self.groupsize == -1: scales = weights.get_tensor(f'{prefix}.scales') else: scales = weights.get_sharded(f'{prefix}.scales', dim=0) sharded_in_features = weights.process_group.size() > 1 return repack_gptq_for_marlin(qweight=qweight, scales=scales, qzeros=qzeros, g_idx=g_idx, bits=self.bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method=self.quant_method, sym=self.sym, sharded_infeatures=sharded_in_features) def _get_gptq_params(self, weights: Weights): if weights._has_tensor('gptq_bits') and weights._has_tensor('gptq_groupsize'): self.bits = weights.get_tensor('gptq_bits').item() self.groupsize = weights.get_tensor('gptq_groupsize').item() self.desc_act = False self.sym = weights.get_tensor('gptq_sym').item() if weights._has_tensor('gptq_sym') else False self.quant_method = 'gptq' @dataclass class GPTQMarlinWeight(Weight): qweight: torch.Tensor qzeros: torch.Tensor scales: torch.Tensor g_idx: torch.Tensor perm: torch.Tensor bits: int is_full_k: bool def 
__post_init__(self): assert self.qweight.dtype == torch.int32 assert self.scales.dtype == torch.float16 assert self.g_idx.dtype == torch.int32 assert self.perm.dtype == torch.int32 def get_linear(self, bias: torch.Tensor): return GPTQMarlinLinear(weight=self, bias=bias) def repack_gptq_for_marlin(*, qweight: torch.Tensor, qzeros: Optional[torch.Tensor], scales: torch.Tensor, g_idx: Optional[torch.Tensor], bits: int, desc_act: bool, groupsize: int, quant_method: str, sym: bool, sharded_infeatures: bool) -> GPTQMarlinWeight: _check_marlin_kernels() assert marlin_kernels is not None if bits not in GPTQ_MARLIN_BITS: supported_bits = ', '.join((str(b) for b in GPTQ_MARLIN_BITS)) raise RuntimeError(f'Repacking {bits}-bit GPTQ weights as Marlin is not supported, must be one of: {supported_bits}') if groupsize not in GPTQ_MARLIN_GROUP_SIZES: supported_sizes = ', '.join((str(b) for b in GPTQ_MARLIN_GROUP_SIZES)) raise RuntimeError(f'Repacking GPTQ weights with group size {groupsize} as Marlin is not supported, must be one of: {supported_sizes}') if not (sym or quant_method == 'awq'): raise RuntimeError('Repacking GPTQ weights with asymmetric quantization as Marlin is not supported.') log_once(logger.info, f'Converting {quant_method} model to Marlin packing format.') weights_per_int = 32 // bits in_features = qweight.shape[0] out_features = qweight.shape[1] if quant_method == 'awq': out_features *= weights_per_int else: in_features *= weights_per_int if in_features % groupsize != 0: raise ValueError(f'Number of input features ({in_features}) not divisible by group size ({groupsize})') if g_idx is not None and desc_act and (groupsize != -1): perm = torch.argsort(g_idx).to(torch.int) g_idx = g_idx[perm] else: perm = torch.empty(0, dtype=torch.int, device=qweight.device) g_idx = torch.empty(0, dtype=torch.int, device=qweight.device) if quant_method == 'awq': repacked = marlin_kernels.awq_marlin_repack(qweight, in_features, out_features, bits) if qzeros is not None: qzeros = awq_to_marlin_zero_points(qzeros, in_features // groupsize, out_features, bits) else: repacked = marlin_kernels.gptq_marlin_repack(qweight, perm, in_features, out_features, bits) if qzeros is None: qzeros = torch.empty(0, dtype=torch.int, device=qweight.device) scales = permute_scales(scales) is_full_k = not (desc_act and sharded_infeatures) return GPTQMarlinWeight(qweight=repacked, qzeros=qzeros, scales=scales, g_idx=g_idx, perm=perm, bits=bits, is_full_k=is_full_k) class GPTQMarlinLinear(nn.Module): def __init__(self, *, weight: GPTQMarlinWeight, bias: Optional[torch.Tensor]): super().__init__() _check_marlin_kernels() assert marlin_kernels is not None in_features = weight.qweight.shape[0] * MARLIN_TILE_SIZE out_features = weight.scales.shape[1] _check_valid_shape(in_features=in_features, out_features=out_features) self.bits = weight.bits self.is_full_k = weight.is_full_k self.qweight = weight.qweight self.qzeros = weight.qzeros self.scales = weight.scales self.g_idx = weight.g_idx self.perm = weight.perm if bias is not None: self.bias = bias else: self.bias = None self.workspace = torch.zeros(out_features // 64 * 16, dtype=torch.int, device=weight.qweight.device) def forward(self, A: torch.Tensor) -> torch.Tensor: assert marlin_kernels is not None A_flat = A.view(-1, A.shape[-1]) C = marlin_kernels.gptq_marlin_gemm(A_flat, self.qweight, self.scales, self.qzeros, self.g_idx, self.perm, self.workspace, self.bits, A_flat.shape[0], self.scales.shape[1], A_flat.shape[1], self.is_full_k, self.qzeros.numel() > 0, True) C = 
C.reshape(A.shape[:-1] + (self.scales.shape[1],)) if self.bias is not None: C += self.bias return C def awq_to_marlin_zero_points(q_zp_packed: torch.Tensor, size_k: int, size_n: int, num_bits: int) -> torch.Tensor: q_zp = unpack_cols(q_zp_packed, num_bits, size_k, size_n) if num_bits == 4: undo_interleave = numpy.argsort(numpy.array([0, 2, 4, 6, 1, 3, 5, 7])) elif num_bits == 8: undo_interleave = numpy.argsort(numpy.array([0, 2, 1, 3])) else: raise Exception('num_bits must be 4 or 8, got {}'.format(num_bits)) q_zp = q_zp.reshape((-1, len(undo_interleave)))[:, undo_interleave].ravel() q_zp = q_zp.reshape((-1, size_n)).contiguous() marlin_zp = marlin_zero_points(q_zp, size_k, size_n, num_bits) return marlin_zp def _check_valid_shape(in_features: int, out_features: int): if (in_features % 128 != 0 or out_features % 64 != 0) and (in_features % 64 != 0 or out_features % 128 != 0): raise ValueError(f'The GPTQ Marlin kernel does not have a valid thread configuration for weight matrix with shape ({out_features}, {in_features}). The shape elements must be divisible by (128, 64) or (64, 128).') # File: text-generation-inference-main/server/text_generation_server/layers/marlin/marlin.py from dataclasses import dataclass from typing import List, Optional, Union import torch import torch.nn as nn from text_generation_server.layers.marlin.util import _check_marlin_kernels from text_generation_server.utils.weights import Weight, Weights, WeightsLoader try: import marlin_kernels except ImportError: marlin_kernels = None class MarlinWeightsLoader(WeightsLoader): def __init__(self, *, bits: int, is_marlin_24: bool): self.bits = bits self.is_marlin_24 = is_marlin_24 def get_weights(self, weights: 'Weights', prefix: str): is_marlin_24 = getattr(self, 'gptq_checkpoint_format', None) == 'marlin_24' if is_marlin_24: try: B = weights.get_tensor(f'{prefix}.B_24') except RuntimeError: raise RuntimeError('Cannot load `marlin` 2:4 sparsity weight, make sure the model is already quantized.') B_meta = weights.get_tensor(f'{prefix}.B_meta') s = weights.get_tensor(f'{prefix}.s') weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits) else: try: B = weights.get_tensor(f'{prefix}.B') except RuntimeError: raise RuntimeError('Cannot load `marlin` weight, make sure the model is already quantized.') s = weights.get_tensor(f'{prefix}.s') weight = MarlinWeight(B=B, s=s) return weight def get_weights_col_packed(self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]]): if self.is_marlin_24: B = weights.get_packed_sharded(f'{prefix}.B_24', dim=1, block_sizes=block_sizes) B_meta = weights.get_packed_sharded(f'{prefix}.B_meta', dim=1, block_sizes=block_sizes) s = weights.get_packed_sharded(f'{prefix}.s', dim=1, block_sizes=block_sizes) weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits) else: B = weights.get_packed_sharded(f'{prefix}.B', dim=1, block_sizes=block_sizes) s = weights.get_packed_sharded(f'{prefix}.s', dim=1, block_sizes=block_sizes) weight = MarlinWeight(B=B, s=s) return weight def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): if self.is_marlin_24: try: B = torch.cat([weights.get_sharded(f'{p}.B_24', dim=1) for p in prefixes], dim=1) except RuntimeError: raise RuntimeError('Cannot load `marlin` weight, make sure the model is already quantized') B_meta = torch.cat([weights.get_sharded(f'{p}.B_meta', dim=1) for p in prefixes], dim=1) s = torch.cat([weights.get_sharded(f'{p}.s', dim=1) for p in prefixes], dim=1) weight = GPTQMarlin24Weight(B=B, 
B_meta=B_meta, s=s, bits=self.bits) else: try: B = torch.cat([weights.get_sharded(f'{p}.B', dim=1) for p in prefixes], dim=1) except RuntimeError: raise RuntimeError('Cannot load `marlin` weight, make sure the model is already quantized') s = torch.cat([weights.get_sharded(f'{p}.s', dim=1) for p in prefixes], dim=1) weight = MarlinWeight(B=B, s=s) return weight def get_weights_row(self, weights: Weights, prefix: str): if self.is_marlin_24: try: B = weights.get_sharded(f'{prefix}.B_24', dim=0) except RuntimeError: raise RuntimeError('Cannot load `marlin` 2:4 sparsity weight, make sure the model is already quantized.') B_meta = weights.get_sharded(f'{prefix}.B_meta', dim=0) num_groups = weights._get_slice(f'{prefix}.s').get_shape()[0] if num_groups == 1: s = weights.get_tensor(f'{prefix}.s') else: s = weights.get_sharded(f'{prefix}.s', dim=0) weight = GPTQMarlin24Weight(B=B, B_meta=B_meta, s=s, bits=self.bits) else: try: B = weights.get_sharded(f'{prefix}.B', dim=0) except RuntimeError: raise RuntimeError('Cannot load `marlin` weight, make sure the model is already quantized.') num_groups = weights._get_slice(f'{prefix}.s').get_shape()[0] if num_groups == 1: s = weights.get_tensor(f'{prefix}.s') else: s = weights.get_sharded(f'{prefix}.s', dim=0) weight = MarlinWeight(B=B, s=s) return weight @dataclass class MarlinWeight(Weight): B: torch.Tensor s: torch.Tensor def __post_init__(self): assert self.B.dtype == torch.int32 assert self.s.dtype in [torch.float16, torch.bfloat16] def get_linear(self, bias: torch.Tensor): return MarlinLinear(weight=self, bias=bias) class MarlinLinear(nn.Module): def __init__(self, *, weight: MarlinWeight, bias: Optional[torch.Tensor]): super().__init__() _check_marlin_kernels() assert marlin_kernels is not None in_features = weight.B.shape[0] * MARLIN_TILE_SIZE out_features = weight.s.shape[1] assert in_features % 128 == 0, f'Number of input features ({in_features}) not divisable by 128' assert out_features % 256 == 0, f'Number of output features ({out_features}) not divisable by 256' groupsize = -1 if weight.s.shape[0] == 1 else in_features // weight.s.shape[0] assert groupsize in {-1, 128}, f'Group size must be -1 or 128, was {groupsize}' self.B = weight.B self.s = weight.s if bias is not None: self.bias = bias else: self.bias = None self.workspace = torch.zeros(out_features // 64 * 16, dtype=torch.int, device=weight.B.device) def forward(self, A: torch.Tensor) -> torch.Tensor: assert marlin_kernels is not None C = marlin_kernels.marlin_gemm(A.view(-1, A.shape[-1]), self.B, self.s, self.workspace, A.shape[0], self.s.shape[1], A.shape[1]) C = C.reshape(A.shape[:-1] + (self.s.shape[1],)) if self.bias is not None: C += self.bias return C GPTQ_MARLIN_24_MIN_THREAD_N = 128 GPTQ_MARLIN_24_MIN_THREAD_K = 128 GPTQ_MARLIN_24_MAX_PARALLEL = 64 GPTQ_MARLIN_24_SUPPORTED_NUM_BITS = [4, 8] GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES = [-1, 128] MARLIN_TILE_SIZE = 16 @dataclass class GPTQMarlin24Weight: B: torch.Tensor B_meta: torch.Tensor s: torch.Tensor bits: int def __post_init__(self): assert self.B.dtype == torch.int32 assert self.B_meta.dtype == torch.int16 assert self.s.dtype == torch.float16 def get_linear(self, bias: torch.Tensor): return GPTQMarlin24Linear(weight=self, bias=bias) class GPTQMarlin24Linear(nn.Module): def __init__(self, *, weight: GPTQMarlin24Weight, bias: Optional[torch.Tensor]): super().__init__() _check_marlin_kernels() assert marlin_kernels is not None if weight.bits not in GPTQ_MARLIN_24_SUPPORTED_NUM_BITS: supported_bits = ', '.join((str(b) for b in 
GPTQ_MARLIN_24_SUPPORTED_NUM_BITS)) raise RuntimeError(f'{weight.bits}-bit GPTQ Sparse 2:4 Marlin is not supported, must be one of: {supported_bits}') in_features = weight.B.shape[0] * MARLIN_TILE_SIZE * 2 out_features = weight.s.shape[1] groupsize = -1 if weight.s.shape[0] == 1 else in_features // weight.s.shape[0] if groupsize not in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES: supported_sizes = ', '.join((str(b) for b in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES)) raise RuntimeError(f'Group size {groupsize} is not supported, must be one of: {supported_sizes}') self.bits = weight.bits weights_per_int32 = 32 // self.bits assert out_features % GPTQ_MARLIN_24_MIN_THREAD_N == 0, f'Number of output features ({out_features}) not divisable by {GPTQ_MARLIN_24_MIN_THREAD_N} threads' assert out_features % weights_per_int32 == 0, f'Number of output features ({out_features}) not divisable by weights per int32 ({weights_per_int32})' assert in_features % GPTQ_MARLIN_24_MIN_THREAD_K == 0, f'Number of output features ({out_features}) not divisable by {GPTQ_MARLIN_24_MIN_THREAD_K} threads' if groupsize != -1 and in_features % groupsize != 0: raise ValueError(f'Number of input features ({in_features}) not divisable by group size ({groupsize})') self.B = weight.B self.B_meta = weight.B_meta self.s = weight.s if bias is not None: self.bias = bias else: self.bias = None self.workspace = torch.zeros(out_features // GPTQ_MARLIN_24_MIN_THREAD_N * GPTQ_MARLIN_24_MAX_PARALLEL, dtype=torch.int, device=weight.B.device) def forward(self, A: torch.Tensor) -> torch.Tensor: assert marlin_kernels is not None C = marlin_kernels.gptq_marlin_24_gemm(A.view(-1, A.shape[-1]), self.B, self.B_meta, self.s, self.workspace, self.bits, A.shape[0], self.s.shape[1], A.shape[1]) C = C.reshape(A.shape[:-1] + (self.s.shape[1],)) if self.bias is not None: C += self.bias return C # File: text-generation-inference-main/server/text_generation_server/layers/marlin/util.py import functools from typing import List, Tuple import numpy import torch from text_generation_server.utils.import_utils import SYSTEM try: import marlin_kernels except ImportError: marlin_kernels = None try: (major, _minor) = torch.cuda.get_device_capability() has_sm_8_0 = major >= 8 except Exception: has_sm_8_0 = False def _check_marlin_kernels(): if not (SYSTEM == 'cuda' and has_sm_8_0): raise NotImplementedError('Using quantized Marlin models requires a GPU with CUDA capability 8.0 or later.') if marlin_kernels is None: raise NotImplementedError('marlin is not installed, install it with: pip install server/marlin') @functools.cache def get_perms() -> Tuple[List[int], List[int]]: scale_perm = [] for i in range(8): scale_perm.extend([i + 8 * j for j in range(8)]) scale_perm_single = [] for i in range(4): scale_perm_single.extend([2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]]) return (scale_perm, scale_perm_single) def permute_scales(scales: torch.Tensor): (scale_perm, scale_perm_single) = get_perms() out_features = scales.shape[1] if scales.shape[0] == 1: scales = scales.reshape((-1, len(scale_perm_single)))[:, scale_perm_single] else: scales = scales.reshape((-1, len(scale_perm)))[:, scale_perm] return scales.reshape((-1, out_features)).contiguous() def get_pack_factor(bits: int) -> int: if 32 % bits != 0: raise ValueError(f'Cannot {bits} bit values into uint32') return 32 // bits def pack_cols(q_w: torch.Tensor, num_bits: int, size_k: int, size_n: int): assert q_w.shape == (size_k, size_n) pack_factor = get_pack_factor(num_bits) assert size_n % pack_factor == 0 orig_device = 
q_w.device q_w = q_w.cpu().numpy().astype(numpy.uint32) q_res = numpy.zeros((size_k, size_n // pack_factor), dtype=numpy.uint32) for i in range(pack_factor): q_res |= q_w[:, i::pack_factor] << num_bits * i q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device) q_res = q_res.contiguous() return q_res def unpack_cols(packed_q_w: torch.Tensor, num_bits: int, size_k: int, size_n: int): pack_factor = get_pack_factor(num_bits) assert size_n % pack_factor == 0 assert packed_q_w.shape == (size_k, size_n // pack_factor), 'packed_q_w.shape = {} size_k = {}, size_n = {} pack_Factor = {}'.format(packed_q_w.shape, size_k, size_n, pack_factor) orig_device = packed_q_w.device packed_q_w_cpu = packed_q_w.cpu().numpy().astype(numpy.uint32) q_res = numpy.zeros((size_k, size_n), dtype=numpy.uint32) mask = (1 << num_bits) - 1 for i in range(pack_factor): vals = packed_q_w_cpu & mask packed_q_w_cpu >>= num_bits q_res[:, i::pack_factor] = vals q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device) q_res = q_res.contiguous() return q_res def marlin_zero_points(zp: torch.Tensor, size_k: int, size_n: int, num_bits: int) -> torch.Tensor: (scale_perm, _) = get_perms() zp = zp.reshape((-1, len(scale_perm)))[:, scale_perm] if num_bits == 4: interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7]) elif num_bits == 8: interleave = numpy.array([0, 2, 1, 3]) else: raise Exception('num_bits must be 4 or 8, got {}'.format(num_bits)) zp = zp.reshape((-1, len(interleave)))[:, interleave].ravel() zp = zp.reshape((-1, size_n)).contiguous() zp = pack_cols(zp, num_bits, size_k, size_n) return zp # File: text-generation-inference-main/server/text_generation_server/layers/medusa.py import torch from torch import nn from typing import Tuple, Optional from text_generation_server.utils.speculate import get_speculate from text_generation_server.layers.linear import FastLinear from text_generation_server.layers.tensor_parallel import TensorParallelHead, TensorParallelColumnLinear class ResBlock(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.linear = FastLinear.load(config, prefix=f'{prefix}.linear', weights=weights, bias=True) self.act = torch.nn.SiLU() def forward(self, x): return x + self.act(self.linear(x)) class MedusaModel(torch.nn.Module): def __init__(self, config, medusa_config, weights): super().__init__() self.heads = torch.nn.ModuleList([MedusaHead(config, medusa_config, prefix=f'{i}', weights=weights) for i in range(get_speculate())]) def forward(self, x): if not self.heads: return None speculative_logits = torch.stack([head(x) for head in self.heads], dim=1) return speculative_logits class MedusaHead(torch.nn.Module): def __init__(self, config, medusa_config, prefix, weights): super().__init__() self.blocks = torch.nn.ModuleList([ResBlock(config, prefix=f'{prefix}.{i}', weights=weights) for i in range(medusa_config['medusa_num_layers'])]) n = len(self.blocks) self.out = FastLinear.load(config, prefix=f'{prefix}.{n}', weights=weights, bias=False) def forward(self, x): for block in self.blocks: x = block(x) x = self.out(x) return x class MedusaHeadV1(nn.Module): def __init__(self, lm_head, medusa): super().__init__() self.lm_head = lm_head self.medusa = medusa @staticmethod def load(config, prefix: str, weights): from pathlib import Path from safetensors import safe_open import json speculator = config.speculator path = speculator['path'] medusa_config = str(Path(path) / 'config.json') for fname in speculator['model_paths']: filename = str(Path(path) / fname) with 
open(medusa_config, 'r') as f: medusa_config = json.load(f) routing = weights.routing with safe_open(filename, framework='pytorch') as f: for k in f.keys(): if k in routing and routing[k] != filename: raise RuntimeError(f'Key {k} was found in multiple files: {filename} and {routing[k]}') routing[k] = filename medusa = MedusaModel(config, medusa_config, weights) lm_head = TensorParallelHead.load(config, prefix, weights) return MedusaHeadV1(lm_head, medusa) def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: logits = self.lm_head(input) if input.shape[0] > 128: return (logits, None) speculative_logits = self.medusa(input) return (logits, speculative_logits) class MedusaHeadV2(nn.Module): def __init__(self, config, prefix, weights): super().__init__() from pathlib import Path from safetensors import safe_open import json speculator_path = config.speculator['path'] medusa_config = str(Path(speculator_path) / 'config.json') filename = str(Path(speculator_path) / 'medusa_lm_head.safetensors') with open(medusa_config, 'r') as f: medusa_config = json.load(f) routing = weights.routing with safe_open(filename, framework='pytorch') as f: for k in f.keys(): if k in routing and routing[k] != filename: raise RuntimeError(f'Key {k} was found in multiple files: {filename} and {routing[k]}') routing[k] = filename self.n_medusa_heads = get_speculate() assert medusa_config['medusa_num_layers'] == 1 self.linear = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{i}.0.linear' for i in range(self.n_medusa_heads)], dim=0, weights=weights, bias=True) self.process_group = weights.process_group self.world_size = self.process_group.size() self.rank = self.process_group.rank() self.act = torch.nn.SiLU() self.lm_head = TensorParallelHead.load(config, prefix, weights) def forward(self, x): if x.shape[0] > 128: logits = self.lm_head(x) return (logits, None) size = x.shape[-1] block_size = (size + self.world_size - 1) // self.world_size start = self.rank * block_size stop = (self.rank + 1) * block_size x_block = x[:, start:stop] medusa_res = self.act(self.linear(x)).reshape(*x_block.shape[:-1], self.n_medusa_heads, x_block.shape[-1]) output = x[:, start:stop].unsqueeze(-2) + medusa_res world_output = [torch.empty_like(output) for _ in range(self.process_group.size())] torch.distributed.all_gather(world_output, output, group=self.process_group) world_output = torch.cat(world_output, dim=-1) stacked_x = torch.cat([x.unsqueeze(-2), world_output], dim=-2) logits = self.lm_head(stacked_x) (logits, speculative_logits) = torch.split(logits, [1, self.n_medusa_heads], dim=-2) logits = logits.squeeze(-2) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/layers/mlp.py import torch import math from torch import nn from torch.nn import functional as F from typing import Optional, Tuple from text_generation_server.layers import TensorParallelEmbedding, FastLinear from text_generation_server.layers.tensor_parallel import TensorParallelHead from text_generation_server.utils.speculate import get_speculate class MLPSpeculatorLayerNorm(nn.Module): def __init__(self, prefix, config, weights, eps=1e-06): super(MLPSpeculatorLayerNorm, self).__init__() self.weight = weights.get_tensor(f'{prefix}.weight') self.bias = weights.get_tensor(f'{prefix}.bias') self.eps = eps def forward(self, x): xf = x xf = xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + self.eps) x = xf.type_as(x) x = self.weight * x x = x + self.bias return x INV_SQRT2 = 2 ** 
(-0.5) def simple_norm(x: torch.Tensor, eps=1e-06): xf = x xf = xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + eps) x = xf.type_as(x) return x * INV_SQRT2 class MLPSpeculatorModelTied(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.config = config self.n_predict = get_speculate() self.hidden_size = config.hidden_size self.emb = TensorParallelEmbedding(f'{prefix}.emb.0', weights) self.proj0 = FastLinear.load(config, prefix=f'{prefix}.proj.0', weights=weights, bias=False) self.proj1 = FastLinear.load(config, prefix=f'{prefix}.proj.1', weights=weights, bias=False) self.head = FastLinear.load(config, f'{prefix}.head.0', weights, bias=False) self.ln = MLPSpeculatorLayerNorm(prefix=f'{prefix}.ln.0', config=config, weights=weights) self.state_weight = 0.5 ** (0.5 / self.n_predict) if self.n_predict > 0 else 1 self.activation = nn.GELU() self.vsize = config.vocab_size self.inner_dim = config.speculator_config['inner_dim'] self.top_k_tokens_per_head = [1] * self.n_predict self.emb_weight = math.sqrt(1 - self.state_weight ** 2) * math.sqrt(self.inner_dim / 2) self.emb.weight *= self.emb_weight def forward(self, hidden_states: torch.Tensor, input_ids: torch.Tensor): top_k_tokens_per_head = self.top_k_tokens_per_head state = hidden_states b = state.size(0) ind = input_ids.unsqueeze(0) all_probs = torch.empty(b, self.n_predict, self.vsize, device=state.device) assert len(top_k_tokens_per_head) == self.n_predict, f'You must provide a topk number for each head ({self.n_predict} heads, {len(top_k_tokens_per_head)} provided)' for i in range(self.n_predict): z = self.emb(ind) if i == 0: state = self.proj0(state) * self.state_weight + z else: state = self.proj1(state) * self.state_weight + z state = self.activation(self.ln(state)) probs = F.log_softmax(self.head(state), dim=-1) (_probs, preds) = probs.topk(top_k_tokens_per_head[i], dim=-1) all_probs[:, i] = probs.exp() state = state.unsqueeze(2).expand(-1, -1, top_k_tokens_per_head[i], -1) state = state.reshape(-1, b, state.size(3)) ind = preds.view(-1, b) speculative_logits = all_probs return speculative_logits class MLPSpeculatorModel(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.config = config self.n_predict = get_speculate() self.hidden_size = config.hidden_size self.emb = nn.ModuleList([TensorParallelEmbedding(f'{prefix}.emb.{i}', weights) for i in range(self.n_predict)]) self.proj = [FastLinear.load(config, prefix=f'{prefix}.proj.{i}', weights=weights, bias=False) for i in range(self.n_predict)] self.head = nn.ModuleList([FastLinear.load(config, f'{prefix}.head.{i}', weights, bias=False) for i in range(self.n_predict)]) self.ln = nn.ModuleList([MLPSpeculatorLayerNorm(prefix=f'{prefix}.ln.{i}', config=config, weights=weights) for i in range(self.n_predict)]) self.state_weight = 0.5 ** (0.5 / self.n_predict) if self.n_predict > 0 else 1 self.activation = nn.GELU() self.vsize = config.vocab_size self.inner_dim = config.speculator_config['inner_dim'] self.top_k_tokens_per_head = [1] * self.n_predict self.emb_weight = math.sqrt(1 - self.state_weight ** 2) * math.sqrt(self.inner_dim / 2) self.emb.weight *= self.emb_weight def forward(self, hidden_states: torch.Tensor, input_ids: torch.Tensor): top_k_tokens_per_head = self.top_k_tokens_per_head state = hidden_states b = state.size(0) ind = input_ids.unsqueeze(0) all_probs = torch.empty(b, self.n_predict, self.vsize, device=state.device) assert len(top_k_tokens_per_head) == self.n_predict, f'You must provide a topk number for 
each head ({self.n_predict} heads, {len(top_k_tokens_per_head)} provided)' for i in range(self.n_predict): z = self.emb[i](ind) state = self.proj[i](state) * self.state_weight + z state = self.activation(self.ln[i](state)) probs = F.log_softmax(self.head[i](state), dim=-1) (_probs, preds) = probs.topk(top_k_tokens_per_head[i], dim=-1) all_probs[:, i] = probs.exp() state = state.unsqueeze(2).expand(-1, -1, top_k_tokens_per_head[i], -1) state = state.reshape(-1, b, state.size(3)) ind = preds.view(-1, b) speculative_logits = all_probs return speculative_logits class MLPSpeculatorHead(nn.Module): def __init__(self, lm_head, mlp_speculator, scale_input: bool): super().__init__() self.lm_head = lm_head self.mlp_speculator = mlp_speculator self.scale_input = scale_input def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: logits = self.lm_head(input) if input.shape[0] > 128: return (logits, None) input_ids = logits.argmax(dim=-1) if self.scale_input: input = simple_norm(input) speculative_logits = self.mlp_speculator(input, input_ids) return (logits, speculative_logits) @staticmethod def load(config, prefix: str, weights): from pathlib import Path from safetensors import safe_open speculator_path = config.speculator['path'] for fname in config.speculator['model_paths']: filename = str(Path(speculator_path) / fname) routing = weights.routing with safe_open(filename, framework='pytorch') as f: for k in f.keys(): if k in routing and routing[k] != filename: raise RuntimeError(f'Key {k} was found in multiple files: {filename} and {routing[k]}') routing[k] = filename tie_weights = config.speculator_config.get('tie_weights', False) if tie_weights: mlp_speculator = MLPSpeculatorModelTied(config, 'speculator', weights) else: mlp_speculator = MLPSpeculatorModel(config, 'speculator', weights) scale_input = config.speculator_config.get('scale_input', False) lm_head = TensorParallelHead.load(config, prefix, weights) return MLPSpeculatorHead(lm_head, mlp_speculator, scale_input) # File: text-generation-inference-main/server/text_generation_server/layers/rotary.py import os import math import torch from torch import nn from text_generation_server.utils.import_utils import SYSTEM if SYSTEM == 'cuda': import rotary_emb elif SYSTEM == 'rocm': from vllm._C import ops elif SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex def _create_inv_freq(dim, base, device): inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim) return inv_freq def _get_rope_config(config): if os.getenv('ROPE_SCALING', None) is not None: rope_scaling = {'type': os.environ['ROPE_SCALING'], 'factor': float(os.environ['ROPE_FACTOR'])} return rope_scaling return getattr(config, 'rope_scaling', None) class PositionRotaryEmbedding(nn.Module): def __init__(self, inv_freq, scaling_factor): super().__init__() self.inv_freq = inv_freq self._seq_len_cached = 0 self._cos_cached = None self._sin_cached = None self._cos_k_cached = None self._sin_k_cached = None self.scaling_factor = scaling_factor self.dynamic_args = None def forward(self, query: torch.Tensor, key: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor): if SYSTEM == 'cuda': rotary_dim = cos.shape[-1] q1 = query[..., :rotary_dim] q2 = query[..., rotary_dim:2 * rotary_dim] rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False) k1 = key[..., :rotary_dim] k2 = key[..., rotary_dim:2 * rotary_dim] rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False) elif SYSTEM == 'rocm': head_size = query.shape[-1] 
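# NOTE (added explanatory comment, not in the original source): the ROCm call below rotates
# `query` and `key` in place via the fused vLLM kernel, which is why this forward() returns
# nothing; the trailing boolean presumably selects the NeoX-style (non-interleaved) rotation,
# mirroring the CUDA branch above that writes its results back into q1/q2 and k1/k2.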
ops.rotary_embedding(query, key, head_size, cos, sin, True) elif SYSTEM == 'ipex': ipex.llm.functional.rotary_embedding(query, key, sin, cos, query.size(-1), True) else: raise ValueError('Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.') @classmethod def static(cls, config, dim, base, device): inv_freq = _create_inv_freq(dim, base, device) scaling_factor = None rope_scaling = _get_rope_config(config) if rope_scaling is not None: rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) if rope_type == 'linear': pass elif rope_type == 'dynamic': scaling_factor = rope_scaling['factor'] return DynamicPositionRotaryEmbedding(dim=dim, max_position_embeddings=config.max_position_embeddings, base=base, device=inv_freq.device, scaling_factor=scaling_factor) elif rope_type == 'llama3': inv_freq = apply_llama3_scaling(inv_freq, scaling_factor=rope_scaling['factor'], low_freq_factor=rope_scaling['low_freq_factor'], high_freq_factor=rope_scaling['high_freq_factor'], original_max_position_embeddings=rope_scaling['original_max_position_embeddings']) return cls(inv_freq, scaling_factor) elif rope_type == 'yarn': scaling_factor = rope_scaling['factor'] mscale = rope_scaling.get('mscale', 1.0) mscale_all_dim = rope_scaling.get('mscale_all_dim', 0.0) return YarnPositionRotaryEmbedding(dim=2 * inv_freq.shape[0], max_position_embeddings=rope_scaling['original_max_position_embeddings'], base=base, device=inv_freq.device, scaling_factor=scaling_factor, extrapolation_factor=1, attn_factor=1, beta_fast=32, beta_slow=1, mscale=mscale, mscale_all_dim=mscale_all_dim) elif rope_type in ['su', 'longrope']: short_factor = torch.tensor(rope_scaling['short_factor'], dtype=torch.float32, device=device) short_inv_freq = 1.0 / (short_factor * base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)) long_factor = torch.tensor(rope_scaling['long_factor'], dtype=torch.float32, device=device) long_inv_freq = 1.0 / (long_factor * base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)) original_max_position_embeddings = config.original_max_position_embeddings max_position_embeddings = config.max_position_embeddings if max_position_embeddings <= original_max_position_embeddings: scaling_factor = 1.0 else: scale = max_position_embeddings / original_max_position_embeddings scaling_factor = math.sqrt(1 + math.log(scale) / math.log(original_max_position_embeddings)) return SuRotaryEmbedding(short_inv_freq=short_inv_freq, long_inv_freq=long_inv_freq, scaling_factor=scaling_factor, original_max_position_embeddings=original_max_position_embeddings) else: raise NotImplementedError(f"rope scaling type {rope_scaling['type']} is not implemented or invalid") return cls(inv_freq, scaling_factor) @classmethod def load(cls, config, prefix, weights): dtype = weights.dtype weights.dtype = torch.float32 inv_freq = weights.get_tensor(f'{prefix}.inv_freq') weights.dtype = dtype scaling_factor = None rope_scaling = _get_rope_config(config) if rope_scaling is not None: scaling_factor = rope_scaling['factor'] if rope_scaling['type'] == 'linear': pass elif rope_scaling['type'] == 'dynamic': return DynamicPositionRotaryEmbedding(dim=2 * inv_freq.shape[0], max_position_embeddings=config.max_position_embeddings, base=10000.0, device=inv_freq.device, scaling_factor=scaling_factor) elif rope_scaling['type'] == 'yarn': mscale = rope_scaling.get('mscale', 1.0) mscale_all_dim = 
rope_scaling.get('mscale_all_dim', 0.0) return YarnPositionRotaryEmbedding(dim=2 * inv_freq.shape[0], max_position_embeddings=rope_scaling['original_max_position_embeddings'], base=10000.0, device=inv_freq.device, scaling_factor=scaling_factor, extrapolation_factor=1, attn_factor=1, beta_fast=32, beta_slow=1, mscale=mscale, mscale_all_dim=mscale_all_dim) else: raise NotImplementedError(f"rope scaling type {rope_scaling['type']} is not implemented or invalid") return cls(inv_freq, scaling_factor) def _update_cos_sin_cache(self, dtype, device, seqlen): if seqlen > self._seq_len_cached or self._cos_cached.device != device or self._cos_cached.dtype != dtype: self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) if self.scaling_factor is not None: t /= self.scaling_factor freqs = torch.outer(t, self.inv_freq.to(device=t.device)) self._cos_cached = torch.cos(freqs).to(dtype) self._sin_cached = torch.sin(freqs).to(dtype) def get_cos_sin(self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype): if SYSTEM == 'rocm': dtype = torch.float32 self._update_cos_sin_cache(dtype, position_ids.device, max_s) cos = torch.index_select(self._cos_cached, 0, position_ids) sin = torch.index_select(self._sin_cached, 0, position_ids) return (cos.unsqueeze(1), sin.unsqueeze(1)) class SuRotaryEmbedding(PositionRotaryEmbedding): def __init__(self, short_inv_freq, long_inv_freq, scaling_factor, original_max_position_embeddings): super(PositionRotaryEmbedding, self).__init__() self.short_inv_freq = short_inv_freq self.long_inv_freq = long_inv_freq self.scaling_factor = scaling_factor self.original_max_position_embeddings = original_max_position_embeddings self._seq_len_cached = 0 self._cos_cached = None self._sin_cached = None self._cos_k_cached = None self._sin_k_cached = None self.dynamic_args = None def _update_cos_sin_cache(self, dtype, device, seqlen): if seqlen > self._seq_len_cached or self._cos_cached.device != device or self._cos_cached.dtype != dtype: self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.short_inv_freq.dtype) short_freqs = torch.outer(t[:self.original_max_position_embeddings], self.short_inv_freq.to(device=t.device)) long_freqs = torch.outer(t[self.original_max_position_embeddings:], self.long_inv_freq.to(device=t.device)) freqs = torch.cat([short_freqs, long_freqs]) self._cos_cached = (torch.cos(freqs) * self.scaling_factor).to(dtype) self._sin_cached = (torch.sin(freqs) * self.scaling_factor).to(dtype) class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding): def __init__(self, dim, max_position_embeddings, base, device, scaling_factor): inv_freq = _create_inv_freq(dim, base, device) super().__init__(inv_freq, scaling_factor) self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base def _update_cos_sin_cache(self, dtype, device, seqlen): if seqlen > self._seq_len_cached or self._cos_cached.device != device or self._cos_cached.dtype != dtype: if seqlen > self.max_position_embeddings: newbase = self.base * (self.scaling_factor * seqlen / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2)) self.inv_freq = _create_inv_freq(self.dim, newbase, self.inv_freq.device) self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) freqs = torch.outer(t, self.inv_freq.to(device=t.device)) self._cos_cached = torch.cos(freqs).to(dtype) self._sin_cached = torch.sin(freqs).to(dtype) def find_correction_dim(num_rotations, dim, 
base=10000, max_position_embeddings=2048): return dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi)) / (2 * math.log(base)) def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048): low = math.floor(find_correction_dim(low_rot, dim, base, max_position_embeddings)) high = math.ceil(find_correction_dim(high_rot, dim, base, max_position_embeddings)) return (max(low, 0), min(high, dim - 1)) def linear_ramp_mask(min, max, dim): if min == max: max += 0.001 linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min) ramp_func = torch.clamp(linear_func, 0, 1) return ramp_func def get_mscale(scale: float=1.0, mscale: float=1.0): if scale <= 1: return 1.0 return 0.1 * mscale * math.log(scale) + 1.0 class YarnPositionRotaryEmbedding(PositionRotaryEmbedding): def __init__(self, dim, max_position_embeddings, base, device, scaling_factor, *, extrapolation_factor, attn_factor, beta_fast, beta_slow, mscale: float, mscale_all_dim: float): inv_freq = _create_inv_freq(dim, base, device) super().__init__(inv_freq, scaling_factor) self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base self.extrapolation_factor = extrapolation_factor self.attn_factor = attn_factor self.beta_fast = beta_fast self.beta_slow = beta_slow self.mscale_all_dim = mscale_all_dim self.scaling_factor = scaling_factor self.mscale = float(get_mscale(self.scaling_factor, mscale) / get_mscale(self.scaling_factor, mscale_all_dim) * self.attn_factor) def _update_cos_sin_cache(self, dtype, device, seqlen): if seqlen > self._seq_len_cached or self._cos_cached.device != device or self._cos_cached.dtype != dtype: if seqlen > self.max_position_embeddings or True: inv_freq_extrapolation = _create_inv_freq(self.dim, self.base, self.inv_freq.device) freqs = 1.0 / inv_freq_extrapolation inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs) (low, high) = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.max_position_embeddings) inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask self.inv_freq = inv_freq self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) freqs = torch.outer(t, self.inv_freq.to(device=t.device)) self._cos_cached = (torch.cos(freqs) * self.mscale).to(dtype) self._sin_cached = (torch.sin(freqs) * self.mscale).to(dtype) def apply_llama3_scaling(freqs: torch.Tensor, *, scaling_factor: int, low_freq_factor: int, high_freq_factor: int, original_max_position_embeddings: int): low_freq_wavelen = original_max_position_embeddings / low_freq_factor high_freq_wavelen = original_max_position_embeddings / high_freq_factor new_freqs = [] for freq in freqs: wavelen = 2 * math.pi / freq if wavelen < high_freq_wavelen: new_freqs.append(freq) elif wavelen > low_freq_wavelen: new_freqs.append(freq / scaling_factor) else: assert low_freq_wavelen != high_freq_wavelen smooth = (original_max_position_embeddings / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor) new_freqs.append((1 - smooth) * freq / scaling_factor + smooth * freq) return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device) # File: text-generation-inference-main/server/text_generation_server/layers/speculative.py import torch import json from typing import Tuple, Optional from text_generation_server.layers.tensor_parallel import 
TensorParallelHead from text_generation_server.layers.medusa import MedusaHeadV1, MedusaHeadV2 from text_generation_server.layers.mlp import MLPSpeculatorHead class SpeculativeHead(torch.nn.Module): def __init__(self, lm_head, speculator): super().__init__() self.head = lm_head self.speculator = speculator @staticmethod def load(config, prefix: str, weights): speculator = config.speculator if speculator: speculator_path = config.speculator['path'] speculator_config = str(speculator_path / 'config.json') with open(speculator_config, 'r') as f: speculator_config = json.load(f) config.speculator_config = speculator_config try: architecture = speculator_config['architectures'][0] if architecture == 'MLPSpeculatorPreTrainedModel': speculator = MLPSpeculatorHead.load(config, prefix, weights) else: speculator = None except KeyError: try: speculator = MedusaHeadV1.load(config, prefix, weights) except Exception: speculator = MedusaHeadV2(config, prefix, weights) lm_head = None else: lm_head = TensorParallelHead.load(config, prefix, weights) speculator = None return SpeculativeHead(lm_head, speculator) def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.speculator is not None: return self.speculator(input) assert self.head is not None logits = self.head(input) return (logits, None) # File: text-generation-inference-main/server/text_generation_server/layers/tensor_parallel.py import torch from torch.nn import functional as F from typing import Iterable, List from text_generation_server.layers.linear import get_linear, FastLinear from text_generation_server.utils.import_utils import SYSTEM if SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex class LayerConcat(torch.nn.Module): def __init__(self, layers: Iterable[torch.nn.Module], dim: int=-1): super().__init__() self.layers = layers self.dim = dim def forward(self, x: torch.Tensor): outputs = [layer(x) for layer in self.layers] return torch.cat(outputs, self.dim) class SuperLayer(torch.nn.Module): def __init__(self, linear): super().__init__() self.linear = linear def forward(self, x): return self.linear.forward(x) class TensorParallelHead(SuperLayer): def __init__(self, linear, process_group, should_gather: bool): super().__init__(linear) self.process_group = process_group self.should_gather = should_gather @staticmethod def load(config, prefix: str, weights): if config.quantize == 'exl2': try: weight = weights.get_tensor(f'{prefix}.weight') except Exception: weight = weights.get_weights_col(prefix) should_gather = weights.process_group.size() > 1 elif weights.process_group.size() > 1: try: weight = weights.get_sharded(f'{prefix}.weight', dim=0) should_gather = True except AssertionError: weight = weights.get_tensor(f'{prefix}.weight') should_gather = False else: weight = weights.get_tensor(f'{prefix}.weight') should_gather = False return TensorParallelHead(get_linear(weight, bias=None), process_group=weights.process_group, should_gather=should_gather) def forward(self, input: torch.Tensor) -> torch.Tensor: if not self.should_gather: return super().forward(input) world_size = self.process_group.size() if len(input.shape) == 2 and isinstance(self.linear, FastLinear): out_dim = self.linear.weight.shape[0] if input.shape[0] == 1: world_out = input.new_empty(1, out_dim * world_size) local_out = input.new_empty(1, out_dim) gather_input = local_out else: world_out = input.new_empty(out_dim * world_size, input.shape[0]) gather_input = input.new_empty(out_dim, input.shape[0]) local_out = gather_input.T 
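# NOTE (added explanatory comment, not in the original source): fast path for 2D inputs with a
# plain FastLinear head. Each rank computes only its vocabulary shard of the logits with a single
# matmul written directly into the gather buffer: for batch size 1, `gather_input` is the local
# output itself; for larger batches, `local_out = gather_input.T` is a transposed view, so the
# (out_dim, batch) chunk stays contiguous for all_gather_into_tensor. The gathered tensor is
# returned as-is for batch size 1 and transposed back otherwise, placing the vocabulary shards
# on the last dimension without an extra copy.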
torch.mm(input, self.linear.weight.T, out=local_out) if SYSTEM == 'ipex': ipex.distributed.all_gather_into_tensor(world_out, gather_input, group=self.process_group) else: torch.distributed.all_gather_into_tensor(world_out, gather_input, group=self.process_group) if input.shape[0] == 1: return world_out return world_out.T output = super().forward(input) world_output = [torch.empty_like(output) for _ in range(self.process_group.size())] if SYSTEM == 'ipex': ipex.distributed.all_gather(world_output, output, group=self.process_group) else: torch.distributed.all_gather(world_output, output, group=self.process_group) world_output = torch.cat(world_output, dim=-1) return world_output class TensorParallelColumnLinear(SuperLayer): @classmethod def load_gate_up(cls, config, prefix: str, weights, bias: bool): weight = weights.get_weights_col_packed_gate_up(prefix) if bias: raise NotImplementedError('packed_gate_up only implemented without bias') else: bias = None linear = get_linear(weight, bias) return cls(linear) @classmethod def load_qkv(cls, config, prefix: str, weights, bias: bool, num_heads: int, num_key_value_heads: int): weight = weights.get_weights_col_packed_qkv(prefix, num_heads=num_heads, num_key_value_heads=num_key_value_heads) if bias: raise NotImplementedError('packed_qkv only implemented for baichuan') else: bias = None linear = get_linear(weight, bias) return cls(linear) @classmethod def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_weights_col(prefix) if bias: bias = weights.get_sharded(f'{prefix}.bias', dim=0) else: bias = None linear = get_linear(weight, bias) return cls(linear) @classmethod def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): if config.quantize == 'exl2': linears = [] for prefix in prefixes: weight = weights.get_weights_col(prefix) b = weights.get_tensor(f'{prefix}.bias') if bias else None linears.append(get_linear(weight, b)) linear = LayerConcat(linears) else: weight = weights.get_multi_weights_col(prefixes, dim=dim) if bias: b = [weights.get_sharded(f'{p}.bias', dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias) return cls(linear) class TensorParallelRowLinear(SuperLayer): def __init__(self, linear, process_group): super().__init__(linear) self.process_group = process_group @classmethod def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None return cls(get_linear(weight, bias), process_group=weights.process_group) def forward(self, input: torch.Tensor, reduce: bool=True) -> torch.Tensor: out = super().forward(input) if self.process_group.size() > 1 and reduce: if SYSTEM == 'ipex': ipex.distributed.all_reduce(out, group=self.process_group) else: torch.distributed.all_reduce(out, group=self.process_group) return out class TensorParallelEmbedding(torch.nn.Module): def __init__(self, prefix: str, weights, reduce=True): super().__init__() weight = weights.get_partial_sharded(f'{prefix}.weight', dim=0) num_embeddings = weights.get_shape(f'{prefix}.weight')[0] process_group = weights.process_group world_size = process_group.size() rank = process_group.rank() block_size = (num_embeddings + world_size - 1) // world_size self.min_id = rank * block_size self.max_id = min(num_embeddings, (rank + 1) * block_size) self.null_idx = weight.shape[0] self.process_group = weights.process_group self.reduce = reduce '' 
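# NOTE (added explanatory comment, not in the original source): the weight shard is padded with
# one extra all-zero row; forward() remaps any token id outside [min_id, max_id) to this
# `null_idx` row, so each rank contributes zeros for tokens owned by other ranks and the
# subsequent all_reduce reconstructs the full embedding output.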
self.weight = torch.nn.Parameter(F.pad(weight, (0, 0, 0, 1))) def forward(self, input: torch.Tensor) -> torch.Tensor: input = torch.where((self.min_id > input) | (input >= self.max_id), self.null_idx, input - self.min_id) out = torch.nn.functional.embedding(input, self.weight) if self.reduce and self.process_group.size() > 1: if SYSTEM == 'ipex': ipex.distributed.all_reduce(out, group=self.process_group) else: torch.distributed.all_reduce(out, group=self.process_group) return out # File: text-generation-inference-main/server/text_generation_server/models/__init__.py import torch import enum import os from loguru import logger from transformers.configuration_utils import PretrainedConfig from transformers.models.auto import modeling_auto from huggingface_hub import hf_hub_download, HfApi from typing import Optional, List, Dict from pathlib import Path from text_generation_server.utils.speculate import get_speculate, set_speculate from text_generation_server.models.model import Model from text_generation_server.models.causal_lm import CausalLM, CausalLMBatchKeysLast from text_generation_server.models.custom_modeling.opt_modeling import OPTForCausalLM from text_generation_server.models.custom_modeling.mpt_modeling import MPTForCausalLM from text_generation_server.models.bloom import BloomCausalLMBatch from text_generation_server.models.custom_modeling.bloom_modeling import BloomForCausalLM from text_generation_server.models.seq2seq_lm import Seq2SeqLM from text_generation_server.models.galactica import GalacticaCausalLMBatch from text_generation_server.models.custom_modeling.neox_modeling import GPTNeoxForCausalLM from text_generation_server.models.custom_modeling.phi_modeling import PhiConfig, PhiForCausalLM from text_generation_server.models.custom_modeling.t5_modeling import T5ForConditionalGeneration from text_generation_server.utils.adapter import AdapterParameters, build_layer_weight_lookup, load_and_merge_adapters, AdapterInfo from text_generation_server.adapters.lora import LoraWeights from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.log import log_master torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_grad_enabled(False) __all__ = ['Model', 'CausalLM', 'Seq2SeqLM', 'get_model_with_lora_adapters'] FLASH_ATT_ERROR_MESSAGE = '{} requires Flash Attention enabled models.' 
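# NOTE (added explanatory comment, not in the original source): the try/except block below eagerly
# imports every Flash-Attention based model class; if any import fails (missing kernels, unsupported
# hardware, ...), FLASH_ATTENTION and SUPPORTS_WINDOWING are set to False and get_model() falls back
# to the non-flash CausalLM / Seq2SeqLM implementations. A minimal usage sketch, kept as a comment so
# the module's import-time behaviour is unchanged (the model id is only an example):
#
#   model = get_model(
#       model_id="meta-llama/Llama-3.1-8B-Instruct",
#       lora_adapter_ids=[], revision=None, sharded=False, quantize=None,
#       speculate=None, dtype="bfloat16", trust_remote_code=False, max_input_tokens=4096,
#   )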
FLASH_ATTENTION = True try: from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.models.vlm_causal_lm import VlmCausalLM from text_generation_server.models.custom_modeling.flash_deepseek_v2_modeling import FlashDeepseekV2ForCausalLM, DeepseekV2Config from text_generation_server.models.custom_modeling.flash_llama_modeling import FlashLlamaForCausalLM from text_generation_server.models.custom_modeling.flash_cohere_modeling import FlashCohereForCausalLM from text_generation_server.models.custom_modeling.flash_gemma_modeling import FlashGemmaForCausalLM from text_generation_server.models.custom_modeling.flash_gemma2_modeling import FlashGemma2ForCausalLM from text_generation_server.models.custom_modeling.flash_dbrx_modeling import FlashDbrxForCausalLM, DbrxConfig from text_generation_server.models.custom_modeling.flash_rw_modeling import RWConfig, FlashRWForCausalLM from text_generation_server.models.custom_modeling.flash_neox_modeling import FlashGPTNeoXForCausalLM from text_generation_server.models.pali_gemma import PaliGemmaBatch from text_generation_server.models.custom_modeling.flash_pali_gemma_modeling import PaliGemmaForConditionalGeneration from text_generation_server.models.custom_modeling.flash_phi_modeling import FlashPhiForCausalLM from text_generation_server.models.idefics import IDEFICSSharded from text_generation_server.models.custom_modeling.llava_next import LlavaNextForConditionalGeneration from text_generation_server.models.custom_modeling.flash_santacoder_modeling import FlashSantacoderForCausalLM from text_generation_server.models.custom_modeling.flash_starcoder2_modeling import FlashStarcoder2ForCausalLM from text_generation_server.models.custom_modeling.flash_qwen2_modeling import Qwen2ForCausalLM from text_generation_server.models.custom_modeling.flash_mistral_modeling import FlashMistralForCausalLM from text_generation_server.models.custom_modeling.flash_mixtral_modeling import FlashMixtralForCausalLM from text_generation_server.models.custom_modeling.flash_gpt2_modeling import FlashGPT2ForCausalLM from text_generation_server.models.custom_modeling.flash_gptj_modeling import FlashGPTJForCausalLM from text_generation_server.models.custom_modeling.idefics2 import Idefics2ForConditionalGeneration from text_generation_server.layers.attention import SUPPORTS_WINDOWING except ImportError as e: log_master(logger.warning, f'Could not import Flash Attention enabled models: {e}') SUPPORTS_WINDOWING = False FLASH_ATTENTION = False if FLASH_ATTENTION: __all__.append(FlashCausalLM) __all__.append(IDEFICSSharded) MAMBA_AVAILABLE = True try: from text_generation_server.models.mamba import Mamba except ImportError as e: log_master(logger.warning, f'Could not import Mamba: {e}') MAMBA_AVAILABLE = False if MAMBA_AVAILABLE: __all__.append(Mamba) class ModelType(enum.Enum): DEEPSEEK_V2 = {'type': 'deepseek_v2', 'name': 'Deepseek V2', 'url': 'https://huggingface.co/deepseek-ai/DeepSeek-V2'} IDEFICS2 = {'type': 'idefics2', 'name': 'Idefics 2', 'url': 'https://huggingface.co/HuggingFaceM4/idefics2-8b', 'multimodal': True} LLAVA_NEXT = {'type': 'llava_next', 'name': 'Llava Next (1.6)', 'url': 'https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf', 'multimodal': True} LLAMA = {'type': 'llama', 'name': 'Llama', 'url': 'https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f'} PHI3 = {'type': 'phi3', 'name': 'Phi 3', 'url': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct'} GEMMA = {'type': 'gemma', 'name': 
'Gemma', 'url': 'https://huggingface.co/google/gemma-7b'} PALIGEMMA = {'type': 'paligemma', 'name': 'PaliGemma', 'url': 'https://huggingface.co/google/paligemma-3b-pt-224'} GEMMA2 = {'type': 'gemma2', 'name': 'Gemma2', 'url': 'https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315'} COHERE = {'type': 'cohere', 'name': 'Cohere', 'url': 'https://huggingface.co/CohereForAI/c4ai-command-r-plus'} DBRX = {'type': 'dbrx', 'name': 'Dbrx', 'url': 'https://huggingface.co/databricks/dbrx-instruct'} MAMBA = {'type': 'ssm', 'name': 'Mamba', 'url': 'https://huggingface.co/state-spaces/mamba-2.8b-slimpj'} MISTRAL = {'type': 'mistral', 'name': 'Mistral', 'url': 'https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407'} MIXTRAL = {'type': 'mixtral', 'name': 'Mixtral', 'url': 'https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1'} GPT_BIGCODE = {'type': 'gpt_bigcode', 'name': 'Gpt Bigcode', 'url': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder'} PHI = {'type': 'phi', 'name': 'Phi', 'url': 'https://huggingface.co/microsoft/phi-1_5'} BAICHUAN = {'type': 'baichuan', 'name': 'Baichuan', 'url': 'https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat'} FALCON = {'type': 'falcon', 'name': 'Falcon', 'url': 'https://huggingface.co/tiiuae/falcon-7b-instruct'} STARCODER2 = {'type': 'starcoder2', 'name': 'StarCoder 2', 'url': 'https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1'} QWEN2 = {'type': 'qwen2', 'name': 'Qwen 2', 'url': 'https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f'} OPT = {'type': 'opt', 'name': 'Opt', 'url': 'https://huggingface.co/facebook/opt-6.7b'} T5 = {'type': 't5', 'name': 'T5', 'url': 'https://huggingface.co/google/flan-t5-xxl'} GALACTICA = {'type': 'galactica', 'name': 'Galactica', 'url': 'https://huggingface.co/facebook/galactica-120b'} SANTACODER = {'type': 'santacoder', 'name': 'SantaCoder', 'url': 'https://huggingface.co/bigcode/santacoder'} BLOOM = {'type': 'bloom', 'name': 'Bloom', 'url': 'https://huggingface.co/bigscience/bloom-560m'} MPT = {'type': 'mpt', 'name': 'Mpt', 'url': 'https://huggingface.co/mosaicml/mpt-7b-instruct'} GPT2 = {'type': 'gpt2', 'name': 'Gpt2', 'url': 'https://huggingface.co/openai-community/gpt2'} GPT_NEOX = {'type': 'gpt_neox', 'name': 'Gpt Neox', 'url': 'https://huggingface.co/EleutherAI/gpt-neox-20b'} GPTJ = {'type': 'gptj', 'name': 'Gptj', 'url': 'https://huggingface.co/EleutherAI/gpt-j-6b'} IDEFICS = {'type': 'idefics', 'name': 'Idefics', 'url': 'https://huggingface.co/HuggingFaceM4/idefics-9b', 'multimodal': True} __GLOBALS = locals() for data in ModelType: __GLOBALS[data.name] = data.value['type'] def get_model(model_id: str, lora_adapter_ids: Optional[List[str]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int) -> Model: global FLASH_ATTENTION (config_dict, _) = PretrainedConfig.get_config_dict(model_id, revision=revision, trust_remote_code=trust_remote_code) model_type = config_dict.get('model_type', None) quantization_config = config_dict.get('quantization_config', None) if quantization_config is not None and quantize is None: method = quantization_config.get('quant_method', None) if method in {'gptq', 'awq', 'exl2'}: log_master(logger.info, f'Auto selecting quantization method {method}') quantize = method elif method == 'fbgemm_fp8': log_master(logger.info, 'Auto selecting quantization method fp8') quantize = 'fp8' else: log_master(logger.warning, f'Unknown 
quantization method {method}') if dtype is None: if quantize in ['awq', 'exl2', 'gptq', 'marlin']: dtype = torch.float16 elif quantize == 'fp8': from text_generation_server.layers.fp8 import FBGEMM_DYN_AVAILABLE if FBGEMM_DYN_AVAILABLE: dtype = torch.bfloat16 else: dtype = None elif dtype == 'float16': dtype = torch.float16 elif dtype == 'bfloat16': dtype = torch.bfloat16 else: raise RuntimeError(f'Unknown dtype {dtype}') if speculate is not None: set_speculate(speculate) else: set_speculate(0) speculator = None if 'medusa_num_heads' in config_dict: medusa_model_id = model_id medusa_revision = revision model_id = config_dict['base_model_name_or_path'] revision = 'main' speculate_medusa = config_dict['medusa_num_heads'] if speculate is not None: if speculate > speculate_medusa: raise RuntimeError(f'Speculate is set to `{speculate}` but this Medusa model only has `{speculate_medusa}` heads, please make them match') else: set_speculate(speculate) else: set_speculate(speculate_medusa) (config_dict, _) = PretrainedConfig.get_config_dict(model_id, revision=revision, trust_remote_code=trust_remote_code) model_type = config_dict.get('model_type', None) is_local = Path(medusa_model_id).exists() if not is_local: medusa_config = hf_hub_download(medusa_model_id, revision=medusa_revision, filename='config.json') hf_hub_download(medusa_model_id, revision=medusa_revision, filename='medusa_lm_head.safetensors') speculator = {'path': Path(medusa_config).parent, 'model_paths': ['medusa_lm_head.safetensors']} else: speculator = {'path': Path(medusa_model_id), 'model_paths': ['medusa_lm_head.safetensors']} method = 'medusa' elif model_type == 'mlp_speculator': mlp_model_id = model_id mlp_revision = revision model_id = config_dict['base_model_name_or_path'] revision = 'main' speculate_mlp = config_dict['n_predict'] if speculate is not None: if speculate > speculate_mlp: raise RuntimeError(f'Speculate is set to `{speculate}` but this mlp_speculator model only has `{speculate_mlp}` heads, please make them match') else: set_speculate(speculate) else: set_speculate(speculate_mlp) (config_dict, _) = PretrainedConfig.get_config_dict(model_id, revision=revision, trust_remote_code=trust_remote_code) model_type = config_dict.get('model_type', None) is_local = Path(mlp_model_id).exists() extension = '.safetensors' if not is_local: mlp_speculator_config = hf_hub_download(mlp_model_id, revision=mlp_revision, filename='config.json') api = HfApi() info = api.model_info(mlp_model_id, revision=mlp_revision) filenames = [s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split('/')) == 1 and ('arguments' not in s.rfilename) and ('args' not in s.rfilename) and ('training' not in s.rfilename)] for filename in filenames: hf_hub_download(mlp_model_id, revision=mlp_revision, filename=filename) speculator_dir_path = Path(mlp_speculator_config).parent filenames.extend([p for p in os.listdir(speculator_dir_path) if p.endswith(extension)]) speculator = {'path': Path(mlp_speculator_config).parent, 'model_paths': filenames} else: speculator = Path(mlp_model_id) filenames = [p for p in os.listdir(speculator) if p.endswith(extension)] speculator = {'path': speculator, 'model_paths': filenames} method = 'mlp_speculator' else: method = 'n-gram' speculate = get_speculate() if speculate > 0: log_master(logger.info, f'Using speculation {method} with {speculate} input ids.') if model_type is None: if 'ssm_cfg' in config_dict: model_type = 'ssm' else: raise RuntimeError(f'Could not determine model type for
{model_id} revision {revision}') if quantize == 'exl2' and sharded: raise RuntimeError('Sharding is currently not supported with `exl2` quantization') sliding_window = config_dict.get('sliding_window') if config_dict.get('sliding_window') is not None else -1 use_sliding_window = sliding_window is not None and sliding_window != -1 needs_sliding_window = max_input_tokens is not None and max_input_tokens > sliding_window if use_sliding_window and needs_sliding_window and (not SUPPORTS_WINDOWING): raise ValueError(f'The backend {SYSTEM} does not support sliding window attention that is used by the model type {model_type}. To use this model nonetheless with the {SYSTEM} backend, please launch TGI with the argument `--max-input-tokens` smaller than sliding_window={sliding_window} (got here max_input_tokens={max_input_tokens}).') if model_type == DEEPSEEK_V2: if FLASH_ATTENTION: head_size = max(config_dict.get('qk_nope_dim', 128) + config_dict.get('qk_rope_dim', 64), config_dict.get('v_head_dim', 128)) return FlashCausalLM(model_id=model_id, model_class=FlashDeepseekV2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, default_dtype=torch.bfloat16, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DeepseekV2Config, head_size=head_size) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Deepseek V2')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == MAMBA: return Mamba(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_id.startswith('facebook/galactica'): return CausalLM(model_id=model_id, model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=GalacticaCausalLMBatch) if model_type == GPT_BIGCODE or (model_type == GPT2 and model_id.startswith('bigcode/')): if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashSantacoderForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, aliases={'transformer.wte.weight': ['lm_head.weight']}, num_kv_heads=1) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Santacoder')) else: return CausalLM.fallback(model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == BLOOM: return CausalLM(model_id=model_id, model_class=BloomForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=BloomCausalLMBatch) elif model_type == MPT: return CausalLM(model_id=model_id, model_class=MPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=CausalLMBatchKeysLast) elif model_type == GPT2: if FLASH_ATTENTION: try: return FlashCausalLM(model_id=model_id, model_class=FlashGPT2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) except RuntimeError as e: log_master(logger.warning, f"Couldn't load flash gpt2 variant: {e}") return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, 
dtype=dtype, trust_remote_code=trust_remote_code) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded GPT-2')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == GPTJ: if FLASH_ATTENTION: try: return FlashCausalLM(model_id=model_id, model_class=FlashGPTJForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) except RuntimeError as e: log_master(logger.warning, f"Couldn't load flash gptj variant: {e}") return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded GPT-J')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == GPT_NEOX: if FLASH_ATTENTION: from text_generation_server.models.custom_modeling.flash_neox_modeling import GPTNeoXConfig return FlashCausalLM(model_id=model_id, model_class=FlashGPTNeoXForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=GPTNeoXConfig) elif sharded: return CausalLM(model_id=model_id, model_class=GPTNeoxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == PHI: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashPhiForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == 'phi-msft': if FLASH_ATTENTION: raise NotImplementedError('Legacy phi-msft is not supported with Flash Attention') else: return CausalLM(model_id=model_id, model_class=PhiForCausalLM, config_class=PhiConfig, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == LLAMA or model_type == BAICHUAN or model_type == PHI3: log_master(logger.debug, f'model_type: {model_type}') if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashLlamaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Llama')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == GEMMA: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashGemmaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Gemma')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator,
dtype=dtype, trust_remote_code=trust_remote_code) elif model_type == GEMMA2: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashGemma2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Gemma2')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == COHERE: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashCohereForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Cohere')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == DBRX: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashDbrxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DbrxConfig) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded DBRX')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type in ['RefinedWeb', 'RefinedWebModel', FALCON]: if sharded: if FLASH_ATTENTION: if config_dict.get('alibi', False): raise NotImplementedError('sharded is not supported for this model') return FlashCausalLM(model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, aliases={'lm_head.weight': ['transformer.word_embeddings.weight'], 'transformer.word_embeddings.weight': ['lm_head.weight']}, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig) raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Falcon')) elif FLASH_ATTENTION and (not config_dict.get('alibi', False)): return FlashCausalLM(model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, aliases={'lm_head.weight': ['transformer.word_embeddings.weight'], 'transformer.word_embeddings.weight': ['lm_head.weight']}, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == MISTRAL: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashMistralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Mistral')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == MIXTRAL: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashMixtralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, 
trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Mixtral')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == STARCODER2: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=FlashStarcoder2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Starcoder2')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == QWEN2: if FLASH_ATTENTION: return FlashCausalLM(model_id=model_id, model_class=Qwen2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Sharded Qwen2')) else: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == OPT: return CausalLM(model_id=model_id, model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type == T5: return Seq2SeqLM(model_id=model_id, model_class=T5ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, aliases={'shared.weight': ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']}) if model_type == IDEFICS: if FLASH_ATTENTION: return IDEFICSSharded(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Idefics')) if model_type == IDEFICS2: if FLASH_ATTENTION: return VlmCausalLM(model_id=model_id, model_class=Idefics2ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, processor_kwargs={'size': {'longest_edge': 448, 'shortest_edge': 378}}) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Idefics')) if model_type == PALIGEMMA: if FLASH_ATTENTION: return VlmCausalLM(model_id=model_id, model_class=PaliGemmaForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, batch_class=PaliGemmaBatch) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('Idefics')) if model_type == LLAVA_NEXT: if FLASH_ATTENTION: return VlmCausalLM(model_class=LlavaNextForConditionalGeneration, model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format('LlavaNext')) if sharded: raise NotImplementedError('sharded is not supported for AutoModel') if quantize == 'gptq': raise NotImplementedError('gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`') if quantize == 'awq': raise NotImplementedError('awq 
quantization is not supported for AutoModel') elif quantize == 'bitsandbytes-fp4' or quantize == 'bitsandbytes-nf4': raise NotImplementedError('4bit quantization is not supported for AutoModel') elif quantize == 'eetq': raise NotImplementedError('Eetq quantization is not supported for AutoModel') elif quantize == 'exl2': raise NotImplementedError('exl2 quantization is not supported for AutoModel') if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES: return Seq2SeqLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) auto_map = config_dict.get('auto_map', None) if trust_remote_code and auto_map is not None: if 'AutoModelForCausalLM' in auto_map.keys(): return CausalLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) if 'AutoModelForSeq2SeqLM' in auto_map.keys(): return Seq2SeqLM.fallback(model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code) raise ValueError(f'Unsupported model type {model_type}') def get_model_with_lora_adapters(model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int, adapter_to_index: Dict[str, int]): lora_adapter_ids = [adapter.id for adapter in lora_adapters] model = get_model(model_id, lora_adapter_ids, revision, sharded, quantize, speculate, dtype, trust_remote_code, max_input_tokens) if len(lora_adapters) > 0: target_to_layer = build_layer_weight_lookup(model.model) for (index, adapter) in enumerate(lora_adapters): adapter_parameters = AdapterParameters(adapter_info=[adapter], weights=None, merge_strategy=0, density=1.0, majority_sign_method=0) adapter_index = index + 1 adapter_to_index[adapter.id] = adapter_index logger.info(f"Loading adapter weights into model: {','.join([adapter.id for adapter in adapter_parameters.adapter_info])}") weight_names = tuple([v[0] for v in target_to_layer.values()]) (module_map, adapter_config, adapter_weight_names, adapter_tokenizer) = load_and_merge_adapters(model.model_id, adapter_parameters, adapter_index, weight_names, False) unused_weight_names = adapter_weight_names.copy() adapter_layers = ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj', 'qkv_proj'] for layer_name in adapter_layers: nlayers = 1 if layer_name == 'lm_head' else len(model.model.model.layers) adapter_weights = LoraWeights.prepare_weights(config=adapter_config, module_map=module_map, layer_type=layer_name, unused_weight_names=unused_weight_names, nlayers=nlayers, dtype=model.dtype, world_size=model.world_size, process_group=model.process_group, target_to_layer=target_to_layer) if adapter_weights is None: continue model.layer_to_adapter_weights[layer_name].add_adapter(adapter_index, adapter_weights) if len(unused_weight_names) > 0: logger.warning(f"{','.join([a.id for a in lora_adapters])} unused adapter weights: {unused_weight_names}") if adapter_tokenizer is not None: model.tokenizers.add_tokenizer(adapter_index, adapter_tokenizer) model.loaded_adapters.add(adapter_index) return model # File: 
text-generation-inference-main/server/text_generation_server/models/bloom.py import torch import torch.distributed from typing import Optional, Type from transformers import PreTrainedTokenizerBase from text_generation_server.models import CausalLM from text_generation_server.models.causal_lm import CausalLMBatch from text_generation_server.pb import generate_pb2 class BloomCausalLMBatch(CausalLMBatch): @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'CausalLMBatch': batch = super().from_pb(pb=pb, tokenizer=tokenizer, dtype=dtype, device=device) batch.keys_head_dim_last = False return batch class BLOOMSharded(CausalLM): @property def batch_type(self) -> Type[CausalLMBatch]: return BloomCausalLMBatch def forward(self, input_ids, attention_mask, position_ids, past_key_values: Optional=None): (outputs, speculative_logits) = self.model.forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=True) logits = outputs.logits return (logits, speculative_logits, outputs.past_key_values) # File: text-generation-inference-main/server/text_generation_server/models/causal_lm.py import torch import time import torch.distributed from dataclasses import dataclass from opentelemetry import trace from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, PreTrainedTokenizerBase from typing import Optional, Tuple, List, Type, Dict from text_generation_server.utils import initialize_torch_distributed, weight_files, Weights from text_generation_server.models import Model from text_generation_server.utils.chunks import concat_text_chunks from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.models.types import Batch, Tokens, Generation, GeneratedText from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling tracer = trace.get_tracer(__name__) @dataclass class CausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] input_ids: torch.Tensor attention_mask: torch.Tensor position_ids: torch.Tensor past_key_values: Optional[List[Tuple]] all_input_ids: List[torch.Tensor] input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor max_input_length: int padding_right_offset: int max_tokens: int keys_head_dim_last: bool = True def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch(id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens) @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'CausalLMBatch': inputs = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for (i, r) in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(concat_text_chunks(r.input_chunks.chunks)) next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = 
StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max(padding_right_offset, stopping_criteria.max_new_tokens) tokenized_inputs = tokenizer(inputs, return_tensors='pt', padding=True, return_token_type_ids=False, truncation=True, max_length=max_truncation).to(device) for _ in pb.requests: input_len = tokenized_inputs['input_ids'].shape[1] prefix_offsets.append(input_len - 5) read_offsets.append(input_len) input_lengths = tokenized_inputs['attention_mask'].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs['input_ids'] attention_mask = input_ids.new_zeros((pb.size, max_input_length + padding_right_offset)) attention_mask[:, :max_input_length] = tokenized_inputs['attention_mask'] position_ids = tokenized_inputs['attention_mask'].long().cumsum(-1) - 1 position_ids.masked_fill_(tokenized_inputs['attention_mask'] == 0, 1) all_input_ids = tokenized_inputs['input_ids'].T.split(1, dim=1) top_n_tokens_tensor = torch.tensor(top_n_tokens, device=device, dtype=torch.int64) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls(batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens) @tracer.start_as_current_span('filter') def filter(self, request_ids: List[int]) -> Optional['CausalLMBatch']: if len(request_ids) == 0: raise ValueError('Batch must have at least one request') if len(request_ids) == len(self): return self keep_indices = [] requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 for (i, request_id) in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) top_n_tokens.append(self.top_n_tokens[idx]) remaining_decode_tokens = stopping_criteria.max_new_tokens - stopping_criteria.current_tokens total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max(new_padding_right_offset, remaining_decode_tokens) input_ids = self.input_ids[keep_indices] position_ids = self.position_ids[keep_indices] self.attention_mask = self.attention_mask[keep_indices, -(self.padding_right_offset + max_input_length):self.attention_mask.shape[1] - self.padding_right_offset + 
new_padding_right_offset] if type(self.past_key_values[0]) is tuple: self.past_key_values = [list(layer) for layer in self.past_key_values] past_kv_length = max_input_length - 1 for layer in self.past_key_values: (past_keys, past_values) = layer if len(past_keys.shape) == 3: past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:]) past_values = past_values.view(len(self), -1, *past_values.shape[-2:]) if self.keys_head_dim_last: layer[0] = past_keys[keep_indices, :, -past_kv_length:, :] else: layer[0] = past_keys[keep_indices, :, :, -past_kv_length:] del past_keys layer[1] = past_values[keep_indices, :, -past_kv_length:, :] del past_values top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.position_ids = position_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.top_n_tokens = top_n_tokens self.top_n_tokens_tensor = top_n_tokens_tensor self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens return self @classmethod @tracer.start_as_current_span('concatenate') def concatenate(cls, batches: List['CausalLMBatch']) -> 'CausalLMBatch': total_batch_size = 0 max_input_length = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_tokens = 0 input_ids = None attention_mask = None position_ids = None past_key_values = [] top_n_tokens_tensor = None start_index = 0 for (i, batch) in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: for (k, v) in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index end_index = start_index + len(batch) if batch.past_key_values is None: raise ValueError('only concatenate prefilled batches') if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) input_ids[start_index:end_index] = batch.input_ids if attention_mask is None: attention_mask = batch.attention_mask.new_zeros((total_batch_size, max_input_length + padding_right_offset)) if top_n_tokens_tensor is None: top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(total_batch_size) top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor left_offset = max_input_length - batch.max_input_length batch_left_offset = batch.attention_mask.shape[1] - batch.max_input_length - batch.padding_right_offset attention_mask[start_index:end_index, left_offset:-padding_right_offset] = batch.attention_mask[:, batch_left_offset:-batch.padding_right_offset] if position_ids is None: 
position_ids = batch.position_ids.new_empty((total_batch_size, 1)) position_ids[start_index:end_index] = batch.position_ids if isinstance(batch.past_key_values[0], tuple): batch.past_key_values = [[t.view(len(batch), -1, *t.shape[-2:]) for t in layer] for layer in batch.past_key_values] elif len(batch.past_key_values[0][0].shape) == 3: for layer in batch.past_key_values: for (k, t) in enumerate(layer): layer[k] = t.view(len(batch), -1, *t.shape[-2:]) max_tokens += batch.max_tokens + (max_input_length - batch.max_input_length) * len(batch) start_index = end_index first_past_kvs = batches[0].past_key_values (_, num_heads, padded_sequence_length, head_dim) = first_past_kvs[0][1].shape padded_past_values_shape = (total_batch_size, num_heads, max_input_length - 1, head_dim) if batches[0].keys_head_dim_last: padded_past_keys_shape = padded_past_values_shape else: padded_past_keys_shape = (total_batch_size, num_heads, head_dim, max_input_length - 1) for j in range(len(first_past_kvs)): padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape) start_index = 0 for batch in batches: past_keys = batch.past_key_values[j][0] batch.past_key_values[j][0] = None end_index = start_index + len(batch) past_seq_len = batch.max_input_length - 1 if batch.keys_head_dim_last: padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = past_keys[:, :, -past_seq_len:, :] else: padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = past_keys[:, :, :, -past_seq_len:] del past_keys start_index = end_index padded_past_values = first_past_kvs[j][1].new_zeros(padded_past_values_shape) start_index = 0 for batch in batches: past_values = batch.past_key_values[j][1] batch.past_key_values[j][1] = None end_index = start_index + len(batch) past_seq_len = batch.max_input_length - 1 padded_past_values[start_index:end_index, :, -past_seq_len:, :] = past_values[:, :, -past_seq_len:, :] del past_values start_index = end_index past_key_values.append([padded_past_keys, padded_past_values]) return cls(batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens) def __len__(self): return len(self.requests) @dataclass class CausalLMBatchKeysLast(CausalLMBatch): keys_head_dim_last: bool = False class CausalLM(Model): def __init__(self, model_id: str, model_class, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, default_dtype=torch.float16, trust_remote_code: bool=False, tokenizer_class=AutoTokenizer, config_class=AutoConfig, batch_class=CausalLMBatch): self.quantize = quantize self.batch_class = batch_class (self.process_group, rank, world_size) = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f'cuda:{rank}') dtype = default_dtype if dtype is None else dtype elif SYSTEM == 'ipex': if hasattr(torch, 'xpu') and torch.xpu.is_available(): device = torch.device(f'xpu:{rank}') dtype = default_dtype if dtype is None else dtype else: device = 
torch.device('cpu') dtype = torch.bfloat16 if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype tokenizer = tokenizer_class.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) config = config_class.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) config.quantize = quantize config.speculator = speculator if tokenizer.pad_token_id is None: if config.pad_token_id is not None: tokenizer.pad_token_id = config.pad_token_id elif config.eos_token_id is not None: tokenizer.pad_token_id = config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id torch.distributed.barrier(group=self.process_group) weights_loader = get_loader(quantize=quantize, model_id=model_id, revision=revision) filenames = weight_files(model_id, revision=revision, extension='.safetensors') weights = Weights(filenames, device=device, dtype=dtype, process_group=self.process_group, weights_loader=weights_loader) prefix = '' model = model_class(prefix, config, weights) torch.distributed.barrier(group=self.process_group) super().__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size) @classmethod def fallback(cls, model_id: str, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False): if speculator: raise RuntimeError('Speculator decoding is not enabled for AutoModel') if torch.cuda.is_available(): device = torch.device('cuda') dtype = torch.float16 if dtype is None else dtype else: if quantize: raise ValueError('quantization is not available on CPU') device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) model = AutoModelForCausalLM.from_pretrained(model_id, revision=revision, torch_dtype=dtype, device_map='auto' if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, load_in_8bit=quantize == 'bitsandbytes', trust_remote_code=trust_remote_code) if torch.cuda.is_available() and torch.cuda.device_count() == 1 and (quantize != 'bitsandbytes'): model = model.cuda() if tokenizer.pad_token_id is None: if model.config.pad_token_id is not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None: tokenizer.pad_token_id = model.config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({'pad_token': '[PAD]'}) self = cls.__new__(cls) self.batch_class = CausalLMBatch super().__init__(self, model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device) self.quantize = quantize return self @property def batch_type(self) -> Type[CausalLMBatch]: return self.batch_class def forward(self, input_ids, attention_mask, position_ids, past_key_values: Optional=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]]: kwargs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': True, 'return_dict': True} if self.has_position_ids: kwargs['position_ids'] = position_ids outputs = self.model.forward(**kwargs) 
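# Note (added comment): depending on the loaded head, `self.model.forward` may return either a
# regular ModelOutput-style object or an `(outputs, speculative_logits)` tuple (the custom
# modeling code's SpeculativeHead yields the latter, e.g. with a Medusa speculator attached).
# The branch below normalises both shapes before returning logits, speculative logits and the KV cache.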
if isinstance(outputs, tuple): (outputs, speculative_logits) = outputs else: speculative_logits = None return (outputs.logits, speculative_logits, outputs.past_key_values) @tracer.start_as_current_span('generate_token') def generate_token(self, batch: CausalLMBatch) -> Tuple[List[Generation], Optional[CausalLMBatch], Tuple[int, int]]: start = time.time_ns() attention_mask = batch.attention_mask[:, :-batch.padding_right_offset] (logits, speculative_logits, past) = self.forward(batch.input_ids, attention_mask, batch.position_ids, batch.past_key_values) generations: List[Generation] = [] stopped = True accepted_ids = torch.ones_like(batch.input_ids)[:, 0] (batch_top_token_ids, batch_top_token_logprobs) = batch_top_tokens(batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), accepted_ids) start_decode = time.time_ns() iterator = zip(batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, batch.top_n_tokens, batch_top_token_ids, batch_top_token_logprobs) for (i, (request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, top_n_tokens, top_token_ids, top_token_logprobs)) in enumerate(iterator): (next_token_id, logprobs) = next_token_chooser(all_input_ids.view(1, -1), logits[-1:, :]) all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() (next_token_text, prefix_offset, read_offset) = self.decode_token(all_input_ids[:, 0], prefix_offset, read_offset) (stop, reason) = stopping_criteria(next_token_id_squeezed, next_token_text) if not stop: stopped = False if i % self.world_size == self.rank: if stop: (output_text, _, _) = self.decode_token(all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True) if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed) else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: prefill_logprobs = [float('nan')] + torch.log_softmax(logits, -1).gather(1, all_input_ids[1:]).squeeze(1)[-new_input_length:-1].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode(prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) prefill_tokens = Tokens(prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[]) else: prefill_tokens = None if top_n_tokens > 0: all_top_tokens = [] for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): toptoken_texts = self.tokenizer.batch_decode(top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) special_toptokens = [token_id in self.all_special_ids for token_id in top_token_ids] top_tokens = Tokens(top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens) all_top_tokens.append(top_tokens) top_tokens = all_top_tokens else: top_tokens = None generation = Generation(request.id, prefill_tokens, Tokens([next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids]), generated_text, top_tokens) 
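# Note (added comment): once the Generation for this request has been assembled, the loop below
# mutates the batch in place (grammar state of the next-token chooser, next input id, lengths and
# offsets) so the same CausalLMBatch object can be reused for the next decoding step.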
generations.append(generation) batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(next_token_id_squeezed.item()) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, None, (forward_ns, decode_ns)) batch.input_ids = batch.input_ids[:, :1] batch.attention_mask[:, -batch.padding_right_offset] = 1 batch.padding_right_offset -= 1 batch.position_ids = batch.position_ids[:, -1:] + 1 batch.past_key_values = past forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, batch, (forward_ns, decode_ns)) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/bloom_modeling.py """""" import math import os import warnings from typing import Optional, Tuple, Union import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import LayerNorm from torch.nn import functional as F from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from transformers import BloomConfig, PreTrainedModel from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead CUSTOM_KERNELS_ENABLED = False if torch.cuda.is_available() and (not os.environ.get('DISABLE_CUSTOM_KERNELS', 'False') == 'True'): try: from custom_kernels import fused_bloom_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass _CHECKPOINT_FOR_DOC = 'bigscience/bloom-560m' _CONFIG_FOR_DOC = 'BloomConfig' BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = ['bigscience/bigscience-small-testing', 'bigscience/bloom-560m', 'bigscience/bloom-1b1', 'bigscience/bloom-1b7', 'bigscience/bloom-3b', 'bigscience/bloom-7b1', 'bigscience/bloom'] def _make_causal_mask(input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int) -> torch.BoolTensor: (batch_size, target_length) = input_ids_shape mask = torch.ones((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device) mask = mask.triu(1 + past_key_values_length) expanded_mask = mask.unsqueeze(0).expand(batch_size, target_length, target_length + past_key_values_length) return expanded_mask def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: (batch_size, src_length) = mask.shape tgt_length = tgt_length if tgt_length is not None else src_length expanded_mask = ~mask[:, None, :].to(torch.bool) return expanded_mask.expand(batch_size, tgt_length, src_length) def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int) -> torch.Tensor: (batch_size, seq_length) = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = 
torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor return alibi def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: out = F.dropout(x, p=prob, training=training) out = residual + out return out def _split_heads(fused_qkv: torch.Tensor, num_heads: int, head_dim: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: (batch_size, seq_length, three_times_hidden_size) = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, num_heads, 3 * head_dim) (query_layer, key_layer, value_layer) = fused_qkv.split(head_dim, dim=-1) query_layer = query_layer.transpose(1, 2).reshape(batch_size * num_heads, seq_length, head_dim) key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * num_heads, head_dim, seq_length) value_layer = value_layer.transpose(1, 2).reshape(batch_size * num_heads, seq_length, head_dim) return (query_layer, key_layer, value_layer) def _merge_heads(x: torch.Tensor, num_heads: int, head_dim: int) -> torch.Tensor: (batch_size_and_num_heads, seq_length, _) = x.shape batch_size = batch_size_and_num_heads // num_heads x = x.view(batch_size, num_heads, seq_length, head_dim) x = x.permute(0, 2, 1, 3) return x.reshape(batch_size, seq_length, num_heads * head_dim) class BloomAttention(nn.Module): def __init__(self, prefix, config: BloomConfig, weights): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.process_group = weights.process_group self.hidden_size = config.hidden_size self.num_heads = config.n_head self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = 1.0 process_group = weights.process_group if self.num_heads % process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {process_group.size()}).') self.num_heads = self.num_heads // process_group.size() self.query_key_value = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.query_key_value', weights=weights, bias=True) self.dense = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.dense', weights=weights, bias=True) self.attention_dropout = nn.Dropout(config.attention_dropout) @staticmethod def compute_attention(fused_qkv: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]], alibi: torch.Tensor, attention_mask: torch.Tensor, head_mask: Optional[torch.Tensor], beta: float, inv_norm_factor: float, num_heads: int, use_cache: bool): (batch_size, q_length, three_times_hidden_size) = fused_qkv.shape head_dim = three_times_hidden_size // (3 * num_heads) (query_layer, key_layer, value_layer) = _split_heads(fused_qkv, num_heads=num_heads, head_dim=head_dim) if layer_past is not None: (past_key, past_value) = layer_past past_key = past_key.view(-1, *past_key.shape[-2:]) key_layer = torch.cat((past_key, key_layer), dim=2) past_value = past_value.view(-1, *past_value.shape[-2:]) value_layer =
torch.cat((past_value, value_layer), dim=1) (_, _, kv_length) = key_layer.shape if use_cache is True: present = (key_layer, value_layer) else: present = None attention_scores = alibi.baddbmm(batch1=query_layer, batch2=key_layer, beta=beta, alpha=inv_norm_factor) input_dtype = attention_scores.dtype if input_dtype == torch.float16: attention_scores = attention_scores.to(torch.float) attn_weights = attention_scores.masked_fill_(attention_mask, torch.finfo(attention_scores.dtype).min) attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.bmm(attention_probs, value_layer, out=query_layer) context_layer = _merge_heads(context_layer, num_heads=num_heads, head_dim=head_dim) return (context_layer, present, attention_probs) def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False): fused_qkv = self.query_key_value(hidden_states) (batch_size, q_length, _) = fused_qkv.shape if layer_past is not None: (past_key, past_value) = layer_past layer_past = (past_key.view(-1, *past_key.shape[-2:]), past_value.view(-1, *past_value.shape[-2:])) if CUSTOM_KERNELS_ENABLED: assert self.training is False, 'Only forward pass was implemented' assert attention_mask.shape[-1] < 4096, 'Custom kernel supports only up to 4096 tokens' (context_layer, present, attention_probs) = fused_bloom_attention_cuda.forward(fused_qkv, layer_past, alibi, attention_mask, head_mask, self.beta, self.inv_norm_factor, self.num_heads, use_cache) else: (context_layer, present, attention_probs) = self.compute_attention(fused_qkv=fused_qkv, layer_past=layer_past, alibi=alibi, attention_mask=attention_mask, head_mask=head_mask, beta=self.beta, inv_norm_factor=self.inv_norm_factor, num_heads=self.num_heads, use_cache=use_cache) if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear(context_layer[:, :, int(i * slices):int((i + 1) * slices)], self.dense.weight[:, int(i * slices):int((i + 1) * slices)]) else: output_tensor = self.dense(context_layer) output_tensor += residual outputs = (output_tensor, present) if output_attentions: outputs += (attention_probs,) return outputs class BloomMLP(nn.Module): def __init__(self, prefix, config: BloomConfig, weights): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.dense_h_to_4h = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.dense_h_to_4h', weights=weights, bias=True) self.dense_4h_to_h = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.dense_4h_to_h', weights=weights, bias=True) self.gelu_impl = torch.nn.GELU(approximate='tanh') self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) if self.pretraining_tp > 1 and self.slow_but_exact: intermediate_output = torch.zeros_like(residual) slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp for i in range(self.pretraining_tp): intermediate_output = intermediate_output + F.linear(hidden_states[:, :, int(i *
slices):int((i + 1) * slices)], self.dense_4h_to_h.weight[:, int(i * slices):int((i + 1) * slices)]) else: intermediate_output = self.dense_4h_to_h(hidden_states) intermediate_output += residual return intermediate_output class BloomBlock(nn.Module): def __init__(self, layer_id: int, config: BloomConfig, weights): super().__init__() prefix = f'h.{layer_id}' self.input_layernorm = LayerNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.layer_norm_epsilon) self.num_heads = config.n_head self.self_attention = BloomAttention(prefix=f'{prefix}.self_attention', config=config, weights=weights) self.post_attention_layernorm = LayerNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False): layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attn_outputs = self.self_attention(layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attention_output = attn_outputs[0] outputs = attn_outputs[1:] layernorm_output = self.post_attention_layernorm(attention_output) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output output = self.mlp(layernorm_output, residual) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs class BloomPreTrainedModel(PreTrainedModel): config_class = BloomConfig base_model_prefix = 'transformer' _no_split_modules = ['BloomBlock'] @staticmethod def _convert_to_standard_cache(past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: (batch_size_times_num_heads, head_dim, seq_length) = past_key_value[0][0].shape num_heads = batch_size_times_num_heads // batch_size return tuple(((layer_past[0].view(batch_size, num_heads, head_dim, seq_length), layer_past[1].view(batch_size, num_heads, seq_length, head_dim)) for layer_past in past_key_value)) @staticmethod def _convert_to_bloom_cache(past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]]) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: (batch_size, num_heads, head_dim, seq_length) = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads return tuple(((layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim)) for layer_past in past_key_value)) class BloomModel(BloomPreTrainedModel): def __init__(self, config: BloomConfig, weights): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.n_head process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.word_embeddings = TensorParallelEmbedding(prefix='word_embeddings', weights=weights) self.word_embeddings_layernorm = LayerNorm.load(prefix='word_embeddings_layernorm', 
weights=weights, eps=config.layer_norm_epsilon) self.h = nn.ModuleList([BloomBlock(layer_id=layer_id, config=config, weights=weights) for layer_id in range(config.num_hidden_layers)]) self.ln_f = LayerNorm.load(prefix='ln_f', weights=weights, eps=config.layer_norm_epsilon) def _prepare_attn_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor: combined_attention_mask = None device = attention_mask.device (_, src_length) = input_shape if src_length > 1: combined_attention_mask = _make_causal_mask(input_shape, device=device, past_key_values_length=past_key_values_length) expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length) combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask return combined_attention_mask def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = input_ids.shape elif inputs_embeds is not None: (batch_size, seq_length, _) = inputs_embeds.shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') if past_key_values is None: past_key_values = tuple([None] * len(self.h)) head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[-1] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = build_alibi_tensor(attention_mask, self.num_heads) causal_mask = self._prepare_attn_mask(attention_mask, 
input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length) if hasattr(self, 'tp_rank'): assert self.num_heads % self.tp_world_size == 0 block_size = self.num_heads // self.tp_world_size alibi = alibi[:, self.tp_rank * block_size:(self.tp_rank + 1) * block_size] alibi = alibi.reshape(batch_size * block_size, 1, seq_length_with_past) causal_mask = torch.repeat_interleave(causal_mask, block_size, dim=0) else: alibi = alibi.reshape(batch_size * self.num_heads, 1, seq_length_with_past) causal_mask = torch.repeat_interleave(causal_mask, self.num_heads, dim=0) alibi = alibi.to(hidden_states.dtype) for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block(hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions) class BloomForCausalLM(BloomPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.transformer = BloomModel(config, weights) self.lm_head = SpeculativeHead.load(config, prefix='word_embeddings', weights=weights) def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, past_key_values: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, **kwargs) -> dict: if past_key_values: input_ids = input_ids[:, -1].unsqueeze(-1) if past_key_values[0][0].shape[0] == input_ids.shape[0]: past_key_values = self._convert_to_bloom_cache(past_key_values) if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask}) return model_inputs def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] (logits, speculative_logits) = self.lm_head(hidden_states) loss = None if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return (CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions), speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/clip.py from typing import Optional, Tuple import torch from torch import nn from transformers.activations import ACT2FN from transformers.modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from transformers.modeling_outputs import BaseModelOutputWithPooling from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from text_generation_server.layers import TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear class CLIPVisionEmbeddings(nn.Module): def __init__(self, prefix, config: CLIPVisionConfig, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = weights.get_tensor(f'{prefix}.class_embedding') self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.patch_embedding.weight = nn.Parameter(weights.get_tensor(f'{prefix}.patch_embedding.weight'), requires_grad=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = TensorParallelEmbedding(prefix=f'{prefix}.position_embedding', weights=weights) self.register_buffer('position_ids', torch.arange(self.num_positions, device=weights.device).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class CLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: 
Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class CLIPAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_size = self.embed_dim // self.num_heads if self.head_size * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.scale = self.head_size ** (-0.5) self.dropout = config.attention_dropout self.qkv = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=True) self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=True) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_size).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, _) = hidden_states.size() qkv = self.qkv(hidden_states) (query_states, key_states, value_states) = qkv.split([self.head_size * self.num_heads] * 3, dim=2) query_states = query_states * self.scale key_states = self._shape(key_states, -1, bsz) value_states = self._shape(value_states, -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_size) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): 
raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None) class CLIPMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load(prefix=f'{prefix}.fc1', config=config, weights=weights, bias=True) self.fc2 = TensorParallelRowLinear.load(prefix=f'{prefix}.fc2', config=config, weights=weights, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class CLIPEncoderLayer(nn.Module): def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.layer_norm1 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm1', weights=weights, eps=config.layer_norm_eps) self.mlp = CLIPMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm2', weights=weights, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor): residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class CLIPPreTrainedModel(nn.Module): config_class = CLIPConfig base_model_prefix = 'clip' supports_gradient_checkpointing = True CLIP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n' CLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n' CLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. 
See [`CLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n' class CLIPEncoder(nn.Module): def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPEncoderLayer(prefix=f'{prefix}.layers.{i}', config=config, weights=weights) for i in range(config.num_hidden_layers)]) def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None): hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): hidden_states = encoder_layer(hidden_states, attention_mask, causal_attention_mask) return hidden_states class CLIPTextTransformer(nn.Module): def __init__(self, prefix: str, config: CLIPTextConfig, weights=None): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPTextEmbeddings(config) self.encoder = CLIPEncoder(prefix=f'{prefix}.encoder', config=config, weights=weights) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.eos_token_id = config.eos_token_id def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None): if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1)] else: last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1)] return last_hidden_state class CLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ['CLIPTextEmbeddings', 'CLIPEncoderLayer'] def __init__(self, prefix, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(prefix, config) self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None): return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) class CLIPVisionTransformer(nn.Module): def __init__(self, prefix, config: CLIPVisionConfig, weights): super().__init__() self.config = config self.embeddings = CLIPVisionEmbeddings(prefix=f'{prefix}.embeddings', config=config, weights=weights) self.pre_layrnorm = nn.LayerNorm.load(prefix=f'{prefix}.pre_layrnorm', weights=weights, eps=config.layer_norm_eps) self.encoder = CLIPEncoder(prefix=f'{prefix}.encoder', config=config, weights=weights) def forward(self, pixel_values: Optional[torch.FloatTensor]=None): if pixel_values is None: raise ValueError('You have to specify 
pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states) last_hidden_state = encoder_outputs return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state) class CLIPVisionModel(CLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = 'pixel_values' _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding def forward(self, pixel_values: Optional[torch.FloatTensor]=None): return self.vision_model(pixel_values=pixel_values) class CLIPModel(nn.Module): def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = CLIPTextTransformer(text_config) self.vision_model = CLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor: text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: vision_outputs = self.vision_model(pixel_values=pixel_values) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None): vision_outputs = self.vision_model(pixel_values=pixel_values) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() return (logits_per_image, logits_per_text) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, 
TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.layernorm import FastLayerNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.weights import UnquantizedWeight if SYSTEM == 'cuda': import dropout_layer_norm else: dropout_layer_norm = None class CohereRotary(PositionRotaryEmbedding): def forward(self, query: torch.Tensor, key: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor): if SYSTEM == 'cuda': import rotary_emb q1 = query[..., ::2] q2 = query[..., 1::2] rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False) k1 = key[..., ::2] k2 = key[..., 1::2] rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False) elif SYSTEM == 'rocm': from vllm._C import ops head_size = query.shape[-1] ops.rotary_embedding(query, key, head_size, cos, sin, False) elif SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex ipex.llm.functional.rotary_embedding(query, key, sin, cos, query.size(-1), False) else: raise ValueError('Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.') class CohereLayerNorm(nn.Module): def __init__(self, prefix, weights, eps): super().__init__() weight = weights.get_sharded(f'{prefix}.weight', dim=0) self.weight = nn.Parameter(weight) self.ones = weight.new_ones(weight.shape[1]) self.eps = eps def forward(self, hidden_states): if hidden_states.shape[-1] > 8192 or SYSTEM != 'cuda': hidden_states = hidden_states.reshape(-1, self.weight.shape[0], self.weight.shape[1]) input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) mean = hidden_states.mean(-1, keepdim=True) hidden_states_minus_mean = hidden_states - mean variance = hidden_states_minus_mean.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states_minus_mean * torch.rsqrt(variance + self.eps) hidden_states = self.weight.to(torch.float32) * hidden_states hidden_states = hidden_states.view(-1, self.weight.shape[1]) return hidden_states.to(input_dtype) (hidden_states, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, None, self.ones, None, None, None, None, None, 0.0, self.eps, 1.0, 0, None, False, False) hidden_states = hidden_states.view(-1, self.weight.shape[0], self.weight.shape[1]) hidden_states = self.weight * hidden_states hidden_states = hidden_states.view(-1, self.weight.shape[1]) return hidden_states def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=config.attention_bias) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], 
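# The shape check above guards the sharded, fused QKV weight returned by get_multi_weights_col:
# per shard it should be [(num_heads + 2 * num_key_value_heads) * head_size, hidden_size], where
# num_heads and num_key_value_heads have already been divided by the tensor-parallel world size.
# A minimal sketch with illustrative numbers only (not taken from any real config): hidden_size=4096,
# 32 attention heads, 8 KV heads, head_size=4096 // 32 = 128, world_size=2 -> per-shard num_heads=16,
# num_key_value_heads=4, so the expected per-shard shape is [(16 + 2 * 4) * 128, 4096] = [3072, 4096].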
f'{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' if config.attention_bias: w = [weights.get_sharded(f'{p}.bias', dim=0) for p in [f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj']] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias=bias)) class FlashCohereAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = CohereRotary.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.use_qk_norm = config.use_qk_norm if self.use_qk_norm: self.q_norm = CohereLayerNorm(prefix=f'{prefix}.q_norm', weights=weights, eps=config.layer_norm_eps) self.k_norm = CohereLayerNorm(prefix=f'{prefix}.k_norm', weights=weights, eps=config.layer_norm_eps) else: self.q_norm = None self.k_norm = None self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=config.attention_bias) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) (query, key, value) = qkv.split([self.head_size * self.num_heads, self.head_size * self.num_key_value_heads, self.head_size * self.num_key_value_heads], dim=1) if self.use_qk_norm: query = query.reshape(-1, self.head_size) key = key.reshape(-1, self.head_size) query = self.q_norm(query.contiguous()) key = self.k_norm(key.contiguous()) query = query.view(-1, self.num_heads, self.head_size) key = key.view(-1, self.num_key_value_heads, self.head_size) value = value.view(-1, self.num_key_value_heads, self.head_size) self.rotary_emb(query, key, cos, sin) reshape_and_cache(key, value, kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else key, kv_cache[1] if SYSTEM != 'ipex' else value, seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size), reduce=False) class CohereMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = 
TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.intermediate_size = config.intermediate_size // weights.process_group.size() def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=False) class FlashCohereLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights): super().__init__() prefix = f'{prefix}.layers.{layer_id}' self.self_attn = FlashCohereAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.mlp = CohereMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = FastLayerNorm.load_no_bias(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.layer_norm_eps) self.process_group = weights.process_group def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) mlp_output = self.mlp(normed_hidden_states) output = attn_output + mlp_output if self.process_group.size() > 1: torch.distributed.all_reduce(output, group=self.process_group) return (output, res) class FlashCohereModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.layers = nn.ModuleList([FlashCohereLayer(prefix, layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.norm = FastLayerNorm.load_no_bias(prefix=f'{prefix}.norm', weights=weights, eps=config.layer_norm_eps) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: torch.Tensor, max_s: int) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashCohereForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = 'model' else: prefix = f'{prefix}.model' self.model = FlashCohereModel(prefix, config, weights) try: self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) except RuntimeError: self.lm_head = SpeculativeHead.load(config, prefix=f'{prefix}.embed_tokens', weights=weights) self.logit_scale = config.logit_scale def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: 
torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) logits *= self.logit_scale if speculative_logits is not None: speculative_logits *= self.logit_scale return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple, Any from text_generation_server.utils.import_utils import SYSTEM if SYSTEM != 'ipex': from vllm.model_executor.layers.fused_moe import fused_moe from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import FastLinear, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastLayerNorm class DbrxAttentionConfig(PretrainedConfig): def __init__(self, attn_pdrop: float=0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any): super().__init__(**kwargs) self.attn_pdrop = attn_pdrop self.clip_qkv = clip_qkv self.kv_n_heads = kv_n_heads self.rope_theta = rope_theta for k in ['model_type']: if k in kwargs: kwargs.pop(k) if len(kwargs) != 0: raise ValueError(f'Found unknown kwargs={kwargs!r}') class DbrxFFNConfig(PretrainedConfig): def __init__(self, ffn_act_fn: Optional[dict]=None, ffn_hidden_size: int=3584, moe_num_experts: int=4, moe_top_k: int=1, moe_jitter_eps: Optional[float]=None, moe_loss_weight: float=0.01, moe_normalize_expert_weights: Optional[float]=1, uniform_expert_assignment: bool=False, **kwargs: Any): super().__init__() if ffn_act_fn is None: ffn_act_fn = {'name': 'silu'} self.ffn_act_fn = ffn_act_fn self.ffn_hidden_size = ffn_hidden_size self.moe_num_experts = moe_num_experts self.moe_top_k = moe_top_k self.moe_jitter_eps = moe_jitter_eps self.moe_loss_weight = moe_loss_weight self.moe_normalize_expert_weights = moe_normalize_expert_weights self.uniform_expert_assignment = uniform_expert_assignment if uniform_expert_assignment: raise ValueError('`uniform_expert_assignment = True` is not supported') for k in ['model_type']: if k in kwargs: kwargs.pop(k) if len(kwargs) != 0: raise ValueError(f'Found unknown kwargs={kwargs!r}') class DbrxConfig(PretrainedConfig): attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers'} def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, max_seq_len: int=2048, vocab_size: int=32000, resid_pdrop: float=0.0, emb_pdrop: float=0.0, attn_config: Optional[DbrxAttentionConfig]=None, ffn_config: Optional[DbrxFFNConfig]=None, use_cache: bool=True, initializer_range: float=0.02, output_router_logits: bool=False, router_aux_loss_coef: float=0.05, **kwargs: Any): if attn_config is None: self.attn_config = DbrxAttentionConfig() elif 
isinstance(attn_config, dict): self.attn_config = DbrxAttentionConfig(**attn_config) else: self.attn_config = attn_config if ffn_config is None: self.ffn_config = DbrxFFNConfig() elif isinstance(ffn_config, dict): self.ffn_config = DbrxFFNConfig(**ffn_config) else: self.ffn_config = ffn_config self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.use_cache = use_cache self.initializer_range = initializer_range self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef tie_word_embeddings = kwargs.pop('tie_word_embeddings', False) if tie_word_embeddings: raise ValueError('tie_word_embeddings is not supported for Dbrx models.') super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) @property def num_key_value_heads(self): return self.attn_config.kv_n_heads def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x def load_attention(config, prefix, weights): return TensorParallelColumnLinear.load_qkv(config, prefix=f'{prefix}.Wqkv', weights=weights, bias=False, num_heads=config.n_heads, num_key_value_heads=config.attn_config.kv_n_heads) def _load_experts(config, prefix, weights): world_size = weights.process_group.size() rank = weights.process_group.rank() assert config.ffn_config.ffn_hidden_size % world_size == 0, f'The chosen size {config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards' expert_size = config.ffn_config.ffn_hidden_size block_size = expert_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty((config.ffn_config.moe_num_experts * block_size, config.d_model), dtype=weights.dtype, device=weights.device) slice_ = weights._get_slice(f'{prefix}') for i in range(config.ffn_config.moe_num_experts): offset = i * expert_size expert_slice = slice_[start + offset:stop + offset] tensor[i * block_size:(i + 1) * block_size] = expert_slice.to(dtype=weights.dtype).to(device=weights.device) return tensor def _load_experts_quantized(config, prefix, weights, cls): world_size = weights.process_group.size() rank = weights.process_group.rank() assert config.ffn_config.ffn_hidden_size % world_size == 0, f'The chosen size {config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards' expert_size = config.ffn_config.ffn_hidden_size block_size = expert_size // world_size start = rank * block_size stop = (rank + 1) * block_size slice_ = weights._get_slice(f'{prefix}') experts = [] for i in range(config.ffn_config.moe_num_experts): if config.quantize in ['gptq', 'awq']: raise NotImplementedError('Dbrx does not support gptq/awq quantization yet.') else: offset = i * expert_size expert_slice = slice_[start + offset:stop + offset].to(dtype=weights.dtype).to(device=weights.device) if cls == TensorParallelRowLinear: expert_slice = expert_slice.t().contiguous() linear = get_linear(expert_slice, None) experts.append(cls(linear, weights.process_group)) else: linear = get_linear(expert_slice, None) experts.append(cls(linear)) return experts class DbrxAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.clip_qkv = config.attn_config.clip_qkv self.num_heads = config.n_heads self.hidden_size = config.d_model self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static(config=config, 
dim=self.head_size, base=config.attn_config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.attn_config.kv_n_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) if self.clip_qkv is not None: qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class DbrxNormAttentionNorm(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.norm_1 = FastLayerNorm.load_no_bias(prefix=f'{prefix}.norm_1', weights=weights, eps=1e-05) self.self_attn = DbrxAttention(prefix=f'{prefix}.attn', config=config, weights=weights) self.norm_2 = FastLayerNorm.load_no_bias(prefix=f'{prefix}.norm_2', weights=weights, eps=1e-05) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (normed_hidden_states, res) = self.norm_1(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (normed_attn_res_output, attn_res) = self.norm_2(attn_output, res) return (normed_attn_res_output, attn_res) @torch.jit.script def select_experts(gate_logits: torch.Tensor, top_k: int, moe_normalize_expert_weights: int): all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) (weights, selected_experts) = torch.topk(all_probs, top_k, dim=-1) if moe_normalize_expert_weights: weights = weights / torch.norm(weights, p=moe_normalize_expert_weights, dim=-1, keepdim=True) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return (selected_experts, weights) @torch.jit.script def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode='trunc') * value class BlockSparseMoE(nn.Module): def __init__(self, prefix, config: DbrxConfig, weights): super().__init__() self.moe_normalize_expert_weights = config.ffn_config.moe_normalize_expert_weights self.hidden_dim = config.d_model self.ffn_dim = 
config.ffn_config.ffn_hidden_size // weights.process_group.size() self.num_experts = config.ffn_config.moe_num_experts self.top_k = config.ffn_config.moe_top_k act = config.ffn_config.ffn_act_fn['name'] if 'gelu' in act: self.act = lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') elif 'silu' in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] self.gate = FastLinear.load(config, f'{prefix}.router.layer', weights, bias=False) w1 = _load_experts(config, f'{prefix}.experts.mlp.w1', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim) v1 = _load_experts(config, f'{prefix}.experts.mlp.v1', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim) self.wv1 = torch.cat([w1, v1], dim=1) self.w2 = _load_experts(config, f'{prefix}.experts.mlp.w2', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim).transpose(1, 2).contiguous() self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: router_logits = self.gate(x) out = fused_moe(x, self.wv1, self.w2, router_logits, self.top_k, renormalize=self.moe_normalize_expert_weights, inplace=True) if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out.view(*x.shape) class DenseMoE(nn.Module): def __init__(self, prefix, config: DbrxConfig, weights): super().__init__() self.moe_normalize_expert_weights = config.ffn_config.moe_normalize_expert_weights self.hidden_dim = config.d_model self.ffn_dim = config.ffn_config.ffn_hidden_size // weights.process_group.size() self.num_experts = config.ffn_config.moe_num_experts self.top_k = config.ffn_config.moe_top_k act = config.ffn_config.ffn_act_fn['name'] if 'gelu' in act: self.act = lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') elif 'silu' in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] self.gate = FastLinear.load(config, f'{prefix}.router.layer', weights, bias=False) self.w1 = _load_experts_quantized(config, prefix=f'{prefix}.experts.mlp.w1', weights=weights, cls=TensorParallelColumnLinear) self.w2 = _load_experts_quantized(config, prefix=f'{prefix}.experts.mlp.w2', weights=weights, cls=TensorParallelRowLinear) self.v1 = _load_experts_quantized(config, prefix=f'{prefix}.experts.mlp.v1', weights=weights, cls=TensorParallelColumnLinear) self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: input_shape = x.shape x = x.view(-1, input_shape[-1]) gate_logits = self.gate(x) weights = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) if self.top_k < self.num_experts: (_, not_selected_experts) = torch.topk(weights, self.num_experts - self.top_k, largest=False, sorted=False, dim=1) weights.scatter_(1, not_selected_experts, 0) if self.moe_normalize_expert_weights: weights = weights / torch.norm(weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True) weights = weights.to(x.dtype) out = x.new_zeros(x.shape[0], self.hidden_dim) for i in range(self.num_experts): h = self.act(self.w1[i](x)) * self.v1[i](x) h = self.w2[i](h, reduce=False) out += h * weights[:, i].view(-1, 1) if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out class DbrxLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights): super().__init__() prefix = f'{prefix}.blocks.{layer_id}' self.attn = 
DbrxNormAttentionNorm(prefix=f'{prefix}.norm_attn_norm', config=config, weights=weights) moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE self.moe = moe_cls(f'{prefix}.ffn', config, weights) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (attn_output, attn_res) = self.attn(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) moe_output = self.moe(attn_output) return (moe_output, attn_res) class DbrxModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.wte', weights=weights) self.layers = nn.ModuleList([DbrxLayer(prefix, layer_id, config, weights) for layer_id in range(config.n_layers)]) self.norm = FastLayerNorm.load_no_bias(prefix=f'{prefix}.norm_f', weights=weights, eps=1e-05) self.head_size = self.layers[0].attn.self_attn.head_size self.num_heads = self.layers[0].attn.self_attn.num_heads self.num_key_value_heads = self.layers[0].attn.self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].attn.self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashDbrxForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' self.model = DbrxModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py from typing import Any, Dict, List, Optional, Tuple import torch import torch.distributed from text_generation_server.layers import FastLinear, SpeculativeHead, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, get_linear from text_generation_server.layers.attention import attention, paged_attention, reshape_and_cache, Seqlen from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding, get_mscale from text_generation_server.utils.import_utils import SYSTEM from 
text_generation_server.utils.weights import Weights from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig if SYSTEM == 'rocm': try: from vllm import _custom_C except Exception as e: raise ImportError(f'Could not load `vllm._custom_C`. Full error: {e}') class DeepseekV2Config(PretrainedConfig): def __init__(self, vocab_size=102400, hidden_size=4096, intermediate_size=11008, moe_intermediate_size=1407, num_hidden_layers=30, num_attention_heads=32, num_key_value_heads=32, n_shared_experts=2, n_routed_experts=160, ep_size=1, routed_scaling_factor=1.0, kv_lora_rank=512, q_lora_rank=1536, qk_rope_head_dim=64, v_head_dim=128, qk_nope_head_dim=128, topk_method='gready', n_group=8, topk_group=3, num_experts_per_tok=6, moe_layer_freq=1, first_k_dense_replace=0, norm_topk_prob=False, scoring_func='softmax', aux_loss_alpha=0.001, seq_aux=True, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=100000, eos_token_id=100001, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.moe_intermediate_size = moe_intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.ep_size = ep_size self.routed_scaling_factor = routed_scaling_factor self.kv_lora_rank = kv_lora_rank self.q_lora_rank = q_lora_rank self.qk_rope_head_dim = qk_rope_head_dim self.v_head_dim = v_head_dim self.qk_nope_head_dim = qk_nope_head_dim self.topk_method = topk_method self.n_group = n_group self.topk_group = topk_group self.num_experts_per_tok = num_experts_per_tok self.moe_layer_freq = moe_layer_freq self.first_k_dense_replace = first_k_dense_replace self.norm_topk_prob = norm_topk_prob self.scoring_func = scoring_func self.aux_loss_alpha = aux_loss_alpha self.seq_aux = seq_aux if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout tie_word_embeddings = kwargs.pop('tie_word_embeddings', False) if tie_word_embeddings: raise ValueError('tie_word_embeddings is not supported for Deepseek V2 models.') if ep_size != 1: raise ValueError(f'Currently only ep_size == 1 is supported for Deepseek V2 models, was {ep_size}') super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _load_experts(config, prefix: str, mat: str, weights: Weights): if config.quantize is not None: raise NotImplementedError('Deepseek V2 does not support weight quantization yet.') assert mat in ['gate_proj', 'up_proj', 'down_proj'] world_size = weights.process_group.size() rank = weights.process_group.rank() assert config.moe_intermediate_size % world_size == 0, f'The chosen size {config.moe_intermediate_size} is not compatible with sharding on {world_size} shards' block_size 
= config.moe_intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty((config.n_routed_experts * block_size, config.hidden_size), dtype=weights.dtype, device=weights.device) for i in range(config.n_routed_experts): slice_ = weights._get_slice(f'{prefix}.{i}.{mat}.weight') if mat == 'down_proj': expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] tensor[i * block_size:(i + 1) * block_size] = expert_slice.to(dtype=weights.dtype).to(device=weights.device) return tensor class DeepseekV2Attention(torch.nn.Module): def __init__(self, prefix: str, config, weights: Weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.kv_lora_rank = config.kv_lora_rank self.q_lora_rank = config.q_lora_rank self.qk_nope_head_dim = config.qk_nope_head_dim self.qk_rope_head_dim = config.qk_rope_head_dim self.head_size = config.qk_nope_head_dim + config.qk_rope_head_dim self.value_head_size = config.v_head_dim self.head_pad_size = max(self.head_size, self.value_head_size) self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.qk_rope_head_dim, base=config.rope_theta, device=weights.device) mscale = get_mscale(self.rotary_emb.scaling_factor, self.rotary_emb.mscale_all_dim) self.softmax_scale = self.head_size ** (-0.5) * mscale * mscale if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() if self.q_lora_rank is None: self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', weights=weights, bias=config.attention_bias) else: self.q_a_proj = get_linear(weight=weights.get_weights(f'{prefix}.q_a_proj'), bias=weights.get_tensor(f'{prefix}.q_a_proj.bias') if config.attention_bias else None) self.q_a_layernorm = FastRMSNorm.load(prefix=f'{prefix}.q_a_layernorm', weights=weights, eps=config.rms_norm_eps) self.q_b_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_b_proj', weights=weights, bias=config.attention_bias) self.kv_a_proj_with_mqa = get_linear(weight=weights.get_weights(f'{prefix}.kv_a_proj_with_mqa'), bias=weights.get_tensor(f'{prefix}.kv_a_proj_with_mqa.bias') if config.attention_bias else None) self.kv_a_layernorm = FastRMSNorm.load(prefix=f'{prefix}.kv_a_layernorm', weights=weights, eps=config.rms_norm_eps) self.kv_b_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.kv_b_proj', weights=weights, bias=config.attention_bias) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, cu_seqlen_prefill: torch.Tensor, kv_cache: Tuple[torch.Tensor, torch.Tensor], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int): if self.q_lora_rank is None: query = self.q_proj(hidden_states) else: query = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))[0]) query = query.view(-1, self.num_heads, self.head_size) (_, query_pe) = torch.split(query, 
[self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) compressed_kv = self.kv_a_proj_with_mqa(hidden_states) (compressed_kv, key_pe) = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) key_pe = key_pe.view(-1, 1, self.qk_rope_head_dim) kv = self.kv_b_proj(self.kv_a_layernorm(compressed_kv.contiguous())[0]).view(-1, self.num_key_value_heads, self.qk_nope_head_dim + self.value_head_size) (key_nope, value) = torch.split(kv, [self.qk_nope_head_dim, self.value_head_size], dim=-1) (batch_size, heads, head_dim) = query_pe.shape query_pe = query_pe.view(batch_size, heads, head_dim // 2, 2).transpose(2, 3).reshape(batch_size, heads, head_dim) (batch_size, heads, head_dim) = key_pe.shape key_pe = key_pe.view(batch_size, heads, head_dim // 2, 2).transpose(2, 3).reshape(batch_size, heads, head_dim) self.rotary_emb(query_pe, key_pe, cos, sin) query[..., self.qk_nope_head_dim:] = query_pe key = torch.empty_like(query) key[..., :self.qk_nope_head_dim] = key_nope key[..., self.qk_nope_head_dim:] = key_pe query = torch.nn.functional.pad(query, (0, self.head_pad_size - self.head_size), value=0) key = torch.nn.functional.pad(key, (0, self.head_pad_size - self.head_size), value=0) value = torch.nn.functional.pad(value, (0, self.head_pad_size - self.value_head_size), value=0) reshape_and_cache(key, value, kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else key, kv_cache[1] if SYSTEM != 'ipex' else value, seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) attn_output = attn_output[..., :self.value_head_size] return self.o_proj(attn_output.reshape(-1, self.num_heads * self.value_head_size)) class DeepseekV2MLP(nn.Module): def __init__(self, prefix: str, config, weights, intermediate_size: int): super().__init__() self.hidden_act = config.hidden_act if self.hidden_act != 'silu': raise NotImplementedError('Currently only `silu` is supported as an activation for Deepseek V2.') self.act = ACT2FN[self.hidden_act] self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.intermediate_size = intermediate_size // weights.process_group.size() self.quantize = config.quantize def forward(self, hidden_states: torch.Tensor, reduce: bool=True): if SYSTEM == 'rocm' and self.hidden_act == 'silu' and (hidden_states.shape[0] == 1) and (not self.quantize): out = torch.empty(hidden_states.shape[0], self.intermediate_size, dtype=hidden_states.dtype, device='cuda') _custom_C.LLMM_Silu(self.gate_up_proj.linear.weight, hidden_states, out, 8) return self.down_proj(out, reduce=reduce) else: gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=reduce) class BlockSparseMoE(nn.Module): def __init__(self, prefix, config: DeepseekV2Config, weights): super().__init__() self.hidden_dim = config.hidden_size self.moe_intermediate_size = config.moe_intermediate_size // weights.process_group.size() self.n_routed_experts = config.n_routed_experts self.n_expert_group = config.n_group self.topk_group = config.topk_group self.top_k = 
config.num_experts_per_tok self.norm_topk_prob = config.norm_topk_prob self.routed_scaling_factor = config.routed_scaling_factor gate_proj = _load_experts(config, f'{prefix}.experts', 'gate_proj', weights).view(self.n_routed_experts, self.moe_intermediate_size, self.hidden_dim) up_proj = _load_experts(config, f'{prefix}.experts', 'up_proj', weights).view(self.n_routed_experts, self.moe_intermediate_size, self.hidden_dim) self.gate_up_proj = torch.cat([gate_proj, up_proj], dim=1) self.down_proj = _load_experts(config, f'{prefix}.experts', 'down_proj', weights).view(self.n_routed_experts, self.moe_intermediate_size, self.hidden_dim).transpose(1, 2).contiguous() self.gate = FastLinear.load(config, f'{prefix}.gate', weights, bias=False) if config.n_shared_experts is not None: self.shared_experts = DeepseekV2MLP(prefix=f'{prefix}.shared_experts', config=config, weights=weights, intermediate_size=config.moe_intermediate_size * config.n_shared_experts) else: self.shared_experts = None self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: if self.shared_experts is not None: shared_output = self.shared_experts(x, reduce=False) else: shared_output = None router_logits = self.gate(x) (topk_weights, topk_ids) = grouped_topk(x, router_logits, self.top_k, renormalize=self.norm_topk_prob, num_expert_group=self.n_expert_group, topk_group=self.topk_group) out = fused_experts(x, self.gate_up_proj, self.down_proj, topk_weights, topk_ids, inplace=True) * self.routed_scaling_factor if shared_output is not None: out = out + shared_output if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out.view(*x.shape) class DenseMoE(nn.Module): def __init__(self, prefix: str, config: DeepseekV2Config, weights: Weights): super().__init__() self.hidden_dim = config.hidden_size self.moe_intermediate_size = config.moe_intermediate_size self.n_routed_experts = config.n_routed_experts self.n_expert_group = config.n_group self.topk_group = config.topk_group self.top_k = config.num_experts_per_tok self.norm_topk_prob = config.norm_topk_prob self.routed_scaling_factor = config.routed_scaling_factor self.gate = FastLinear.load(config, f'{prefix}.gate', weights, bias=False) self.experts = [DeepseekV2MLP(f'{prefix}.experts.{i}', config, weights, self.moe_intermediate_size) for i in range(self.n_routed_experts)] if config.n_shared_experts is not None: self.shared_experts = DeepseekV2MLP(prefix=f'{prefix}.shared_experts', config=config, weights=weights, intermediate_size=config.moe_intermediate_size * config.n_shared_experts) else: self.shared_experts = None self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: input_shape = x.shape x = x.view(-1, input_shape[-1]) if self.shared_experts is not None: shared_output = self.shared_experts(x, reduce=False) else: shared_output = None router_logits = self.gate(x) (topk_weights, topk_ids) = grouped_topk(x, router_logits, self.top_k, renormalize=self.norm_topk_prob, num_expert_group=self.n_expert_group, topk_group=self.topk_group) out = self.moe_infer_gpu(x, topk_ids, topk_weights) * self.routed_scaling_factor if shared_output is not None: out = out + shared_output if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out def moe_infer_gpu(self, x: torch.Tensor, topk_ids: torch.Tensor, topk_weight: torch.Tensor): weights = torch.zeros(topk_ids.shape[0], len(self.experts), dtype=x.dtype, device=x.device) 
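# Dense (unfused) MoE fallback, selected when the model is quantized: scatter each token's
# top-k routing probabilities into a (num_tokens, n_routed_experts) matrix, run every expert
# over the full batch, and weight each expert's output per token so unselected experts
# contribute zero. For reference, x is (num_tokens, hidden_dim) after the view above, and
# topk_ids / topk_weight are both (num_tokens, num_experts_per_tok).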
weights.scatter_(1, topk_ids, topk_weight) out = x.new_zeros(x.shape[0], self.hidden_dim) for (i, expert) in enumerate(self.experts): out += expert(x, reduce=False) * weights[:, i].view(-1, 1) return out class DeepseekV2Layer(nn.Module): def __init__(self, prefix, layer_id, config, weights): super().__init__() prefix = f'{prefix}.layers.{layer_id}' self.self_attn = DeepseekV2Attention(prefix=f'{prefix}.self_attn', config=config, weights=weights) if config.n_routed_experts is not None and layer_id >= config.first_k_dense_replace and (layer_id % config.moe_layer_freq == 0): moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE self.mlp = moe_cls(f'{prefix}.mlp', config, weights) else: self.mlp = DeepseekV2MLP(prefix=f'{prefix}.mlp', config=config, weights=weights, intermediate_size=config.intermediate_size) self.input_layernorm = FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, cu_seqlen_prefill: torch.Tensor, kv_cache, block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int): (normed_hidden_states, residual) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (normed_attn_res_output, residual) = self.post_attention_layernorm(attn_output, residual) output = self.mlp(normed_attn_res_output) return (output, residual) class DeepseekV2Model(torch.nn.Module): def __init__(self, prefix: str, config, weights: Weights): super().__init__() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.layers = nn.ModuleList([DeepseekV2Layer(prefix, layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.norm = FastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.rms_norm_eps) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashDeepseekV2ForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights: Weights): super().__init__() self.model = DeepseekV2Model('model' if not prefix else f'{prefix}.model', config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head' if not prefix else f'{prefix}.lm_head', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, 
prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) return (logits, speculative_logits) def grouped_topk(hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, num_expert_group: int=0, topk_group: int=0) -> Tuple[torch.Tensor, torch.Tensor]: scores = torch.softmax(gating_output, dim=-1) num_token = scores.shape[0] group_scores = scores.view(num_token, num_expert_group, -1).max(dim=-1).values group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1] group_mask = torch.zeros_like(group_scores) group_mask.scatter_(1, group_idx, 1) score_mask = group_mask.unsqueeze(-1).expand(num_token, num_expert_group, scores.shape[-1] // num_expert_group).reshape(num_token, -1) tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) (topk_weights, topk_ids) = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False) if renormalize: topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return (topk_weights, topk_ids) def get_default_config(M: int, E: int, N: int, K: int, topk: int, dtype: Optional[str]) -> Dict[str, int]: config = {'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8} if M <= E: config = {'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1} return config def fused_experts(hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, inplace: bool=False, override_config: Optional[Dict[str, Any]]=None, use_fp8: bool=False, w1_scale: Optional[torch.Tensor]=None, w2_scale: Optional[torch.Tensor]=None, a1_scale: Optional[torch.Tensor]=None, a2_scale: Optional[torch.Tensor]=None): assert hidden_states.shape[1] == w1.shape[2], 'Hidden size mismatch' assert topk_weights.shape == topk_ids.shape, 'topk shape mismatch' assert hidden_states.is_contiguous(), 'Hidden_states must be contiguous' assert w1.is_contiguous(), 'Expert weights1 must be contiguous' assert w2.is_contiguous(), 'Expert weights2 must be contiguous' assert hidden_states.dtype in [torch.float32, torch.float16, torch.bfloat16] import triton.language as tl from vllm import _custom_ops as ops from vllm.model_executor.layers.fused_moe.fused_moe import get_moe_configs, invoke_fused_moe_kernel, moe_align_block_size (M, _) = hidden_states.shape (E, N, _) = w1.shape if override_config: config = override_config else: configs = get_moe_configs(E, w2.shape[2], 'float8' if use_fp8 else None) if configs: config = configs[min(configs.keys(), key=lambda x: abs(x - M))] else: config = get_default_config(M, E, N, w1.shape[2], topk_ids.shape[1], 'float8' if use_fp8 else None) intermediate_cache1 = torch.empty((M, topk_ids.shape[1], N), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache2 = torch.empty((M * topk_ids.shape[1], N // 2), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache3 = torch.empty((M, topk_ids.shape[1], w2.shape[1]), device=hidden_states.device, dtype=hidden_states.dtype) (sorted_token_ids, expert_ids, num_tokens_post_padded) = moe_align_block_size(topk_ids, config['BLOCK_SIZE_M'], E) compute_type = tl.bfloat16 if hidden_states.dtype == 
torch.bfloat16 else tl.float16 invoke_fused_moe_kernel(hidden_states, w1, intermediate_cache1, a1_scale, w1_scale, topk_weights, topk_ids, sorted_token_ids, expert_ids, num_tokens_post_padded, False, topk_ids.shape[1], config, compute_type=compute_type, use_fp8=use_fp8) ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, N)) invoke_fused_moe_kernel(intermediate_cache2, w2, intermediate_cache3, a2_scale, w2_scale, topk_weights, topk_ids, sorted_token_ids, expert_ids, num_tokens_post_padded, True, 1, config, compute_type=compute_type, use_fp8=use_fp8) if inplace: return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1, out=hidden_states) return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.utils.weights import UnquantizedWeight class Gemma2Config(PretrainedConfig): def __init__(self, vocab_size=256128, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.head_dim = head_dim self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class Gemma2FastRMSNorm(FastRMSNorm): @classmethod def load(cls, prefix: str, weights, eps=1e-06): dtype = weights.dtype weights.dtype = torch.float32 weight = weights.get_tensor(f'{prefix}.weight') + 1 weights.dtype = dtype new = cls(weight, eps) new.dtype = dtype return new def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) hidden_states = hidden_states * self.weight return 
(hidden_states.to(self.dtype), residual) def load_attention(config, prefix: str, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) def _load_gqa(config, prefix: str, weights): assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.head_dim num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], f'{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' return TensorParallelColumnLinear(get_linear(weight, bias=None)) class FlashGemma2Attention(torch.nn.Module): def __init__(self, prefix: str, config, weights, causal: bool, is_sliding: bool): super().__init__() self.num_heads = config.num_attention_heads self.head_size = config.head_dim self.causal = causal if is_sliding: self.window_size = config.sliding_window else: self.window_size = -1 self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = config.query_pre_attn_scalar ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.softcap = config.attn_logit_softcapping self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale, causal=self.causal, window_size_left=self.window_size, softcap=self.softcap) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s, softcap=self.softcap) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class Gemma2MLP(nn.Module): def 
__init__(self, prefix, config, weights): super().__init__() act = config.hidden_activation self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.intermediate_size = config.intermediate_size // weights.process_group.size() def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class FlashGemma2Layer(nn.Module): def __init__(self, prefix: str, config, weights, causal: bool, is_sliding: bool): super().__init__() self.self_attn = FlashGemma2Attention(prefix=f'{prefix}.self_attn', config=config, weights=weights, causal=causal, is_sliding=is_sliding) self.mlp = Gemma2MLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = Gemma2FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = Gemma2FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) self.pre_feedforward_layernorm = Gemma2FastRMSNorm.load(prefix=f'{prefix}.pre_feedforward_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Gemma2FastRMSNorm.load(prefix=f'{prefix}.post_feedforward_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (normed_attn_res_output, _) = self.post_attention_layernorm(attn_output) normed_attn_res_output = normed_attn_res_output + res res = normed_attn_res_output (pre_normed, _) = self.pre_feedforward_layernorm(normed_attn_res_output) mlp_output = self.mlp(pre_normed) (post_hidden_states, _) = self.post_feedforward_layernorm(mlp_output) return (post_hidden_states, normed_attn_res_output) class FlashGemma2Model(torch.nn.Module): def __init__(self, prefix: str, config, weights, causal: bool): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList([FlashGemma2Layer(prefix=f'{prefix}.layers.{layer_id}', config=config, weights=weights, causal=causal, is_sliding=layer_id % 2 == 0) for layer_id in range(config.num_hidden_layers)]) self.norm = Gemma2FastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.rms_norm_eps) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = inputs_embeds (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, 
hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashGemma2ForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights, *, causal: bool=True): super().__init__() embed_norm = config.hidden_size ** 0.5 if not prefix: prefix = 'model' else: prefix = f'{prefix}.model' self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.embed_tokens.weight *= embed_norm self.model = FlashGemma2Model(prefix=prefix, config=config, weights=weights, causal=causal) self.lm_head = SpeculativeHead.load(prefix=f'{prefix}.embed_tokens' if config.tie_word_embeddings else f'{prefix}.lm_head', config=config, weights=weights) self.softcap = config.final_logit_softcapping assert isinstance(self.softcap, float) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: input_embeds = self.embed_tokens(input_ids) hidden_states = self.model(input_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) logits /= self.softcap logits = torch.tanh(logits) logits *= self.softcap return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.utils.weights import UnquantizedWeight class GemmaConfig(PretrainedConfig): def __init__(self, vocab_size=256128, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.head_dim = head_dim self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = 
num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class GemmaFastRMSNorm(FastRMSNorm): @classmethod def load(cls, prefix: str, weights, eps=1e-06): dtype = weights.dtype weights.dtype = torch.float32 weight = weights.get_tensor(f'{prefix}.weight') + 1 weights.dtype = dtype new = cls(weight, eps) new.dtype = dtype return new def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) hidden_states = hidden_states * self.weight return (hidden_states.to(self.dtype), residual) def load_attention(config, prefix: str, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) def _load_gqa(config, prefix: str, weights): assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.head_dim num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], f'{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' return TensorParallelColumnLinear(get_linear(weight, bias=None)) class FlashGemmaAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights, causal: bool): super().__init__() self.num_heads = config.num_attention_heads self.head_size = config.head_dim self.causal = causal self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) (query, kv) = 
qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale, causal=self.causal) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class GemmaMLP(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.intermediate_size = config.intermediate_size // weights.process_group.size() def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class FlashGemmaLayer(nn.Module): def __init__(self, prefix: str, config, weights, causal: bool): super().__init__() self.self_attn = FlashGemmaAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights, causal=causal) self.mlp = GemmaMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = GemmaFastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = GemmaFastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) mlp_output = self.mlp(normed_attn_res_output) return (mlp_output, attn_res) class FlashGemmaModel(torch.nn.Module): def __init__(self, prefix: str, config, weights, causal: bool): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList([FlashGemmaLayer(prefix=f'{prefix}.layers.{layer_id}', config=config, weights=weights, causal=causal) for layer_id in range(config.num_hidden_layers)]) self.norm = GemmaFastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.rms_norm_eps) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: 
List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = inputs_embeds (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashGemmaForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights, *, causal: bool=True): super().__init__() embed_norm = config.hidden_size ** 0.5 if not prefix: prefix = 'model' else: prefix = f'{prefix}.model' self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.embed_tokens.weight *= embed_norm self.model = FlashGemmaModel(prefix=prefix, config=config, weights=weights, causal=causal) self.lm_head = SpeculativeHead.load(prefix=f'{prefix}.embed_tokens' if config.tie_word_embeddings else f'{prefix}.lm_head', config=config, weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: input_embeds = self.embed_tokens(input_ids) hidden_states = self.model(input_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear def load_qkv(config, prefix: str, weights, head_size, num_heads): if config.quantize == 'gptq': return _load_qkv_gptq(config, prefix, weights) elif config.quantize == 'marlin': raise RuntimeError('GPT-2 models with marlin quantization are not yet supported') else: return _load_qkv(config, prefix, weights, head_size, num_heads) def _load_qkv_gptq(config, prefix: str, weights): world_size = weights.process_group.size() rank = weights.process_group.rank() weight = weights.get_weights_col_packed_qkv(f'{prefix}.c_attn', config.num_attention_heads, config.num_attention_heads) slice_ = weights._get_slice(f'{prefix}.c_attn.bias') shape = slice_.get_shape() total_size = shape[0] assert total_size % 3 == 0, f'Prepacked is not divisible by {3}' single_size = total_size // 3 assert single_size % world_size == 0 block_size = single_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensors = [] for i in range(3): tensor = slice_[start + i * single_size:stop + i * single_size] tensors.append(tensor) bias = torch.cat(tensors, 
dim=0) bias = bias.to(device=weights.device) return TensorParallelColumnLinear(get_linear(weight, bias)) def _load_qkv(config, prefix: str, weights, head_size, num_heads): slice_ = weights._get_slice(f'{prefix}.c_attn.weight') shape = slice_.get_shape() total_size = shape[1] assert total_size % 3 == 0, f'Prepacked is not divisible by {3}' world_size = weights.process_group.size() single_size = total_size // 3 assert single_size % world_size == 0 rank = weights.process_group.rank() block_size = single_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensors = [] for i in range(3): tensor = slice_[:, start + i * single_size:stop + i * single_size] tensors.append(tensor) weight = torch.cat(tensors, dim=1).T weight = weight.to(dtype=weights.dtype) weight = weight.to(device=weights.device) slice_ = weights._get_slice(f'{prefix}.c_attn.bias') shape = slice_.get_shape() total_size = shape[0] single_size = total_size // 3 block_size = single_size // world_size assert single_size % world_size == 0 start = rank * block_size stop = (rank + 1) * block_size b = [] for i in range(3): tensor = slice_[start + i * single_size:stop + i * single_size] b.append(tensor) bias = torch.cat(b, dim=0) bias = bias.to(dtype=weights.dtype) bias = bias.to(device=weights.device) assert list(bias.shape) == [3 * num_heads * head_size], f'{weight.shape} != {[3 * num_heads * head_size]}' return TensorParallelColumnLinear(get_linear(weight, bias)) def load_row(config, prefix: str, weights, bias: bool): if config.quantize == 'gptq': weight = weights.get_weights_row(prefix) else: weight = weights.get_sharded(f'{prefix}.weight', dim=0).T if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None return TensorParallelRowLinear(get_linear(weight, bias), process_group=weights.process_group) def load_col(config, prefix: str, weights, bias: bool): if config.quantize == 'gptq': weight = weights.get_multi_weights_col([prefix], dim=1) else: weight = weights.get_sharded(f'{prefix}.weight', dim=1).T if bias: bias = weights.get_sharded(f'{prefix}.bias', dim=0) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias)) class FlashGPT2Attention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.query_key_value = load_qkv(config, prefix=prefix, weights=weights, head_size=self.head_size, num_heads=self.num_heads) self.o_proj = load_row(config, prefix=f'{prefix}.c_proj', weights=weights, bias=True) self.kv_head_mapping = torch.arange(0, self.num_heads, dtype=torch.int32, device=weights.device) def forward(self, hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (query, key, value) = self.query_key_value(hidden_states).split(self.head_size * self.num_heads, dim=1) query = query.view(-1, self.num_heads, self.head_size) key = key.view(-1, self.num_heads, self.head_size) value = value.view(-1, self.num_heads, self.head_size) reshape_and_cache(key, value, kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = 
attention(query, kv_cache[0] if SYSTEM != 'ipex' else key, kv_cache[1] if SYSTEM != 'ipex' else value, seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class GPT2MLP(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() act = config.activation_function self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.c_fc = load_col(config, prefix=f'{prefix}.c_fc', weights=weights, bias=True) self.c_proj = load_row(config, prefix=f'{prefix}.c_proj', weights=weights, bias=True) intermediate_size = config.n_inner if config.n_inner is not None else 4 * config.hidden_size self.intermediate_size = intermediate_size // weights.process_group.size() def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) return self.c_proj(hidden_states) class FlashGPT2Layer(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.self_attn = FlashGPT2Attention(prefix=f'{prefix}.attn', config=config, weights=weights) self.mlp = GPT2MLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.ln_1', weights=weights, eps=config.layer_norm_epsilon) self.post_attention_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.ln_2', weights=weights, eps=config.layer_norm_epsilon) def forward(self, hidden_states, residual, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) attn_output = self.self_attn(hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) hidden_states = attn_output + residual residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) mlp_output = self.mlp(hidden_states) return (residual + mlp_output, residual) class FlashGPT2Model(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList([FlashGPT2Layer(prefix=f'h.{layer_id}' if not prefix else f'{prefix}.h.{layer_id}', config=config, weights=weights) for layer_id in range(config.num_hidden_layers)]) self.norm = nn.LayerNorm.load(prefix='ln_f' if not prefix else f'{prefix}.ln_f', weights=weights, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads def forward(self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor]) -> torch.Tensor: hidden_states = inputs_embeds residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) hidden_states = self.norm(hidden_states) return hidden_states class FlashGPT2ForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() 
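# GPT-2 uses learned absolute position embeddings: the wte (token) and wpe (position)
# embeddings below are summed in forward() before entering the transformer stack, and the
# LM head is loaded from the same wte weight (weight tying), wrapped in SpeculativeHead.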
self.embed_tokens = TensorParallelEmbedding(prefix='wte' if not prefix else f'{prefix}.wte', weights=weights) self.embed_positions = TensorParallelEmbedding(prefix='wpe' if not prefix else f'{prefix}.wpe', weights=weights) self.model = FlashGPT2Model(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='wte' if not prefix else f'{prefix}.wte', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]=None, lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: token_embeds = self.embed_tokens(input_ids) position_embeds = self.embed_positions(position_ids) inputs_embeds = token_embeds + position_embeds hidden_states = self.model(inputs_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s=max_s, prefill_cache_indices=prefill_cache_indices) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastLayerNorm def load_attention(config, prefix: str, weights): return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None linear = get_linear(weight, bias) return TensorParallelRowLinear(linear, process_group=weights.process_group) class GPTJRotary(PositionRotaryEmbedding): def forward(self, query: torch.Tensor, key: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor): if SYSTEM == 'cuda': import rotary_emb q1 = query[..., ::2] q2 = query[..., 1::2] rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False) k1 = key[..., ::2] k2 = key[..., 1::2] rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False) elif SYSTEM == 'rocm': from vllm._C import ops head_size = query.shape[-1] ops.rotary_embedding(query, key, head_size, cos, sin, False) elif SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex ipex.llm.functional.rotary_embedding(query, key, sin, cos, query.size(-1), False) else: raise ValueError('Your system seem to be not supported. 
Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.') class FlashGPTJAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.softmax_scale = self.head_size ** (-0.5) self.rotary_dim = config.rotary_dim if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix=prefix, weights=weights) self.o_proj = load_row(config, prefix=f'{prefix}.out_proj', weights=weights, bias=False) self.kv_head_mapping = torch.arange(0, self.num_heads, dtype=torch.int32, device=weights.device) self.rotary_emb = GPTJRotary.static(config=config, dim=self.rotary_dim, base=10000, device=weights.device) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (query, key, value) = self.query_key_value(hidden_states).split(self.head_size * self.num_heads, dim=1) query = query.view(-1, self.num_heads, self.head_size) key = key.view(-1, self.num_heads, self.head_size) value = value.view(-1, self.num_heads, self.head_size) if self.rotary_dim is not None: self.rotary_emb(query[..., :self.rotary_dim], key[..., :self.rotary_dim], cos, sin) else: self.rotary_emb(query, key, cos, sin) reshape_and_cache(key, value, kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else key, kv_cache[1] if SYSTEM != 'ipex' else value, seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class GPTJMLP(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() act = config.activation_function self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.fc_in = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.fc_in', weights=weights, bias=True) self.fc_out = load_row(config, prefix=f'{prefix}.fc_out', weights=weights, bias=True) def forward(self, hidden_states): hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) return self.fc_out(hidden_states) class FlashGPTJLayer(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.self_attn = FlashGPTJAttention(prefix=f'{prefix}.attn', config=config, weights=weights) self.mlp = GPTJMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = FastLayerNorm.load(prefix=f'{prefix}.ln_1', weights=weights, eps=config.layer_norm_epsilon) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (hidden_states, residual) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) feed_forward_hidden_states = self.mlp(hidden_states) return (attn_output + feed_forward_hidden_states, 
residual) class FlashGPTJModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.config = config self.wte = TensorParallelEmbedding(prefix=f'{prefix}.wte', weights=weights) self.layers = nn.ModuleList([FlashGPTJLayer(prefix=f'h.{layer_id}' if not prefix else f'{prefix}.h.{layer_id}', config=config, weights=weights) for layer_id in range(config.num_hidden_layers)]) self.ln_f = FastLayerNorm.load(prefix='ln_f' if not prefix else f'{prefix}.ln_f', weights=weights, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads def forward(self, input_ids: Optional[torch.LongTensor], position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]) -> torch.Tensor: hidden_states = self.wte(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.ln_f(hidden_states, residual) return hidden_states class FlashGPTJForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' self.model = FlashGPTJModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]=None, lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices=prefill_cache_indices) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py from contextlib import contextmanager from typing import List, Optional, Tuple import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, TensorParallelMultiAdapterLinear, TensorParallelAdapterRowLinear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.utils.weights import Weights from text_generation_server.layers.fp8 import HybridFP8UnquantLoader if SYSTEM == 'rocm': try: from vllm import _custom_C except Exception as e: raise ImportError(f'Could not load 
`vllm._custom_C`. Full error: {e}') def load_attention(config, prefix: str, weights, layer_id): bias = getattr(config, 'attention_bias', False) head_size = config.hidden_size // config.num_attention_heads sizes = None prefixes = None if config.model_type == 'phi3': base_layer = TensorParallelColumnLinear.load_qkv(config, prefix=f'{prefix}.qkv_proj', weights=weights, bias=bias, num_heads=config.num_attention_heads, num_key_value_heads=config.num_key_value_heads) prefixes = ['qkv_proj'] elif config.model_type == 'baichuan': prefix = f'{prefix}.W_pack' base_layer = TensorParallelColumnLinear.load_qkv(config, prefix=prefix, weights=weights, bias=bias, num_heads=config.num_attention_heads, num_key_value_heads=config.num_key_value_heads) prefixes = [prefix] else: prefixes = ['q_proj', 'k_proj', 'v_proj'] sizes = [head_size * config.num_attention_heads, head_size * config.num_key_value_heads, head_size * config.num_key_value_heads] base_layer = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=bias) return TensorParallelMultiAdapterLinear.load(base_layer=base_layer, layer_id=layer_id, layer_names=prefixes, sizes=sizes, process_group=weights.process_group) @contextmanager def no_fp8(weights: Weights): weights_loader = weights.weights_loader if isinstance(weights_loader, HybridFP8UnquantLoader) and weights_loader.to_fp8: weights_loader = HybridFP8UnquantLoader(weights_loader.activation_scale_ub, to_fp8=False) with weights.use_loader(weights_loader): yield class FlashLlamaAttention(torch.nn.Module): def __init__(self, index: int, prefix: str, config, weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads config.rope_theta = getattr(config, 'rope_theta', 10000) config.num_key_value_heads = getattr(config, 'num_key_value_heads', config.num_attention_heads) self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') if config.num_key_value_heads % weights.process_group.size() != 0: raise ValueError(f'`num_key_value_heads` must be divisible by `num_shards` (got `num_key_value_heads`: {config.num_key_value_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights, index) self.index = index o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.o_proj = TensorParallelAdapterRowLinear.load(o_proj, index, 'o_proj', process_group=weights.process_group) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data): qkv = self.query_key_value(hidden_states, adapter_data) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], 
dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size), adapter_data) class LlamaMLP(nn.Module): def __init__(self, prefix, config, weights, index): super().__init__() self.hidden_act = config.hidden_act self.act = ACT2FN[self.hidden_act] if 'gelu' not in self.hidden_act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if self.hidden_act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') prefixes = None sizes = None bias = getattr(config, 'mlp_bias', False) if config.model_type == 'phi3': gate_up_proj = TensorParallelColumnLinear.load_gate_up(config, prefix=f'{prefix}.gate_up_proj', weights=weights, bias=bias) else: prefixes = ['gate_proj', 'up_proj'] sizes = [config.intermediate_size, config.intermediate_size] gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=bias) self.gate_up_proj = TensorParallelMultiAdapterLinear.load(gate_up_proj, index, layer_names=prefixes, sizes=sizes, process_group=weights.process_group) down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=bias) self.down_proj = TensorParallelAdapterRowLinear.load(down_proj, index, 'down_proj', process_group=weights.process_group) self.intermediate_size = config.intermediate_size // weights.process_group.size() self.quantize = config.quantize def forward(self, hidden_states, adapter_data): if SYSTEM == 'rocm' and self.hidden_act == 'silu' and (hidden_states.shape[0] == 1) and (not self.quantize): out = torch.empty(hidden_states.shape[0], self.intermediate_size, dtype=hidden_states.dtype, device='cuda') _custom_C.LLMM_Silu(self.gate_up_proj.base_layer.linear.weight, hidden_states, out, 8) return self.down_proj(out, adapter_data) else: gate_up_states = self.gate_up_proj(hidden_states, adapter_data) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data) class FlashLlamaLayer(nn.Module): def __init__(self, index, prefix, config, weights): super().__init__() with no_fp8(weights): self.self_attn = FlashLlamaAttention(index=index, prefix=f'{prefix}.self_attn', config=config, weights=weights) self.mlp = LlamaMLP(prefix=f'{prefix}.mlp', config=config, weights=weights, index=index) self.input_layernorm = FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data) 
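# Note: FastRMSNorm(x, residual) is expected to fuse the residual add with the normalization and return (normed, updated_residual), so the attention output above is folded into the residual stream by post_attention_layernorm below rather than by an explicit add.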
(normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) mlp_output = self.mlp(normed_attn_res_output, adapter_data) return (mlp_output, attn_res) class FlashLlamaModel(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList() with no_fp8(weights): self.layers.append(FlashLlamaLayer(index=0, prefix='model.layers.0' if not prefix else f'{prefix}.model.layers.0', config=config, weights=weights)) self.layers.extend([FlashLlamaLayer(index=layer_id, prefix=f'model.layers.{layer_id}' if not prefix else f'{prefix}.model.layers.{layer_id}', config=config, weights=weights) for layer_id in range(1, config.num_hidden_layers - 1)]) with no_fp8(weights): last_layer_id = config.num_hidden_layers - 1 self.layers.append(FlashLlamaLayer(index=last_layer_id, prefix=f'model.layers.{last_layer_id}' if not prefix else f'{prefix}.model.layers.{last_layer_id}', config=config, weights=weights)) self.norm = FastRMSNorm.load(prefix='model.norm' if not prefix else f'{prefix}.model.norm', weights=weights, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], adapter_data) -> torch.Tensor: hidden_states = inputs_embeds (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, adapter_data) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashLlamaForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() with no_fp8(weights): self.embed_tokens = TensorParallelEmbedding(prefix='model.embed_tokens' if not prefix else f'{prefix}.model.embed_tokens', weights=weights) self.model = FlashLlamaModel(prefix, config, weights) if config.tie_word_embeddings: suffix = 'model.embed_tokens' else: suffix = 'lm_head' with no_fp8(weights): self.lm_head = SpeculativeHead.load(config, prefix=suffix if not prefix else f'{prefix}.{suffix}', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]=None, lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: inputs_embeds = self.embed_tokens(input_ids) hidden_states = self.model(inputs_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s=max_s, prefill_cache_indices=prefill_cache_indices, adapter_data=adapter_data) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = 
self.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, TensorParallelMultiAdapterLinear, TensorParallelAdapterRowLinear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastRMSNorm if SYSTEM == 'rocm': try: from vllm import _custom_C except Exception as e: raise ImportError(f'Could not load `vllm._custom_C`. Full error: {e}') class MistralConfig(PretrainedConfig): model_type = 'mistral' def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=None, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class MistralAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights, layer_id): super().__init__() self.max_past = config.sliding_window if config.sliding_window is not None else -1 self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size if hasattr(config, 'head_dim'): self.head_size = config.head_dim else: self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() query_key_value = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) self.query_key_value = TensorParallelMultiAdapterLinear.load(query_key_value, layer_id, ['q_proj', 'k_proj', 'v_proj'], sizes=[self.head_size * config.num_attention_heads, 
self.head_size * config.num_key_value_heads, self.head_size * config.num_key_value_heads], process_group=weights.process_group) o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.o_proj = TensorParallelAdapterRowLinear.load(o_proj, layer_id, 'o_proj', process_group=weights.process_group) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices, adapter_data): qkv = self.query_key_value(hidden_states, adapter_data) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv reshape_and_cache(kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv_to_cache[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv_to_cache[:, 1], seqlen, block_tables, self.softmax_scale, window_size_left=self.max_past) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size), adapter_data) class MistralMLP(nn.Module): def __init__(self, prefix: str, config, weights, layer_id): super().__init__() self.hidden_act = config.hidden_act self.act = ACT2FN[self.hidden_act] if 'gelu' not in self.hidden_act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if self.hidden_act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.gate_up_proj = TensorParallelMultiAdapterLinear.load(gate_up_proj, layer_id, ['gate_proj', 'up_proj'], sizes=[config.intermediate_size, config.intermediate_size], process_group=weights.process_group) down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.down_proj = TensorParallelAdapterRowLinear.load(down_proj, layer_id, 'down_proj', process_group=weights.process_group) self.intermediate_size = config.intermediate_size // weights.process_group.size() self.quantize = config.quantize def forward(self, hidden_states, adapter_data): if SYSTEM == 'rocm' and self.hidden_act == 'silu' and (hidden_states.shape[0] == 1) and (not self.quantize): out = torch.empty(hidden_states.shape[0], self.intermediate_size, dtype=hidden_states.dtype, device='cuda') _custom_C.LLMM_Silu(self.gate_up_proj.base_layer.linear.weight, hidden_states, out, 8) return self.down_proj(out, adapter_data) else: gate_up_states = self.gate_up_proj(hidden_states, adapter_data) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data) class MistralLayer(nn.Module): def __init__(self, prefix: str, config, weights, layer_id): super().__init__() self.self_attn = 
MistralAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights, layer_id=layer_id) self.mlp = MistralMLP(prefix=f'{prefix}.mlp', config=config, weights=weights, layer_id=layer_id) self.input_layernorm = FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices, adapter_data): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices, adapter_data) (normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) mlp_output = self.mlp(normed_attn_res_output, adapter_data) return (mlp_output, attn_res) class MistralModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList([MistralLayer(prefix=f'{prefix}.layers.{layer_id}', config=config, weights=weights, layer_id=layer_id) for layer_id in range(config.num_hidden_layers)]) self.norm = FastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], adapter_data: Optional[torch.Tensor]=None): hidden_states = inputs_embeds (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, true_max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, prefill_cache_indices, adapter_data) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashMistralForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights, name=None): if name is None: name = 'model' super().__init__() self.embed_tokens = TensorParallelEmbedding(prefix=f'{name}.embed_tokens' if not prefix else f'{prefix}.{name}.embed_tokens', weights=weights) self.model = MistralModel(prefix=name if not prefix else f'{prefix}.{name}', config=config, weights=weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head' if not prefix or name != 'model' else f'{prefix}.lm_head', weights=weights) self.max_past = config.sliding_window self.max_past_tensor = torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: 
Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: slots = slots[prefill_cache_indices] elif self.max_past is not None: seqlen = seqlen.clamp(max=self.max_past_tensor) inputs_embeds = self.embed_tokens(input_ids) hidden_states = self.model(inputs_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s, prefill_cache_indices, adapter_data) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py import torch import torch.distributed from torch import nn from text_generation_server.utils.import_utils import SYSTEM if SYSTEM != 'ipex': from vllm.model_executor.layers.fused_moe import fused_moe from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import FastLinear, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.weights import UnquantizedWeight class MixtralConfig(PretrainedConfig): model_type = 'mixtral' def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=None, num_experts_per_tok=2, num_local_experts=8, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x def load_attention(config, prefix: str, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = 
weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], f'{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' return TensorParallelColumnLinear(get_linear(weight, bias=None)) def _load_experts(config, prefix: str, mat, weights): if config.quantize is not None: raise NotImplementedError('Mixtral does not support weight quantization yet.') assert mat in ['w1', 'w2', 'w3'] world_size = weights.process_group.size() rank = weights.process_group.rank() assert config.intermediate_size % world_size == 0, f'The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards' block_size = config.intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty((config.num_local_experts * block_size, config.hidden_size), dtype=weights.dtype, device=weights.device) for i in range(config.num_local_experts): slice_ = weights._get_slice(f'{prefix}.{i}.{mat}.weight') if mat == 'w2': expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] tensor[i * block_size:(i + 1) * block_size] = expert_slice.to(dtype=weights.dtype).to(device=weights.device) return tensor class MixtralAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.max_past = config.sliding_window if config.sliding_window is not None else -1 self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): qkv = self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv 
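# Only the positions selected by prefill_cache_indices are written to the paged KV cache below (typically when a prefill is longer than the sliding window); otherwise the full prefill kv is cached.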
reshape_and_cache(kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv_to_cache[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv_to_cache[:, 1], seqlen, block_tables, self.softmax_scale, window_size_left=self.max_past) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) @torch.jit.script def select_experts(gate_logits: torch.Tensor, top_k: int): all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) (weights, selected_experts) = torch.topk(all_probs, top_k, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return (selected_experts, weights) @torch.jit.script def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode='trunc') * value class BlockSparseMoE(nn.Module): def __init__(self, prefix, config: MixtralConfig, weights): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size // weights.process_group.size() self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok act = config.hidden_act if 'gelu' in act: self.act = lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') elif 'silu' in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] self.gate = FastLinear.load(config, f'{prefix}.gate', weights, bias=False) w1 = _load_experts(config, f'{prefix}.experts', 'w1', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim) w3 = _load_experts(config, f'{prefix}.experts', 'w3', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim) self.w13 = torch.cat([w1, w3], dim=1) self.w2 = _load_experts(config, f'{prefix}.experts', 'w2', weights).view(self.num_experts, self.ffn_dim, self.hidden_dim).transpose(1, 2).contiguous() self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: router_logits = self.gate(x) out = fused_moe(x, self.w13, self.w2, router_logits, self.top_k, renormalize=True, inplace=True) if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out.view(*x.shape) class DenseMoE(nn.Module): def __init__(self, prefix, config: MixtralConfig, weights): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size // weights.process_group.size() self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok act = config.hidden_act if 'gelu' in act: self.act = lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') elif 'silu' in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] self.gate = FastLinear.load(config, f'{prefix}.gate', weights, bias=False) self.w1 = [TensorParallelColumnLinear.load(config, prefix=f'{prefix}.experts.{i}.w1', weights=weights, bias=False) for i in range(self.num_experts)] self.w3 = [TensorParallelColumnLinear.load(config, prefix=f'{prefix}.experts.{i}.w3', weights=weights, bias=False) for i in range(self.num_experts)] self.w2 = [TensorParallelRowLinear.load(config, prefix=f'{prefix}.experts.{i}.w2', weights=weights, bias=False) for i in range(self.num_experts)] self.process_group = 
weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: input_shape = x.shape x = x.view(-1, input_shape[-1]) gate_logits = self.gate(x) all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) if self.top_k < self.num_experts: (_, not_selected_experts) = torch.topk(all_probs, self.num_experts - self.top_k, largest=False, sorted=False, dim=1) all_probs.scatter_(1, not_selected_experts, 0) weights = all_probs / all_probs.sum(dim=1, keepdim=True) weights = weights.to(x.dtype) out = x.new_zeros(x.shape[0], self.hidden_dim) for i in range(self.num_experts): h = self.act(self.w1[i](x)) * self.w3[i](x) h = self.w2[i](h, reduce=False) out += h * weights[:, i].view(-1, 1) if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out class MixtralLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights): super().__init__() prefix = f'{prefix}.layers.{layer_id}' self.self_attn = MixtralAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE self.moe = moe_cls(f'{prefix}.block_sparse_moe', config, weights) self.input_layernorm = FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices) (normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) moe_output = self.moe(normed_attn_res_output) return (moe_output, attn_res) class MixtralModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.embed_tokens = TensorParallelEmbedding(prefix='model.embed_tokens' if not prefix else f'{prefix}.model.embed_tokens', weights=weights) self.layers = nn.ModuleList([MixtralLayer('model' if not prefix else f'{prefix}.model', layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.norm = FastRMSNorm.load(prefix='model.norm' if not prefix else f'{prefix}.model.norm', weights=weights, eps=config.rms_norm_eps) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor]) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, true_max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, prefill_cache_indices) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashMixtralForCausalLM(torch.nn.Module): def __init__(self, prefix: str, 
config, weights): super().__init__() self.model = MixtralModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head' if not prefix else f'{prefix}.lm_head', weights=weights) self.max_past = config.sliding_window self.max_past_tensor = torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: slots = slots[prefill_cache_indices] elif self.max_past is not None: seqlen = seqlen.clamp(max=self.max_past_tensor) hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s, prefill_cache_indices) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig as TransformersGPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.layernorm import FastLayerNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.weights import UnquantizedWeight class GPTNeoXConfig(TransformersGPTNeoXConfig): attribute_map = {'num_key_value_heads': 'num_attention_heads'} def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group) def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size): weight = weights.get_multi_weights_col([prefix], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.view(num_heads, 3, head_size, hidden_size).permute(1, 0, 2, 3).reshape(-1, hidden_size) bias = weights.get_sharded(f'{prefix}.bias', dim=0) bias = bias.view(num_heads, 3, head_size).permute(1, 0, 2).reshape(-1) linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelColumnLinear(linear) class FlashNeoxAttention(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads self.rotary_dim = int(config.rotary_pct * self.head_size) if self.num_heads % weights.process_group.size() != 0: 
raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.rotary_dim, base=config.rotary_emb_base, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) self.query_key_value = load_qkv(config, prefix=f'{prefix}.query_key_value', weights=weights, num_heads=self.num_heads, head_size=self.head_size, hidden_size=self.hidden_size) self.dense = load_row(config, prefix=f'{prefix}.dense', weights=weights, bias=True) self.kv_head_mapping = torch.arange(0, self.num_heads, dtype=torch.int32, device=weights.device) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) qkv = qkv.view(-1, 3, self.num_heads, self.head_size) query_rot = qkv[:, 0][..., :self.rotary_dim] query_pass = qkv[:, 0][..., self.rotary_dim:] key_rot = qkv[:, 1][..., :self.rotary_dim] key_pass = qkv[:, 1][..., self.rotary_dim:] self.rotary_emb(query_rot, key_rot, cos, sin) qkv[:, 0] = torch.cat((query_rot, query_pass), dim=-1) qkv[:, 1] = torch.cat((key_rot, key_pass), dim=-1) reshape_and_cache(qkv[:, 1], qkv[:, 2], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(qkv[:, 0], kv_cache[0] if SYSTEM != 'ipex' else qkv[:, 1], kv_cache[1] if SYSTEM != 'ipex' else qkv[:, 2], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(qkv[:, 0], kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class FlashMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.dense_h_to_4h = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.dense_h_to_4h', weights=weights, bias=True) self.dense_4h_to_h = load_row(config, prefix=f'{prefix}.dense_4h_to_h', weights=weights, bias=True) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class FlashNeoXLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() layer_norm_eps = config.layer_norm_eps prefix = f'gpt_neox.layers.{layer_id}' self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = FastLayerNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=layer_norm_eps) self.post_attention_layernorm = FastLayerNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=layer_norm_eps) self.attention = FlashNeoxAttention(config, prefix=f'{prefix}.attention', weights=weights) self.mlp = FlashMLP(config, prefix=f'{prefix}.mlp', weights=weights) self.process_group = weights.process_group def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): if self.use_parallel_residual: (ln1_hidden_states, _) = self.input_layernorm(hidden_states) attn_output = self.attention(ln1_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (ln2_hidden_states, _) = 
self.post_attention_layernorm(hidden_states) mlp_output = self.mlp(ln2_hidden_states) intermediate = mlp_output + attn_output if self.process_group.size() > 1: torch.distributed.all_reduce(intermediate, group=self.process_group) return (intermediate + hidden_states, None) else: (hidden_states, residual) = self.input_layernorm(hidden_states, residual) hidden_states = self.attention(hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (hidden_states, residual) = self.post_attention_layernorm(hidden_states, residual) mlp_output = self.mlp(hidden_states) return (mlp_output, residual) class FlashGPTNeoXPreTrainedModel(PreTrainedModel): config_class = GPTNeoXConfig base_model_prefix = 'gpt_neox' supports_gradient_checkpointing = False _no_split_modules = None class FlashGPTNeoXModel(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.config = config self.embed_in = TensorParallelEmbedding(prefix=f'{prefix}.embed_in', weights=weights) self.layers = nn.ModuleList([FlashNeoXLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.final_layer_norm = FastLayerNorm.load(prefix=f'{prefix}.final_layer_norm', weights=weights, eps=config.layer_norm_eps) self.gradient_checkpointing = False self.head_size = self.layers[0].attention.head_size self.num_heads = self.layers[0].attention.num_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.embed_in(input_ids) (cos, sin) = self.layers[0].attention.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.final_layer_norm(hidden_states, residual) return hidden_states class FlashGPTNeoXForCausalLM(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) if not prefix: prefix = 'gpt_neox' else: prefix = f'{prefix}.gpt_neox' self.gpt_neox = FlashGPTNeoXModel(prefix, config, weights) self.embed_out = SpeculativeHead.load(config, prefix='embed_out', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.gpt_neox(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.embed_out(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py import torch import torch.distributed from torch import nn from typing import Optional, List, Tuple from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear from text_generation_server.layers.attention import Seqlen from text_generation_server.models.custom_modeling.vlm import load_text_model, 
load_vision_model class PaliGemmaForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = config.quantize self.vision_tower = load_vision_model(prefix='vision_tower' if not prefix else f'{prefix}.vision_tower', config=config.vision_config, weights=weights) self.post_vision_tower_layernorm = nn.LayerNorm.load(prefix='vision_tower.vision_model.post_layernorm', weights=weights, eps=config.vision_config.layer_norm_eps) self.multi_modal_projector = TensorParallelColumnLinear.load(config, prefix='multi_modal_projector.linear', weights=weights, bias=True) self.vocab_size = config.vocab_size self.config = config text_config = config.text_config text_config.speculator = config.speculator text_config.quantize = config.quantize self.text_model = load_text_model(prefix='language_model' if not prefix else f'{prefix}.language_model', config=config.text_config, weights=weights) self.pad_token_id = config.pad_token_id if config.pad_token_id is not None else -1 def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]=None, lm_head_indices: Optional[torch.Tensor]=None, pixel_values: torch.FloatTensor=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_sizes: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: inputs_embeds = self.text_model.embed_tokens(input_ids) if cu_seqlen_prefill is not None: max_s += 1 position_ids += 1 if pixel_values is not None: pixel_values = pixel_values.to(dtype=inputs_embeds.dtype) image_outputs = self.vision_tower(pixel_values) last_hidden_state = self.post_vision_tower_layernorm(image_outputs.last_hidden_state) image_features = self.multi_modal_projector(last_hidden_state) mask = input_ids == self.config.image_token_index inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) hidden_states = self.text_model.model(inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.text_model.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.layernorm import FastLayerNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.import_utils import SYSTEM class PhiConfig(PretrainedConfig): def __init__(self, vocab_size=51200, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act='gelu_fast', layer_norm_eps=1e-05, pad_token_id=0, bos_token_id=1, 
eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, resid_pdrop=0.1, partial_rotary_factor=0.5, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.rope_theta = rope_theta self.resid_pdrop = resid_pdrop self.partial_rotary_factor = partial_rotary_factor super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=True) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if config.quantize not in ['gptq', 'awq', 'marlin']: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], f'{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' return TensorParallelColumnLinear(get_linear(weight, bias=True)) class FlashPhiAttention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.softmax_scale = self.head_size ** (-0.5) self.rotary_dim = int(config.partial_rotary_factor * self.head_size) self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.rotary_dim, base=config.rope_theta, device=weights.device) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.dense = TensorParallelRowLinear.load(config, prefix=f'{prefix}.dense', weights=weights, bias=True) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query[:, :, :self.rotary_dim], kv[:, 0, :, :self.rotary_dim], cos, sin) 
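# Partial rotary embedding: only the first rotary_dim channels of query/key are rotated (rotary_dim = partial_rotary_factor * head_size); the remaining channels pass through unrotated.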
reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.up_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.fc1', weights=weights, bias=True) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.fc2', weights=weights, bias=True) def forward(self, hidden_states): return self.down_proj(self.act(self.up_proj(hidden_states))) class FlashPhiLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights): super().__init__() prefix = f'{prefix}.layers.{layer_id}' self.self_attn = FlashPhiAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.mlp = PhiMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = FastLayerNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.layer_norm_eps) self.resid_dropout = torch.nn.Dropout(config.resid_pdrop) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) hidden_states = self.resid_dropout(attn_output).add(self.resid_dropout(self.mlp(hidden_states))) return (hidden_states, res) class FlashPhiModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.layers = nn.ModuleList([FlashPhiLayer(prefix, layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads self.norm = FastLayerNorm.load(prefix='model.final_layernorm', weights=weights, eps=config.layer_norm_eps) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashPhiForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): 
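# Note: the backbone is loaded under `prefix` ('model' by default), while the LM head below is loaded from the top-level 'lm_head' prefix and is not namespaced under `prefix`.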
super().__init__() if not prefix: prefix = 'model' else: prefix = f'{prefix}.model' self.model = FlashPhiModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] return self.lm_head(hidden_states) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.utils.import_utils import SYSTEM def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=True) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=True) class Qwen2Attention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.max_past = config.sliding_window if config.sliding_window is not None else -1 self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): qkv 
= self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv reshape_and_cache(kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv_to_cache[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv_to_cache[:, 1], seqlen, block_tables, self.softmax_scale, window_size_left=self.max_past) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class Qwen2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.intermediate_size = config.intermediate_size // weights.process_group.size() def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class Qwen2Layer(nn.Module): def __init__(self, prefix, layer_id, config, weights): super().__init__() prefix = f'{prefix}.layers.{layer_id}' self.self_attn = Qwen2Attention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.mlp = Qwen2MLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = FastRMSNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = FastRMSNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices) (normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) mlp_output = self.mlp(normed_attn_res_output) return (mlp_output, attn_res) class Qwen2Model(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() prefix = f'{prefix}.model' if prefix else 'model' process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.layers = nn.ModuleList([Qwen2Layer(prefix, layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.norm = FastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.rms_norm_eps) 
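# The head_size / num_heads / num_key_value_heads attributes set below mirror the sharded attention geometry of layer 0 and are presumably read by the serving code to size the paged KV cache.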
self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor]) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, true_max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, prefill_cache_indices) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class Qwen2ForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.model = Qwen2Model(prefix, config, weights) if config.tie_word_embeddings: suffix = 'model.embed_tokens' else: suffix = 'lm_head' self.lm_head = SpeculativeHead.load(config, prefix=f'{prefix}.{suffix}' if prefix else suffix, weights=weights) self.max_past = config.sliding_window self.max_past_tensor = torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor]=None, lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: slots = slots[prefill_cache_indices] elif self.max_past is not None: seqlen = seqlen.clamp(max=self.max_past_tensor) hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s, prefill_cache_indices) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py from typing import List, Optional, Tuple import torch import torch.distributed from torch import nn from transformers.configuration_utils import PretrainedConfig from transformers.modeling_utils import PreTrainedModel from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.layers import SpeculativeHead, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, get_linear from text_generation_server.layers.layernorm import FastLayerNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.attention import attention, paged_attention, reshape_and_cache, Seqlen def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None linear = get_linear(weight, bias) if config.parallel_attn: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group) class RWConfig(PretrainedConfig): attribute_map = 
{'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', 'num_key_value_heads': 'n_head_kv'} def __init__(self, model_type='RefinedWeb', vocab_size=250880, hidden_size=64, num_hidden_layers=None, num_attention_heads=None, num_ln_in_prallel_attention=None, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, multi_query=False, alibi=False, new_decoder_architecture=None, bias=False, parallel_attn=False, rope_theta=10000.0, **kwargs): if alibi: raise NotImplementedError('alibi is not supported by this version of the model') self.model_type = model_type self.alibi = False self.rotary = True self.rope_theta = rope_theta self.vocab_size = vocab_size n_embed = kwargs.pop('n_embed', None) self.hidden_size = hidden_size if n_embed is None else n_embed self.n_layer = num_hidden_layers if num_hidden_layers is not None else kwargs.pop('n_layer', 2) self.n_head = num_attention_heads if num_attention_heads is not None else kwargs.pop('n_head', 8) self.layer_norm_epsilon = layer_norm_epsilon self.num_ln_in_parallel_attn = num_ln_in_prallel_attention self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bias = bias self.parallel_attn = parallel_attn self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id if num_kv_heads is not None: self.n_head_kv = num_kv_heads else: old_n_head_kv = kwargs.pop('n_head_kv', None) if old_n_head_kv is not None: self.n_head_kv = old_n_head_kv else: self.n_head_kv = 1 if multi_query else self.n_head if new_decoder_architecture is not None: self.new_decoder_architecture = new_decoder_architecture elif model_type == 'RefinedWeb': self.new_decoder_architecture = True else: self.new_decoder_architecture = False super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) class FlashRWAttention(torch.nn.Module): def __init__(self, config, prefix: str, weights): super().__init__() self.num_heads = config.n_head self.num_heads_kv = config.n_head_kv self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rope_theta = config.rope_theta self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=self.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.query_key_value = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.query_key_value', weights=weights, bias=config.bias) self.dense = load_row(config, prefix=f'{prefix}.dense', weights=weights, bias=config.bias) if self.num_heads_kv == 1: self.kv_head_mapping = torch.zeros(self.num_heads, dtype=torch.int32, device=weights.device) else: self.kv_head_mapping = torch.arange(0, self.num_heads, dtype=torch.int32, device=weights.device) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_heads_kv], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_heads_kv, self.head_size) self.rotary_emb(query, 
torch.select(kv, dim=1, index=0), cos, sin) reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv[:, 1], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class FlashRWLargeAttention(torch.nn.Module): def __init__(self, config, prefix: str, weights): super().__init__() hidden_size = config.hidden_size num_heads = config.n_head num_groups = config.n_head_kv self.hidden_size = hidden_size self.head_size = hidden_size // num_heads self.num_groups = num_groups self.rope_theta = config.rope_theta self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=self.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) self.num_heads = num_heads // self.num_groups process_group = weights.process_group if process_group.size() > self.num_groups: raise NotImplementedError('Tensor Parallelism is not implemented for world_size > n groups') if self.num_groups % process_group.size() != 0: raise NotImplementedError(f'Tensor Parallelism is not implemented for {self.num_groups} not divisible by {process_group.size()}') self.num_groups = self.num_groups // process_group.size() self.query_key_value = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.query_key_value', weights=weights, bias=config.bias) self.dense = load_row(config, prefix=f'{prefix}.dense', weights=weights, bias=config.bias) self.kv_head_mapping = torch.arange(0, self.num_groups, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_heads) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.query_key_value(hidden_states) qkv = qkv.view(-1, self.num_groups, self.num_heads + 2, self.head_size) (query, kv) = qkv.split([self.num_heads, 2], dim=2) query = query.reshape(-1, self.num_groups * self.num_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=2, index=0), cos, sin) reshape_and_cache(kv[:, :, 0].contiguous(), kv[:, :, 1].contiguous(), kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv[:, :, 0].contiguous(), kv_cache[1] if SYSTEM != 'ipex' else kv[:, :, 1].contiguous(), seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.dense(attn_output.view(-1, self.num_groups * self.num_heads * self.head_size)) class FlashMLP(nn.Module): def __init__(self, config, prefix: str, weights): super().__init__() self.act = torch.nn.functional.gelu self.dense_h_to_4h = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.dense_h_to_4h', weights=weights, bias=config.bias) self.dense_4h_to_h = load_row(config, prefix=f'{prefix}.dense_4h_to_h', weights=weights, bias=config.bias) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class FlashRWLayer(nn.Module): def __init__(self, layer_id, prefix: str, config, weights): super().__init__() parallel_attn = config.parallel_attn 
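# --- Illustrative aside (not part of the upstream file) ---
# FlashRWLargeAttention above lays the fused projection out per KV group as
# [q_0 .. q_{n-1}, k, v] and separates it with split([num_heads, 2], dim=2).
# A tiny sketch of that layout with hypothetical sizes (2 groups, 4 query
# heads per group, head size 64); only plain torch is assumed.
import torch

num_groups, heads_per_group, head_size = 2, 4, 64
fused_qkv = torch.randn(3, num_groups * (heads_per_group + 2) * head_size)
fused_qkv = fused_qkv.view(-1, num_groups, heads_per_group + 2, head_size)
query, kv = fused_qkv.split([heads_per_group, 2], dim=2)
query = query.reshape(-1, num_groups * heads_per_group, head_size)  # flatten groups back into heads
assert kv.shape == (3, num_groups, 2, head_size)  # one K and one V per group
# --- end aside ---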
self.parallel_attn = parallel_attn prefix = f'{prefix}.h.{layer_id}' ln_prefix = 'input_layernorm' if config.num_hidden_layers == 80: ln_prefix = 'ln_attn' self.input_layernorm = FastLayerNorm.load(prefix=f'{prefix}.{ln_prefix}', weights=weights, eps=config.layer_norm_epsilon) self.self_attention = FlashRWAttention(config, prefix=f'{prefix}.self_attention', weights=weights) self.post_attention_layernorm = FastLayerNorm.load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.layer_norm_epsilon) if not parallel_attn else None self.mlp = FlashMLP(config, prefix=f'{prefix}.mlp', weights=weights) self.process_group = weights.process_group def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): if self.parallel_attn: (ln_hidden_states, residual) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attention(ln_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) mlp_output = self.mlp(ln_hidden_states) intermediate = mlp_output + attn_output if self.process_group.size() > 1: torch.distributed.all_reduce(intermediate, group=self.process_group) return (intermediate, residual) else: (hidden_states, residual) = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attention(hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if self.post_attention_layernorm is not None: (hidden_states, residual) = self.post_attention_layernorm(hidden_states, residual) mlp_output = self.mlp(hidden_states) return (mlp_output, residual) class FlashRWLayerNorm(nn.Module): def __init__(self, config, prefix: str, weights): super().__init__() self.num_ln = getattr(config, 'num_ln_in_parallel_attn', 1) if config.num_hidden_layers == 80: self.num_ln = 2 if self.num_ln == 1: self.input_ln = FastLayerNorm.load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.layer_norm_epsilon) elif self.num_ln == 2: self.ln_attn = FastLayerNorm.load(prefix=f'{prefix}.ln_attn', weights=weights, eps=config.layer_norm_epsilon) self.ln_mlp = FastLayerNorm.load(prefix=f'{prefix}.ln_mlp', weights=weights, eps=config.layer_norm_epsilon) else: raise ValueError('Number of layer norms can either be 1 or 2.') def forward(self, hidden_states, residual): if self.num_ln == 1: (ln_hidden_states, residual) = self.input_ln(hidden_states, residual) return (ln_hidden_states, ln_hidden_states, residual) elif self.num_ln == 2: (ln_attn, residual) = self.ln_attn(hidden_states, residual) (ln_mlp, _) = self.ln_mlp(residual) return (ln_attn, ln_mlp, residual) class FlashRWLargeLayer(nn.Module): def __init__(self, layer_id, prefix: str, config, weights): super().__init__() prefix = f'{prefix}.h.{layer_id}' self.ln_layer = FlashRWLayerNorm(config, prefix, weights) self.self_attention = FlashRWLargeAttention(config, prefix=f'{prefix}.self_attention', weights=weights) assert config.parallel_attn, "This version doesn't support non parallel_attn" self.mlp = FlashMLP(config, prefix=f'{prefix}.mlp', weights=weights) self.process_group = weights.process_group def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (ln_attn, ln_mlp, residual) = self.ln_layer(hidden_states, residual) attn_output = self.self_attention(ln_attn, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) mlp_output = self.mlp(ln_mlp) intermediate = attn_output + mlp_output if self.process_group.size() > 1: 
torch.distributed.all_reduce(intermediate, group=self.process_group) return (intermediate, residual) class FlashRWPreTrainedModel(PreTrainedModel): config_class = RWConfig class FlashRWModel(FlashRWPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.config = config self.word_embeddings = TensorParallelEmbedding(prefix=f'{prefix}.word_embeddings', weights=weights) if config.new_decoder_architecture: self.h = nn.ModuleList([FlashRWLargeLayer(layer_id, prefix, config, weights) for layer_id in range(config.num_hidden_layers)]) self.cache_size = self.h[0].self_attention.num_groups else: self.h = nn.ModuleList([FlashRWLayer(layer_id, prefix, config, weights) for layer_id in range(config.num_hidden_layers)]) self.cache_size = self.h[0].self_attention.num_heads_kv self.ln_f = FastLayerNorm.load(prefix=f'{prefix}.ln_f', weights=weights, eps=config.layer_norm_epsilon) self.head_size = self.h[0].self_attention.head_size def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.word_embeddings(input_ids) (cos, sin) = self.h[0].self_attention.rotary_emb.get_cos_sin(position_ids, max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.h): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.ln_f(hidden_states, residual) return hidden_states class FlashRWForCausalLM(FlashRWPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' self.transformer = FlashRWModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.transformer(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, SpeculativeHead, TensorParallelEmbedding, get_linear from text_generation_server.layers.gptq import GPTQWeightsLoader from text_generation_server.layers.layernorm import FastLayerNorm from text_generation_server.utils.import_utils import SYSTEM def load_multi_mqa(config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size): if config.quantize == 'gptq': return _load_multi_mqa_gptq(config, prefix, weights, bias, head_size, num_heads, 
hidden_size) elif config.quantize == 'marlin': raise RuntimeError('santacoder models with marlin quantization are not yet supported') else: return _load_multi_mqa(config, prefix, weights, bias, head_size, num_heads, hidden_size) def _load_multi_mqa_gptq(config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size): from text_generation_server.layers.gptq import GPTQWeight if any(('c_attn' in k for k in weights.routing.keys())) and (not config.transpose): world_size = weights.process_group.size() rank = weights.process_group.rank() slice_ = weights._get_slice(f'{prefix}.c_attn.qweight') shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size:] qweight = torch.cat([q_tensor, kv_tensor], dim=1) qweight = qweight.to(device=weights.device) slice_ = weights._get_slice(f'{prefix}.c_attn.scales') shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size:] scales = torch.cat([q_tensor, kv_tensor], dim=1) scales = scales.to(device=weights.device) slice_ = weights._get_slice(f'{prefix}.c_attn.qzeros') shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size * 4 // 32) // world_size start = rank * block_size stop = (rank + 1) * block_size assert 2 * head_size % (32 // 4) == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size * 4 // 32:] qzeros = torch.cat([q_tensor, kv_tensor], dim=1) qzeros = qzeros.to(device=weights.device) loader = weights.weights_loader assert isinstance(loader, GPTQWeightsLoader) loader._get_gptq_params(weights) if loader.quant_method == 'gptq': g_idx = weights.get_tensor(f'{prefix}.c_attn.g_idx') g_idx = g_idx.to(device=weights.device) elif loader.quant_method == 'awq': g_idx = None from text_generation_server.layers.awq.conversion_utils import fast_awq_to_gptq (qweight, qzeros) = fast_awq_to_gptq(qweight, qzeros) from text_generation_server.layers.gptq import HAS_EXLLAMA weight = GPTQWeight(qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, bits=loader.bits, groupsize=loader.groupsize, use_awq_kernel=loader.quantize == 'awq', use_exllama=HAS_EXLLAMA) if bias: slice_ = weights._get_slice(f'{prefix}.c_attn.bias') shape = slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size:] bias = torch.cat([q_tensor, kv_tensor], dim=0) bias = bias.to(device=weights.device) return TensorParallelColumnLinear(get_linear(weight, bias)) else: raise NotImplementedError('Gptq loading with santacoder is not implemented') def _load_multi_mqa(config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size): if any(('c_attn' in k for k in weights.routing.keys())): slice_ = weights._get_slice(f'{prefix}.c_attn.weight') shape = slice_.get_shape() world_size = weights.process_group.size() rank = weights.process_group.rank() if config.transpose: block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, 
start:stop] kv_tensor = slice_[:, -2 * head_size:] weight = torch.cat([q_tensor, kv_tensor], dim=1).T else: block_size = (shape[0] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size:] weight = torch.cat([q_tensor, kv_tensor], dim=0) if bias: slice_ = weights._get_slice(f'{prefix}.c_attn.bias') shape = slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size:] bias = torch.cat([q_tensor, kv_tensor], dim=0) else: if config.transpose: w = [weights.get_sharded(f'{prefix}.q_attn.weight', dim=1).T, weights.get_tensor(f'{prefix}.kv_attn.weight').T] weight = torch.cat(w, dim=0) else: w = [weights.get_sharded(f'{prefix}.q_attn.weight', dim=0), weights.get_tensor(f'{prefix}.kv_attn.weight')] weight = torch.cat(w, dim=1) if bias: b = [weights.get_sharded(f'{prefix}.q_attn.bias', dim=0), weights.get_tensor(f'{prefix}.kv_attn.bias')] bias = torch.cat(b, dim=0) else: bias = None weight = weight.to(dtype=weights.dtype).to(device=weights.device) assert list(weight.shape) == [(num_heads + 2) * head_size, hidden_size], f'{weight.shape} != {[(num_heads + 2) * head_size, hidden_size]}' if bias is not None: bias = bias.to(dtype=weights.dtype).to(device=weights.device) assert list(bias.shape) == [(num_heads + 2) * head_size], f'{weight.shape} != {[(num_heads + 2) * head_size]}' return TensorParallelColumnLinear(get_linear(weight, bias)) def load_col(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f'{prefix}.weight', dim=1).T else: weight = weights.get_multi_weights_col([prefix], dim=0) if bias: bias = weights.get_sharded(f'{prefix}.bias', dim=0) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias)) def load_row(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f'{prefix}.weight', dim=0).T else: weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: bias = weights.get_tensor(f'{prefix}.bias') else: bias = None return TensorParallelRowLinear(get_linear(weight, bias), process_group=weights.process_group) class FlashMQAttention(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.softmax_scale = self.head_size ** (-0.5) self.c_attn = load_multi_mqa(config, prefix=prefix, weights=weights, bias=True, head_size=self.head_size, hidden_size=hidden_size, num_heads=self.num_heads) self.c_proj = load_row(config, prefix=f'{prefix}.c_proj', weights=weights, bias=True) self.kv_head_mapping = torch.zeros(self.num_heads, dtype=torch.int32, device=weights.device) def forward(self, hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): qkv = self.c_attn(hidden_states) (query, key_value) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size], dim=1) 
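# --- Illustrative aside (not part of the upstream file) ---
# Multi-query attention (FlashMQAttention) keeps a single K/V head shared by
# every query head, so the fused projection yields num_heads * head_size query
# features but only 2 * head_size key/value features, and kv_head_mapping is
# all zeros. A minimal sketch with hypothetical sizes (8 query heads, head
# size 64); only plain torch is assumed.
import torch

num_heads, head_size = 8, 64
fused_qkv = torch.randn(5, (num_heads + 2) * head_size)
query, key_value = fused_qkv.split([num_heads * head_size, 2 * head_size], dim=1)
query = query.view(-1, num_heads, head_size)
key_value = key_value.view(-1, 2, 1, head_size)              # a single KV head
kv_head_mapping = torch.zeros(num_heads, dtype=torch.int32)  # every query head reads KV head 0
# --- end aside ---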
query = query.view(-1, self.num_heads, self.head_size) key_value = key_value.view(-1, 2, 1, self.head_size) reshape_and_cache(key_value[:, 0], key_value[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else key_value[:, 0], kv_cache[1] if SYSTEM != 'ipex' else key_value[:, 1], seqlen, block_tables, self.softmax_scale) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return self.c_proj(attn_output.view(-1, self.num_heads * self.head_size)) class MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.activation_function self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.c_fc = load_col(config, prefix=f'{prefix}.c_fc', weights=weights, bias=True) self.c_proj = load_row(config, prefix=f'{prefix}.c_proj', weights=weights, bias=True) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states class Block(nn.Module): def __init__(self, prefix: str, layer_id, config, weights): super().__init__() prefix = f'{prefix}.h.{layer_id}' self.ln_1 = FastLayerNorm.load(prefix=f'{prefix}.ln_1', weights=weights, eps=config.layer_norm_epsilon) self.ln_2 = FastLayerNorm.load(prefix=f'{prefix}.ln_2', weights=weights, eps=config.layer_norm_epsilon) self.self_attn = FlashMQAttention(prefix=f'{prefix}.attn', config=config, weights=weights) self.mlp = MLP(prefix=f'{prefix}.mlp', config=config, weights=weights) def forward(self, hidden_states, residual, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s): (hidden_states, residual) = self.ln_1(hidden_states, residual) hidden_states = self.self_attn(hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) (hidden_states, residual) = self.ln_2(hidden_states, residual) mlp_output = self.mlp(hidden_states) return (mlp_output, residual) class FlashSantacoderModel(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.config = config self.process_group = weights.process_group self.wte = TensorParallelEmbedding(prefix=f'{prefix}.wte', weights=weights, reduce=False) self.wpe = TensorParallelEmbedding(prefix=f'{prefix}.wpe', weights=weights, reduce=False) self.layers = nn.ModuleList([Block(prefix, layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.ln_f = FastLayerNorm.load(prefix='transformer.ln_f', weights=weights, eps=config.layer_norm_epsilon) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int) -> torch.Tensor: hidden_states = self.wte(input_ids) + self.wpe(position_ids) if self.process_group.size() > 1: torch.distributed.all_reduce(hidden_states, group=self.process_group) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s) (hidden_states, _) = self.ln_f(hidden_states, residual) return hidden_states class 
FlashSantacoderForCausalLM(nn.Module): def __init__(self, prefix, config, weights): super().__init__() if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' config.transpose = config.architectures[0].startswith('GPT2') self.model = FlashSantacoderModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix=f'{prefix}.wte', weights=weights) def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.layers.attention import paged_attention, attention, reshape_and_cache, Seqlen from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear from text_generation_server.layers.layernorm import FastLayerNorm, FastRMSNorm from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.weights import UnquantizedWeight from text_generation_server.utils.import_utils import SYSTEM class Starcoder2Config(PretrainedConfig): model_type = 'starcoder2' def __init__(self, vocab_size=49152, hidden_size=3072, intermediate_size=12288, num_hidden_layers=30, num_attention_heads=24, num_key_value_heads=2, mlp_type='default', hidden_act='gelu_pytorch_tanh', max_position_embeddings=4096, initializer_range=0.018042, norm_type='layer_norm', norm_epsilon=1e-05, use_cache=True, bos_token_id=50256, eos_token_id=50256, rope_theta=10000.0, sliding_window=None, attention_dropout=0.0, residual_dropout=0.0, embedding_dropout=0.0, use_bias: bool=True, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.use_bias = use_bias if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.mlp_type = mlp_type self.hidden_act = hidden_act self.initializer_range = initializer_range self.norm_type = norm_type self.norm_epsilon = norm_epsilon self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.residual_dropout = residual_dropout self.embedding_dropout = embedding_dropout super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', 
f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=config.use_bias) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col(prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size], f'{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}' if config.use_bias: w = [weights.get_sharded(f'{p}.bias', dim=0) for p in [f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj']] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias=bias)) class Starcoder2Attention(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.max_past = config.sliding_window if config.sliding_window is not None else -1 self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_size, base=config.rope_theta, device=weights.device) self.softmax_scale = self.head_size ** (-0.5) if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = config.num_key_value_heads // weights.process_group.size() self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, bias=config.use_bias) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange(0, self.num_key_value_heads, dtype=torch.int32, device=weights.device).repeat_interleave(self.num_groups) def forward(self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): qkv = self.query_key_value(hidden_states) (query, kv) = qkv.split([self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads], dim=1) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv reshape_and_cache(kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots) if cu_seqlen_prefill is not None: attn_output = attention(query, kv_cache[0] if SYSTEM != 'ipex' else kv_to_cache[:, 0], kv_cache[1] if SYSTEM != 'ipex' else kv_to_cache[:, 1], seqlen, block_tables, self.softmax_scale, window_size_left=self.max_past) else: attn_output = paged_attention(query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s) return 
self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class Starcoder2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.c_fc = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.c_fc', weights=weights, bias=config.use_bias) self.c_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.c_proj', weights=weights, bias=config.use_bias) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) return self.c_proj(hidden_states) class Starcoder2GatedMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=config.use_bias) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=config.use_bias) self.intermediate_size = config.intermediate_size // weights.process_group.size() def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) STARCODER2_NORMALIZATION_CLASSES = {'layer_norm': FastLayerNorm, 'rms_norm': FastRMSNorm} STARCODER2_MLP_CLASSES = {'default': Starcoder2MLP, 'gated': Starcoder2GatedMLP} class Starcoder2Layer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f'model.layers.{layer_id}' self.self_attn = Starcoder2Attention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.mlp = STARCODER2_MLP_CLASSES[config.mlp_type](prefix=f'{prefix}.mlp', config=config, weights=weights) self.input_layernorm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.norm_epsilon) self.post_attention_layernorm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.norm_epsilon) def forward(self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices): (normed_hidden_states, res) = self.input_layernorm(hidden_states, residual) attn_output = self.self_attn(normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, prefill_cache_indices) (normed_attn_res_output, attn_res) = self.post_attention_layernorm(attn_output, res) mlp_output = self.mlp(normed_attn_res_output) return (mlp_output, attn_res) class Starcoder2Model(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embed_tokens', weights=weights) self.layers = nn.ModuleList([Starcoder2Layer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.norm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(prefix=f'{prefix}.norm', weights=weights, 
eps=config.norm_epsilon) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor]) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) (cos, sin) = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids, true_max_s, hidden_states.dtype) residual = None for (i, layer) in enumerate(self.layers): (hidden_states, residual) = layer(hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, prefill_cache_indices) (hidden_states, _) = self.norm(hidden_states, residual) return hidden_states class FlashStarcoder2ForCausalLM(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() if not prefix: prefix = 'model' else: prefix = f'{prefix}.model' self.model = Starcoder2Model(prefix, config, weights) try: self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) except RuntimeError: self.lm_head = SpeculativeHead.load(config, prefix=f'{prefix}.embed_tokens', weights=weights) self.max_past = config.sliding_window self.max_past_tensor = torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: slots = slots[prefill_cache_indices] elif self.max_past is not None: seqlen = seqlen.clamp(max=self.max_past_tensor) hidden_states = self.model(input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, true_max_s, prefill_cache_indices) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics2.py """""" from typing import List, Optional, Tuple import torch import torch.utils.checkpoint from torch import nn import math from transformers.activations import ACT2FN from text_generation_server.models.custom_modeling.vlm import load_text_model from text_generation_server.layers.attention import Seqlen from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class Idefics2VisionEmbeddings(nn.Module): def 
__init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding='valid') self.patch_embedding.weight = nn.Parameter(weights.get_tensor(f'{prefix}.patch_embedding.weight'), requires_grad=False) self.patch_embedding.bias = nn.Parameter(weights.get_tensor(f'{prefix}.patch_embedding.bias'), requires_grad=False) self.num_patches_per_side = self.image_size // self.patch_size self.num_patches = self.num_patches_per_side ** 2 self.num_positions = self.num_patches self.position_embedding = TensorParallelEmbedding(prefix=f'{prefix}.position_embedding', weights=weights) def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor: (batch_size, _, max_im_h, max_im_w) = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) (max_nb_patches_h, max_nb_patches_w) = (max_im_h // self.patch_size, max_im_w // self.patch_size) boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side) position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0) for (batch_idx, p_attn_mask) in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-06, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-06, 1 / nb_patches_w) bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True) bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True) pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings class Idefics2VisionAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_size = self.embed_dim // self.num_heads if self.head_size * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_size ** (-0.5) self.dropout = config.attention_dropout self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.qkv = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=True) self.out_proj = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.out_proj', weights=weights, bias=True) self.is_causal = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, q_len, _) = hidden_states.size() qkv = self.qkv(hidden_states) (query_states, key_states, value_states) = qkv.split([self.head_size * self.num_heads, self.head_size * self.num_heads, self.head_size * self.num_heads], dim=2) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_size).transpose(1, 2) key_states = 
key_states.view(batch_size, q_len, self.num_heads, self.head_size).transpose(1, 2) value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_size).transpose(1, 2) k_v_seq_len = key_states.shape[-2] attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len): raise ValueError(f'Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len): raise ValueError(f'Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}') attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_size): raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_size)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output class Idefics2VisionMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load(prefix=f'{prefix}.fc1', config=config, weights=weights, bias=True) self.fc2 = TensorParallelRowLinear.load(prefix=f'{prefix}.fc2', config=config, weights=weights, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Idefics2EncoderLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = Idefics2VisionAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.layer_norm1 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm1', eps=config.layer_norm_eps, weights=weights) self.layer_norm2 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm2', eps=config.layer_norm_eps, weights=weights) self.mlp = Idefics2VisionMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class Idefics2Encoder(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.layers = nn.ModuleList([Idefics2EncoderLayer(prefix=f'{prefix}.layers.{i}', config=config, weights=weights) for i in range(config.num_hidden_layers)]) def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None): hidden_states = inputs_embeds for encoder_layer in self.layers: hidden_states = encoder_layer(hidden_states, attention_mask) return hidden_states 
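# --- Illustrative aside (not part of the upstream file) ---
# Idefics2VisionTransformer below defaults patch_attention_mask to an all-True
# grid of (height // patch_size, width // patch_size) patches and only builds a
# 4D additive mask when some patches are padding. A small sketch of that
# default path, assuming 364x364 pixel inputs and 14x14 patches (hypothetical
# values); only plain torch is assumed.
import torch

batch_size, patch_size = 2, 14
pixel_values = torch.randn(batch_size, 3, 364, 364)
patch_attention_mask = torch.ones(
    (batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size),
    dtype=torch.bool,
)
# With no padded patches the encoder can skip attention masking entirely.
assert not torch.any(~patch_attention_mask.view(batch_size, -1))
# --- end aside ---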
class Idefics2VisionTransformer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embeddings = Idefics2VisionEmbeddings(prefix=f'{prefix}.embeddings', config=config, weights=weights) self.encoder = Idefics2Encoder(prefix=f'{prefix}.encoder', config=config, weights=weights) self.post_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.post_layernorm', weights=weights, eps=config.layer_norm_eps) def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None): batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.config.patch_size patch_attention_mask = torch.ones((batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size)) patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device) hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) patch_attention_mask = patch_attention_mask.view(batch_size, -1) if not torch.any(~patch_attention_mask): patch_attention_mask = None else: patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=patch_attention_mask) last_hidden_state = encoder_outputs last_hidden_state = self.post_layernorm(last_hidden_state) return last_hidden_state class Idefics2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.text_config.hidden_act self.act = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none') self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) def forward(self, hidden_states): start_shape = hidden_states.shape[:-1] gate_up_states = self.gate_up_proj(hidden_states) intermediate_size = gate_up_states.shape[-1] // 2 gate_up_states = gate_up_states.view(-1, 2, intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]).view(*start_shape, -1) class Idefics2RMSNorm(nn.Module): def __init__(self, prefix, weights, eps): super().__init__() self.weight = nn.Parameter(weights.get_tensor(f'{prefix}.weight'), requires_grad=False) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class Idefics2PerceiverAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.layer_idx = None self.hidden_size = config.text_config.hidden_size self.num_heads = config.perceiver_config.resampler_n_heads self.head_size = config.perceiver_config.resampler_head_dim self.num_key_value_heads = config.perceiver_config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.attention_dropout = config.perceiver_config.attention_dropout self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = self.num_key_value_heads // weights.process_group.size() self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', 
weights=weights, bias=False) self.kv = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) self.o_proj = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.o_proj', weights=weights, bias=False) self.is_causal = False def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = latents.size() kv_seq_len = q_len + context.size()[1] hidden_states = torch.concat([context, latents], dim=-2) query_states = self.q_proj(latents) kv = self.kv(hidden_states) (key_states, value_states) = kv.split([self.head_size * self.num_key_value_heads, self.head_size * self.num_key_value_heads], dim=2) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_size).transpose(1, 2) key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_size).transpose(1, 2) value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_size).transpose(1, 2) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_size) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError(f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}') attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_size): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_size)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_size) attn_output = self.o_proj(attn_output) return attn_output class Idefics2PerceiverLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.hidden_size = config.text_config.hidden_size self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps self.input_latents_norm = Idefics2RMSNorm(prefix=f'{prefix}.input_latents_norm', weights=weights, eps=self.rms_norm_eps) self.input_context_norm = Idefics2RMSNorm(prefix=f'{prefix}.input_context_norm', weights=weights, eps=self.rms_norm_eps) self.self_attn = Idefics2PerceiverAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.post_attention_layernorm = Idefics2RMSNorm(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=self.rms_norm_eps) self.mlp = Idefics2MLP(prefix=f'{prefix}.mlp', config=config, weights=weights) def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None): residual = latents latents = self.input_latents_norm(latents) context = self.input_context_norm(context) latents = self.self_attn(latents=latents, context=context, attention_mask=attention_mask) latents = 
residual + latents residual = latents latents = self.post_attention_layernorm(latents) latents = self.mlp(latents) latents = residual + latents return latents class Idefics2PerceiverResampler(nn.Module): def __init__(self, prefix, config, weights) -> None: super().__init__() self.hidden_size = config.text_config.hidden_size self.hidden_act = config.perceiver_config.hidden_act self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps self.latents = weights.get_tensor(f'{prefix}.latents') self.layers = nn.ModuleList([Idefics2PerceiverLayer(prefix=f'{prefix}.layers.{idx}', config=config, weights=weights) for idx in range(self.depth)]) self.norm = Idefics2RMSNorm(prefix=f'{prefix}.norm', weights=weights, eps=config.text_config.rms_norm_eps) def forward(self, context: torch.Tensor, attention_mask) -> torch.Tensor: latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size())) latent_attention_mask = torch.ones((attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1) attention_mask = _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents) compressed_context = latents for perceiver_layer in self.layers: compressed_context = perceiver_layer(compressed_context, context, attention_mask=attention_mask) compressed_context = self.norm(compressed_context) return compressed_context class Idefics2Connector(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.modality_projection = Idefics2MLP(prefix=f'{prefix}.modality_projection', config=config, weights=weights) self.perceiver_resampler = Idefics2PerceiverResampler(prefix=f'{prefix}.perceiver_resampler', config=config, weights=weights) def forward(self, image_hidden_states, attention_mask): image_hidden_states = self.modality_projection(image_hidden_states) image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask) return image_hidden_states class Idefics2ForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = None config.vision_config.speculator = config.speculator config.text_config.quantize = config.quantize config.text_config.speculator = config.speculator vision_config = config.vision_config self.text_model = load_text_model(prefix='model' if not prefix else f'{prefix}.model', config=config.text_config, weights=weights, name='text_model') self.dtype = weights.dtype with weights.use_loader(DefaultWeightsLoader(UnquantizedWeight)): self.vision_model = Idefics2VisionTransformer(prefix=f'{prefix}.model.vision_model' if prefix else 'model.vision_model', config=vision_config, weights=weights) config.quantize = None self.connector = Idefics2Connector(prefix=f'{prefix}.model.connector' if prefix else 'model.connector', config=config, weights=weights) self.config = config self.image_seq_len = config.perceiver_config.resampler_n_latents self.image_token_id = config.image_token_id self.pad_token_id = config.pad_token_id if config.pad_token_id is not None else -1 def _merge_input_ids_with_image_features(self, input_ids: torch.Tensor, inputs_embeds: torch.Tensor, image_features: torch.Tensor): mask = input_ids == self.config.image_token_id inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) return inputs_embeds def 
forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, pixel_values: torch.FloatTensor=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_sizes: Optional[torch.Tensor]=None, adapter_data: Optional[torch.Tensor]=None): inputs_embeds = self.text_model.embed_tokens(input_ids) if pixel_values is not None: (batch_size, num_images, num_channels, height, width) = pixel_values.shape all_states = [] all_pixel_values = pixel_values all_pixel_mask = pixel_attention_mask for i in range(batch_size): pixel_values = all_pixel_values.to(dtype=self.dtype) pixel_values = pixel_values[i:i + 1] pixel_values = pixel_values.view(num_images, *pixel_values.shape[2:]) nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image pixel_values = pixel_values[real_images_inds].contiguous() if pixel_attention_mask is None: pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device) else: pixel_attention_mask = all_pixel_mask[i:i + 1] pixel_attention_mask = pixel_attention_mask.view(1 * num_images, *pixel_attention_mask.shape[2:]) pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) image_hidden_states = self.connector(image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1)) all_states.append(image_hidden_states) image_hidden_states = torch.stack(all_states, dim=0) inputs_embeds = self._merge_input_ids_with_image_features(input_ids, inputs_embeds, image_hidden_states) hidden_states = self.text_model.model(inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, true_max_s=max_s, prefill_cache_indices=None, adapter_data=adapter_data) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.text_model.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_config.py """""" import copy from transformers import PretrainedConfig IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = {'HuggingFaceM4/idefics-9b': 'https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json', 'HuggingFaceM4/idefics-80b': 'https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json'} class IdeficsVisionConfig(PretrainedConfig): model_type = 'idefics' attribute_map = {'hidden_size': 'embed_dim'} def __init__(self, embed_dim=768, image_size=224, intermediate_size=5120, patch_size=14, num_hidden_layers=32, num_attention_heads=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, 
initializer_factor=1.0, **kwargs): self.embed_dim = embed_dim self.image_size = image_size self.intermediate_size = intermediate_size self.patch_size = patch_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act super().__init__(**kwargs) class IdeficsPerceiverConfig(PretrainedConfig): model_type = 'idefics' def __init__(self, use_resampler=False, resampler_n_latents=64, resampler_depth=6, resampler_n_heads=16, resampler_head_dim=96, qk_layer_norms_perceiver=False, **kwargs): self.use_resampler = use_resampler self.resampler_n_latents = resampler_n_latents self.resampler_depth = resampler_depth self.resampler_n_heads = resampler_n_heads self.resampler_head_dim = resampler_head_dim self.qk_layer_norms_perceiver = qk_layer_norms_perceiver super().__init__(**kwargs) class IdeficsConfig(PretrainedConfig): model_type = 'idefics' is_composition = True def __init__(self, vocab_size=32000, additional_vocab_size=0, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, dropout=0.0, hidden_act='silu', initializer_range=0.02, alpha_initializer='zeros', alphas_initializer_range=0.0, alpha_type='float', rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, cross_layer_interval=1, qk_layer_norms=False, freeze_text_layers=True, freeze_text_module_exceptions=[], freeze_lm_head=False, freeze_vision_layers=True, freeze_vision_module_exceptions=[], use_resampler=False, vision_config=None, perceiver_config=None, **kwargs): self.vocab_size = vocab_size self.additional_vocab_size = additional_vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.hidden_act = hidden_act self.initializer_range = initializer_range self.alpha_initializer = alpha_initializer self.alphas_initializer_range = alphas_initializer_range self.alpha_type = alpha_type self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.cross_layer_interval = cross_layer_interval self.qk_layer_norms = qk_layer_norms self.freeze_vision_layers = freeze_vision_layers self.freeze_text_layers = freeze_text_layers self.freeze_text_module_exceptions = freeze_text_module_exceptions self.freeze_vision_module_exceptions = freeze_vision_module_exceptions self.freeze_lm_head = freeze_lm_head self.use_resampler = use_resampler if perceiver_config is None: self.perceiver_config = IdeficsPerceiverConfig() elif isinstance(perceiver_config, dict): self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config) elif isinstance(perceiver_config, IdeficsPerceiverConfig): self.perceiver_config = perceiver_config if vision_config is None: self.vision_config = IdeficsVisionConfig() elif isinstance(vision_config, dict): self.vision_config = IdeficsVisionConfig(**vision_config) elif isinstance(vision_config, IdeficsVisionConfig): self.vision_config = vision_config super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def to_dict(self): output = copy.deepcopy(self.__dict__) output['vision_config'] = self.vision_config.to_dict() output['perceiver_config'] = 
self.perceiver_config.to_dict() output['model_type'] = self.__class__.model_type return output # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_image_processing.py """""" from typing import Callable, Dict, List, Optional, Union, Iterable import numpy as np from PIL import Image import transformers from transformers.image_processing_utils import BaseImageProcessor, BatchFeature from transformers.image_transforms import resize, to_channel_dimension_format, rescale, normalize from transformers.image_utils import ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images from io import BytesIO import base64 import requests from transformers import TensorType, is_torch_available IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073] IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711] def convert_to_rgb(image): if image.mode == 'RGB': return image image_rgba = image.convert('RGBA') background = Image.new('RGBA', image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert('RGB') return alpha_composite class IdeficsImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, image_size: int=224, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, image_num_channels: Optional[int]=3, **kwargs) -> None: super().__init__(**kwargs) self.image_size = image_size self.image_num_channels = image_num_channels self.image_mean = image_mean self.image_std = image_std def preprocess(self, images: ImageInput, image_num_channels: Optional[int]=3, image_size: Optional[Dict[str, int]]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, transform: Callable=None, **kwargs) -> TensorType.PYTORCH: image_size = image_size if image_size is not None else self.image_size image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = (image_size, image_size) if len(images) == 0: return [] images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') if transform is not None: if not is_torch_available(): raise ImportError('To pass in `transform` torch must be installed') import torch images = [transform(x) for x in images] return torch.stack(images) images = [convert_to_rgb(x) for x in images] images = [to_numpy_array(x) for x in images] images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images] images = [self.rescale(image=image, scale=1 / 255) for image in images] images = [self.normalize(x, mean=image_mean, std=image_std) for x in images] images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images] images = BatchFeature(data={'pixel_values': images}, tensor_type=TensorType.PYTORCH)['pixel_values'] return images def fetch_images(self, image_url_or_urls: Union[str, List[str]]): headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'} if isinstance(image_url_or_urls, list): return [self.fetch_images(x) for x in image_url_or_urls] elif isinstance(image_url_or_urls, str): image = image_url_or_urls if image.startswith('http://') or image.startswith('https://'): response = requests.get(image_url_or_urls, stream=True, headers=headers, timeout=(1, 5)) response.raise_for_status() content = response.content elif image.startswith('data:'): image = image.split(',')[-1] content = base64.b64decode(image) else: raise ValueError(f'Unrecognized image {image}') try: image = Image.open(BytesIO(content)) except Exception: raise ValueError(f'Could not load image from url {image_url_or_urls}') return image else: raise ValueError(f'only a single or a list of entries is supported but got type={type(image_url_or_urls)}') def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) transformers.IdeficsImageProcessor = IdeficsImageProcessor # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_modeling.py """""" from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers import PreTrainedModel from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, dataclass from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig from text_generation_server.models.custom_modeling.idefics_vision import IdeficsVisionTransformer from text_generation_server.models.custom_modeling.idefics_perceiver import IdeficsPerceiverResampler from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, FastLinear from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.import_utils import SYSTEM from loguru import logger if SYSTEM == 'cuda': import dropout_layer_norm elif SYSTEM == 'rocm': from 
vllm._C import ops else: dropout_layer_norm = None @dataclass class BaseModelOutputWithPastImage(BaseModelOutputWithPast): image_hidden_states: Optional[torch.FloatTensor] = None @dataclass class CausalLMOutputWithPastImage(CausalLMOutputWithPast): image_hidden_states: Optional[torch.FloatTensor] = None def expand_inputs_for_generation(input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs): expanded_return_idx = torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) input_ids = input_ids.index_select(0, expanded_return_idx) if 'token_type_ids' in model_kwargs: token_type_ids = model_kwargs['token_type_ids'] model_kwargs['token_type_ids'] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs['attention_mask'] = attention_mask.index_select(0, expanded_return_idx) model_kwargs['image_attention_mask'] = model_kwargs['image_attention_mask'].index_select(0, expanded_return_idx) model_kwargs['pixel_values'] = model_kwargs['pixel_values'].index_select(0, expanded_return_idx) if is_encoder_decoder: if encoder_outputs is None: raise ValueError('If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.') encoder_outputs['last_hidden_state'] = encoder_outputs.last_hidden_state.index_select(0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)) model_kwargs['encoder_outputs'] = encoder_outputs return (input_ids, model_kwargs) def update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False): model_kwargs['past_key_values'] = model_kwargs.get('past_key_values', None) if 'past_key_values' in outputs: model_kwargs['past'] = outputs.past_key_values elif 'mems' in outputs: model_kwargs['past'] = outputs.mems elif 'past_buckets_states' in outputs: model_kwargs['past'] = outputs.past_buckets_states else: model_kwargs['past'] = None if 'token_type_ids' in model_kwargs: token_type_ids = model_kwargs['token_type_ids'] model_kwargs['token_type_ids'] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) if not is_encoder_decoder: if 'attention_mask' in model_kwargs: attention_mask = model_kwargs['attention_mask'] model_kwargs['attention_mask'] = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1) if 'image_attention_mask' in model_kwargs: image_attention_mask = model_kwargs['image_attention_mask'] last_mask = image_attention_mask[:, -1, :].unsqueeze(1) model_kwargs['image_attention_mask'] = last_mask return model_kwargs def prepare_inputs_for_generation(input_ids, past=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past: input_ids = input_ids[:, -1].unsqueeze(-1) if token_type_ids is not None: token_type_ids = token_type_ids[:, -1].unsqueeze(-1) attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past: position_ids = position_ids[:, -1].unsqueeze(-1) pixel_values = kwargs.get('pixel_values', None) image_attention_mask = kwargs.get('image_attention_mask', None) return {'input_ids': input_ids, 'past_key_values': past, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'pixel_values': pixel_values, 'image_attention_mask': image_attention_mask}
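# The three helpers above are incremental-decoding glue. Below is a minimal, self-contained sketch
# of the position-id / last-token logic in `prepare_inputs_for_generation`; the tensors and the
# `past` placeholder are invented for illustration and are not part of the original file.
import torch

attention_mask = torch.tensor([[0, 1, 1, 1]])          # left-padded prompt, batch of 1
input_ids = torch.tensor([[99, 10, 11, 12]])
position_ids = attention_mask.long().cumsum(-1) - 1    # [[-1, 0, 1, 2]]
position_ids.masked_fill_(attention_mask == 0, 1)      # padded slot gets a dummy position: [[1, 0, 1, 2]]
past = True                                            # stand-in: any non-empty past_key_values
if past:                                               # decode step: feed only the newest token
    input_ids = input_ids[:, -1].unsqueeze(-1)         # [[12]]
    position_ids = position_ids[:, -1].unsqueeze(-1)   # [[2]]
def 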
freeze_model(model, module_exceptions=[]): mapping = {'LayerNorm': nn.LayerNorm, 'Linear': nn.Linear, 'Embedding': nn.Embedding} module_exceptions_mapped = [mapping[m] for m in module_exceptions] for module in model.modules(): if module_exceptions and any([isinstance(module, t) for t in module_exceptions_mapped]): module.requires_grad_(True) else: module.requires_grad_(False) return model class IdeficsDecoupledPartialTPEmbedding(nn.Module): def __init__(self, config, weights): super().__init__() self.num_embeddings = config.vocab_size self.weight = TensorParallelEmbedding(prefix='model.embed_tokens', weights=weights) self.additional_weight = nn.Parameter(weights.get_tensor('model.embed_tokens.additional_embedding.weight')) def forward(self, input_ids): input_ids = input_ids.clone() additional_vocab_indices = torch.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = input_ids[additional_vocab_indices] additional_embeddings = torch.nn.functional.embedding(input_ids_additional_vocab - self.num_embeddings, self.additional_weight) input_ids[additional_vocab_indices] = 0 full_vector = self.weight(input_ids) full_vector[additional_vocab_indices] = additional_embeddings return full_vector class IdeficsDecoupledTensorParallelLinear(nn.Module): def __init__(self, config, weights) -> None: super().__init__() self.fc = SpeculativeHead.load(config=config, prefix='lm_head', weights=weights) self.additional_fc = FastLinear.load(config=config, prefix='lm_head.additional_fc', weights=weights, bias=False) def forward(self, input: torch.Tensor) -> torch.Tensor: (output, speculative_logits) = self.fc(input) additional_features = self.additional_fc(input) output = torch.cat((output, additional_features), -1) return (output, speculative_logits) def extra_repr(self) -> str: return 'in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}'.format(self.in_features, self.out_features, self.out_additional_features, self.bias is not None, self.partially_freeze) def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0): (bsz, tgt_len) = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (bsz, src_len) = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class IdeficsRMSNorm(nn.Module): def __init__(self, prefix, weights, eps=1e-06): super().__init__() weight = weights.get_tensor(f'{prefix}.weight') self.weight = nn.Parameter(weight) self.variance_epsilon = eps def forward(self, hidden_states, residual=None): if SYSTEM == 'ipex': import intel_extension_for_pytorch as ipex out = ipex.llm.functional.add_rms_norm(residual, hidden_states, self.weight, None, self.variance_epsilon, residual is not None) return out elif hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual 
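# Wide-hidden-size fallback: the norm below is computed in plain PyTorch in float32, then cast back to the weight dtype (for fp16/bf16 weights) and scaled by the learned weight.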
residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states elif SYSTEM == 'cuda': unwrap = False if len(hidden_states.shape) > 2: unwrap = True shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, shape[-1]) (normed_hidden_states, res, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, residual, self.weight, None, None, None, None, None, 0.0, self.variance_epsilon, 1.0, 0, None, False, True) if res is None: res = hidden_states if unwrap: normed_hidden_states = normed_hidden_states.view(*shape) return normed_hidden_states elif SYSTEM == 'rocm': if residual is not None: hidden_states += residual residual = hidden_states unwrap = False if len(hidden_states.shape) > 2: unwrap = True shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, shape[-1]) out = torch.empty_like(hidden_states) ops.rms_norm(out, hidden_states, self.weight.data, self.variance_epsilon) if unwrap: out = out.view(*shape) return out else: raise ValueError('Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.') class IdeficsMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.gate_up_proj = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.gate_proj', f'{prefix}.up_proj'], weights=weights, dim=0, bias=False) self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) shape = gate_up_states.shape gate_up_states = gate_up_states.view(*shape[:-1], 2, shape[-1] // 2) return self.down_proj(self.act_fn(gate_up_states[:, :, 0]) * gate_up_states[:, :, 1]) class IdeficsAttention(nn.Module): def __init__(self, config, prefix, weights, qk_layer_norms: bool=False, is_cross_attention: bool=False): super().__init__() self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.dropout = config.dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.is_cross_attention = is_cross_attention if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads //= weights.process_group.size() if self.is_cross_attention: self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', weights=weights, bias=False) self.k_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.k_proj', weights=weights, bias=False) self.v_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.v_proj', weights=weights, bias=False) else: self.qkv = TensorParallelColumnLinear.load_multi(config, prefixes=[f'{prefix}.q_proj', f'{prefix}.k_proj', f'{prefix}.v_proj'], dim=0, weights=weights, bias=False) self.o_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o_proj', weights=weights, 
bias=False) self.rotary_emb = PositionRotaryEmbedding.static(config=config, dim=self.head_dim, base=10000.0, device=weights.device) self.qk_layer_norms = qk_layer_norms if self.qk_layer_norms: self.q_layer_norm = IdeficsRMSNorm(prefix=f'{prefix}.q_layer_norm', weights=weights, eps=config.rms_norm_eps) self.k_layer_norm = IdeficsRMSNorm(prefix=f'{prefix}.q_layer_norm', weights=weights, eps=config.rms_norm_eps) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = self.is_cross_attention or key_value_states is not None (bsz, q_len, _) = hidden_states.size() if is_cross_attention: query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim) query_states = query_states.transpose(1, 2) (_, kv_len, _) = key_value_states.size() key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) else: qkv = self.qkv(hidden_states) (query_states, key_states, value_states) = qkv.split(self.num_heads * self.head_dim, dim=2) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim) value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim) kv_seq_len = q_len if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] max_s = max(kv_seq_len, q_len) (cos, sin) = self.rotary_emb.get_cos_sin(position_ids.view(-1), max_s, hidden_states.dtype) query_shape = query_states.shape key_shape = key_states.shape self.rotary_emb(query_states.view(-1, *query_shape[2:]), key_states.reshape(-1, *key_shape[2:]), cos, sin) query_states = query_states.view(query_shape) key_states = key_states.view(key_shape) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if past_key_value is not None: key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if self.qk_layer_norms: query_states = self.q_layer_norm(query_states) key_states = self.k_layer_norm(key_states) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}') attn_output = nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) 
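# Note: scaled_dot_product_attention does not return the attention probabilities, so attn_weights stays None below (a warning is emitted when output_attentions is requested).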
attn_weights = None if output_attentions: logger.warning_once('attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead') return (attn_output, attn_weights, past_key_value) class IdeficsDecoderLayer(nn.Module): def __init__(self, layer_id: int, config: IdeficsConfig, weights): super().__init__() self.process_group = weights.process_group self.hidden_size = config.hidden_size prefix = f'model.layers.{layer_id}' self.self_attn = IdeficsAttention(config=config, prefix=f'{prefix}.self_attn', weights=weights, qk_layer_norms=False, is_cross_attention=False) self.mlp = IdeficsMLP(config=config, prefix=f'{prefix}.mlp', weights=weights) self.input_layernorm = IdeficsRMSNorm(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) self.dropout = config.dropout def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class IdeficsGatedCrossAttentionLayer(nn.Module): def __init__(self, layer_id, config: IdeficsConfig, weights): super().__init__() self.process_group = weights.process_group self.hidden_size = config.hidden_size prefix = f'model.gated_cross_attn_layers.{layer_id}' self.cross_attn = IdeficsAttention(config=config, prefix=f'{prefix}.cross_attn', weights=weights, qk_layer_norms=True, is_cross_attention=True) self.mlp = IdeficsMLP(config=config, prefix=f'{prefix}.mlp', weights=weights) self.input_layernorm = IdeficsRMSNorm(prefix=f'{prefix}.input_layernorm', weights=weights, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(prefix=f'{prefix}.post_attention_layernorm', weights=weights, eps=config.rms_norm_eps) self.config = config.dropout self.act_cross_attn = nn.Tanh() self.act_dense = nn.Tanh() self.alpha_cross_attn = nn.Parameter(weights.get_tensor(f'{prefix}.alpha_cross_attn')) self.alpha_dense = nn.Parameter(weights.get_tensor(f'{prefix}.alpha_dense')) if not (hasattr(self, 'alpha_cross_attn') and hasattr(self, 'alpha_dense')): raise ValueError('Alpha parameters not initialized correctly!') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_hidden_states: Optional[torch.Tensor]=None, image_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[torch.Tensor]]=None, no_images: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if 
image_hidden_states is None: raise ValueError('`image_hidden_states` (the visual features to condition on) is required for the Idefics cross-attention module.') if past_key_value is not None: raise NotImplementedError('Past key value states are not implemented for the Idefics cross-attention module.') residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions) gate = 0 if no_images else 1 hidden_states = residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n and behavior.\n\n Parameters:\n config ([`IdeficsConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. 
Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' class IdeficsPreTrainedModel(PreTrainedModel): config_class = IdeficsConfig class IdeficsModel(IdeficsPreTrainedModel): def __init__(self, config: IdeficsConfig, weights): super().__init__(config) self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = IdeficsDecoupledPartialTPEmbedding(config=config, weights=weights) self.image_size = config.vision_config.image_size self.vision_config = config.vision_config self.vision_model = IdeficsVisionTransformer(prefix='model.vision_model', config=config.vision_config, weights=weights) if config.use_resampler: perceiver_config = config.perceiver_config self.perceiver_resampler = IdeficsPerceiverResampler(prefix='model.perceiver_resampler', config=config, embed_dim=config.vision_config.embed_dim, depth=perceiver_config.resampler_depth, n_heads=perceiver_config.resampler_n_heads, head_dim=perceiver_config.resampler_head_dim, n_latents=perceiver_config.resampler_n_latents, weights=weights) self.layers = nn.ModuleList([IdeficsDecoderLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers)]) self.cross_layer_interval = config.cross_layer_interval num_cross_layers = config.num_hidden_layers // self.cross_layer_interval self.gated_cross_attn_layers = nn.ModuleList([IdeficsGatedCrossAttentionLayer(layer_id, config, weights) for layer_id in range(num_cross_layers)]) self.norm = IdeficsRMSNorm(prefix='model.norm', weights=weights, eps=config.rms_norm_eps) def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length) if attention_mask is not None: expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device) combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask return combined_attention_mask def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, image_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastImage]: device = input_ids.device if input_ids is not None else inputs_embeds.device output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = 
input_ids.shape elif inputs_embeds is not None: (batch_size, seq_length, _) = inputs_embeds.shape else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) elif position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() no_images = False if image_hidden_states is None: if pixel_values is None and image_embeddings is None: raise ValueError('Either pixel_values and image_embeddings have to be not-None.') elif pixel_values is not None and image_embeddings is not None: raise ValueError('You cannot specify both pixel_values and image_embeddings at the same time') elif pixel_values is not None: no_images = len(torch.nonzero(pixel_values)) == 0 pixel_values = pixel_values.to(dtype=self.dtype, device=device) (batch_size, num_images) = pixel_values.shape[:2] pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) image_hidden_states = self.vision_model(pixel_values=pixel_values).last_hidden_state elif image_embeddings is not None: (batch_size, num_images, image_seq_len, image_hidden_size) = image_embeddings.size() image_hidden_states = image_embeddings.to(dtype=self.dtype, device=input_ids.device) image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) if self.config.use_resampler: image_hidden_states = self.perceiver_resampler(image_hidden_states) (image_seq_len, image_hidden_size) = (image_hidden_states.size(1), image_hidden_states.size(2)) image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size) else: no_images = False num_images = pixel_values.shape[1] image_seq_len = image_hidden_states.shape[1] // num_images text_seq_len = image_attention_mask.size(1) image_attention_mask = image_attention_mask.unsqueeze(-1) image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len) image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len) (image_batch_size, image_sequence_length, _) = image_hidden_states.size() image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = torch.ones(image_hidden_shape, device=device) image_attention_mask = self.invert_attention_mask(image_attention_mask) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device) attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for (idx, decoder_layer) in enumerate(self.layers): if 
output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None def vblock(main_block, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, output_attentions, use_cache, no_images, layer_idx, cross_layer_interval, gated_cross_attn_layers): if layer_idx % cross_layer_interval == 0: xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] outputs = xblock(hidden_states, attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None, no_images=no_images) hidden_states = outputs[0] layer_outputs = main_block(hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) return layer_outputs layer_outputs = vblock(decoder_layer, hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, output_attentions=output_attentions, use_cache=use_cache, no_images=no_images, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPastImage(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, image_hidden_states=image_hidden_states) class IdeficsForVisionText2Text(IdeficsPreTrainedModel): def __init__(self, config, weights): super().__init__(config) self.model = IdeficsModel(config=config, weights=weights) self.lm_head = IdeficsDecoupledTensorParallelLinear(config=config, weights=weights) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_embeddings: Optional[torch.FloatTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithPastImage]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_embeddings=image_embeddings, 
image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] (logits, speculative_logits) = self.lm_head(hidden_states) loss = None return (CausalLMOutputWithPastImage(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states), speculative_logits) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ['token_type_ids'] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation(*args, **model_kwargs): return expand_inputs_for_generation(*args, **model_kwargs) @staticmethod def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False): return update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder) @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past)),) return reordered_past # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_perceiver.py """""" from typing import Optional, Tuple import torch import torch.nn as nn from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelRowLinear EPS = 1e-05 class IdeficsPerceiverResampler(nn.Module): def __init__(self, prefix, config, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int, weights) -> None: super().__init__() (self.embed_dim, self.n_heads, self.head_dim, self.n_latents) = (embed_dim, n_heads, head_dim, n_latents) self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver self.latents = nn.Parameter(weights.get_tensor(f'{prefix}.latents')) self.intermediate_dim = self.embed_dim * 4 if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim * 4 self.blocks = nn.ModuleList([nn.ModuleList([IdeficsPerceiverAttention(prefix=f'{prefix}.blocks.{layer_id}.0', config=config, embed_dim=self.embed_dim, n_heads=self.n_heads, head_dim=self.head_dim, qk_layer_norms=self.qk_layer_norms, weights=weights), IdeficsMLP(prefix=f'{prefix}.blocks.{layer_id}.1', intermediate_size=self.intermediate_dim, config=config, weights=weights)]) for layer_id in range(depth)]) self.layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm', weights=weights, eps=EPS) def forward(self, context: torch.Tensor) -> torch.Tensor: latents = self.latents.repeat(context.shape[0], 1, 1) for (attn, ff) in self.blocks: latents = attn(context, latents) + latents latents = ff(latents) + latents return self.layer_norm(latents) class IdeficsPerceiverAttention(nn.Module): def __init__(self, prefix, config, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool, weights) -> None: super().__init__() (self.embed_dim, self.n_heads, self.head_dim) = (embed_dim, n_heads, head_dim) self.qk_layer_norms = qk_layer_norms self.context_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.context_layer_norm', weights=weights, eps=EPS) self.latents_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.latents_layer_norm', weights=weights, eps=EPS) if self.qk_layer_norms: self.q_layer_norm = 
nn.LayerNorm.load(prefix=f'{prefix}.q_layer_norm', weights=weights, eps=EPS) self.k_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.k_layer_norm', weights=weights, eps=EPS) self.qk_scale = self.head_dim ** (-0.5) if n_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {n_heads} and `num_shards`: {weights.process_group.size()}') self.n_heads //= weights.process_group.size() self.q_proj = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.q_proj', weights=weights, bias=False) self.k_proj = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.k_proj', weights=weights, bias=False) self.v_proj = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.v_proj', weights=weights, bias=False) self.output_proj = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.output_proj', weights=weights, bias=False) def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: context = self.context_layer_norm(context) latents = self.latents_layer_norm(latents) (batch_size, seq_length, embed_dim) = context.shape[:3] q = self.q_proj(latents) k = self.k_proj(torch.cat([context, latents], dim=-2)) v = self.v_proj(torch.cat([context, latents], dim=-2)) (q, k, v) = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)] if self.qk_layer_norms: q = self.q_layer_norm(q) k = self.k_layer_norm(k) scores = torch.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k) stabilized_scores = scores - scores.amax(dim=-1, keepdim=True).detach() attn = stabilized_scores.softmax(dim=-1) resampled = torch.einsum('... i j, ... j d -> ... i d', attn, v) return self.output_proj(resampled.transpose(1, 2).flatten(-2)) class IdeficsMLP(nn.Module): def __init__(self, prefix, intermediate_size, config, weights): super().__init__() self.embed_dim = config.vision_config.embed_dim self.ln = nn.LayerNorm.load(prefix=f'{prefix}.ln', weights=weights, eps=EPS) self.fc = TensorParallelColumnLinear.load(config=config, prefix=f'{prefix}.fc', weights=weights, bias=False) self.act = nn.ReLU() self.c_proj = TensorParallelRowLinear.load(config=config, prefix=f'{prefix}.c_proj', weights=weights, bias=False) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.ln(hidden_states) hidden_states = self.fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_processing.py """""" from typing import Callable, List, Optional, Union from urllib.parse import urlparse from transformers.feature_extraction_utils import BatchFeature from transformers.processing_utils import ProcessorMixin from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy from transformers.utils import TensorType, is_torch_available if is_torch_available(): import torch IMAGE_TOKEN = '' def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1): if num_classes != -1: incremental_mask[incremental_mask >= num_classes] = -1 negatives = incremental_mask == -1 incremental_mask[negatives] = 0 attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes) attn_mask[negatives, :] = 0 return attn_mask def image_attention_mask_for_packed_input_ids(input_ids, tokenizer): image_attention_mask = 
torch.full_like(input_ids, fill_value=-1) next_image_attention_mask = torch.full_like(input_ids, fill_value=-1) image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for (idx, token_id) in enumerate(input_ids[batch_idx]): if token_id == image_token_id: count += 1 image_attention_mask[batch_idx][idx] = count seen_eod = False else: image_attention_mask[batch_idx][idx] = count if seen_eod: image_attention_mask[batch_idx][idx] = -1 if token_id == eod_token_id: seen_eod = True for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1): token_id = input_ids[batch_idx][idx] if token_id == image_token_id: count += 1 next_image_attention_mask[batch_idx][idx] = count seen_eod = False else: next_image_attention_mask[batch_idx][idx] = count if token_id == eod_token_id: seen_eod = True if seen_eod: next_image_attention_mask[batch_idx][idx] = -1 non_negative_indices = next_image_attention_mask[batch_idx] != -1 next_image_attention_mask[batch_idx][non_negative_indices] -= count next_image_attention_mask[batch_idx][non_negative_indices] *= -1 return (image_attention_mask, next_image_attention_mask) def is_url(string): if ' ' in string: return False result = urlparse(string) return all([result.scheme, result.netloc]) def is_image(string): return is_url(string) or string.startswith('data:') class IdeficsProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'IdeficsImageProcessor' tokenizer_class = 'LlamaTokenizerFast' def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs): if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.default_image_dims = (self.image_processor.image_num_channels, self.image_processor.image_size, self.image_processor.image_size) self.tokenizer_was_trained_with_end_of_utterance_token = True if '' in self.tokenizer.special_tokens_map.get('additional_special_tokens', []) else False def __call__(self, prompts: Union[List[TextInput], List[List[TextInput]]], padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, transform: Callable=None, add_eos_token=False, add_end_of_utterance_token=None, debug=False, return_tensors: Optional[Union[str, TensorType]]=TensorType.PYTORCH) -> BatchEncoding: if add_end_of_utterance_token is None: add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token if not any((isinstance(i, list) for i in prompts)): prompts = [prompts] fake_token = '' image_token = '' end_of_utterance_token = '' def image_tokens(last_was_image): if last_was_image: return image_token + fake_token else: return fake_token + image_token + fake_token all_texts = [] all_images = [] for sample in prompts: full_text = f'{self.tokenizer.bos_token}' image_objects = [] last_was_image = False last_was_text = False for (i, item) in enumerate(sample): if i > 0: last_was_text = True if not last_was_image else False if isinstance(item, str): item = item.strip(' ') if is_image(item): image = self.image_processor.fetch_images(item) full_text += 
image_tokens(last_was_image) image_objects.append(image) last_was_image = True else: if add_end_of_utterance_token and last_was_text: full_text += end_of_utterance_token full_text += item last_was_image = False else: full_text += image_tokens(last_was_image) image_objects.append(item) last_was_image = True if add_eos_token: full_text += self.tokenizer.eos_token if debug is True: print(f'full_text={full_text!r}') image_objects = self.image_processor(image_objects, transform=transform) text_encoding = self.tokenizer(text=full_text, add_special_tokens=False, padding=padding, truncation=truncation, max_length=max_length) all_texts.append(text_encoding['input_ids']) all_images.append(image_objects) max_seq_len = max((len(x) for x in all_texts)) max_num_images = max((len(x) for x in all_images)) max_num_images = max(1, max_num_images) at_least_one_image = sum((len(x) for x in all_images)) > 0 output_input_ids = [] output_images = [] output_attention_masks = [] for (text, images) in zip(all_texts, all_images): padded_input_ids = [self.tokenizer.pad_token_id] * max_seq_len unpadded_seq_len = len(text) start = max_seq_len - unpadded_seq_len padded_input_ids[start:] = text[:max_seq_len] attention_mask = torch.zeros((max_seq_len,), dtype=torch.long) attention_mask[start:] = 1 image_count = padded_input_ids.count(self.image_token_id) local_max_num_images = min(image_count, max_num_images) current_images = images[:local_max_num_images] if len(current_images) > 0: padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:]) padded_image_tensor[:current_images.size(0)] = current_images else: padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims) output_images.append(padded_image_tensor) output_input_ids.append(torch.tensor(padded_input_ids)) output_attention_masks.append(attention_mask) output_input_ids = torch.stack(output_input_ids) output_images = torch.stack(output_images) output_attention_masks = torch.stack(output_attention_masks) if at_least_one_image: (image_attention_mask, _) = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer) image_attention_mask = incremental_to_binary_attention_mask(image_attention_mask, num_classes=max_num_images) else: image_attention_mask = torch.zeros(output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool) return BatchFeature(data={'input_ids': output_input_ids, 'attention_mask': output_attention_masks, 'pixel_values': output_images, 'image_attention_mask': image_attention_mask}) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/idefics_vision.py """""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from transformers.utils import ModelOutput, logging from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelRowLinear, TensorParallelEmbedding logger = logging.get_logger(__name__) @dataclass 
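# Output container for the Idefics vision encoder: optional image embeddings plus the usual last hidden state, per-layer hidden states and attentions.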
class IdeficsVisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class IdeficsVisionEmbeddings(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(weights.get_tensor(f'{prefix}.class_embedding')) self.patch_embedding = nn.Conv2d.load_no_bias(prefix=f'{prefix}.patch_embedding', weights=weights, in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = TensorParallelEmbedding(prefix='model.vision_model.embeddings.position_embedding', weights=weights) self.position_ids = torch.arange(self.num_positions).expand((1, -1)).to(device=weights.device) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class IdeficsVisionAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.k_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.k_proj', weights=weights, bias=True) self.v_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.v_proj', weights=weights, bias=True) self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', weights=weights, bias=True) self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=True) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * 
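# Shape check for IdeficsVisionEmbeddings.forward: assuming a CLIP-ViT-style config with
# image_size=224 and patch_size=14 (the real values come from the checkpoint config),
# num_patches = (224 // 14) ** 2 = 256 and num_positions = 257, so pixel_values of shape
# (B, 3, 224, 224) become patch_embeds (B, 256, embed_dim) and, after the class embedding is
# prepended and position embeddings are added, embeddings of shape (B, 257, embed_dim).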
self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class IdeficsVisionMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.fc1', weights=weights, bias=True) self.fc2 = TensorParallelRowLinear.load(config, prefix=f'{prefix}.fc2', weights=weights, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class IdeficsVisionEncoderLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IdeficsVisionAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.layer_norm1 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm1', weights=weights, eps=config.layer_norm_eps) self.mlp = IdeficsVisionMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm2', weights=weights, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, 
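# Shape flow for IdeficsVisionAttention.forward, per tensor-parallel shard: hidden_states
# (bsz, tgt_len, embed_dim) -> q/k/v (bsz * num_heads, tgt_len, head_dim) -> attn_weights
# (bsz * num_heads, tgt_len, tgt_len) -> softmax/dropout -> attn_output reshaped back to
# (bsz, tgt_len, embed_dim) before out_proj. num_heads and embed_dim were already divided by the
# world size in __init__, and the row-parallel out_proj reduces the per-shard partial outputs.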
attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class IdeficsVisionEncoder(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(prefix=f'{prefix}.encoder.layers.{layer_id}', config=config, weights=weights) for layer_id in range(config.num_hidden_layers)]) def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class IdeficsVisionTransformer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embeddings = IdeficsVisionEmbeddings(prefix=f'{prefix}.embeddings', config=config, weights=weights) self.pre_layrnorm = nn.LayerNorm.load(prefix=f'{prefix}.pre_layrnorm', weights=weights, eps=config.layer_norm_eps) self.encoder = IdeficsVisionEncoder(prefix=prefix, config=config, weights=weights) self.post_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.post_layernorm', weights=weights, eps=config.layer_norm_eps) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
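# Minimal sketch of the pre-LayerNorm residual pattern used by IdeficsVisionEncoderLayer above,
# written with plain torch.nn modules instead of the tensor-parallel layers (dimensions, module
# choices and the omitted attention masks are simplifications, not the real configuration):
import torch
from torch import nn

class PreLNBlock(nn.Module):
    def __init__(self, dim=64, heads=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.ln2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

    def forward(self, x):
        h = self.ln1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0]  # residual around attention
        x = x + self.mlp(self.ln2(x))                       # residual around the MLP
        return x

# PreLNBlock()(torch.randn(2, 257, 64)).shape == (2, 257, 64)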
return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/llava_next.py """""" from typing import List, Optional, Tuple import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.image_processing_utils import select_best_resolution from text_generation_server.layers.attention import Seqlen from text_generation_server.models.custom_modeling.vlm import load_text_model, load_vision_model from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelRowLinear def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): if not isinstance(grid_pinpoints, list): raise ValueError('grid_pinpoints should be a list of tuples or lists') (height, width) = select_best_resolution(image_size, grid_pinpoints) return (height // patch_size, width // patch_size) def unpad_image(tensor, original_size): (original_height, original_width) = original_size (current_height, current_width) = tensor.shape[1:] original_aspect_ratio = original_width / original_height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: scale_factor = current_width / original_width new_height = int(original_height * scale_factor) padding = (current_height - new_height) // 2 unpadded_tensor = tensor[:, padding:current_height - padding, :] else: scale_factor = current_height / original_height new_width = int(original_width * scale_factor) padding = (current_width - new_width) // 2 unpadded_tensor = tensor[:, :, padding:current_width - padding] return unpadded_tensor class LlavaNextMultiModalProjector(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.linear_1 = TensorParallelColumnLinear.load(prefix=f'{prefix}.linear_1', config=config, weights=weights, bias=True) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = TensorParallelRowLinear.load(prefix=f'{prefix}.linear_2', config=config, weights=weights, bias=True) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class LlavaNextForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = config.quantize vision_config = config.vision_config if config.vision_feature_layer < 0: vision_config.num_hidden_layers += config.vision_feature_layer + 1 else: vision_config.num_hidden_layers = config.vision_feature_layer + 1 self.vision_tower = load_vision_model(prefix='vision_tower' if not prefix else f'{prefix}.vision_tower', config=config.vision_config, weights=weights) self.multi_modal_projector = LlavaNextMultiModalProjector(prefix='multi_modal_projector', config=config, weights=weights) self.image_newline = weights.get_tensor('image_newline') self.vocab_size = config.text_config.vocab_size self.config = config config.text_config.quantize = config.quantize config.text_config.speculator = config.speculator self.text_model = 
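# Worked example for unpad_image above: for a patch-feature tensor of shape (C, 48, 48) and an
# original image of size (200, 400), original_aspect_ratio = 2.0 > current_aspect_ratio = 1.0, so
# scale_factor = 48 / 400 = 0.12, new_height = int(200 * 0.12) = 24 and padding = (48 - 24) // 2 = 12,
# giving tensor[:, 12:36, :] of shape (C, 24, 48): the vertical letterboxing added when the image was
# fitted onto the square patch grid is stripped off again.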
load_text_model(prefix='language_model' if not prefix else f'{prefix}.language_model', config=config.text_config, weights=weights) self.pad_token_id = config.pad_token_id if config.pad_token_id is not None else -1 def _merge_input_ids_with_image_features(self, input_ids: torch.Tensor, inputs_embeds: torch.Tensor, image_features: torch.Tensor): mask = input_ids == self.config.image_token_index try: inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) except Exception as e: raise RuntimeError(f'Cannot fill images right now. If error happens at warmup, make sure you have enough `--max-input-tokens` to handle images. If error happens at regular runtime, please fill in an issue: {e}') return inputs_embeds def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor]=None, pixel_values: torch.FloatTensor=None, pixel_attention_mask=None, image_sizes: Optional[torch.LongTensor]=None, adapter_data: Optional[torch.Tensor]=None): inputs_embeds = self.text_model.embed_tokens(input_ids) if pixel_values is not None and len(pixel_values) > 0: (num_images, num_patches, channels, height, width) = pixel_values.shape pixel_values = pixel_values.view(num_images * num_patches, channels, height, width) image_features = self.vision_tower(pixel_values) selected_image_feature = image_features.last_hidden_state if self.config.vision_feature_select_strategy == 'default': selected_image_feature = selected_image_feature[:, 1:] elif self.config.vision_feature_select_strategy == 'full': selected_image_feature = selected_image_feature else: raise RuntimeError(f'Strategy `{self.config.vision_feature_select_strategy}` is not supported/valid.') image_features = self.multi_modal_projector(selected_image_feature) split_sizes = [num_patches] * num_images image_features = torch.split(image_features, split_sizes, dim=0) height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size new_image_features = [] for (image_idx, image_feature) in enumerate(image_features): if image_feature.shape[0] > 1: base_image_feature = image_feature[0] image_feature = image_feature[1:] if height * width != base_image_feature.shape[0]: raise ValueError('The number of patches is not consistent with the image size.') (num_patch_width, num_patch_height) = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size) image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1) image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() image_feature = image_feature.flatten(1, 2).flatten(2, 3) image_feature = unpad_image(image_feature, image_sizes[image_idx]) image_feature = torch.cat((image_feature, self.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)), dim=-1) image_feature = image_feature.flatten(1, 2).transpose(0, 1) image_feature = torch.cat((base_image_feature, image_feature), dim=0) else: image_feature = image_feature[0] image_feature = torch.cat((image_feature, self.image_newline[None]), dim=0) new_image_features.append(image_feature) image_features = torch.stack(new_image_features, dim=0) inputs_embeds = self._merge_input_ids_with_image_features(input_ids, inputs_embeds, image_features) hidden_states = 
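# Minimal, self-contained sketch of the masked scatter performed by
# _merge_input_ids_with_image_features above (the id 32000 is only an illustrative stand-in for
# config.image_token_index):
import torch

image_token_index = 32000                       # assumed placeholder id for this sketch
input_ids = torch.tensor([1, 32000, 32000, 42, 7])
inputs_embeds = torch.zeros(5, 8)               # one embedding row per input token
image_features = torch.randn(2, 8)              # one projected vision embedding per image token
mask = input_ids == image_token_index
inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1])
# Image placeholder positions now hold vision embeddings; the row count of image_features must equal
# mask.sum(), which is why the model raises the RuntimeError above when there is not enough room
# (e.g. `--max-input-tokens` too small at warmup) for all image tokens.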
self.text_model.model(inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, true_max_s=max_s, prefill_cache_indices=None, adapter_data=adapter_data) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] (logits, speculative_logits) = self.text_model.lm_head(hidden_states) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/mamba_modeling.py import torch import torch.distributed from mamba_ssm.ops.triton.selective_state_update import selective_state_update from mamba_ssm.ops.selective_scan_interface import selective_scan_fn from torch import nn from typing import Optional, Tuple, Any from transformers.configuration_utils import PretrainedConfig import torch.nn.functional as F from text_generation_server.layers import SpeculativeHead, TensorParallelEmbedding, FastLinear from text_generation_server.layers.layernorm import FastRMSNorm from einops import rearrange from causal_conv1d import causal_conv1d_fn, causal_conv1d_update import math from dataclasses import dataclass @dataclass class InferenceParams: max_seqlen: int max_batch_size: int conv_states: torch.Tensor ssm_states: torch.Tensor seqlen_offset: int class MambaConfig(PretrainedConfig): def __init__(self, vocab_size=50280, d_model=768, d_state=16, n_layer=32, layer_norm_epsilon=1e-05, tie_word_embeddings=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, expand=2, dt_rank='auto', **kwargs): self.vocab_size = vocab_size self.n_layer = n_layer self.layer_norm_epsilon = layer_norm_epsilon self.d_model = d_model self.d_inner = d_model * 2 self.d_conv = 4 self.d_state = d_state self.expand = expand self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == 'auto' else dt_rank super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class MambaBlock(nn.Module): def __init__(self, prefix, config, weights, layer_id): super().__init__() self.layer_id = layer_id self.in_proj = FastLinear.load(config, f'{prefix}.in_proj', weights, bias=False) self.x_proj = FastLinear.load(config, f'{prefix}.x_proj', weights, bias=False) self.dt_proj = FastLinear.load(config, f'{prefix}.dt_proj', weights, bias=True) self.dt_proj_no_bias = FastLinear.load(config, f'{prefix}.dt_proj', weights, bias=False) self.out_proj = FastLinear.load(config, f'{prefix}.out_proj', weights, bias=False) self.conv1d = FastLinear.load(config, f'{prefix}.conv1d', weights, bias=True) self.negA = -torch.exp(weights.get_tensor(f'{prefix}.A_log').float()) self.D = weights.get_tensor(f'{prefix}.D') self.activation = 'silu' self.dt_rank = config.dt_rank self.d_state = config.d_state self.d_conv = config.d_conv self.act = nn.SiLU() def forward(self, hidden_states: torch.Tensor, inference_params=None): if inference_params.seqlen_offset > 0: conv_state = inference_params.conv_states[self.layer_id] ssm_state = inference_params.ssm_states[self.layer_id] (out, conv_state, ssm_state) = self.step(hidden_states, conv_state, ssm_state) return (out, conv_state, ssm_state) (_, seqlen, _) = hidden_states.shape projected_states = self.in_proj(hidden_states).transpose(1, 2) (x, z) = projected_states.chunk(2, dim=1) conv_state = F.pad(x, (self.d_conv - seqlen, 0)) x = causal_conv1d_fn(x=x, weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation) x_dbl = 
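# Dimension check for MambaConfig above with the default d_model=768: d_inner = 2 * 768 = 1536,
# d_conv = 4, d_state = 16 and dt_rank = ceil(768 / 16) = 48. in_proj therefore produces the
# concatenated x and z streams (2 * d_inner channels), and x_proj maps d_inner -> 48 + 16 + 16 = 80,
# matching the torch.split sizes (dt, B, C) used in the selective-scan path right below.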
self.x_proj(rearrange(x, 'b d l -> (b l) d')) (dt, B, C) = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) dt = self.dt_proj.weight @ dt.t() dt = rearrange(dt, 'd (b l) -> b d l', l=seqlen) B = rearrange(B, '(b l) dstate -> b dstate l', l=seqlen).contiguous() C = rearrange(C, '(b l) dstate -> b dstate l', l=seqlen).contiguous() (y, last_state) = selective_scan_fn(x, dt, self.negA, B, C, self.D.float(), z=z, delta_bias=self.dt_proj.bias.float(), delta_softplus=True, return_last_state=True) y = rearrange(y, 'b d l -> b l d') attn_outputs = self.out_proj(y) return (attn_outputs, conv_state, last_state) def step(self, hidden_states, conv_state, ssm_state): xz = self.in_proj(hidden_states.squeeze(1)) (x, z) = xz.chunk(2, dim=-1) x = causal_conv1d_update(x, conv_state, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation) x_db = self.x_proj(x) (dt, B, C) = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) dt = F.linear(dt, self.dt_proj.weight) A = self.negA y = selective_state_update(ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True) out = self.out_proj(y) return (out.unsqueeze(1), conv_state.clone(), ssm_state.clone()) class ResidualBlock(nn.Module): def __init__(self, prefix, config, weights, layer_id): super().__init__() self.mamba_block = MambaBlock(prefix=f'{prefix}.mixer', config=config, weights=weights, layer_id=layer_id) self.layer_norm = FastRMSNorm.load(prefix=f'{prefix}.norm', weights=weights, eps=config.layer_norm_epsilon) def forward(self, hidden_states: torch.Tensor, residual: Optional[torch.Tensor]=None, inference_params: Optional[Any]=None): residual = hidden_states + residual if residual is not None else hidden_states shape = residual.shape (hidden_states, _) = self.layer_norm(residual.view(-1, shape[-1])) (hidden_states, conv_state, last_ssm_state) = self.mamba_block(hidden_states.view(*shape), inference_params) return (hidden_states, residual, conv_state, last_ssm_state) class MambaModel(nn.Module): def __init__(self, config, weights): super().__init__() prefix = 'backbone' self.embed_tokens = TensorParallelEmbedding(f'{prefix}.embedding', weights) self.blocks = nn.ModuleList([ResidualBlock(f'{prefix}.layers.{i}', config, weights, layer_id=i) for i in range(config.n_layer)]) self.norm_f = FastRMSNorm.load(f'{prefix}.norm_f', weights, eps=config.layer_norm_epsilon) self.lm_head = SpeculativeHead.load(config, f'{prefix}.embedding', weights) self.config = config def forward(self, input_ids: torch.Tensor, inference_params=None, residual=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.embed_tokens(input_ids) for (i, block) in enumerate(self.blocks): (hidden_states, residual, conv_state, ssm_state) = block(hidden_states, residual, inference_params) inference_params.conv_states[i].copy_(conv_state) inference_params.ssm_states[i].copy_(ssm_state) hidden_states = hidden_states + residual if residual is not None else hidden_states (hidden_states, _) = self.norm_f(hidden_states.view(-1, hidden_states.size(-1))) hidden_states = hidden_states.view(residual.shape) (logits, speculative_logits) = self.lm_head(hidden_states) inference_params.seqlen_offset += input_ids.size(1) return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/mpt_modeling.py """""" import math import warnings from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from 
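# Note on the two MambaBlock paths above: at prefill (inference_params.seqlen_offset == 0) the whole
# prompt goes through causal_conv1d_fn + selective_scan_fn, and the last d_conv inputs plus the final
# SSM state are handed back so MambaModel can copy them into inference_params.conv_states/ssm_states.
# During decoding (seqlen_offset > 0) step() processes a single token and advances those cached
# states with causal_conv1d_update and selective_state_update instead of re-scanning the sequence.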
transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from einops import rearrange from packaging import version from text_generation_server.layers import TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear, SpeculativeHead, get_linear EPS = 1e-05 def load_col(config, prefix, weights, bias): assert config.quantize != 'gptq', NotImplementedError slice_ = weights._get_slice(f'{prefix}.weight') rank = weights.process_group.rank() size = weights.process_group.size() (h3, h) = slice_.get_shape() block_size = h // size q_part = slice_[rank * block_size:(rank + 1) * block_size] k_part = slice_[h + rank * block_size:h + (rank + 1) * block_size] v_part = slice_[2 * h + rank * block_size:2 * h + (rank + 1) * block_size] weight = torch.cat([q_part, k_part, v_part], dim=0) if weight.dtype != torch.int32: weight = weight.to(dtype=weights.dtype) weight = weight.to(device=weights.device) if bias: bias_slice_ = weights._get_slice(f'{prefix}.bias') bias_rank = weights.process_group.rank() bias_size = weights.process_group.size() bias_h = bias_slice_.get_shape() bias_h = bias_h[0] bias_block_size = bias_h // bias_size bias_q_part = bias_slice_[bias_rank * bias_block_size:(bias_rank + 1) * bias_block_size] bias_k_part = bias_slice_[bias_h + bias_rank * bias_block_size:bias_h + (bias_rank + 1) * bias_block_size] bias_v_part = bias_slice_[2 * bias_h + bias_rank * bias_block_size:2 * bias_h + (bias_rank + 1) * bias_block_size] bias = torch.cat([bias_q_part, bias_k_part, bias_v_part], dim=0) if bias.dtype != torch.int32: bias = bias.to(dtype=weights.dtype) bias = bias.to(device=weights.device) else: bias = None linear = get_linear(weight, bias) return TensorParallelColumnLinear(linear) def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool): if original_is_causal and num_query_tokens != num_key_tokens: if num_query_tokens != 1: raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.') else: return False return original_is_causal def scaled_multihead_dot_product_attention(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False): q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads) kv_n_heads = 1 if multiquery else n_heads k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads) v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads) if past_key_value is not None: if len(past_key_value) != 0: k = torch.cat([past_key_value[0], k], dim=3) v = torch.cat([past_key_value[1], v], dim=2) past_key_value = (k, v) (b, _, s_q, d) = q.shape s_k = k.size(-1) attn_weight = q.matmul(k) * softmax_scale if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - s_q) _s_k = max(0, attn_bias.size(3) - s_k) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q): raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.') attn_weight = attn_weight + attn_bias min_val = torch.finfo(q.dtype).min if key_padding_mask is not None: if attn_bias is not None: warnings.warn('Propogating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 
'unneccessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.') attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val) if is_causal and (not q.size(2) == 1): s = max(s_q, s_k) causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16) causal_mask = causal_mask.tril() causal_mask = causal_mask.to(torch.bool) causal_mask = ~causal_mask causal_mask = causal_mask[-s_q:, -s_k:] attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val) attn_weight = torch.softmax(attn_weight, dim=-1) if dropout_p: attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True) out = attn_weight.to(v.dtype).matmul(v) out = rearrange(out, 'b h s d -> b s (h d)') if needs_weights: return (out, attn_weight, past_key_value) return (out, None, past_key_value) def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]): for tensor in tensors: if tensor.dtype not in valid_dtypes: raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.') if not tensor.is_cuda: raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).') def flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False): try: from flash_attn import bert_padding, flash_attn_interface except Exception: raise RuntimeError('Please install flash-attn==1.0.3.post0') check_valid_inputs(query, key, value) if past_key_value is not None: if len(past_key_value) != 0: key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - query.size(1)) _s_k = max(0, attn_bias.size(3) - key.size(1)) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if attn_bias is not None: raise NotImplementedError('attn_bias not implemented for flash attn.') (batch_size, seqlen) = query.shape[:2] if key_padding_mask is None: key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool) query_padding_mask = key_padding_mask[:, -query.size(1):] (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask) query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads) (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask) key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads) (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask) value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads) if multiquery: key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1)) value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1)) dropout_p = dropout_p if training else 0.0 reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal) output_unpad = flash_attn_interface.flash_attn_unpadded_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights) output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen) return (output, None, past_key_value) def triton_flash_attn_fn(query, 
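# Worked example for the causal masking in scaled_multihead_dot_product_attention above: with one
# new query against a cached prefix (q.size(2) == 1) masking is skipped entirely, since the newest
# token may attend to every key. For s_q = 3, s_k = 5 the code builds a 5x5 lower-triangular matrix,
# inverts it to mark the "cannot attend" entries and keeps the last 3 rows and 5 columns, so query i
# (aligned to the end of the key sequence) attends to keys 0..(2 + i) only.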
key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False): try: from .flash_attn_triton import flash_attn_func except Exception: _installed = False if version.parse(torch.__version__) < version.parse('2.0.0'): _installed = True try: from flash_attn.flash_attn_triton import flash_attn_func except Exception: _installed = False if not _installed: raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed.') check_valid_inputs(query, key, value) if past_key_value is not None: if len(past_key_value) != 0: key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - query.size(1)) _s_k = max(0, attn_bias.size(3) - key.size(1)) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if dropout_p: raise NotImplementedError('Dropout not implemented for attn_impl: triton.') if needs_weights: raise NotImplementedError('attn_impl: triton cannot return attn weights.') if key_padding_mask is not None: warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.') (b_size, s_k) = key_padding_mask.shape[:2] if attn_bias is None: attn_bias = query.new_zeros(b_size, 1, 1, s_k) attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min) query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads) key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads) value = rearrange(value, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads) if multiquery: key = key.expand(*key.shape[:2], n_heads, key.size(-1)) value = value.expand(*value.shape[:2], n_heads, value.size(-1)) reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal) attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale) output = attn_output.view(*attn_output.shape[:2], -1) return (output, None, past_key_value) class MultiheadAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() attn_impl = config.attn_config.attn_impl self.attn_impl = config.attn_config.attn_impl self.clip_qkv = config.attn_config.clip_qkv self.qk_ln = config.attn_config.qk_ln self.d_model = config.d_model d_model = config.d_model self.n_heads = config.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = config.attn_config.attn_pdrop if self.n_heads % weights.process_group.size() != 0: raise ValueError(f'`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} and `num_shards`: {weights.process_group.size()}') self.n_heads = self.n_heads // weights.process_group.size() self.Wqkv = load_col(config, prefix=f'{prefix}.Wqkv', weights=weights, bias=not 
config.no_bias) if self.qk_ln: bias = not config.no_bias hidden_size = config.d_model head_dim = hidden_size // self.n_heads self.q_ln = LPLayerNorm(d_model, bias=bias, prefix=f'{prefix}.q_ln', weights=weights) self.k_ln = LPLayerNorm(self.n_heads * head_dim, prefix=f'{prefix}.k_ln', weights=weights) if self.attn_impl == 'flash': self.attn_fn = flash_attn_fn elif self.attn_impl == 'triton': self.attn_fn = triton_flash_attn_fn elif self.attn_impl == 'torch': self.attn_fn = scaled_multihead_dot_product_attention else: raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.') self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=not config.no_bias) def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False): qkv = self.Wqkv(x) if self.clip_qkv: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) (query, key, value) = qkv.chunk(3, dim=2) key_padding_mask = attention_mask if self.qk_ln: dtype = query.dtype query = self.q_ln(query).to(dtype) key = self.k_ln(key).to(dtype) (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights) out = self.out_proj(context) return (out, attn_weights, past_key_value) class MultiQueryAttention(nn.Module): def __init__(self, config, prefix, weights, verbose=False): super().__init__() attn_impl = config.attn_config.attn_impl self.attn_impl = config.attn_config.attn_impl self.clip_qkv = config.attn_config.clip_qkv self.qk_ln = config.attn_config.qk_ln self.d_model = config.d_model d_model = config.d_model self.n_heads = config.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.head_dim) self.attn_dropout_p = config.attn_config.attn_pdrop self.Wqkv = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.Wqkv', weights=weights, bias=not config.no_bias) (d_model, d_model + self.head_dim) if self.qk_ln: raise NotImplementedError('qk_ln not supported') if self.attn_impl == 'flash': self.attn_fn = flash_attn_fn elif self.attn_impl == 'triton': self.attn_fn = triton_flash_attn_fn if verbose: warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.') elif self.attn_impl == 'torch': self.attn_fn = scaled_multihead_dot_product_attention if torch.cuda.is_available() and verbose: warnings.warn('Using `attn_impl: torch`. 
If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.') else: raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.') self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=not config.no_bias) def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False): qkv = self.Wqkv(x) if self.clip_qkv: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) (query, key, value) = qkv.split([self.d_model, self.head_dim, self.head_dim], dim=2) key_padding_mask = attention_mask if self.qk_ln: dtype = query.dtype query = self.q_ln(query).to(dtype) key = self.k_ln(key).to(dtype) (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True) return (self.out_proj(context), attn_weights, past_key_value) def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id): if attn_impl == 'flash': return None elif attn_impl in ['torch', 'triton']: if alibi: if (prefix_lm or not causal) or use_sequence_id: return (1, n_heads, seq_len, seq_len) return (1, n_heads, 1, seq_len) elif prefix_lm or use_sequence_id: return (1, 1, seq_len, seq_len) return None else: raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.') def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8): if attn_impl == 'flash': return None elif attn_impl in ['torch', 'triton']: if alibi: (device, dtype) = (attn_bias.device, attn_bias.dtype) attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype)) return attn_bias else: raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.') def gen_slopes(n_heads, alibi_bias_max=8, device=None): _n_heads = 2 ** math.ceil(math.log2(n_heads)) m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device) m = m.mul(alibi_bias_max / _n_heads) slopes = 1.0 / torch.pow(2, m) if _n_heads != n_heads: slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads] return slopes.view(1, n_heads, 1, 1) def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None): alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len) if full: alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1) alibi_bias = alibi_bias.abs().mul(-1) slopes = gen_slopes(n_heads, alibi_bias_max, device=device) alibi_bias = alibi_bias * slopes return alibi_bias.to(dtype=dtype) ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention} '' class MPTMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.up_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.up_proj', weights=weights, bias=not config.no_bias) self.act = nn.GELU(approximate='none') self.down_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.down_proj', weights=weights, bias=not config.no_bias) def forward(self, x): return self.down_proj(self.act(self.up_proj(x))) class MPTBlock(nn.Module): def __init__(self, config, 
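# Worked example for gen_slopes above with n_heads=8 and alibi_bias_max=8: _n_heads = 8,
# m = [1, 2, ..., 8] * (8 / 8), so the per-head slopes are [1/2, 1/4, 1/8, ..., 1/256].
# build_alibi_bias multiplies these slopes by the non-positive relative distances
# arange(1 - seq_len, 1), so heads with larger slopes penalise distant keys more strongly; with
# full=True the bias becomes a negated |i - j| pattern rather than a single row.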
prefix, weights): super().__init__() self.prefix = prefix if config.attn_config.attn_type != 'multihead_attention': raise NotImplementedError(f'Not implemented attn {config.attn_config.attn_type}') resid_pdrop = config.resid_pdrop if config.no_bias: self.norm_1 = nn.LayerNorm.load_no_bias(prefix=f'{prefix}.norm_1', weights=weights, eps=EPS) self.norm_2 = nn.LayerNorm.load_no_bias(prefix=f'{prefix}.norm_2', weights=weights, eps=EPS) else: self.norm_1 = nn.LayerNorm.load(prefix=f'{prefix}.norm_1', weights=weights, eps=EPS) self.norm_2 = nn.LayerNorm.load(prefix=f'{prefix}.norm_2', weights=weights, eps=EPS) self.attn = MultiheadAttention(config, prefix=f'{prefix}.attn', weights=weights) self.ffn = MPTMLP(config, prefix=f'{prefix}.ffn', weights=weights) self.resid_attn_dropout = nn.Dropout(resid_pdrop) self.resid_ffn_dropout = nn.Dropout(resid_pdrop) def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: a = self.norm_1(x) (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal) x = x + self.resid_attn_dropout(b) m = self.norm_2(x) n = self.ffn(m) x = x + self.resid_ffn_dropout(n) return (x, attn_weights, past_key_value) def _cast_if_autocast_enabled(tensor): if torch.is_autocast_enabled(): if tensor.device.type == 'cuda': dtype = torch.get_autocast_gpu_dtype() elif tensor.device.type == 'cpu': dtype = torch.get_autocast_cpu_dtype() else: raise NotImplementedError() return tensor.to(dtype=dtype) return tensor class LPLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None, bias: Optional[bool]=True, prefix=None, weights=None): super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype, bias=bias) if weights is not None: self.weight = nn.Parameter(weights.get_sharded(f'{prefix}.weight', dim=0)) if bias: self.bias = nn.Parameter(weights.get_sharded(f'{prefix}.bias', dim=0)) self.normalized_shape = self.weight.shape def forward(self, x): module_device = x.device downcast_x = _cast_if_autocast_enabled(x) downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias with torch.autocast(enabled=False, device_type=module_device.type): return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps) def rms_norm(x, weight=None, eps=1e-05): output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps) if weight is not None: return output * weight return output class RMSNorm(torch.nn.Module): def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None): super().__init__() self.eps = eps if weight: self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device)) else: self.register_parameter('weight', None) def forward(self, x): return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype) class LPRMSNorm(RMSNorm): def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None): super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device) def forward(self, x): downcast_x = 
_cast_if_autocast_enabled(x) downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight with torch.autocast(enabled=False, device_type=x.device.type): return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype) NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm} Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.world_size = weights.process_group.size() self.rank = weights.process_group.rank() self.n_heads = config.n_heads self.attn_impl = config.attn_config.attn_impl self.prefix_lm = config.attn_config.prefix_lm self.attn_uses_sequence_id = config.attn_config.attn_uses_sequence_id self.alibi = config.attn_config.alibi self.alibi_bias_max = config.attn_config.alibi_bias_max if config.init_device == 'mixed': if True: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).') if config.norm_type.lower() != 'low_precision_layernorm': raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo.') self.wte = TensorParallelEmbedding(f'{prefix}.wte', weights) if not self.alibi: self.wpe = TensorParallelEmbedding(f'{prefix}.wpe', weights) self.blocks = nn.ModuleList([MPTBlock(config, prefix=f'{prefix}.blocks.{i}', weights=weights) for i in range(config.n_layers)]) if config.no_bias: self.norm_f = nn.LayerNorm.load_no_bias(prefix='transformer.norm_f', weights=weights, eps=EPS) else: self.norm_f = nn.LayerNorm.load(prefix='transformer.norm_f', weights=weights, eps=EPS) self.is_causal = not self.prefix_lm self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter): if config.verbose: warnings.warn(f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) if hasattr(self.config, 'verbose'): if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') @torch.no_grad() def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) assert self.n_heads % self.world_size == 0 block_size = self.n_heads // self.world_size 
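# Tensor-parallel sharding of the alibi bias: with n_heads=32 and a world size of 4, block_size = 8
# and rank r keeps attn_bias[:, 8 * r:8 * (r + 1)] (the slice right below), i.e. only the slopes of
# the heads this shard actually computes, mirroring how the attention heads themselves were split.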
self.attn_bias = self.attn_bias[:, self.rank * block_size:(self.rank + 1) * block_size] self._attn_bias_initialized = True if self.attn_impl == 'flash': return (self.attn_bias, attention_mask) if self.attn_bias is not None: self.attn_bias = self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) assert isinstance(prefix_mask, torch.Tensor) attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError('return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise 
NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.') if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training: raise NotImplementedError('MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.') S = input_ids.size(1) assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError('past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).') past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.') pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0) if attention_mask is not None: pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0) pos_emb = self.wpe(pos) x = tok_emb + pos_emb (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for (b_idx, block) in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = past_key_values[b_idx] if past_key_values is not None else None (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) class MPTForCausalLM(MPTPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' if 
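# Worked example for the position handling above (non-alibi path): with left padding
# attention_mask = [0, 0, 1, 1, 1] and past_position = 0, torch.cumsum(~attention_mask) is
# [1, 2, 2, 2, 2], so pos = clamp([0, 1, 2, 3, 4] - [1, 2, 2, 2, 2], min=0) = [0, 0, 0, 1, 2]:
# the real tokens receive positions 0, 1, 2 regardless of how much padding precedes them.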
not config.tie_word_embeddings: raise ValueError('MPTForCausalLM only supports tied word embeddings') self.transformer = MPTModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix=f'{prefix}.wte', weights=weights) self.logit_scale = None if config.logit_scale is not None: logit_scale = config.logit_scale if isinstance(logit_scale, str): if logit_scale == 'inv_sqrt_d_model': logit_scale = 1 / math.sqrt(config.d_model) else: raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.") self.logit_scale = logit_scale def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache) (logits, speculative_logits) = self.lm_head(outputs.last_hidden_state) if self.logit_scale is not None: if self.logit_scale == 0: warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.') logits *= self.logit_scale loss = None if labels is not None: labels = torch.roll(labels, shifts=-1) labels[:, -1] = -100 loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1)) return (CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions), speculative_logits) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): if inputs_embeds is not None: raise NotImplementedError('inputs_embeds is not implemented for MPT yet') attention_mask = kwargs['attention_mask'].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError('MPT does not support generation with right padding.') if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: prefix_mask = torch.ones_like(attention_mask) if kwargs.get('use_cache') is False: raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.') else: prefix_mask = None return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = [] for layer_past in past_key_values: reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))] return reordered_past # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/neox_modeling.py """""" from typing import 
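# Worked example for the loss above: for labels [[5, 6, 7, 8]], torch.roll(..., shifts=-1) gives
# [[6, 7, 8, 5]] and the last position is overwritten with -100 (F.cross_entropy's default
# ignore_index), i.e. [[6, 7, 8, -100]], so the logits at position i are scored against the token at
# position i + 1: the usual next-token shift, implemented by rolling the labels instead of slicing
# the logits.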
Optional, Tuple, Union import os import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.modeling_utils import PreTrainedModel from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead CUSTOM_KERNELS_ENABLED = False if torch.cuda.is_available() and (not os.environ.get('DISABLE_CUSTOM_KERNELS', 'False') == 'True'): try: from custom_kernels import fused_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass def make_causal_mask(input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int) -> torch.BoolTensor: (batch_size, target_length) = input_ids_shape mask = torch.ones((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device) mask = mask.triu(1 + past_key_values_length) expanded_mask = mask.unsqueeze(0).expand(batch_size, target_length, target_length + past_key_values_length) return expanded_mask def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: (batch_size, src_length) = mask.shape tgt_length = tgt_length if tgt_length is not None else src_length expanded_mask = ~mask[:, None, :].to(torch.bool) return expanded_mask.expand(batch_size, tgt_length, src_length) def prepare_attn_mask(attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor: combined_attention_mask = None device = attention_mask.device (_, src_length) = input_shape if src_length > 1: combined_attention_mask = make_causal_mask(input_shape, device=device, past_key_values_length=past_key_values_length) expanded_attn_mask = expand_mask(attention_mask, tgt_length=src_length) combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask return combined_attention_mask class GPTNeoXPreTrainedModel(PreTrainedModel): class GPTNeoXAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_attention_heads self.rotary_ndims = int(self.head_size * config.rotary_pct) self.rotary_emb = RotaryEmbedding(self.rotary_ndims, config.max_position_embeddings, base=config.rotary_emb_base) self.rotary_emb.inv_freq = nn.Parameter(weights.get_tensor(f'{prefix}.rotary_emb.inv_freq')) self.inv_norm_factor = 1.0 / torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()) if self.num_attention_heads % weights.process_group.size() != 0: raise ValueError(f'`num_attention_heads` must be divisible by `num_shards` (got `num_attention_heads`: {self.num_attention_heads} and `num_shards`: {weights.process_group.size()}') self.num_attention_heads = self.num_attention_heads // weights.process_group.size() self.query_key_value = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.query_key_value', weights=weights, bias=True) self.dense = TensorParallelRowLinear.load(config, prefix=f'{prefix}.dense', weights=weights, bias=True) def forward(self, hidden_states, position_ids, attention_mask, head_mask=None, layer_past=None, use_cache=False, output_attentions=False): has_layer_past = layer_past is not None qkv = self.query_key_value(hidden_states) new_qkv_shape = 
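# --- Illustrative sketch (not part of the original file) ----------------------
# make_causal_mask / expand_mask / prepare_attn_mask above build a *boolean*
# mask in which True means "do not attend": a strictly upper-triangular causal
# part OR'ed with the inverted padding mask over past + current key positions.
# Toy end-to-end run with invented sizes:
import torch

batch_size, seq_len, past_len = 2, 4, 3
# 1 = real token, 0 = padding, over the past_len + seq_len key positions
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1],
                               [1, 1, 1, 1, 1, 1, 1]])

causal = torch.ones(seq_len, seq_len + past_len, dtype=torch.bool).triu(1 + past_len)
causal = causal.unsqueeze(0).expand(batch_size, seq_len, seq_len + past_len)
padding = ~attention_mask[:, None, :].to(torch.bool)
padding = padding.expand(batch_size, seq_len, seq_len + past_len)
combined = causal | padding
print(combined[0].int())  # True entries are replaced by finfo(dtype).min in _attn()
# ------------------------------------------------------------------------------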
qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape).permute(0, 2, 1, 3) (query, key, value) = qkv.split(self.head_size, -1) seq_len = key.shape[-2] if has_layer_past: seq_len += layer_past[0].shape[-2] query_rot = query[..., :self.rotary_ndims] key_rot = key[..., :self.rotary_ndims] (query_rot, key_rot) = self.rotary_emb(query_rot, key_rot, position_ids, seq_len) query[..., :self.rotary_ndims] = query_rot key[..., :self.rotary_ndims] = key_rot if CUSTOM_KERNELS_ENABLED: (attn_output, present, attn_weights) = fused_attention_cuda.forward(query, key, value, layer_past, attention_mask, head_mask, self.inv_norm_factor, self.num_attention_heads, use_cache) else: if has_layer_past: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = (key, value) if use_cache else None (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) tensor = tensor.view(new_shape) tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): (batch_size, num_attention_heads, query_length, attn_head_size) = query.size() key_length = key.size(-2) query = query.reshape(batch_size * num_attention_heads, query_length, attn_head_size) key = key.reshape(batch_size * num_attention_heads, key_length, attn_head_size) attn_scores = torch.zeros(1, dtype=query.dtype, device=key.device).expand(batch_size * num_attention_heads, query_length, key_length) attn_scores = torch.baddbmm(attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=self.inv_norm_factor) input_dtype = attn_scores.dtype if input_dtype in [torch.float16, torch.bfloat16]: attn_scores = attn_scores.to(torch.float) attn_scores = torch.where(attention_mask, torch.finfo(attn_scores.dtype).min, attn_scores) attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length) attn_weights = nn.functional.softmax(attn_scores, dim=-1) attn_weights = attn_weights.to(value.dtype) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings, base=10000, device=None): super().__init__() self.true_inv_freq = 1.0 / base ** (torch.arange(0, dim, 2).float().to(device) / dim) self.register_buffer('inv_freq', self.true_inv_freq) self.max_seq_len_cached = max_position_embeddings self.cos_cached = None self.sin_cached = None @staticmethod def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) @staticmethod def _create_cos_sin(inv_freq, max_position_embeddings, dtype, device): t = torch.arange(max_position_embeddings, device=inv_freq.device, dtype=inv_freq.dtype) freqs = torch.einsum('i,j->ij', t, inv_freq) 
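# --- Illustrative sketch (not part of the original file) ----------------------
# _create_cos_sin precomputes cos/sin tables of shape (max_positions, dim) and
# the rotate_half trick (applied in rotary_forward below) computes
#     x_embed = x * cos + rotate_half(x) * sin
# A tiny standalone check, with invented sizes, that the rotation is
# norm-preserving, which is the defining property of rotary embeddings:
import torch

dim, max_pos = 8, 16
inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2).float() / dim)
t = torch.arange(max_pos, dtype=inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
cos, sin = emb.cos(), emb.sin()

x = torch.randn(1, 1, 3, dim)                      # (batch, heads, seq, head_dim)
position_ids = torch.tensor([[2, 5, 7]])
c, s = cos[position_ids].unsqueeze(1), sin[position_ids].unsqueeze(1)
x1, x2 = x.split(dim // 2, dim=-1)
x_embed = x * c + torch.cat((-x2, x1), dim=-1) * s
assert torch.allclose(x.norm(dim=-1), x_embed.norm(dim=-1), atol=1e-5)
# ------------------------------------------------------------------------------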
emb = torch.cat((freqs, freqs), dim=-1) return (emb.cos().to(device).to(dtype), emb.sin().to(device).to(dtype)) def forward(self, q, k, position_ids, seq_len=None): if seq_len > self.max_seq_len_cached or self.cos_cached is None or self.sin_cached is None: if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len (self.cos_cached, self.sin_cached) = self._create_cos_sin(self.true_inv_freq, self.max_seq_len_cached, q.dtype, q.device) return rotary_forward(q, k, self.cos_cached, self.sin_cached, position_ids) @torch.jit.script def rotary_forward(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) chunk_size = q.shape[-1] // 2 (q1, q2) = q.split(chunk_size, -1) q_rotated = torch.cat((-q2, q1), dim=-1) (k1, k2) = k.split(chunk_size, -1) k_rotated = torch.cat((-k2, k1), dim=-1) q_embed = q * cos + q_rotated * sin k_embed = k * cos + k_rotated * sin return (q_embed, k_embed) class GPTNeoXMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.act = ACT2FN[config.hidden_act] if 'gelu_fast' not in config.hidden_act else lambda x: torch.nn.functional.gelu(x, approximate='tanh') self.dense_h_to_4h = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.dense_h_to_4h', weights=weights, bias=True) self.dense_4h_to_h = TensorParallelRowLinear.load(config, prefix=f'{prefix}.dense_4h_to_h', weights=weights, bias=True) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class GPTNeoXLayer(nn.Module): def __init__(self, layer_id, prefix: str, config, weights): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.layers.{layer_id}.input_layernorm', weights=weights, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm.load(prefix=f'{prefix}.layers.{layer_id}.post_attention_layernorm', weights=weights, eps=config.layer_norm_eps) self.attention = GPTNeoXAttention(config, prefix=f'{prefix}.layers.{layer_id}.attention', weights=weights) self.mlp = GPTNeoXMLP(config, prefix=f'{prefix}.layers.{layer_id}.mlp', weights=weights) def forward(self, hidden_states, position_ids, attention_mask=None, head_mask=None, use_cache=False, layer_past=None, output_attentions=False): attention_layer_outputs = self.attention(self.input_layernorm(hidden_states), attention_mask=attention_mask, position_ids=position_ids, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attention_layer_outputs[0] outputs = attention_layer_outputs[1:] if self.use_parallel_residual: mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) hidden_states = mlp_output + attn_output + hidden_states else: attn_output = attn_output + hidden_states mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) hidden_states = mlp_output + attn_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class GPTNeoXModel(GPTNeoXPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.config = config self.num_attention_heads = config.num_attention_heads self.embed_in = TensorParallelEmbedding(prefix=f'{prefix}.embed_in', weights=weights) self.layers = nn.ModuleList([GPTNeoXLayer(layer_id, prefix, config, weights) for layer_id in 
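# --- Illustrative sketch (not part of the original file) ----------------------
# GPTNeoXLayer.forward above supports two residual layouts, selected by
# config.use_parallel_residual:
#   parallel:   x_out = x + attn(ln1(x)) + mlp(ln2(x))
#   sequential: h = x + attn(ln1(x));  x_out = h + mlp(ln2(h))
# Minimal sketch with stand-in sub-modules (all names invented for the example):
import torch
from torch import nn

d = 16
x = torch.randn(2, 5, d)
ln1, ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
attn = nn.Linear(d, d)   # stand-in for self-attention
mlp = nn.Linear(d, d)    # stand-in for the feed-forward block

parallel_out = x + attn(ln1(x)) + mlp(ln2(x))

h = x + attn(ln1(x))
sequential_out = h + mlp(ln2(h))
print(parallel_out.shape, sequential_out.shape)  # both (2, 5, 16)
# ------------------------------------------------------------------------------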
range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.final_layer_norm', weights=weights, eps=config.layer_norm_eps) self.tp_world_size = weights.process_group.size() def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape if past_key_values is None: past_length = 0 past_key_values = tuple([None] * self.config.num_hidden_layers) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) hidden_states = inputs_embeds seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[-1] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) causal_mask = prepare_attn_mask(attention_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length) assert self.num_attention_heads % self.tp_world_size == 0 block_size = self.num_attention_heads // self.tp_world_size causal_mask = torch.repeat_interleave(causal_mask, block_size, dim=0) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) presents = () if use_cache else None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, (layer, layer_past)) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = layer(hidden_states, position_ids=position_ids, attention_mask=causal_mask, head_mask=head_mask[i], layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = 
self.final_layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions) class GPTNeoxForCausalLM(GPTNeoXPreTrainedModel): _keys_to_ignore_on_load_missing = ['position_ids', 'predictions.decoder.bias'] def __init__(self, prefix: str, config, weights): super().__init__(config) if not prefix: prefix = 'gpt_neox' else: prefix = f'{prefix}.gpt_neox' self.gpt_neox = GPTNeoXModel(prefix, config, weights) self.embed_out = SpeculativeHead.load(config, prefix='embed_out', weights=weights) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] (lm_logits, speculative_logits) = self.embed_out(hidden_states) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return (CausalLMOutputWithPast(loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions), speculative_logits) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs): input_shape = input_ids.shape if past_key_values and past_key_values[0] is not None: input_ids = input_ids[:, -1:] position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'attention_mask': attention_mask, 'past_key_values': past_key_values, 'position_ids': position_ids}) return model_inputs def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past # 
File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/opt_modeling.py """""" import random from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.modeling_utils import PreTrainedModel from transformers import OPTConfig from text_generation_server.layers import FastLinear, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead EPS = 1e-05 def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0): (bsz, tgt_len) = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (bsz, src_len) = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class OPTLearnedPositionalEmbedding(nn.Module): def __init__(self, prefix: str, weights): super().__init__() self.offset = 2 self.weight = nn.Parameter(weights.get_tensor(f"{(prefix + '.' 
if prefix else '')}decoder.embed_positions.weight")) def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int=0): attention_mask = attention_mask.long() positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 positions = positions[:, past_key_values_length:] return torch.nn.functional.embedding(positions + self.offset, self.weight) class OPTAttention(nn.Module): def __init__(self, config, prefix, weights, is_decoder: bool=False, bias: bool=True, process_group=None): super().__init__() hidden_size = config.hidden_size num_heads = config.num_attention_heads self.hidden_size = hidden_size self.num_heads = num_heads self.dropout = config.dropout self.head_dim = hidden_size // num_heads if self.head_dim * num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder process_group = weights.process_group if self.num_heads % weights.process_group.size() != 0: raise ValueError(f'`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} and `num_shards`: {weights.process_group.size()}') self.num_heads = self.num_heads // process_group.size() self.hidden_size = self.hidden_size // process_group.size() self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', weights=weights, bias=bias) self.k_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.k_proj', weights=weights, bias=bias) self.v_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.v_proj', weights=weights, bias=bias) self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if 
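# --- Illustrative sketch (not part of the original file) ----------------------
# OPTLearnedPositionalEmbedding above derives positions from the attention mask
# rather than from arange(): padding positions stay at -1 and real tokens are
# numbered 0, 1, 2, ..., after which a fixed offset of 2 is added because OPT
# reserves the first two rows of the position table. Toy left-padded batch
# (values invented):
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]]).long()
positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
print(positions)
# tensor([[-1, -1,  0,  1,  2],
#         [ 0,  1,  2,  3,  4]])
offset = 2
print(positions + offset)  # indices actually looked up in the embedding weight
# ------------------------------------------------------------------------------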
attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attn_weights.dtype == torch.float16: attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.hidden_size) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class OPTDecoderLayer(nn.Module): def __init__(self, layer_id: int, prefix: str, config: OPTConfig, weights): super().__init__() self.process_group = weights.process_group self.hidden_size = config.hidden_size prefix = f"{(prefix + '.' 
if prefix else '')}decoder.layers.{layer_id}" self.self_attn = OPTAttention(config, prefix=f'{prefix}.self_attn', weights=weights, is_decoder=True, bias=config.enable_bias) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.self_attn_layer_norm', weights=weights, eps=EPS) self.fc1 = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.fc1', weights=weights, bias=config.enable_bias) self.fc2 = TensorParallelRowLinear.load(config, prefix=f'{prefix}.fc2', weights=weights, bias=config.enable_bias) self.final_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}.final_layer_norm', weights=weights, eps=EPS) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[torch.Tensor]]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = (residual + hidden_states).view(hidden_states_shape) if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class OPTPreTrainedModel(PreTrainedModel): config_class = OPTConfig class OPTDecoder(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size prefix = prefix + '.' 
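# --- Illustrative sketch (not part of the original file) ----------------------
# config.do_layer_norm_before in OPTDecoderLayer above selects pre-LN (norm on
# the input of each residual branch, used by most OPT checkpoints) versus
# post-LN (norm after the residual add, as in OPT-350m). Minimal stand-in
# modules, names invented for the example:
import torch
from torch import nn

d = 16
x = torch.randn(2, 5, d)
norm, sublayer = nn.LayerNorm(d), nn.Linear(d, d)

pre_ln_out = x + sublayer(norm(x))    # do_layer_norm_before=True
post_ln_out = norm(x + sublayer(x))   # do_layer_norm_before=False
print(pre_ln_out.shape, post_ln_out.shape)
# ------------------------------------------------------------------------------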
if prefix else '' self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}decoder.embed_tokens', weights=weights) self.embed_positions = OPTLearnedPositionalEmbedding(prefix, weights) if config.word_embed_proj_dim != config.hidden_size: self.project_out = FastLinear.load(config, prefix=f'{prefix}decoder.project_out', weights=weights, bias=False) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = FastLinear.load(config, prefix=f'{prefix}decoder.project_in', weights=weights, bias=False) else: self.project_in = None if config.do_layer_norm_before and (not config._remove_final_layer_norm): self.final_layer_norm = nn.LayerNorm.load(prefix=f'{prefix}decoder.final_layer_norm', weights=weights, eps=EPS) else: self.final_layer_norm = None self.layers = nn.ModuleList([OPTDecoderLayer(layer_id, prefix, config, weights) for layer_id in range(config.num_hidden_layers)]) def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length) if attention_mask is not None: expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device) combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask return combined_attention_mask def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) (batch_size, seq_length) = input_shape past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 mask_seq_length = past_key_values_length + seq_length if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) causal_attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds all_hidden_states = () if output_hidden_states 
else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask], ['head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer(hidden_states, attention_mask=causal_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) class OPTModel(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.decoder = OPTDecoder(prefix, config, weights) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs return BaseModelOutputWithPast(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions) class OPTForCausalLM(OPTPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) self.model = OPTModel(prefix, config, weights) self.lm_head = SpeculativeHead.load(config, prefix=f"{(prefix + '.' 
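# --- Illustrative sketch (not part of the original file) ----------------------
# The decoder loop above implements LayerDrop: during training each layer is
# skipped independently with probability config.layerdrop, while at inference
# every layer runs. Toy loop with invented values:
import random

layerdrop, training = 0.1, True
executed = []
for idx in range(12):
    if training and random.uniform(0, 1) < layerdrop:
        continue  # skip this decoder layer entirely
    executed.append(idx)
print(f"ran {len(executed)}/12 layers: {executed}")
# ------------------------------------------------------------------------------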
if prefix else '')}decoder.embed_tokens", weights=weights) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (logits, speculative_logits) = self.lm_head(outputs.last_hidden_state) loss = None return (CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions), speculative_logits) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs): if past_key_values: input_ids = input_ids[:, -1:] if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask}) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past)),) return reordered_past # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/phi_modeling.py import torch import torch.distributed import math from torch import nn from typing import Optional, List, Tuple from transformers.configuration_utils import PretrainedConfig from transformers.modeling_outputs import CausalLMOutputWithPast from text_generation_server.layers import TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, FastLinear class PhiConfig(PretrainedConfig): def __init__(self, vocab_size=51200, n_positions=2048, n_embd=2560, n_layer=32, n_inner=None, n_head=32, rotary_dim=32, layer_norm_epsilon=1e-05, tie_word_embeddings=False, pad_vocab_size_multiple=64, pad_token_id=0, bos_token_id=1, eos_token_id=2, no_bias=False, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_inner = n_inner self.n_head = n_head self.rotary_dim = rotary_dim self.layer_norm_epsilon = layer_norm_epsilon self.tie_word_embeddings = tie_word_embeddings self.pad_vocab_size_multiple = pad_vocab_size_multiple self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.no_bias = no_bias super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class RotaryEmbedding(nn.Module): def 
__init__(self, dim, max_seq_len): super().__init__() inv_freq = [1.0 / 10000.0 ** (i / dim) for i in range(0, dim, 2)] inv_freq_len = len(inv_freq) inv_freq = torch.tensor(inv_freq).view(1, inv_freq_len) t = torch.arange(0, max_seq_len, dtype=torch.float).view(max_seq_len, 1) freqs = t.matmul(inv_freq) self.sin = freqs.sin() self.cos = freqs.cos() def apply_rotary_emb_qkv(self, qkv, seqlen_offset): (b_size, seqlen, three, _, _headdim) = qkv.shape if three != 3: raise Exception('unexpected shape for qkv') (_, rotary_dim) = self.cos.shape rotary_dim = rotary_dim * 2 q_rot = qkv[:, :, 0, :, :rotary_dim] q_pass = qkv[:, :, 0, :, rotary_dim:] k_rot = qkv[:, :, 1, :, :rotary_dim] k_pass = qkv[:, :, 1, :, rotary_dim:] q12 = torch.chunk(q_rot, 2, dim=-1) k12 = torch.chunk(k_rot, 2, dim=-1) (q1, q2) = (q12[0], q12[1]) (k1, k2) = (k12[0], k12[1]) c = self.cos.narrow(0, seqlen_offset, seqlen).unsqueeze(1) s = self.sin.narrow(0, seqlen_offset, seqlen).unsqueeze(1) q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], dim=-1) k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], dim=-1) q = torch.cat([q_rot, q_pass], dim=-1) k = torch.cat([k_rot, k_pass], dim=-1) v = qkv[:, :, 2] return (q, k, v) class PhiCausalLMHead(nn.Module): def __init__(self, config, weights): super().__init__() self.ln = nn.LayerNorm.load(prefix='lm_head.ln', weights=weights, eps=config.layer_norm_epsilon) self.linear = SpeculativeHead.load(config=config, prefix='lm_head.linear', weights=weights) def forward(self, hidden_states): hidden_states = self.ln(hidden_states) hidden_states = self.linear(hidden_states) return hidden_states class PhiMHA(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.Wqkv = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.Wqkv', weights=weights, bias=not config.no_bias) self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=not config.no_bias) self.op_size = config.n_embd self.head_dim = int(config.n_embd / config.n_head) self.num_heads = config.n_head self.rotary_emb = RotaryEmbedding(config.rotary_dim, config.n_positions) self.softmax_scale = 1.0 / math.sqrt(self.head_dim) def forward(self, hidden_states, past_kv_cache, attention_mask=None): (b_size, seq_len, _n_embd) = hidden_states.shape qkv = self.Wqkv(hidden_states) qkv = qkv.view(b_size, seq_len, 3, self.num_heads, self.head_dim) seqlen_offset = 0 if past_kv_cache is None else past_kv_cache[0].shape[1] (q, k, v) = self.rotary_emb.apply_rotary_emb_qkv(qkv, seqlen_offset) if past_kv_cache is not None: (prev_k, prev_v) = past_kv_cache k = torch.cat([prev_k, k], dim=1) v = torch.cat([prev_v, v], dim=1) past_kv_cache = [k, v] attn_weights = torch.einsum('bthd,bshd->bhts', q, k * self.softmax_scale) if attention_mask is not None: seqlen_k = k.shape[1] seqlen_q = q.shape[1] causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device), 1) attn_weights = attn_weights + causal_mask.to(dtype=attn_weights.dtype) attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) attn_output = attn_weights.matmul(v.transpose(1, 2)).squeeze(0) attn_output = attn_output.view((b_size, self.num_heads, seq_len, self.head_dim)).transpose(1, 2).flatten(-2) return (self.out_proj(attn_output), past_kv_cache) class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.n_inner = config.n_inner self.fc1 = FastLinear.load(config=config, prefix=f'{prefix}.fc1', weights=weights, bias=False) self.fc2 = 
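# --- Illustrative sketch (not part of the original file) ----------------------
# Phi applies *partial* rotary embeddings in apply_rotary_emb_qkv above: only
# the first config.rotary_dim channels of each query/key head are rotated, the
# remaining channels pass through untouched, and the halves are re-concatenated
# afterwards. Toy split on a packed qkv tensor (all sizes invented):
import torch

b, s, heads, head_dim, rotary_dim = 1, 4, 2, 8, 4
qkv = torch.randn(b, s, 3, heads, head_dim)

q_rot, q_pass = qkv[:, :, 0, :, :rotary_dim], qkv[:, :, 0, :, rotary_dim:]
k_rot, k_pass = qkv[:, :, 1, :, :rotary_dim], qkv[:, :, 1, :, rotary_dim:]
print(q_rot.shape, q_pass.shape)  # (1, 4, 2, 4) each with these toy sizes

# Only q_rot / k_rot go through the cos/sin rotation; the pass-through part is
# concatenated back along the last dimension:
q = torch.cat([q_rot, q_pass], dim=-1)
assert q.shape == qkv[:, :, 0].shape
# ------------------------------------------------------------------------------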
FastLinear.load(config=config, prefix=f'{prefix}.fc2', weights=weights, bias=False) self.activation = torch.nn.functional.gelu def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class PhiBlock(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() self.layer_id = layer_id self.layer_norm = nn.LayerNorm.load(prefix=f'{layer_id}.ln', weights=weights, eps=config.layer_norm_epsilon) self.mixer = PhiMHA(prefix=f'{layer_id}.mixer', config=config, weights=weights) self.mlp = PhiMLP(prefix=f'{layer_id}.mlp', config=config, weights=weights) def forward(self, hidden_states, kv_cache, attention_mask): residual = hidden_states hidden_states = self.layer_norm(hidden_states) (attn_outputs, past_kv_cache) = self.mixer(hidden_states, kv_cache, attention_mask) feed_forward_hidden_states = self.mlp(hidden_states) out = attn_outputs + feed_forward_hidden_states + residual return (out, past_kv_cache) class PhiModel(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.tp_rank = weights.process_group.rank() self.tp_world_size = weights.process_group.size() self.embed_tokens = TensorParallelEmbedding(prefix=f'{prefix}.embd.wte', weights=weights) self.blocks = nn.ModuleList([PhiBlock(f'{prefix}.h.{layer_id}', config, weights) for layer_id in range(config.n_layer)]) def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: hidden_states = self.embed_tokens(input_ids) seq_len = hidden_states.shape[1] mask = None if seq_len <= 1 else attention_mask past_key_values = [None] * len(self.blocks) if past_key_values is None else past_key_values for (index, block) in enumerate(self.blocks): (hidden_states, new_key_values) = block(hidden_states, past_key_values[index], mask) past_key_values[index] = new_key_values return (hidden_states, past_key_values) class PhiForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = 'transformer' else: prefix = f'{prefix}.transformer' self.model = PhiModel(prefix, config, weights) self.lm_head = PhiCausalLMHead(config, weights) def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: model_output = self.model(input_ids, past_key_values, attention_mask, return_dict, use_cache) logits = self.lm_head(model_output[0]) loss = None if labels is not None: loss = nn.CrossEntropyLoss()(logits[:, :-1].view(-1, logits.size(-1)), labels[:, 1:].view(-1)) if not return_dict: return (loss,) + (logits,) + model_output[1:] if loss is not None else (logits,) + model_output[1:] return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=model_output[1], hidden_states=None, attentions=None) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/siglip.py from typing import Optional, Tuple import warnings import math import torch from torch import nn from transformers.activations import ACT2FN from 
transformers.modeling_outputs import BaseModelOutputWithPooling from transformers import SiglipConfig, SiglipVisionConfig from torch.nn.init import _calculate_fan_in_and_fan_out from text_generation_server.layers.tensor_parallel import TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear class SiglipVisionEmbeddings(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding='valid') self.patch_embedding.weight = nn.Parameter(weights.get_tensor(f'{prefix}.patch_embedding.weight'), requires_grad=False) self.patch_embedding.bias = nn.Parameter(weights.get_tensor(f'{prefix}.patch_embedding.bias'), requires_grad=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches self.position_embedding = TensorParallelEmbedding(prefix=f'{prefix}.position_embedding', weights=weights) self.register_buffer('position_ids', torch.arange(self.num_positions, device=weights.device).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class SiglipAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.head_size = self.head_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.k_proj', weights=weights, bias=True) self.v_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.v_proj', weights=weights, bias=True) self.q_proj = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q_proj', weights=weights, bias=True) self.out_proj = TensorParallelRowLinear.load(config, prefix=f'{prefix}.out_proj', weights=weights, bias=True) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) * self.scale if attn_weights.size() != 
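# --- Illustrative sketch (not part of the original file) ----------------------
# SiglipVisionEmbeddings above patchifies the image with a stride=patch_size
# Conv2d and adds one learned position embedding per patch, so an H x W image
# becomes a sequence of (H/patch) * (W/patch) tokens. Toy shapes (all values
# invented):
import torch
from torch import nn

image_size, patch_size, channels, hidden = 224, 16, 3, 32
patchify = nn.Conv2d(channels, hidden, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(2, channels, image_size, image_size)
patch_embeds = patchify(pixel_values)             # (2, hidden, 14, 14)
tokens = patch_embeds.flatten(2).transpose(1, 2)  # (2, 196, hidden)
num_patches = (image_size // patch_size) ** 2
assert tokens.shape == (2, num_patches, hidden)
# ------------------------------------------------------------------------------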
(bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(attn_weights.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) class SiglipMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load(prefix=f'{prefix}.fc1', config=config, weights=weights, bias=True) self.fc2 = TensorParallelRowLinear.load(prefix=f'{prefix}.fc2', config=config, weights=weights, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class SiglipEncoderLayer(nn.Module): def __init__(self, prefix, config: SiglipConfig, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = SiglipAttention(prefix=f'{prefix}.self_attn', config=config, weights=weights) self.layer_norm1 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm1', weights=weights, eps=config.layer_norm_eps) self.mlp = SiglipMLP(prefix=f'{prefix}.mlp', config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load(prefix=f'{prefix}.layer_norm2', weights=weights, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return (hidden_states, None) class SiglipMultiheadAttentionPoolingHead(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = SiglipMLP(prefix, config, weights) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state 
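# --- Illustrative sketch (not part of the original file) ----------------------
# SiglipMultiheadAttentionPoolingHead above pools the patch sequence with a
# single learned "probe" query: one cross-attention step reduces
# (batch, num_patches, hidden) to (batch, hidden). Minimal standalone version,
# without the follow-up LayerNorm/MLP, sizes invented:
import torch
from torch import nn

hidden, num_heads, batch, num_patches = 32, 4, 2, 49
attention = nn.MultiheadAttention(hidden, num_heads, batch_first=True)
probe = nn.Parameter(torch.randn(1, 1, hidden))

patches = torch.randn(batch, num_patches, hidden)
queries = probe.repeat(batch, 1, 1)                     # (batch, 1, hidden)
pooled = attention(queries, patches, patches)[0][:, 0]  # (batch, hidden)
print(pooled.shape)
# ------------------------------------------------------------------------------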
= self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] def _trunc_normal_(tensor, mean, std, a, b): def norm_cdf(x): return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if mean < a - 2 * std or mean > b + 2 * std: warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) lower = norm_cdf((a - mean) / std) upper = norm_cdf((b - mean) / std) tensor.uniform_(2 * lower - 1, 2 * upper - 1) tensor.erfinv_() tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) tensor.clamp_(min=a, max=b) def trunc_normal_tf_(tensor: torch.Tensor, mean: float=0.0, std: float=1.0, a: float=-2.0, b: float=2.0) -> torch.Tensor: with torch.no_grad(): _trunc_normal_(tensor, 0, 1.0, a, b) tensor.mul_(std).add_(mean) def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): (fan_in, fan_out) = _calculate_fan_in_and_fan_out(tensor) if mode == 'fan_in': denom = fan_in elif mode == 'fan_out': denom = fan_out elif mode == 'fan_avg': denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == 'truncated_normal': trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.8796256610342398) elif distribution == 'normal': with torch.no_grad(): tensor.normal_(std=math.sqrt(variance)) elif distribution == 'uniform': bound = math.sqrt(3 * variance) with torch.no_grad(): tensor.uniform_(-bound, bound) else: raise ValueError(f'invalid distribution {distribution}') def lecun_normal_(tensor): variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') def default_flax_embed_init(tensor): variance_scaling_(tensor, mode='fan_in', distribution='normal') class SiglipEncoder(nn.Module): def __init__(self, prefix, config: SiglipConfig, weights): super().__init__() self.config = config self.layers = nn.ModuleList([SiglipEncoderLayer(prefix=f'{prefix}.layers.{i}', config=config, weights=weights) for i in range(config.num_hidden_layers)]) def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None): hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): (hidden_states, _) = encoder_layer(hidden_states, attention_mask) return hidden_states class SiglipVisionTransformer(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embeddings = SiglipVisionEmbeddings(prefix=f'{prefix}.embeddings', config=config, weights=weights) self.encoder = SiglipEncoder(prefix=f'{prefix}.encoder', config=config, weights=weights) def forward(self, pixel_values: Optional[torch.FloatTensor]=None): if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder(inputs_embeds=hidden_states) last_hidden_state = encoder_outputs return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state) # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/t5_modeling.py """""" import copy import math import warnings from typing import Optional, Tuple, Union from loguru import logger import torch import torch.distributed from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS from transformers.utils 
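# --- Illustrative sketch (not part of the original file) ----------------------
# variance_scaling_ above mirrors the JAX/Flax initializers: the weight
# variance is scale / fan, with fan chosen by `mode` (lecun_normal_ is
# mode="fan_in" with a truncated normal). Quick check of the resulting std for
# the plain "normal" branch (all values invented):
import math
import torch

fan_in, fan_out, scale = 256, 1024, 1.0
weight = torch.empty(fan_out, fan_in)
std = math.sqrt(scale / fan_in)   # mode="fan_in"
with torch.no_grad():
    weight.normal_(std=std)       # distribution="normal" branch
print(round(weight.std().item(), 4), round(std, 4))  # empirically close
# ------------------------------------------------------------------------------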
import is_torch_fx_proxy from transformers import T5Config from text_generation_server.layers import TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead __HEAD_MASK_WARNING_MSG = '\nThe input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,\n`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.\nIf you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,\nnum_heads)`.\n' class PartialTPEmbedding(nn.Module): def __init__(self, prefix: str, weights): super().__init__() weight = weights.get_sharded(f'{prefix}.weight', dim=1) self.weight = nn.Parameter(weight) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.embedding(input, self.weight) @torch.jit.script def layer_norm(hidden_states, weight, epsilon): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + epsilon) if weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(weight.dtype) return weight * hidden_states class T5LayerNorm(nn.Module): def __init__(self, prefix, weights, eps=1e-06): super().__init__() weight = weights.get_tensor(f'{prefix}.weight') self.weight = nn.Parameter(weight) self.variance_epsilon = torch.tensor(eps) def forward(self, hidden_states): return layer_norm(hidden_states, self.weight, self.variance_epsilon) try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm') except ImportError: pass except Exception: logger.warning('discovered apex but it failed to load, falling back to T5LayerNorm') pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.wi', weights=weights, bias=False) _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load(config, prefix=f'{prefix}.wo', weights=weights, bias=False) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] if 'gelu' not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate='tanh') def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi_0 = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.wi_0', weights=weights, bias=False) self.wi_1 = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.wi_1', weights=weights, bias=False) _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load(config, prefix=f'{prefix}.wo', weights=weights, bias=False) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] if 'gelu' not in 
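# --- Illustrative sketch (not part of the original file) ----------------------
# T5LayerNorm / the jit'ed layer_norm helper above implement an RMSNorm: the
# activations are rescaled by their root mean square (no mean subtraction, no
# bias), with the statistics computed in float32 and the result cast back to
# the weight dtype. Standalone restatement of the same formula (sizes invented):
import torch

hidden = torch.randn(2, 5, 16, dtype=torch.float16)
weight = torch.ones(16, dtype=torch.float16)
eps = 1e-6

variance = hidden.to(torch.float32).pow(2).mean(-1, keepdim=True)
out = hidden * torch.rsqrt(variance + eps)
out = weight * out.to(weight.dtype)
print(out.dtype, tuple(out.shape))  # torch.float16 (2, 5, 16)
# ------------------------------------------------------------------------------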
config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate='tanh') def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense(config, prefix=f'{prefix}.DenseReluDense', weights=weights) else: self.DenseReluDense = T5DenseActDense(config, prefix=f'{prefix}.DenseReluDense', weights=weights) self.layer_norm = T5LayerNorm(prefix=f'{prefix}.layer_norm', weights=weights, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__(self, config: T5Config, prefix, weights, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim process_group = weights.process_group assert self.n_heads % process_group.size() == 0 self.q = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.q', weights=weights, bias=False) self.k = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.k', weights=weights, bias=False) self.v = TensorParallelColumnLinear.load(config, prefix=f'{prefix}.v', weights=weights, bias=False) self.o = TensorParallelRowLinear.load(config, prefix=f'{prefix}.o', weights=weights, bias=False) if self.n_heads % weights.process_group.size() != 0: raise ValueError(f'`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} and `num_shards`: {weights.process_group.size()}') self.n_heads = self.n_heads // process_group.size() self.inner_dim = self.inner_dim // process_group.size() if self.has_relative_attention_bias: self.relative_attention_bias = PartialTPEmbedding(prefix=f'{prefix}.relative_attention_bias', weights=weights) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, 
query_length, key_length, device=None): if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False): (batch_size, seq_length) = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert len(past_key_value) == 2, f'past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states' real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): if key_value_states is None: hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: hidden_states = shape(proj_layer(key_value_states)) else: hidden_states = past_key_value return hidden_states query_states = shape(self.q(hidden_states)) key_states = project(hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None) value_states = project(hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None) scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype) else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1):, :] if mask is not None: position_bias = position_bias + mask position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if self.is_decoder and use_cache else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, prefix, weights, 
has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention(config, prefix=f'{prefix}.SelfAttention', weights=weights, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = T5LayerNorm(prefix=f'{prefix}.layer_norm', weights=weights, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.EncDecAttention = T5Attention(config, prefix=f'{prefix}.EncDecAttention', weights=weights, has_relative_attention_bias=False) self.layer_norm = T5LayerNorm(prefix=f'{prefix}.layer_norm', weights=weights, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] return outputs class T5Block(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias: bool): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, prefix=f'{prefix}.layer.0', weights=weights, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: i = 2 self.layer.append(T5LayerCrossAttention(config, prefix=f'{prefix}.layer.1', weights=weights)) else: i = 1 self.layer.append(T5LayerFF(config, prefix=f'{prefix}.layer.{i}', weights=weights)) def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True): if past_key_value is not None: if not self.is_decoder: logger.warning('`past_key_values` is passed to the encoder. Please make sure this is intended.') expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError(f"There should be {expected_num_past_key_values} past states. {('2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else '')}Got {len(past_key_value)} past key / value states") self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: (self_attn_past_key_value, cross_attn_past_key_value) = (None, None) self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions) (hidden_states, present_key_value_state) = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions) hidden_states = cross_attention_outputs[0] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] attention_outputs = attention_outputs + cross_attention_outputs[2:] hidden_states = self.layer[-1](hidden_states) if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs class T5PreTrainedModel(PreTrainedModel): config_class = T5Config def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, 'self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information' if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, 'self.model.config.pad_token_id has to be defined.' 
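# Hedged illustration (not part of the original file): a minimal, standalone sketch of what the
# teacher-forcing shift in `_shift_right` produces -- the labels are shifted one position to the
# right, `decoder_start_token_id` is prepended, and any -100 ignore-index entries are replaced by
# the pad token. The concrete values used here (decoder_start_token_id = 0, pad_token_id = 0)
# follow common T5 conventions but are assumptions for the example, not read from a real config.
def _example_shift_right():
    import torch
    labels = torch.tensor([[37, 42, -100]])
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = 0  # assumed decoder_start_token_id
    shifted.masked_fill_(shifted == -100, 0)  # assumed pad_token_id
    return shifted  # tensor([[0, 37, 42]])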
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, prefix, weights, embed_tokens): super().__init__(config) self.is_decoder = config.is_decoder self.embed_tokens = embed_tokens self.block = nn.ModuleList([T5Block(config, prefix=f'{prefix}.block.{layer_id}', weights=weights, has_relative_attention_bias=layer_id == 0) for layer_id in range(config.num_layers)]) self.final_layer_norm = T5LayerNorm(prefix=f'{prefix}.final_layer_norm', weights=weights, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds') if inputs_embeds is None: assert self.embed_tokens is not None, 'You have to initialize the model with valid token embeddings' inputs_embeds = self.embed_tokens(input_ids) (batch_size, seq_length) = input_shape mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, f'`use_cache` can only be set to `True` if {self} is used as a decoder' if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and (encoder_hidden_states is not None): encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones(batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long) if past_key_values is None: past_key_values = [None] * len(self.block) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) if self.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if 
output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for (i, (layer_module, past_key_value)) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] (hidden_states, present_key_value_state) = layer_outputs[:2] position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) class T5ForConditionalGeneration(T5PreTrainedModel): def __init__(self, config: T5Config, weights): super().__init__(config) self.model_dim = config.d_model self.shared = TensorParallelEmbedding(prefix='shared', weights=weights) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(config=encoder_config, prefix='encoder', weights=weights, embed_tokens=self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(config=decoder_config, prefix='decoder', weights=weights, embed_tokens=self.shared) try: self.lm_head = SpeculativeHead.load(config, prefix='lm_head', weights=weights) except RuntimeError: self.lm_head = SpeculativeHead.load(config, prefix='shared', weights=weights) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: 
Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.model_dim ** (-0.5) (logits, speculative_logits) = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + decoder_outputs[1:] + encoder_outputs return (loss,) + output if loss is not None else output return (Seq2SeqLMOutput(loss=loss, logits=logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions), speculative_logits) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: input_ids = input_ids[:, -1:] return {'decoder_input_ids': input_ids, 'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'decoder_attention_mask': decoder_attention_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def 
_reorder_cache(self, past_key_values, beam_idx): if past_key_values is None: logger.warning('You might want to consider setting `use_cache=True` to speed up decoding') return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: reordered_layer_past_states = () for layer_past_state in layer_past_states: reordered_layer_past_states = reordered_layer_past_states + (layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past # File: text-generation-inference-main/server/text_generation_server/models/custom_modeling/vlm.py def load_text_model(prefix, config, weights, name=None): if config.model_type == 'llama': from text_generation_server.models.custom_modeling.flash_llama_modeling import FlashLlamaForCausalLM return FlashLlamaForCausalLM(prefix, config, weights) elif config.model_type == 'mistral': from text_generation_server.models.custom_modeling.flash_mistral_modeling import FlashMistralForCausalLM return FlashMistralForCausalLM(prefix, config, weights, name=name) elif config.model_type == 'gemma': from text_generation_server.models.custom_modeling.flash_gemma_modeling import FlashGemmaForCausalLM return FlashGemmaForCausalLM(prefix, config, weights, causal=False) elif config.model_type == 'paligemma': from text_generation_server.models.custom_modeling.flash_gemma_modeling import FlashGemmaForCausalLM return FlashGemmaForCausalLM(prefix, config, weights) else: raise RuntimeError(f'Unsupported model type {config.model_type}') def load_vision_model(prefix, config, weights): if config.model_type == 'clip_vision_model': from text_generation_server.models.custom_modeling.clip import CLIPVisionTransformer return CLIPVisionTransformer(prefix=f'{prefix}.vision_model', config=config, weights=weights) if config.model_type == 'siglip_vision_model': from text_generation_server.models.custom_modeling.siglip import SiglipVisionTransformer return SiglipVisionTransformer(prefix='vision_tower.vision_model', config=config, weights=weights) else: raise RuntimeError(f'Unsupported model type {config.model_type}') # File: text-generation-inference-main/server/text_generation_server/models/flash_causal_lm.py from contextlib import nullcontext import math import os import time import torch import torch.distributed import numpy as np from loguru import logger from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase, AutoConfig, AutoTokenizer, GenerationConfig from typing import Any, ContextManager, Iterable, Optional, Tuple, List, Type, Dict from text_generation_server.adapters import AdapterBatchData, AdapterBatchMetadata from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from text_generation_server.utils.chunks import concat_text_chunks from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.models import Model from text_generation_server.utils.log import log_master from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.utils.speculate import get_speculate from text_generation_server.utils import initialize_torch_distributed, weight_files, Weights from text_generation_server.models.types import Batch, Tokens, Generation, GeneratedText from text_generation_server.pb import generate_pb2 
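# Hedged aside (not in the original module): the batch bookkeeping further down allocates the
# paged KV cache in fixed-size blocks of BLOCK_SIZE slots, one slot per token position. A request
# that needs `block_tokens` positions gets ceil(block_tokens / BLOCK_SIZE) blocks, and its slots
# are simply the flattened indices covered by those blocks, mirroring the
# `needed_blocks` / `request_slots` computation in `from_tokenized` below. A minimal sketch of
# that mapping; the block size of 16 and the helper name are illustrative assumptions only.
def _example_block_allocation(block_tokens: int, first_block: int = 0, block_size: int = 16):
    import math
    needed_blocks = math.ceil(block_tokens / block_size)
    blocks = list(range(first_block, first_block + needed_blocks))
    slots = [s for b in blocks for s in range(b * block_size, (b + 1) * block_size)]
    return blocks, slots
# e.g. _example_block_allocation(20) -> ([0, 1], slots 0..31): 20 tokens need 2 blocks of 16 slots.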
from text_generation_server.models.globals import MEM_POOL, ATTENTION, BLOCK_SIZE, CUDA_GRAPHS, TGI_WIGGLE_ROOM, get_adapter_to_index from text_generation_server.layers.attention import Seqlen from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser from text_generation_server.utils.dist import MEMORY_FRACTION from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.segments import SegmentConcatBuilder, find_segments from text_generation_server.utils.import_utils import empty_cache, synchronize, get_free_memory tracer = trace.get_tracer(__name__) SLIDING_WINDOW: Optional[int] = None def set_sliding_window(sliding_window: int): global SLIDING_WINDOW SLIDING_WINDOW = sliding_window def get_sliding_windows() -> int: global SLIDING_WINDOW return SLIDING_WINDOW def init_cpu_threads_env(rank_id: int, world_size: int): import importlib.util if importlib.util.find_spec('numa') is not None: import numa import psutil nodes = numa.info.get_max_node() + 1 rank_per_node = math.ceil(world_size / nodes) num_cpus_per_nodes = int(psutil.cpu_count(logical=False) / nodes) node_id = int(rank_id / rank_per_node) rank_offset_per_node = rank_id % rank_per_node if os.getenv('OMP_NUM_THREADS') is None: num_cpus_per_rank = max(int(num_cpus_per_nodes / rank_per_node), 1) else: num_cpus_per_rank = int(os.getenv('OMP_NUM_THREADS')) if len(numa.memory.get_membind_nodes()) == nodes: numa.memory.set_membind_nodes(node_id) torch.set_num_threads(num_cpus_per_rank) if len(numa.schedule.get_affinitive_cpus(0)) == psutil.cpu_count(logical=True): cpu_start = num_cpus_per_rank * rank_offset_per_node numa.schedule.run_on_cpus(0, *numa.info.node_to_cpus(node_id)[cpu_start:cpu_start + num_cpus_per_rank]) logger.info(f'affinity={numa.schedule.get_affinitive_cpus(0)}, membind = {numa.memory.get_membind_nodes()}') @dataclass class FlashCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] input_ids: torch.Tensor position_ids: torch.Tensor speculative_ids: Optional[torch.Tensor] cu_seqlen_prefill: Optional[torch.Tensor] prefill_cache_indices: Optional[torch.Tensor] start_slots: torch.Tensor slot_indices: torch.Tensor block_tables: List[List[int]] block_tables_tensor: torch.Tensor slots: torch.Tensor prefix_lens: List[int] prefix_lens_tensor: torch.Tensor max_seqlen: int prefill_head_indices: Optional[torch.Tensor] prefill_next_token_indices: Optional[torch.tensor] prefill_cu_outlens: Optional[List[int]] prefix_ids: List[List[int]] all_input_ids: List[List[int]] all_input_ids_tensor: torch.Tensor input_lengths: List[int] input_lengths_tensor: torch.Tensor prefix_offsets: List[Optional[int]] read_offsets: List[Optional[int]] next_token_chooser: HeterogeneousNextTokenChooser stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor adapter_meta: AdapterBatchMetadata num_blocks: int max_blocks: int def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch(id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.num_blocks * BLOCK_SIZE) @classmethod def batch_tokenized_inputs(cls, requests: Iterable[generate_pb2.Request], tokenizer): max_length = 0 all_input_ids = [] batch_size = 0 for r in requests: batch_size += 1 inputs = concat_text_chunks(r.input_chunks.chunks) input_ids = tokenizer(inputs, truncation=True, max_length=r.truncate, add_special_tokens=r.add_special_tokens)['input_ids'] max_length = max(max_length, 
len(input_ids)) all_input_ids.append(input_ids) return all_input_ids @classmethod def from_tokenized(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, batch_tokenized_inputs, dtype: torch.dtype, device: torch.device) -> 'FlashCausalLMBatch': sliding_window = get_sliding_windows() position_ids = [] cu_seqlen_prefill = [0] start_slots = [] slot_indices = [] prefill_cache_indices = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] prefix_ids = [] requests_idx_mapping = {} all_prefill_logprobs = True no_prefill_logprobs = True prefill_head_indices = [] prefill_next_token_indices = [] prefill_cu_outlens = [0] next_token_chooser_parameters = [] stopping_criterias = [] top_n_tokens = [] adapter_indices_list = [] adapter_set = set() cumulative_length = 0 cumulative_slot_tokens = 0 prefill_out_cumulative_length = 0 num_blocks = 0 max_seqlen = 0 max_length = 0 max_blocks = 0 block_tables = [] slots = [] prefix_lens = [] for (i, (r, tokenized_input)) in enumerate(zip(pb.requests, batch_tokenized_inputs)): requests_idx_mapping[r.id] = i orig_input_length = len(tokenized_input) prefix_len = r.prefix_len assert prefix_len <= orig_input_length, f'Prefix {prefix_len} vs input {orig_input_length}' if prefix_len == orig_input_length: assert prefix_len > 0 prefix_len -= 1 prefix_ids.append(tokenized_input[:prefix_len]) tokenized_input = tokenized_input[prefix_len:] input_length = len(tokenized_input) input_lengths.append(input_length) prefix_offsets.append(input_length - 5) read_offsets.append(input_length) all_input_ids.append(tokenized_input) request_position_ids = torch.arange(prefix_len, orig_input_length, dtype=torch.int32) position_ids.append(request_position_ids) cu_seqlen_prefill.append(cumulative_length + input_length) next_token_chooser_parameters.append(r.parameters) stopping_criteria = StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) max_new_tokens = stopping_criteria.max_new_tokens stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) ADAPTER_TO_INDEX = get_adapter_to_index() adapter_index = ADAPTER_TO_INDEX.get(r.adapter_id, 0) adapter_indices_list.append(torch.full((input_length,), adapter_index)) adapter_set.add(adapter_index) speculative_length = get_speculate() speculative_length = 0 if speculative_length is None else speculative_length block_tokens = orig_input_length + max_new_tokens - 1 + speculative_length slot_tokens = input_length + max_new_tokens - 1 + speculative_length if not r.blocks: needed_blocks = math.ceil(block_tokens / BLOCK_SIZE) request_blocks = [b for b in range(num_blocks, num_blocks + needed_blocks)] request_slots = [s for b in request_blocks for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)] else: request_blocks = r.blocks request_slots = r.slots[prefix_len:] block_tables.append(request_blocks) slots.extend(request_slots) prefix_lens.append(prefix_len) num_blocks += len(request_blocks) start_slots.append(cumulative_slot_tokens) request_slot_indices = torch.arange(cumulative_slot_tokens, cumulative_slot_tokens + input_length, dtype=torch.int64) slot_indices.append(request_slot_indices) if sliding_window is not None: request_prefill_cache_indices = torch.arange(cumulative_length + max(0, input_length - sliding_window), cumulative_length + input_length, dtype=torch.int64) prefill_cache_indices.append(request_prefill_cache_indices) all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs no_prefill_logprobs = no_prefill_logprobs and (not r.prefill_logprobs) if r.prefill_logprobs: 
prefill_head_indices.append(request_position_ids + cumulative_length) prefill_next_token_indices.append(prefill_out_cumulative_length + input_length - 1) prefill_cu_outlens.append(prefill_out_cumulative_length + input_length) prefill_out_cumulative_length += input_length else: prefill_head_indices.append(torch.tensor([cumulative_length + input_length - 1], dtype=torch.int32)) prefill_next_token_indices.append(prefill_out_cumulative_length) prefill_cu_outlens.append(prefill_out_cumulative_length + 1) prefill_out_cumulative_length += 1 cumulative_length += input_length cumulative_slot_tokens += slot_tokens max_seqlen = max(max_seqlen, input_length) max_blocks = max(max_blocks, len(request_blocks)) max_length = max(max_length, input_length + max_new_tokens + speculative_length) adapter_indices = torch.cat(adapter_indices_list).to(dtype=torch.int64, device=device) next_token_chooser = HeterogeneousNextTokenChooser.from_pb(next_token_chooser_parameters, dtype, device, tokenizer) start_slots = torch.tensor(start_slots, dtype=torch.int64) all_input_ids_tensor = np.zeros((len(all_input_ids), max_length), dtype=np.int64) for (i, input_ids) in enumerate(all_input_ids): all_input_ids_tensor[i, :len(input_ids)] = input_ids all_input_ids_tensor = torch.tensor(all_input_ids_tensor, dtype=torch.int64, device=device) if len(pb.requests) > 1: input_ids = np.concatenate(all_input_ids, dtype=np.int64) position_ids = torch.cat(position_ids) slot_indices = torch.cat(slot_indices) if sliding_window is not None: prefill_cache_indices = torch.cat(prefill_cache_indices) else: input_ids = all_input_ids[0] position_ids = position_ids[0] slot_indices = slot_indices[0] if sliding_window is not None: prefill_cache_indices = prefill_cache_indices[0] cu_seqlen_prefill = torch.tensor(cu_seqlen_prefill, device=device, dtype=torch.int32) position_ids = position_ids.to(device) slot_indices = slot_indices.to(device) prefill_cache_indices = prefill_cache_indices.to(device) if sliding_window is not None else None input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device) input_lengths_tensor = torch.tensor(input_lengths, dtype=torch.int32, device=device) (adapter_segments, adapter_segment_indices) = find_segments(adapter_indices) adapter_segments = torch.tensor(adapter_segments, dtype=torch.int32, device=device) if all_prefill_logprobs: prefill_head_indices = None prefill_next_token_indices = cu_seqlen_prefill[1:] - 1 elif no_prefill_logprobs: prefill_head_indices = cu_seqlen_prefill[1:] - 1 prefill_next_token_indices = None else: prefill_head_indices = torch.tensor(torch.cat(prefill_head_indices), dtype=torch.int64, device=device) prefill_next_token_indices = torch.tensor(prefill_next_token_indices, dtype=torch.int64, device=device) top_n_tokens_tensor = torch.tensor(top_n_tokens, device=device, dtype=torch.int64) slots = torch.tensor(slots, dtype=torch.int64, device=device) block_tables_tensor = torch.zeros((len(block_tables), max_blocks), dtype=torch.int32, device='cpu') for (i, request_blocks) in enumerate(block_tables): block_tables_tensor[i, :len(request_blocks)] = torch.tensor(request_blocks) block_tables_tensor = block_tables_tensor.to(device) prefix_lens_tensor = torch.tensor(prefix_lens, dtype=torch.int32, device=device) return cls(batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, prefill_cache_indices=prefill_cache_indices, start_slots=start_slots, slot_indices=slot_indices, 
block_tables=block_tables, block_tables_tensor=block_tables_tensor, slots=slots, prefix_lens=prefix_lens, prefix_lens_tensor=prefix_lens_tensor, max_seqlen=max_seqlen, prefill_head_indices=prefill_head_indices, prefill_next_token_indices=prefill_next_token_indices, prefill_cu_outlens=prefill_cu_outlens, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, prefix_ids=prefix_ids, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, num_blocks=num_blocks, max_blocks=max_blocks, adapter_meta=AdapterBatchMetadata(adapter_indices=adapter_indices, adapter_set=adapter_set, adapter_segments=adapter_segments, segment_indices=adapter_segment_indices), speculative_ids=None) @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'FlashCausalLMBatch': assert len(pb.requests) > 0 batch_tokenized_inputs = cls.batch_tokenized_inputs(pb.requests, tokenizer) return cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device) @tracer.start_as_current_span('filter') def filter(self, request_ids: List[int]) -> 'FlashCausalLMBatch': if len(request_ids) == 0: raise ValueError('Batch must have at least one request') if len(request_ids) == len(self): return self device = self.input_ids.device requests_idx_mapping = {} indices = [] slot_filtering_indices = torch.zeros(self.slots.shape[0], dtype=torch.bool, device=device) slot_indices = torch.empty(len(request_ids), dtype=torch.int64) max_seqlen = 0 requests = [] start_slots = [] block_tables = [] all_input_ids = [] prefix_ids = [] input_lengths = [] prefix_lens = [] prefix_offsets = [] read_offsets = [] stopping_criterias = [] top_n_tokens = [] adapter_set = set() num_blocks = 0 max_blocks = 0 cumulative_max_length = 0 for (i, request_id) in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] indices.append(idx) requests_idx_mapping[request_id] = i requests.append(self.requests[idx]) request_input_length = self.input_lengths[idx] prefix_len = self.prefix_lens[idx] max_seqlen = max(max_seqlen, request_input_length) all_input_ids.append(self.all_input_ids[idx]) prefix_ids.append(self.prefix_ids[idx]) input_lengths.append(request_input_length) prefix_lens.append(prefix_len) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) top_n_tokens.append(self.top_n_tokens[idx]) ADAPTER_TO_INDEX = get_adapter_to_index() adapter_index = ADAPTER_TO_INDEX.get(self.requests[idx].adapter_id, 0) adapter_set.add(adapter_index) remaining_tokens = stopping_criteria.max_new_tokens - stopping_criteria.current_tokens request_block_table = self.block_tables[idx] num_blocks += len(request_block_table) block_tables.append(request_block_table) start_slots.append(cumulative_max_length) slot_indices[i] = cumulative_max_length + request_input_length - 1 slot_filtering_indices[self.start_slots[idx]:self.start_slots[idx] + request_input_length + remaining_tokens - 1] = True cumulative_max_length += request_input_length + remaining_tokens - 1 max_blocks = max(max_blocks, len(request_block_table)) input_ids = self.input_ids[indices] position_ids = self.position_ids[indices] adapter_indices = 
self.adapter_meta.adapter_indices[indices] all_input_ids_tensor = self.all_input_ids_tensor[indices] block_tables_tensor = self.block_tables_tensor[indices] input_lengths_tensor = self.input_lengths_tensor[indices] slots = self.slots[slot_filtering_indices] prefix_lens_tensor = self.prefix_lens_tensor[indices] next_token_chooser = self.next_token_chooser.filter(indices) top_n_tokens_tensor = self.top_n_tokens_tensor[indices] speculative_ids = self.speculative_ids[indices] if self.speculative_ids is not None else None start_slots = torch.tensor(start_slots, dtype=torch.int64) slot_indices = slot_indices.to(device) (adapter_segments, adapter_segment_indices) = find_segments(adapter_indices) adapter_segments = torch.tensor(adapter_segments, dtype=torch.int32, device=device) return type(self)(batch_id=self.batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, prefill_cache_indices=None, start_slots=start_slots, slot_indices=slot_indices, block_tables=block_tables, block_tables_tensor=block_tables_tensor, slots=slots, max_seqlen=max_seqlen, prefill_head_indices=None, prefill_next_token_indices=None, prefill_cu_outlens=None, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_lens=prefix_lens, prefix_lens_tensor=prefix_lens_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, prefix_ids=prefix_ids, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, num_blocks=num_blocks, max_blocks=max_blocks, speculative_ids=speculative_ids, adapter_meta=AdapterBatchMetadata(adapter_indices=adapter_indices, adapter_set=adapter_set, adapter_segments=adapter_segments, segment_indices=adapter_segment_indices)) @classmethod @tracer.start_as_current_span('concatenate') def concatenate(cls, batches: List['FlashCausalLMBatch']) -> 'FlashCausalLMBatch': requests = [] requests_idx_mapping = {} num_blocks = 0 total_batch_size = 0 total_slots = 0 max_blocks = 0 max_length = 0 max_seqlen = 0 for b in batches: total_batch_size += len(b) total_slots += len(b.slots) num_blocks += b.num_blocks speculative_length = b.speculative_ids.shape[1] if b.speculative_ids is not None else 0 max_blocks = max(max_blocks, b.max_blocks) max_seqlen = max(max_seqlen, b.max_seqlen) max_length = max(max_length, max((input_length + stopping_criteria.max_new_tokens + speculative_length - stopping_criteria.current_tokens for (input_length, stopping_criteria) in zip(b.input_lengths, b.stopping_criterias)))) input_ids = batches[0].input_ids.new_empty(total_batch_size) position_ids = batches[0].position_ids.new_empty(total_batch_size) slots = batches[0].slots.new_empty(total_slots) slot_indices = batches[0].slot_indices.new_empty(total_batch_size) input_lengths_tensor = batches[0].input_lengths_tensor.new_empty(total_batch_size) block_tables_tensor = batches[0].block_tables_tensor.new_zeros((total_batch_size, max_blocks)) prefix_lens_tensor = batches[0].prefix_lens_tensor.new_empty(total_batch_size) all_input_ids_tensor = batches[0].all_input_ids_tensor.new_zeros((total_batch_size, max_length)) top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(total_batch_size) total_indices_size = sum((b.adapter_meta.adapter_indices.shape[0] for b in batches)) adapter_indices = batches[0].adapter_meta.adapter_indices.new_empty(total_indices_size) adapter_set = 
set() adapter_segment_builder = SegmentConcatBuilder() start_slots = [] block_tables = [] prefix_lens = [] all_input_ids = [] prefix_ids = [] input_lengths = [] prefix_offsets = [] read_offsets = [] next_token_chooser_parameters = [] fsm_grammar_states = [] stopping_criterias = [] top_n_tokens = [] cumulative_batch_size = 0 cumulative_slots = 0 cumulative_adapter_indices_size = 0 for (i, batch) in enumerate(batches): requests.extend(batch.requests) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: for (k, v) in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + cumulative_batch_size start_index = cumulative_batch_size end_index = cumulative_batch_size + len(batch) slots_start_index = cumulative_slots slots_end_index = cumulative_slots + len(batch.slots) input_ids[start_index:end_index] = batch.input_ids position_ids[start_index:end_index] = batch.position_ids slot_indices[start_index:end_index] = batch.slot_indices + cumulative_slots input_lengths_tensor[start_index:end_index] = batch.input_lengths_tensor top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor slots[slots_start_index:slots_end_index] = batch.slots adapter_start_index = cumulative_adapter_indices_size adapter_end_index = cumulative_adapter_indices_size + batch.adapter_meta.adapter_indices.shape[0] adapter_indices[adapter_start_index:adapter_end_index] = batch.adapter_meta.adapter_indices cumulative_adapter_indices_size = adapter_end_index adapter_set.update(batch.adapter_meta.adapter_set) adapter_segment_builder.concat(batch.adapter_meta.adapter_segments, batch.adapter_meta.segment_indices) all_input_ids_tensor[start_index:end_index, :batch.all_input_ids_tensor.shape[1]] = batch.all_input_ids_tensor[:, :max_length] block_tables_tensor[start_index:end_index, :batch.block_tables_tensor.shape[1]] = batch.block_tables_tensor[:, :max_blocks] prefix_lens_tensor[start_index:end_index] = batch.prefix_lens_tensor start_slots.append(batch.start_slots + cumulative_slots) block_tables.extend(batch.block_tables) prefix_lens.extend(batch.prefix_lens) all_input_ids.extend(batch.all_input_ids) prefix_ids.extend(batch.prefix_ids) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) next_token_chooser_parameters.extend([r.parameters for r in batch.requests]) fsm_grammar_states.extend(batch.next_token_chooser.fsm_grammar_states) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) cumulative_batch_size += len(batch) cumulative_slots += len(batch.slots) start_slots = torch.concat(start_slots) next_token_chooser = HeterogeneousNextTokenChooser.from_pb(next_token_chooser_parameters, dtype=batches[0].next_token_chooser.dtype, device=batches[0].next_token_chooser.device, tokenizer=batches[0].next_token_chooser.tokenizer, fsm_grammar_states=fsm_grammar_states) speculative_ids = torch.cat([b.speculative_ids for b in batches], dim=0) if batches[0].speculative_ids is not None else None (adapter_segments, adapter_segment_indices) = adapter_segment_builder.build() return cls(batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, prefill_cache_indices=None, start_slots=start_slots, slot_indices=slot_indices, block_tables=block_tables, block_tables_tensor=block_tables_tensor, prefix_lens=prefix_lens, prefix_lens_tensor=prefix_lens_tensor, slots=slots, max_seqlen=max_seqlen, 
prefill_head_indices=None, prefill_next_token_indices=None, prefill_cu_outlens=None, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, prefix_ids=prefix_ids, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, num_blocks=num_blocks, max_blocks=max_blocks, speculative_ids=speculative_ids, adapter_meta=AdapterBatchMetadata(adapter_indices=adapter_indices, adapter_set=adapter_set, adapter_segments=adapter_segments, segment_indices=adapter_segment_indices)) def __len__(self): return len(self.requests) ADAPTER_LAYERS = ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'] ROW_PARALLEL = {'o_proj', 'down_proj', 'lm_head'} class FlashCausalLM(Model): def __init__(self, model_id: str, model_class, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False, lora_adapter_ids: Optional[list]=[], tokenizer_class: PreTrainedTokenizerBase=AutoTokenizer, config_class: PreTrainedTokenizerBase=AutoConfig, default_dtype=torch.float16, aliases=None, num_kv_heads: Optional[int]=None, head_size: Optional[int]=None, skip_special_tokens: bool=True): self.quantize = quantize (self.process_group, rank, world_size) = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f'cuda:{rank}') dtype = default_dtype if dtype is None else dtype elif SYSTEM == 'ipex': if hasattr(torch, 'xpu') and torch.xpu.is_available(): device = torch.device(f'xpu:{rank}') dtype = default_dtype if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.bfloat16 if dtype is None else dtype init_cpu_threads_env(rank_id=rank, world_size=world_size) else: raise NotImplementedError(f'{model_class} is only available on GPU') tokenizer = tokenizer_class.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) try: generation_config = GenerationConfig.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) if isinstance(generation_config.eos_token_id, (list, set)): tokenizer._eos_token_ids = set(generation_config.eos_token_id) except Exception: pass config = config_class.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) config.quantize = quantize config.speculator = speculator torch.distributed.barrier(group=self.process_group) weights_loader = get_loader(quantize, model_id, revision) filenames = weight_files(model_id, revision=revision, extension='.safetensors') weights = Weights(filenames, device, dtype, process_group=self.process_group, aliases=aliases, weights_loader=weights_loader) prefix = '' model = model_class(prefix, config, weights) torch.distributed.barrier(group=self.process_group) text_config = getattr(config, 'text_config', None) if text_config is not None: config = text_config if getattr(config, 'sliding_window', None) is not None: set_sliding_window(config.sliding_window) else: config.sliding_window = None self.num_layers = config.num_hidden_layers self.num_heads = config.num_attention_heads // self.process_group.size() if num_kv_heads is None: num_kv_heads = getattr(config, 'num_key_value_heads', None) if num_kv_heads is None: num_kv_heads = getattr(config, 'n_head', None) if num_kv_heads is None: 
raise ValueError('Cannot get the number of key/value heads') self.num_kv_heads = num_kv_heads // self.process_group.size() if num_kv_heads > 1 else num_kv_heads assert self.num_kv_heads > 0 if head_size is None: if hasattr(config, 'head_dim'): self.head_size = config.head_dim else: self.head_size = config.hidden_size // config.num_attention_heads else: self.head_size = head_size self.cuda_graphs = {} self.kv_cache = [] if ATTENTION == 'flashinfer': from text_generation_server.layers.attention.flashinfer import create_prefill_state, create_decode_state, create_prefill_with_paged_kv_state self.prefill_state = create_prefill_state(device=device) self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state(device=device) self.decode_state = create_decode_state(device=device, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads) super().__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=False, dtype=dtype, device=device, rank=rank, world_size=world_size, sliding_window=config.sliding_window) @property def batch_type(self) -> Type[FlashCausalLMBatch]: return FlashCausalLMBatch def max_past(self) -> int: return getattr(self.model, 'max_past', None) def init_kv_cache(self, num_blocks: int, num_layers: int, num_heads: int, head_size: int, dtype: torch.dtype, device: torch.device): self.kv_cache = [] empty_cache() element_size = torch.tensor([], dtype=dtype).element_size() if SYSTEM == 'ipex' and device.type == 'xpu': x = 1 else: x = BLOCK_SIZE // element_size if ATTENTION in {'flashdecoding', 'flashinfer'}: self.kv_cache = [(torch.empty((num_blocks, BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device), torch.empty((num_blocks, BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device)) for _ in range(num_layers)] elif SYSTEM == 'ipex' and device == torch.device('cpu'): self.kv_cache = [(torch.empty((num_blocks, num_heads, BLOCK_SIZE, head_size), dtype=dtype, device=device), torch.empty((num_blocks, num_heads, BLOCK_SIZE, head_size), dtype=dtype, device=device)) for _ in range(num_layers)] else: self.kv_cache = [(torch.empty((num_blocks, num_heads, head_size // x, BLOCK_SIZE, x), dtype=dtype, device=device), torch.empty((num_blocks, num_heads, head_size, BLOCK_SIZE), dtype=dtype, device=device)) for _ in range(num_layers)] def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device) position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) slots = torch.arange(bs, dtype=torch.int64, device=self.device) input_lengths = [max_s] * bs prefix_lengths = [0] * bs input_lengths_tensor = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s prefix_lengths_tensor = torch.zeros(bs, dtype=torch.int32, device=self.device) block_tables = torch.arange(max_bt, dtype=torch.int32, device=self.device).repeat(bs) block_tables = block_tables.reshape((bs, max_bt)) if ATTENTION == 'flashinfer': block_tables = block_tables_to_ragged(block_tables=block_tables, input_lengths=input_lengths, prefix_lens=prefix_lengths) from text_generation_server.layers.attention.flashinfer import create_decode_state_cuda_graphs block_tables_ptr = torch.zeros(bs + 1, dtype=torch.int32, device=self.device) last_page_len = torch.ones(bs, dtype=torch.int32, device=self.device) state = create_decode_state_cuda_graphs(device=input_ids.device, block_tables=block_tables, block_tables_ptr=block_tables_ptr, last_page_len=last_page_len, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads) else: state = None 
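# Hedged aside (not part of the original method): the capture below follows the standard
# torch.cuda.CUDAGraph pattern -- warm the kernels up once outside the graph, capture a second
# run into a graph whose input/output tensors keep fixed device addresses, then replay the graph
# after copying fresh values into those same tensors. A minimal, standalone sketch of that
# pattern with a toy linear layer; every name here is illustrative, not taken from this file.
def _example_cuda_graph_capture():
    import torch
    assert torch.cuda.is_available()
    linear = torch.nn.Linear(8, 8).cuda()
    static_x = torch.zeros(8, 8, device="cuda")
    # warm up on a side stream, as the PyTorch docs recommend before capture
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        linear(static_x)
    torch.cuda.current_stream().wait_stream(s)
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_y = linear(static_x)  # captured; static_x / static_y now live at fixed addresses
    static_x.copy_(torch.randn(8, 8, device="cuda"))
    graph.replay()  # re-runs the captured kernels; static_y is refreshed in place
    return static_y
# This is why `forward` below copies batch tensors into cuda_graph['input_ids'], ['slots'], etc.
# before calling cuda_graph['graph'].replay(): the replay only sees those pre-registered buffers.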
graph = torch.cuda.CUDAGraph() self.cuda_graphs[bs] = {'input_ids': input_ids, 'position_ids': position_ids, 'kv_cache': self.kv_cache, 'block_tables': block_tables, 'slots': slots, 'input_lengths': input_lengths_tensor, 'prefix_lengths': prefix_lengths_tensor, 'state': state, 'graph': graph} torch.cuda.synchronize() with self._forward_context(block_tables=block_tables, cu_seqlen_prefill=None, input_lengths_tensor=input_lengths_tensor, state=state, prefix_lens_tensor=prefix_lengths_tensor): seqlen = Seqlen(input_lengths=input_lengths_tensor, prefix_lengths=prefix_lengths_tensor, cu_seqlen_q=None, max_q=1, max_k=max_s) self.model.forward(input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=self.kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None) del seqlen torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): seqlen = Seqlen(input_lengths=input_lengths_tensor, prefix_lengths=prefix_lengths_tensor, cu_seqlen_q=None, max_q=1, max_k=max_s) (logits, speculative_logits) = self.model.forward(input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=self.kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None) self.cuda_graphs[bs]['logits'] = logits self.cuda_graphs[bs]['speculative_logits'] = speculative_logits torch.cuda.synchronize() def warmup(self, batch: FlashCausalLMBatch): empty_cache() try: self.init_kv_cache(batch.num_blocks, self.num_layers, self.num_kv_heads, self.head_size, self.dtype, self.device) max_bt = batch.max_blocks max_s = max_bt * BLOCK_SIZE if SYSTEM == 'rocm' and os.environ.get('PYTORCH_TUNABLEOP_ENABLED', False): torch.cuda.tunable.tuning_enable(False) (_, batch, _) = self.generate_token(batch) except torch.cuda.OutOfMemoryError as e: raise RuntimeError(f'Not enough memory to handle {len(batch.input_ids)} prefill tokens. You need to decrease `--max-batch-prefill-tokens`') from e synchronize(self.device) dtype_size = torch.tensor([], dtype=self.dtype).element_size() cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size free_memory = get_free_memory(self.device, MEMORY_FRACTION) batch_num_blocks = batch.num_blocks if batch is not None else 0 num_blocks = int(free_memory * TGI_WIGGLE_ROOM // total_cache_size) + batch_num_blocks del batch self.init_kv_cache(num_blocks, self.num_layers, self.num_kv_heads, self.head_size, self.dtype, self.device) if SYSTEM == 'rocm': if os.environ.get('PYTORCH_TUNABLEOP_ENABLED') is None or os.environ.get('PYTORCH_TUNABLEOP_ENABLED') == '1': torch.cuda.tunable.enable() if os.environ.get('PYTORCH_TUNABLEOP_TUNING') != '0': torch.cuda.tunable.tuning_enable(True) if os.environ.get('PYTORCH_TUNABLEOP_SEQLENS') is not None: tuning_sequences = [int(val) for val in os.environ['PYTORCH_TUNABLEOP_SEQLENS'].split(',')] elif CUDA_GRAPHS is not None: tuning_sequences = CUDA_GRAPHS else: tuning_sequences = [2, 3, 4, 5, 6, 7] tunableop_filepath = os.path.join(HUGGINGFACE_HUB_CACHE, f"tunableop_{self.model_id.replace('/', '-')}_tp{self.world_size}_rank{self.rank}.csv") log_master(logger.info, f"PyTorch TunableOp (https://github.com/fxmarty/pytorch/tree/2.3-patched/aten/src/ATen/cuda/tunable) is enabled. 
The warmup may take several minutes, picking the ROCm optimal matrix multiplication kernel for the target lengths {', '.join([str(seqlen) for seqlen in tuning_sequences])}, with typical 5-8% latency improvement for small sequence lengths. The picked GEMMs are saved in the file {tunableop_filepath}. To disable TunableOp, please launch TGI with `PYTORCH_TUNABLEOP_ENABLED=0`.") if os.path.isfile(tunableop_filepath): log_master(logger.info, f'The file {tunableop_filepath} already exists and will be reused.') torch.cuda.tunable.read_file(tunableop_filepath) os.makedirs(HUGGINGFACE_HUB_CACHE, exist_ok=True) for seqlen in tuning_sequences: log_master(logger.info, f'Warming up TunableOp for seqlen={seqlen}') self.tunableop_warmup(seqlen) torch.cuda.tunable.write_file(tunableop_filepath) torch.cuda.tunable.tuning_enable(False) else: log_master(logger.info, 'PyTorch ROCm TunableOp (https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/cuda/tunable) is disabled. TunableOp brings an additional 5-8% latency improvement for small sequence lengths but requires a warmup. If necessary, please use the environment variable PYTORCH_TUNABLEOP_ENABLED=1 to enable TunableOp.') if CUDA_GRAPHS: try: log_master(logger.info, f'Cuda Graphs are enabled for sizes {CUDA_GRAPHS}') for bs in CUDA_GRAPHS: if self.speculate is None or self.speculate + 1 <= bs: self.cuda_graph_warmup(bs, max_s, max_bt) except torch.cuda.OutOfMemoryError: logger.exception('Decode cuda graph warmup failed') else: log_master(logger.info, f'Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS}).') return int(num_blocks * BLOCK_SIZE) def tunableop_warmup(self, seqlen: int): input_ids = torch.zeros(seqlen, dtype=torch.int64, device=self.device) position_ids = torch.zeros(seqlen, dtype=torch.int32, device=self.device) slots = torch.arange(seqlen, dtype=torch.int64, device=self.device) input_lengths = torch.ones(seqlen, dtype=torch.int32, device=self.device) prefix_lens_tensor = torch.zeros(seqlen, dtype=torch.int32, device=self.device) cu_seqlen_prefill = torch.tensor([0, seqlen], device=self.device, dtype=torch.int32) seqlen = Seqlen(input_lengths=input_lengths, prefix_lengths=prefix_lens_tensor, cu_seqlen_q=cu_seqlen_prefill, max_q=1, max_k=seqlen) self.model.forward(input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=self.kv_cache, block_tables=None, seqlen=seqlen, slots=slots, max_s=seqlen, lm_head_indices=None, prefill_cache_indices=None) def forward(self, batch: FlashCausalLMBatch, adapter_data: AdapterBatchData) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids (B, speculative_length) = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) 
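# Speculative decoding expands every sequence to new_length = speculative_length + 1
# candidate positions: the input ids, position ids, slots and input lengths were tiled
# above, and the prefix lengths / block tables are expanded the same way just below, so a
# single forward pass scores the regular next token plus all speculative tokens at once.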
prefix_lens_tensor = batch.prefix_lens_tensor.unsqueeze(-1).expand(B, new_length).reshape(-1) block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B * new_length, -1).contiguous() max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor prefix_lens_tensor = batch.prefix_lens_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices if cu_seqlen_prefill is None and self.max_past() is not None: max_s = min(self.max_past(), max_s) bs = input_ids.shape[0] sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs]) if sorted_padded_bs: cuda_graph = self.cuda_graphs[sorted_padded_bs[0]] else: cuda_graph = None if cu_seqlen_prefill is not None or cuda_graph is None: if ATTENTION == 'flashinfer': block_tables = block_tables_to_ragged(block_tables=block_tables, input_lengths=batch.input_lengths, prefix_lens=batch.prefix_lens) with self._forward_context(block_tables=block_tables, cu_seqlen_prefill=cu_seqlen_prefill, input_lengths_tensor=input_lengths, prefix_lens_tensor=prefix_lens_tensor): max_k = (input_lengths + prefix_lens_tensor).max().item() seqlen = Seqlen(input_lengths=input_lengths, prefix_lengths=prefix_lens_tensor, cu_seqlen_q=cu_seqlen_prefill, max_q=max_s, max_k=max_k) (logits, speculative_logits) = self.model.forward(input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=batch.prefill_cache_indices, lm_head_indices=lm_head_indices, adapter_data=adapter_data) if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None return (logits, speculative_logits) cuda_graph['input_ids'][:input_ids.shape[0]] = input_ids cuda_graph['position_ids'][:position_ids.shape[0]] = position_ids if ATTENTION == 'flashinfer': block_tables = block_tables_to_ragged(block_tables=block_tables, input_lengths=batch.input_lengths, prefix_lens=batch.prefix_lens) cuda_graph['block_tables'][:block_tables.shape[0]] = block_tables else: cuda_graph['block_tables'][:block_tables.shape[0], :block_tables.shape[1]] = block_tables cuda_graph['slots'].fill_(0) cuda_graph['slots'][:slots.shape[0]] = slots cuda_graph['input_lengths'].zero_() cuda_graph['input_lengths'][:input_lengths.shape[0]] = input_lengths cuda_graph['prefix_lengths'].zero_() cuda_graph['prefix_lengths'][:prefix_lens_tensor.shape[0]] = prefix_lens_tensor with self._forward_context(block_tables=cuda_graph['block_tables'], cu_seqlen_prefill=None, input_lengths_tensor=cuda_graph['input_lengths'], prefix_lens_tensor=cuda_graph['prefix_lengths'], state=cuda_graph['state']): cuda_graph['graph'].replay() speculative_logits = cuda_graph['speculative_logits'][:bs] if cuda_graph['speculative_logits'] is not None else None logits = cuda_graph['logits'][:bs] return (logits, speculative_logits) @tracer.start_as_current_span('generate_token') def generate_token(self, batch: FlashCausalLMBatch) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]: start = time.time_ns() prefill = batch.cu_seqlen_prefill is not None prefill_logprobs = batch.prefill_next_token_indices is not None adapter_meta = batch.adapter_meta if batch.speculative_ids is not None: (B, 
speculative_length) = batch.speculative_ids.shape new_length = speculative_length + 1 adapter_indices = adapter_meta.adapter_indices.unsqueeze(-1).expand(B, new_length).reshape(-1) adapter_segments = adapter_meta.adapter_segments * new_length adapter_meta = AdapterBatchMetadata(adapter_indices=adapter_indices, adapter_set=adapter_meta.adapter_set, adapter_segments=adapter_segments, segment_indices=adapter_meta.segment_indices) adapter_data = AdapterBatchData.from_meta(adapter_meta, self.layer_to_adapter_weights, prefill, batch.prefill_head_indices) (out, speculative_logits) = self.forward(batch, adapter_data) if prefill: next_token_logits = out[batch.prefill_next_token_indices] if prefill_logprobs else out if speculative_logits is not None: speculative_logits = speculative_logits[batch.prefill_next_token_indices] if prefill_logprobs else speculative_logits next_adapter_indices = batch.adapter_meta.adapter_indices.new_empty(len(batch)) else: next_token_logits = out next_adapter_indices = batch.adapter_meta.adapter_indices speculate = get_speculate() (next_input_ids, next_token_logprobs, logprobs, accepted_ids, speculative_ids) = batch.next_token_chooser(batch.all_input_ids_tensor[:, :batch.max_seqlen], next_token_logits, speculate, batch.speculative_ids, speculative_logits) (batch_top_token_ids, batch_top_token_logprobs) = batch_top_tokens(batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids) if prefill: if len(batch) > 1 and prefill_logprobs: prefill_tokens_indices = batch.input_ids.new_zeros(len(out)) next_position_ids = batch.position_ids.new_empty(len(batch)) batch.slot_indices = batch.slot_indices[batch.cu_seqlen_prefill[1:] - 1] batch.cu_seqlen_prefill = None else: prefill_logprobs = None next_position_ids = batch.position_ids cumulative_length = 0 generations: List[Generation] = [] stopped = True iterator = zip(batch.input_lengths, batch.all_input_ids, accepted_ids) index = 0 for (i, (input_length, all_input_ids, n_accepted_ids)) in enumerate(iterator): start_index = cumulative_length end_index = cumulative_length + input_length if prefill: out_start_index = batch.prefill_cu_outlens[i] out_end_index = batch.prefill_cu_outlens[i + 1] out_length = out_end_index - out_start_index next_position_ids[i] = batch.position_ids[end_index - 1] next_adapter_indices[i] = batch.adapter_meta.adapter_indices[end_index - 1] if prefill_logprobs: if len(batch) > 1: prefill_tokens_indices[out_start_index:out_end_index - 1] = batch.input_ids[start_index + 1:start_index + out_length] else: prefill_tokens_indices = batch.input_ids[start_index + 1:start_index + out_length] for j in range(n_accepted_ids): batch.all_input_ids_tensor[i, input_length + j] = next_input_ids[index] index += 1 cumulative_length += input_length batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1] batch.speculative_ids = speculative_ids batch.position_ids = next_position_ids + accepted_ids batch.input_lengths_tensor += accepted_ids batch.slot_indices += accepted_ids batch.adapter_meta.adapter_indices = next_adapter_indices if prefill: (adapter_segments, _) = find_segments(batch.adapter_meta.adapter_indices) batch.adapter_meta.adapter_segments = torch.tensor(adapter_segments, dtype=torch.int32, device=batch.adapter_meta.adapter_segments.device) if prefill and prefill_logprobs: prefill_logprobs_tensor = torch.log_softmax(out, -1) prefill_logprobs = torch.gather(prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)) prefill_logprobs = prefill_logprobs.view(-1).tolist() next_token_logprobs = 
next_token_logprobs.tolist() next_token_ids = next_input_ids.tolist() accepted_ids = accepted_ids.tolist() start_decode = time.time_ns() iterator = zip(batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, batch.stopping_criterias, batch.all_input_ids, batch.prefix_ids, batch.next_token_chooser.do_sample, batch.next_token_chooser.seeds, batch.top_n_tokens, accepted_ids, batch_top_token_ids, batch_top_token_logprobs) index = 0 for (i, (request, input_length, prefix_offset, read_offset, stopping_criteria, all_input_ids, prefix_ids, do_sample, seed, top_n_tokens, n_accepted_ids, top_token_ids, top_token_logprobs)) in enumerate(iterator): next_token_texts = [] left = 0 if n_accepted_ids > 1: log_master(logger.debug, f'speculated ids {n_accepted_ids - 1}') current_stopped = False for j in range(index, index + n_accepted_ids): next_token_id = next_token_ids[j] all_input_ids.append(next_token_id) (next_token_text, prefix_offset, read_offset) = self.decode_token(all_input_ids, prefix_offset, read_offset) next_token_texts.append(next_token_text) (stop, reason) = stopping_criteria(next_token_id, next_token_text) if stop: left = index + n_accepted_ids - j - 1 current_stopped = True break else: current_stopped = False stopped = stopped and current_stopped _next_token_ids = next_token_ids[index:index + n_accepted_ids - left] _next_token_logprobs = next_token_logprobs[index:index + n_accepted_ids - left] index += n_accepted_ids if i % self.world_size == self.rank: if stop: (output_text, _, _) = self.decode_token(all_input_ids, prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True) generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed if do_sample else None) else: generated_text = None if prefill and request.prefill_logprobs: out_start_index = batch.prefill_cu_outlens[i] out_end_index = batch.prefill_cu_outlens[i + 1] request_prefill_logprobs = [float('nan')] * (len(prefix_ids) + 1) + prefill_logprobs[out_start_index:out_end_index - 1] prefill_token_ids = all_input_ids[:-1] prefill_texts = self.tokenizer.batch_decode(prefix_ids + prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) prefill_tokens = Tokens(prefix_ids + prefill_token_ids, request_prefill_logprobs, prefill_texts, is_special=[]) else: prefill_tokens = None if top_n_tokens > 0: all_top_tokens = [] for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): toptoken_texts = self.tokenizer.batch_decode(top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) special_toptokens = [token_id in self.all_special_ids for token_id in top_token_ids] top_tokens = Tokens(top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens) all_top_tokens.append(top_tokens) top_tokens = all_top_tokens else: top_tokens = None generation = Generation(request.id, prefill_tokens, Tokens(_next_token_ids, _next_token_logprobs, next_token_texts, [nid in self.all_special_ids for nid in _next_token_ids]), generated_text, top_tokens) generations.append(generation) for next_token_id in _next_token_ids: batch.next_token_chooser = batch.next_token_chooser.advance_grammar_single(i, next_token_id) batch.input_lengths[i] = input_length + n_accepted_ids if batch.input_lengths[i] > batch.max_seqlen: batch.max_seqlen = batch.input_lengths[i] batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset 
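# Per-request bookkeeping: the accepted token count, detokenizer offsets and token history
# are written back onto the batch so the next decode step resumes from the right state.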
batch.all_input_ids[i] = all_input_ids if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, None, (forward_ns, decode_ns)) batch.prefill_cu_outlens = None batch.prefill_head_indices = None batch.prefill_next_token_indices = None forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, batch, (forward_ns, decode_ns)) def _forward_context(self, *, block_tables: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], input_lengths_tensor: torch.Tensor, prefix_lens_tensor: torch.Tensor, state: Optional[Any]=None) -> ContextManager: if ATTENTION != 'flashinfer': return nullcontext() from text_generation_server.layers.attention.flashinfer import use_decode_state, use_prefill_with_paged_kv_state if cu_seqlen_prefill is not None: return use_prefill_with_paged_kv_state(state=state if state is not None else self.prefill_with_paged_kv_state, block_tables=block_tables, cu_seqlens=cu_seqlen_prefill, input_lengths=input_lengths_tensor + prefix_lens_tensor, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads, head_size=self.head_size, page_size=BLOCK_SIZE) else: assert input_lengths_tensor is not None return use_decode_state(state=state if state is not None else self.decode_state, input_lengths=input_lengths_tensor + prefix_lens_tensor, block_tables=block_tables, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads, head_size=self.head_size, page_size=BLOCK_SIZE) def block_tables_to_ragged(*, block_tables: torch.Tensor, input_lengths: List[int], prefix_lens: List[int]) -> torch.Tensor: assert len(input_lengths) == len(prefix_lens) total_len = sum(input_lengths) + sum(prefix_lens) block_tables_ragged = torch.empty(total_len, dtype=torch.int32, device=block_tables.device) offset = 0 for (i, (input_length, prefix_len)) in enumerate(zip(input_lengths, prefix_lens)): seq_len = prefix_len + input_length block_tables_ragged[offset:offset + seq_len] = block_tables[i][:seq_len] offset += seq_len return block_tables_ragged # File: text-generation-inference-main/server/text_generation_server/models/galactica.py import re import torch import torch.distributed from transformers import PreTrainedTokenizerBase from text_generation_server.models.causal_lm import CausalLMBatch from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria from text_generation_server.utils.chunks import concat_text_chunks CUSTOM_SEQ_RE = re.compile('(\\[START_(DNA|SMILES|I_SMILES|AMINO)])(.*?)(\\[END_\\2])') SPLIT_MARKER = f'SPL{1}T-TH{1}S-Pl3A5E' def _insert_split_marker(m: re.Match): (start_token, _, sequence, end_token) = m.groups() sequence = re.sub('(.)', f'{SPLIT_MARKER}\\1', sequence, flags=re.DOTALL) return f'{start_token}{sequence}{SPLIT_MARKER}{end_token}' def escape_custom_split_sequence(text): return CUSTOM_SEQ_RE.sub(_insert_split_marker, text) class GalacticaCausalLMBatch(CausalLMBatch): @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'GalacticaCausalLMBatch': inputs = [] next_token_choosers = [] stopping_criterias = [] prefix_offsets = [] top_n_tokens = [] read_offsets = [] requests_idx_mapping = {} max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for (i, r) in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(escape_custom_split_sequence(concat_text_chunks(r.input_chunks.chunks))) 
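# Galactica wraps DNA / SMILES / amino-acid spans in [START_*] ... [END_*] markers.
# escape_custom_split_sequence (defined above) inserts SPLIT_MARKER before every character
# of such a span, so that the tokenizer ends up splitting these sequences character by
# character. Illustration only, writing M = 'SPL1T-TH1S-Pl3A5E':
#   escape_custom_split_sequence('[START_DNA]AC[END_DNA]')
#   == '[START_DNA]' + M + 'A' + M + 'C' + M + '[END_DNA]'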
next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max(padding_right_offset, stopping_criteria.max_new_tokens) tokenized_inputs = tokenizer(inputs, return_tensors='pt', padding=True, return_token_type_ids=False, truncation=True, max_length=max_truncation).to(device) for _ in pb.requests: input_len = tokenized_inputs['input_ids'].shape[1] prefix_offsets.append(0) read_offsets.append(input_len) input_lengths = tokenized_inputs['attention_mask'].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs['input_ids'] attention_mask = input_ids.new_zeros((pb.size, max_input_length + padding_right_offset)) attention_mask[:, :max_input_length] = tokenized_inputs['attention_mask'] position_ids = tokenized_inputs['attention_mask'].long().cumsum(-1) - 1 position_ids.masked_fill_(tokenized_inputs['attention_mask'] == 0, 1) all_input_ids = tokenized_inputs['input_ids'].T.split(1, dim=1) top_n_tokens_tensor = torch.tensor(top_n_tokens, device=device, dtype=torch.int64) max_tokens = len(inputs) * max_input_length + max_decode_tokens return cls(batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens) # File: text-generation-inference-main/server/text_generation_server/models/globals.py import torch import os from loguru import logger from typing import Dict, Optional from text_generation_server.utils.log import log_master PREFIX_CACHING = os.getenv('USE_PREFIX_CACHING').lower() in {'1', 'true'} log_master(logger.info, f'Using prefix caching = {PREFIX_CACHING}') ATTENTION = os.getenv('ATTENTION') _expected = {'paged', 'flashdecoding', 'flashinfer'} assert ATTENTION in _expected, f'Attention is not valid {ATTENTION}, expected {_expected}' log_master(logger.info, f'Using Attention = {ATTENTION}') if PREFIX_CACHING and ATTENTION not in {'flashinfer', 'flashdecoding'}: raise RuntimeError('Prefix caching is only supported with flashinfer') MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None TGI_WIGGLE_ROOM = float(os.getenv('TGI_WIGGLE_ROOM', '0.95')) assert TGI_WIGGLE_ROOM > 0 assert TGI_WIGGLE_ROOM < 1 BLOCK_SIZE: int if ATTENTION == 'flashdecoding': BLOCK_SIZE = 256 elif ATTENTION == 'flashinfer': BLOCK_SIZE = 1 else: BLOCK_SIZE = 16 cuda_graphs = os.getenv('CUDA_GRAPHS') if cuda_graphs is not None: try: cuda_graphs = [int(item) for item in cuda_graphs.split(',')] except Exception as e: raise RuntimeError(f'Could not parse cuda graphs {cuda_graphs}, expected comma separated list for batch sizes to run on: {e}') else: cuda_graphs = None if cuda_graphs is not None: cuda_graphs.sort(reverse=True) CUDA_GRAPHS = cuda_graphs ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None def set_adapter_to_index(adapter_to_index: Dict[str, int]): global 
ADAPTER_TO_INDEX ADAPTER_TO_INDEX = adapter_to_index def get_adapter_to_index(): global ADAPTER_TO_INDEX return ADAPTER_TO_INDEX # File: text-generation-inference-main/server/text_generation_server/models/idefics.py import torch import torch.distributed from typing import Optional from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig from text_generation_server.models.custom_modeling.idefics_processing import IdeficsProcessor from transformers import LlamaTokenizerFast from text_generation_server.models.custom_modeling.idefics_modeling import IdeficsForVisionText2Text from text_generation_server.models.idefics_causal_lm import IdeficsCausalLM from text_generation_server.utils import initialize_torch_distributed, weight_files, Weights from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.import_utils import SYSTEM class IDEFICSSharded(IdeficsCausalLM): def __init__(self, model_id: str, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False): self.quantize = quantize (self.process_group, rank, world_size) = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f'cuda:{rank}') dtype = torch.float16 if dtype is None else dtype elif SYSTEM == 'ipex': if hasattr(torch, 'xpu') and torch.xpu.is_available(): device = torch.device(f'xpu:{rank}') dtype = torch.float16 if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.bfloat16 if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype (self.device, self.dtype) = (device, dtype) config = IdeficsConfig.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) config.quantize = quantize config.speculator = speculator config.vision_config.quantize = quantize tokenizer = LlamaTokenizerFast.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) self.processor = IdeficsProcessor.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) weights_loader = get_loader(quantize=quantize, model_id=model_id, revision=revision) torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension='.safetensors') weights = Weights(filenames, device=device, dtype=dtype, process_group=self.process_group, weights_loader=weights_loader) model = IdeficsForVisionText2Text(config, weights) torch.distributed.barrier(group=self.process_group) super(IdeficsCausalLM, self).__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size) # File: text-generation-inference-main/server/text_generation_server/models/idefics_causal_lm.py from io import BytesIO from PIL import Image import torch import time from dataclasses import dataclass from opentelemetry import trace from transformers import AutoProcessor, AutoTokenizer, PreTrainedTokenizerBase, ProcessorMixin from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.models.types import Batch, Tokens, Generation, GeneratedText from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling tracer = 
trace.get_tracer(__name__) @dataclass class IdeficsCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] input_ids: torch.Tensor attention_mask: torch.Tensor position_ids: torch.Tensor pixel_values: Optional[torch.Tensor] image_hidden_states: Optional[torch.Tensor] image_attention_mask: Optional[torch.Tensor] past_key_values: Optional[List[Tuple]] all_input_ids: List[torch.Tensor] input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] max_input_length: int padding_right_offset: int max_tokens: int keys_head_dim_last: bool = True def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch(id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens) @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'IdeficsCausalLMBatch': raise NotImplementedError @classmethod def from_pb_processor(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, processor: ProcessorMixin, config, dtype: torch.dtype, device: torch.device) -> 'IdeficsCausalLMBatch': inputs = [] next_token_choosers = [] stopping_criterias = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for (i, r) in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.input_chunks.chunks) next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) stopping_criterias.append(stopping_criteria) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max(padding_right_offset, stopping_criteria.max_new_tokens) prompts = [] for inp in inputs: prompt = [] for chunk in inp: chunk_type = chunk.WhichOneof('chunk') if chunk_type == 'text': prompt.append(chunk.text) elif chunk_type == 'image': image = Image.open(BytesIO(chunk.image.data)) prompt.append(image) else: raise RuntimeError(f'Invalid chunk type {chunk_type}') prompts.append(prompt) tokenized_inputs = processor(prompts, return_tensors='pt', padding=True, truncation=True, max_length=max_truncation).to(device) for _ in pb.requests: input_len = tokenized_inputs['input_ids'].shape[1] prefix_offsets.append(input_len - 5) read_offsets.append(input_len) input_lengths = tokenized_inputs['attention_mask'].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs['input_ids'] pixel_values = tokenized_inputs.get('pixel_values', None) image_hidden_states = None attention_mask = input_ids.new_zeros((pb.size, max_input_length + padding_right_offset)) attention_mask[:, :max_input_length] = tokenized_inputs['attention_mask'] if pixel_values is None: image_attention_mask = None else: image_attention_mask = input_ids.new_zeros((pb.size, max_input_length + padding_right_offset, pixel_values.size(1))) image_attention_mask[:, :max_input_length, :] = tokenized_inputs['image_attention_mask'] position_ids = tokenized_inputs['attention_mask'].long().cumsum(-1) - 1 position_ids.masked_fill_(tokenized_inputs['attention_mask'] == 0, 1) all_input_ids = tokenized_inputs['input_ids'].T.split(1, dim=1) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls(batch_id=pb.id, requests=pb.requests, 
requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens) @tracer.start_as_current_span('filter') def filter(self, request_ids: List[int]) -> Optional['IdeficsCausalLMBatch']: if len(request_ids) == 0: raise ValueError('Batch must have at least one request') if len(request_ids) == len(self): return self keep_indices = [] requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 for (i, request_id) in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) remaining_decode_tokens = stopping_criteria.max_new_tokens - stopping_criteria.current_tokens total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max(new_padding_right_offset, remaining_decode_tokens) input_ids = self.input_ids[keep_indices] position_ids = self.position_ids[keep_indices] self.attention_mask = self.attention_mask[keep_indices, -(self.padding_right_offset + max_input_length):self.attention_mask.shape[1] - self.padding_right_offset + new_padding_right_offset] pixel_values = self.pixel_values[keep_indices] self.image_attention_mask = self.image_attention_mask[keep_indices, -(self.padding_right_offset + max_input_length):self.image_attention_mask.shape[1] - self.padding_right_offset + new_padding_right_offset, :] if self.image_hidden_states is None: image_hidden_states = None else: image_hidden_states = self.image_hidden_states[keep_indices] if type(self.past_key_values[0]) is tuple: self.past_key_values = [list(layer) for layer in self.past_key_values] past_kv_length = max_input_length - 1 for layer in self.past_key_values: (past_keys, past_values) = layer if len(past_keys.shape) == 3: past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:]) past_values = past_values.view(len(self), -1, *past_values.shape[-2:]) if self.keys_head_dim_last: layer[0] = past_keys[keep_indices, :, -past_kv_length:, :] else: layer[0] = past_keys[keep_indices, :, :, -past_kv_length:] del past_keys layer[1] = past_values[keep_indices, :, -past_kv_length:, :] del past_values max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.pixel_values = pixel_values self.image_hidden_states = image_hidden_states self.position_ids = position_ids 
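# filter() mutates the batch in place: tensors were index-selected with keep_indices, the
# attention / image-attention masks were re-windowed for the new right padding, and the
# past key/value cache was trimmed to the kept requests above; the remaining per-request
# lists are reassigned below.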
self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens return self @classmethod @tracer.start_as_current_span('concatenate') def concatenate(cls, batches: List['IdeficsCausalLMBatch']) -> 'IdeficsCausalLMBatch': total_batch_size = 0 max_input_length = 0 max_num_images = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) max_num_images = max(max_num_images, batch.pixel_values.size(1)) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] max_tokens = 0 input_ids = None attention_mask = None position_ids = None pixel_values = None image_hidden_states = None image_attention_mask = None past_key_values = [] start_index = 0 for (i, batch) in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: for (k, v) in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index end_index = start_index + len(batch) if batch.past_key_values is None: raise ValueError('only concatenate prefilled batches') if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) input_ids[start_index:end_index] = batch.input_ids if attention_mask is None: attention_mask = batch.attention_mask.new_zeros((total_batch_size, max_input_length + padding_right_offset)) curr_batch_max_num_images = batch.pixel_values.size(1) if pixel_values is None: pixel_values = batch.pixel_values.new_zeros((total_batch_size, max_num_images, 3, 224, 224)) pixel_values[start_index:end_index, :curr_batch_max_num_images] = batch.pixel_values if image_attention_mask is None: image_attention_mask = batch.image_attention_mask.new_zeros((total_batch_size, max_input_length + padding_right_offset, max_num_images)) left_offset = max_input_length - batch.max_input_length batch_left_offset = batch.attention_mask.shape[1] - batch.max_input_length - batch.padding_right_offset attention_mask[start_index:end_index, left_offset:-padding_right_offset] = batch.attention_mask[:, batch_left_offset:-batch.padding_right_offset] image_attention_mask[start_index:end_index, left_offset:-padding_right_offset, :curr_batch_max_num_images] = batch.image_attention_mask[:, batch_left_offset:-batch.padding_right_offset, :] if position_ids is None: position_ids = batch.position_ids.new_empty((total_batch_size, 1)) position_ids[start_index:end_index] = batch.position_ids if isinstance(batch.past_key_values[0], tuple): batch.past_key_values = [[t.view(len(batch), -1, *t.shape[-2:]) for t in layer] for layer in batch.past_key_values] elif len(batch.past_key_values[0][0].shape) == 3: for layer in batch.past_key_values: for (k, t) in enumerate(layer): layer[k] = t.view(len(batch), -1, *t.shape[-2:]) max_tokens += batch.max_tokens + 
(max_input_length - batch.max_input_length) * len(batch) start_index = end_index first_past_kvs = batches[0].past_key_values (_, num_heads, padded_sequence_length, head_dim) = first_past_kvs[0][1].shape padded_past_values_shape = (total_batch_size, num_heads, max_input_length - 1, head_dim) if batches[0].keys_head_dim_last: padded_past_keys_shape = padded_past_values_shape else: padded_past_keys_shape = (total_batch_size, num_heads, head_dim, max_input_length - 1) for j in range(len(first_past_kvs)): padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape) start_index = 0 for batch in batches: past_keys = batch.past_key_values[j][0] batch.past_key_values[j][0] = None end_index = start_index + len(batch) past_seq_len = batch.max_input_length - 1 if batch.keys_head_dim_last: padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = past_keys[:, :, -past_seq_len:, :] else: padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = past_keys[:, :, :, -past_seq_len:] del past_keys start_index = end_index padded_past_values = first_past_kvs[j][1].new_zeros(padded_past_values_shape) start_index = 0 for batch in batches: past_values = batch.past_key_values[j][1] batch.past_key_values[j][1] = None end_index = start_index + len(batch) past_seq_len = batch.max_input_length - 1 padded_past_values[start_index:end_index, :, -past_seq_len:, :] = past_values[:, :, -past_seq_len:, :] del past_values start_index = end_index past_key_values.append([padded_past_keys, padded_past_values]) return cls(batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=past_key_values, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens) def __len__(self): return len(self.requests) class IdeficsCausalLM(Model): def __init__(self, model_id: str, revision: Optional[str]=None, quantize: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False): self.quantize = quantize from text_generation_server.models.custom_modeling.idefics_modeling import IdeficsForVisionText2Text if torch.cuda.is_available(): device = torch.device('cuda') dtype = torch.bfloat16 if dtype is None else dtype else: if quantize: raise ValueError('quantization is not available on CPU') device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) self.processor = AutoProcessor.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) model = IdeficsForVisionText2Text.from_pretrained(model_id, revision=revision, torch_dtype=dtype, device_map='auto' if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, load_in_8bit=quantize == 'bitsandbytes', trust_remote_code=trust_remote_code) if torch.cuda.is_available() and torch.cuda.device_count() == 1: model = model.cuda() if tokenizer.pad_token_id is None: if model.config.pad_token_id is 
not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None: tokenizer.pad_token_id = model.config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({'pad_token': ''}) super(IdeficsCausalLM, self).__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device) @property def batch_type(self) -> Type[IdeficsCausalLMBatch]: return IdeficsCausalLMBatch def forward(self, input_ids, attention_mask, position_ids, pixel_values, image_hidden_states, image_attention_mask, past_key_values: Optional=None) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: kwargs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'pixel_values': pixel_values, 'image_hidden_states': image_hidden_states, 'image_attention_mask': image_attention_mask, 'past_key_values': past_key_values, 'use_cache': True, 'return_dict': True} if self.has_position_ids: kwargs['position_ids'] = position_ids (outputs, speculative_logits) = self.model.forward(**kwargs) return (outputs.logits, speculative_logits, outputs.past_key_values, outputs.image_hidden_states) @tracer.start_as_current_span('generate_token') def generate_token(self, batch: IdeficsCausalLMBatch) -> Tuple[List[Generation], Optional[IdeficsCausalLMBatch], Tuple[int, int]]: start = time.time_ns() attention_mask = batch.attention_mask[:, :-batch.padding_right_offset] if batch.image_attention_mask is None: image_attention_mask = None elif batch.input_ids.size(1) == 1: image_attention_mask = batch.image_attention_mask[:, -(batch.padding_right_offset + 1)].unsqueeze(1) else: image_attention_mask = batch.image_attention_mask[:, :-batch.padding_right_offset] (logits, speculative_logits, past, image_hidden_states) = self.forward(input_ids=batch.input_ids, attention_mask=attention_mask, position_ids=batch.position_ids, pixel_values=batch.pixel_values, image_hidden_states=batch.image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=batch.past_key_values) logits[:, 32000:32001] = torch.finfo(logits.dtype).min start_decode = time.time_ns() generations: List[Generation] = [] stopped = True iterator = zip(batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids) for (i, (request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids)) in enumerate(iterator): (next_token_id, logprobs) = next_token_chooser(all_input_ids.view(1, -1), logits[-1:, :]) all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() (next_token_text, prefix_offset, read_offset) = self.decode_token(all_input_ids[:, 0], prefix_offset, read_offset) (stop, reason) = stopping_criteria(next_token_id_squeezed, next_token_text) if not stop: stopped = False if i % self.world_size == self.rank: if stop: (output_text, _, _) = self.decode_token(all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True) if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed) 
else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: prefill_logprobs = [float('nan')] + torch.log_softmax(logits, -1).gather(1, all_input_ids[1:]).squeeze(1)[-new_input_length:-1].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode(prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) prefill_tokens = Tokens(prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[]) else: prefill_tokens = None top_tokens = None generation = Generation(request.id, prefill_tokens, Tokens([next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids]), generated_text, top_tokens) generations.append(generation) batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(next_token_id_squeezed.item()) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, None, (forward_ns, decode_ns)) batch.input_ids = batch.input_ids[:, :1] batch.attention_mask[:, -batch.padding_right_offset] = 1 batch.image_attention_mask[:, -batch.padding_right_offset, :] = batch.image_attention_mask[:, -(batch.padding_right_offset + 1), :] batch.padding_right_offset -= 1 batch.position_ids = batch.position_ids[:, -1:] + 1 batch.past_key_values = past batch.image_hidden_states = image_hidden_states forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, batch, (forward_ns, decode_ns)) # File: text-generation-inference-main/server/text_generation_server/models/mamba.py import torch import torch.distributed from transformers import AutoTokenizer, PreTrainedTokenizerBase from typing import Optional from text_generation_server.models.custom_modeling.mamba_modeling import MambaConfig from loguru import logger from text_generation_server.pb import generate_pb2 from text_generation_server.utils import initialize_torch_distributed, weight_files, Weights from text_generation_server.models.globals import CUDA_GRAPHS, MEM_POOL import time from text_generation_server.models.custom_modeling.mamba_modeling import MambaModel, InferenceParams from text_generation_server.models import Model from typing import Any, List, Tuple, Type, Dict from text_generation_server.models.types import Batch, Tokens, Generation, GeneratedText from text_generation_server.utils.chunks import concat_text_chunks from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.tokens import batch_top_tokens, Sampling from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria def new_inference_params(n_blocks: int, batch_size: int, d_inner: int, d_conv: int, d_state: int, seqlen_offset: int, dtype: torch.dtype, device: torch.device): max_seqlen = 0 conv_states = torch.zeros((n_blocks, batch_size, d_inner, d_conv), device=device, dtype=dtype) ssm_states = torch.zeros((n_blocks, batch_size, d_inner, d_state), device=device, dtype=dtype) inference_params = InferenceParams(max_seqlen=max_seqlen, max_batch_size=batch_size, seqlen_offset=seqlen_offset, conv_states=conv_states, ssm_states=ssm_states) return inference_params @dataclass 
class MambaBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] input_ids: torch.Tensor all_input_ids: List[torch.Tensor] input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor max_input_length: int padding_right_offset: int max_tokens: int keys_head_dim_last: bool = True inference_params: Optional[Dict[str, Any]] = None def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch(id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens) @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'MambaBatch': inputs = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for (i, r) in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(concat_text_chunks(r.input_chunks.chunks)) next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max(padding_right_offset, stopping_criteria.max_new_tokens) tokenized_inputs = tokenizer(inputs, return_tensors='pt', padding=True, return_token_type_ids=False, truncation=True, max_length=max_truncation).to(device) for _ in pb.requests: input_len = tokenized_inputs['input_ids'].shape[1] prefix_offsets.append(input_len - 5) read_offsets.append(input_len) input_lengths = tokenized_inputs['attention_mask'].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs['input_ids'] all_input_ids = tokenized_inputs['input_ids'].T.split(1, dim=1) top_n_tokens_tensor = torch.tensor(top_n_tokens, device=device, dtype=torch.int64) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls(batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens) def filter(self, request_ids: List[int]) -> Optional['MambaBatch']: if len(request_ids) == 0: raise ValueError('Batch must have at least one request') if len(request_ids) == len(self): return self keep_indices = [] requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 indices = [] for (i, request_id) in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) 
read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) indices.append(idx) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) top_n_tokens.append(self.top_n_tokens[idx]) remaining_decode_tokens = stopping_criteria.max_new_tokens - stopping_criteria.current_tokens total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max(new_padding_right_offset, remaining_decode_tokens) input_ids = self.input_ids[keep_indices] top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.top_n_tokens = top_n_tokens self.top_n_tokens_tensor = top_n_tokens_tensor self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens self.inference_params.conv_states = self.inference_params.conv_states[:, indices] self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices] return self @classmethod def concatenate(cls, batches: List['MambaBatch']) -> 'MambaBatch': total_batch_size = 0 max_input_length = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_tokens = 0 seqlen_offset = 0 (n_blocks, _, d_inner, d_conv) = batches[0].inference_params.conv_states.shape (_, _, _, d_state) = batches[0].inference_params.ssm_states.shape dtype = batches[0].inference_params.conv_states.dtype device = batches[0].inference_params.conv_states.device inference_params = new_inference_params(n_blocks=n_blocks, batch_size=total_batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=device, dtype=dtype) input_ids = None top_n_tokens_tensor = None start_index = 0 for (i, batch) in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: for (k, v) in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index end_index = start_index + len(batch) if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) input_ids[start_index:end_index] = batch.input_ids if top_n_tokens_tensor is None: top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(total_batch_size) top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor 
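# Unlike the attention-based batches, Mamba's recurrent state lives in fixed-size
# conv/ssm tensors, so concatenation only has to copy each sub-batch's states into its
# slice of the freshly allocated inference_params (done just below); no padded KV cache
# needs to be rebuilt.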
max_tokens += batch.max_tokens + (max_input_length - batch.max_input_length) * len(batch) inference_params.max_seqlen = max(inference_params.max_seqlen, batch.inference_params.max_seqlen) assert batch.inference_params.seqlen_offset != 0, 'Invalid seqlen offset' inference_params.seqlen_offset = max(inference_params.seqlen_offset, batch.inference_params.seqlen_offset) inference_params.conv_states[:, start_index:end_index] = batch.inference_params.conv_states inference_params.ssm_states[:, start_index:end_index] = batch.inference_params.ssm_states start_index = end_index return cls(batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, inference_params=inference_params) def __len__(self): return len(self.requests) class Mamba(Model): def __init__(self, model_id: str, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False): self.quantize = quantize (self.process_group, _rank, world_size) = initialize_torch_distributed() if world_size > 1: raise RuntimeError('Mamba does not support Tensor Parallelism (TP)') self.cuda_graphs = {} if torch.cuda.is_available(): device = torch.device('cuda') dtype = torch.bfloat16 if dtype is None else dtype else: if quantize: raise ValueError('quantization is not available on CPU') device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b', revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) config = MambaConfig.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) tokenizer.bos_token_id = config.bos_token_id tokenizer.eos_token_id = config.eos_token_id tokenizer.pad_token = tokenizer.eos_token config.quantize = quantize config.speculator = speculator torch.distributed.barrier(group=self.process_group) weights_loader = get_loader(quantize=quantize, model_id=model_id, revision=revision) filenames = weight_files(model_id, revision=revision, extension='.safetensors') weights = Weights(filenames, device, dtype, process_group=self.process_group, weights_loader=weights_loader) model = MambaModel(config, weights) torch.distributed.barrier(group=self.process_group) super(Mamba, self).__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device) @property def batch_type(self) -> Type[MambaBatch]: return MambaBatch def warmup(self, batch) -> Optional[int]: if CUDA_GRAPHS: if self.speculate is None or self.speculate == 0: try: logger.info(f'Cuda Graphs are enabled for sizes {CUDA_GRAPHS}') for bs in CUDA_GRAPHS: self.cuda_graph_warmup(bs) except Exception: logger.exception('Decode cuda graph warmup failed') else: logger.info(f'Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS}).') return None def cuda_graph_warmup(self, batch_size: int): input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device) n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = 
self.model.config.d_conv d_inner = self.model.config.d_inner seqlen_offset = 1 inference_params = new_inference_params(n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype) graph = torch.cuda.CUDAGraph() torch.cuda.synchronize() self.model.forward(input_ids=input_ids, inference_params=inference_params) torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): (logits, speculative_logits) = self.model.forward(input_ids=input_ids, inference_params=inference_params) torch.cuda.synchronize() graph_dict = {'input_ids': input_ids, 'inference_params': inference_params, 'graph': graph, 'logits': logits, 'speculative_logits': speculative_logits} self.cuda_graphs[batch_size] = graph_dict def tunableop_warmup(self, batch_size: int, seqlen: int): input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device) n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv d_inner = self.model.config.d_inner seqlen_offset = 1 inference_params = new_inference_params(n_blocks=n_blocks, batch_size=seqlen, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype) self.model.forward(input_ids=input_ids, inference_params=inference_params) def forward(self, input_ids: torch.Tensor, inference_params: Any) -> Tuple[torch.Tensor, torch.Tensor]: bs = input_ids.shape[0] padded_bs = bs if bs == 3: padded_bs = 4 elif 3 < bs <= 8: padded_bs = 8 elif bs > 8: padded_bs = (bs + 7) // 8 * 8 cuda_graph = self.cuda_graphs.get(padded_bs, None) is_prefill = inference_params is None or inference_params.seqlen_offset == 0 if is_prefill or cuda_graph is None: return self.model(input_ids, inference_params=inference_params) cuda_graph['input_ids'][:bs] = input_ids cuda_graph['inference_params'].conv_states[:, :bs] = inference_params.conv_states cuda_graph['inference_params'].ssm_states[:, :bs] = inference_params.ssm_states cuda_graph['graph'].replay() inference_params.conv_states.copy_(cuda_graph['inference_params'].conv_states[:, :bs]) inference_params.ssm_states.copy_(cuda_graph['inference_params'].ssm_states[:, :bs]) speculative_logits = cuda_graph['speculative_logits'][:bs] if cuda_graph['speculative_logits'] is not None else None logits = cuda_graph['logits'][:bs] return (logits, speculative_logits) def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: start = time.time_ns() input_ids = batch.input_ids (batch_size, max_seqlen) = input_ids.shape if batch.inference_params is None: seqlen_offset = 0 n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv d_inner = self.model.config.d_inner inference_params = new_inference_params(n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype) batch.inference_params = inference_params (logits, speculative_logits) = self.forward(input_ids, inference_params=batch.inference_params) generations: List[Generation] = [] stopped = True accepted_ids = torch.ones_like(batch.input_ids)[:, 0] (batch_top_token_ids, batch_top_token_logprobs) = batch_top_tokens(batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), accepted_ids) start_decode = time.time_ns() iterator = zip(batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, 
batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, batch.top_n_tokens, batch_top_token_ids, batch_top_token_logprobs) for (i, (request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, top_n_tokens, top_token_ids, top_token_logprobs)) in enumerate(iterator): (next_token_id, logprobs) = next_token_chooser(all_input_ids.view(1, -1), logits[-1:, :]) all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() (next_token_text, prefix_offset, read_offset) = self.decode_token(all_input_ids[:, 0], prefix_offset, read_offset) (stop, reason) = stopping_criteria(next_token_id_squeezed, next_token_text) if not stop: stopped = False if i % self.world_size == self.rank: if stop: (output_text, _, _) = self.decode_token(all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True) if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed) else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: prefill_logprobs = [float('nan')] + torch.log_softmax(logits, -1).gather(1, all_input_ids[1:]).squeeze(1)[-new_input_length:-1].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode(prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) prefill_tokens = Tokens(prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[]) else: prefill_tokens = None if top_n_tokens > 0: toptoken_texts = self.tokenizer.batch_decode(top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) special_toptokens = [token_id in self.all_special_ids for token_id in top_token_ids] top_tokens = Tokens(top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens) else: top_tokens = None generation = Generation(request.id, prefill_tokens, Tokens([next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids]), generated_text, top_tokens) generations.append(generation) batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(next_token_id_squeezed.item()) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, None, (forward_ns, decode_ns)) batch.input_ids = batch.input_ids[:, :1] forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, batch, (forward_ns, decode_ns)) # File: text-generation-inference-main/server/text_generation_server/models/model.py import inspect import torch from abc import ABC, abstractmethod from typing import List, Tuple, Optional, TypeVar, Type, Dict from collections import defaultdict from transformers import PreTrainedTokenizerBase from text_generation_server.models.types import Batch, Generation from text_generation_server.utils.speculate import get_speculate from 
text_generation_server.pb.generate_pb2 import InfoResponse from text_generation_server.adapters.weights import LayerAdapterWeights BASE_MODEL_ADAPTER_ID = '__base_model__' B = TypeVar('B', bound=Batch) class Model(ABC): def __init__(self, model_id: str, model: torch.nn.Module, tokenizer: PreTrainedTokenizerBase, requires_padding: bool, dtype: torch.dtype, device: torch.device, rank: int=0, world_size: int=1, sliding_window: Optional[int]=None, speculate: Optional[int]=None, adapter_id: str=BASE_MODEL_ADAPTER_ID): self.model_id = model_id self.model = model.eval() self.tokenizer = tokenizer other_special_ids = {id for (id, token) in tokenizer.added_tokens_decoder.items() if token.special} self.all_special_ids = set(tokenizer.all_special_ids) self.all_special_ids.update(other_special_ids) self.requires_padding = requires_padding self.dtype = dtype self.device = device self.rank = rank self.world_size = world_size self.sliding_window = sliding_window if sliding_window != -1 else None self.layer_to_adapter_weights: Dict[str, LayerAdapterWeights] = defaultdict(LayerAdapterWeights) self.loaded_adapters = set() self.static_adapter_id = adapter_id if speculate is None: speculate = get_speculate() self.speculate = speculate self.has_position_ids = inspect.signature(model.forward).parameters.get('position_ids', None) is not None self.check_initialized() @property def info(self) -> InfoResponse: if self.requires_padding and self.sliding_window is not None: raise NotImplementedError('sliding_window is not implemented with padding') return InfoResponse(requires_padding=self.requires_padding, dtype=str(self.dtype), device_type=self.device.type, window_size=self.sliding_window, speculate=self.speculate) @property @abstractmethod def batch_type(self) -> Type[B]: raise NotImplementedError @abstractmethod def generate_token(self, batch: B) -> Tuple[List[Generation], Optional[B], Tuple[int, int]]: raise NotImplementedError def warmup(self, batch: B) -> Optional[int]: self.generate_token(batch) return None def decode_token(self, all_input_ids: List[int], prefix_offset: int=0, read_offset: int=0, skip_special_tokens: bool=False) -> Tuple[str, int, int]: prefix_text = self.tokenizer.decode(all_input_ids[prefix_offset:read_offset], skip_special_tokens=skip_special_tokens) new_text = self.tokenizer.decode(all_input_ids[prefix_offset:], skip_special_tokens=skip_special_tokens) if len(new_text) > len(prefix_text) and (not new_text.endswith('�')): new_text = new_text[len(prefix_text):] return (new_text, read_offset, len(all_input_ids)) else: return ('', prefix_offset, read_offset) def check_initialized(self): uninitialized_parameters = [] for (n, p) in self.model.named_parameters(): if p.data.device == torch.device('meta'): uninitialized_parameters.append(n) if uninitialized_parameters: raise RuntimeError(f'found uninitialized parameters in model {self.__class__.__name__}: {uninitialized_parameters}') # File: text-generation-inference-main/server/text_generation_server/models/pali_gemma.py from io import BytesIO from PIL import Image import torch import torch.distributed from opentelemetry import trace from typing import Iterable from text_generation_server.models.vlm_causal_lm import VlmCausalLMBatch, image_text_replacement from text_generation_server.pb.generate_pb2 import Request tracer = trace.get_tracer(__name__) class PaliGemmaBatch(VlmCausalLMBatch): @classmethod def batch_tokenized_inputs(cls, requests: Iterable[Request], tokenizer, processor, config): batch_inputs = [] image_inputs = [] max_truncation = 0 
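# --- Illustrative sketch (not from the upstream sources): the incremental detokenization
# scheme used by Model.decode_token above. A sliding (prefix_offset, read_offset) window is
# decoded twice per step so that only text that is stable (longer than the previous decode and
# not ending in a replacement character) is emitted. ToyTokenizer is a hypothetical stand-in
# for a real PreTrainedTokenizerBase; the logic mirrors decode_token under that assumption.
from typing import List, Tuple


class ToyTokenizer:
    # Minimal decode-only tokenizer: token ids map to text fragments.
    vocab = {0: 'Hel', 1: 'lo', 2: ' wor', 3: 'ld'}

    def decode(self, ids: List[int], skip_special_tokens: bool = False) -> str:
        return ''.join(self.vocab[i] for i in ids)


def incremental_decode(tokenizer, all_input_ids: List[int], prefix_offset: int = 0, read_offset: int = 0) -> Tuple[str, int, int]:
    # Decode the previously emitted window, then the window extended by the newest token(s).
    prefix_text = tokenizer.decode(all_input_ids[prefix_offset:read_offset])
    new_text = tokenizer.decode(all_input_ids[prefix_offset:])
    if len(new_text) > len(prefix_text) and not new_text.endswith('�'):
        # Stable text appeared: emit only the new suffix and slide the window forward.
        return new_text[len(prefix_text):], read_offset, len(all_input_ids)
    # Otherwise hold the output back until more tokens make the decode unambiguous.
    return '', prefix_offset, read_offset


if __name__ == '__main__':
    tokenizer, ids, prefix_offset, read_offset, streamed = ToyTokenizer(), [], 0, 0, ''
    for token_id in (0, 1, 2, 3):
        ids.append(token_id)
        text, prefix_offset, read_offset = incremental_decode(tokenizer, ids, prefix_offset, read_offset)
        streamed += text
    print(streamed)  # -> Hello world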
for r in requests: full_text = '' image_id = 0 for chunk in r.input_chunks.chunks: chunk_type = chunk.WhichOneof('chunk') if chunk_type == 'text': full_text += '<bos>' + chunk.text + '\n' elif chunk_type == 'image': image = Image.open(BytesIO(chunk.image.data)) image = image.convert('RGB') image_input = processor.image_processor(image, return_tensors='pt') full_text += image_text_replacement(processor, image_input, config, image_id) image_inputs.append(image_input) else: raise RuntimeError(f'Invalid chunk type {chunk_type}') batch_inputs.append(full_text) max_truncation = max(max_truncation, r.truncate) batch_tokenized_inputs = tokenizer(batch_inputs, truncation=True, max_length=max_truncation, add_special_tokens=False)['input_ids'] if image_inputs: image_input = image_inputs[0] new_image_inputs = {'pixel_values': torch.cat([img['pixel_values'] for img in image_inputs], dim=0)} if 'pixel_attention_mask' in image_input: new_image_inputs['pixel_attention_mask'] = torch.cat([img['pixel_attention_mask'] for img in image_inputs], dim=0) if 'image_sizes' in image_input: new_image_inputs['image_sizes'] = torch.cat([img['image_sizes'] for img in image_inputs], dim=0) image_inputs = new_image_inputs else: image_inputs = None return (batch_tokenized_inputs, image_inputs) # File: text-generation-inference-main/server/text_generation_server/models/seq2seq_lm.py import torch import torch.distributed import time from dataclasses import dataclass from opentelemetry import trace from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, PreTrainedTokenizerBase, AutoConfig from typing import Optional, Tuple, List, Type, Dict from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils import initialize_torch_distributed, weight_files, Weights from text_generation_server.utils.chunks import concat_text_chunks from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.models import Model from text_generation_server.models.types import GeneratedText, Batch, Generation, Tokens from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling tracer = trace.get_tracer(__name__) @dataclass class Seq2SeqLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] input_ids: Optional[torch.Tensor] attention_mask: torch.Tensor decoder_input_ids: torch.Tensor decoder_attention_mask: Optional[torch.Tensor] encoder_last_hidden_state: Optional[torch.Tensor] all_decoder_input_ids: List[torch.Tensor] past_key_values: Optional[List[Tuple]] input_lengths: List[int] decoder_input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor max_input_length: int max_decoder_input_length: int padding_right_offset: int max_tokens: int def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch(id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens) @classmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'Seq2SeqLMBatch': inputs = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] decoder_input_lengths = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {}
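# --- Illustrative sketch (not from the upstream sources): the chunk-interleaving pattern used
# by PaliGemmaBatch.batch_tokenized_inputs above. Text chunks are concatenated into the prompt
# while every image chunk is replaced by a run of image placeholder tokens, so the tokenizer
# sees one flat string per request. TextChunk/ImageChunk and the default token strings are
# hypothetical stand-ins for the protobuf input chunks and the model's special tokens.
from dataclasses import dataclass
from typing import List, Union


@dataclass
class TextChunk:
    text: str


@dataclass
class ImageChunk:
    num_image_tokens: int  # e.g. config.text_config.num_image_tokens for PaliGemma


def build_prompt(chunks: List[Union[TextChunk, ImageChunk]], image_token: str = '<image>', bos_token: str = '<bos>') -> str:
    full_text = ''
    for chunk in chunks:
        if isinstance(chunk, TextChunk):
            # Text is prefixed with the BOS token and terminated with a newline, as in the method above.
            full_text += bos_token + chunk.text + '\n'
        else:
            # Each image contributes a fixed-length run of placeholder tokens.
            full_text += image_token * chunk.num_image_tokens
    return full_text


if __name__ == '__main__':
    print(build_prompt([ImageChunk(num_image_tokens=4), TextChunk('Describe the image.')]))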
max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for (i, r) in enumerate(pb.requests): inputs.append(concat_text_chunks(r.input_chunks.chunks)) requests_idx_mapping[r.id] = i decoder_input_lengths.append(1) next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb(r.stopping_parameters, tokenizer) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max(padding_right_offset, stopping_criteria.max_new_tokens) tokenized_inputs = tokenizer(inputs, return_tensors='pt', padding=True, return_token_type_ids=False, truncation=True, max_length=max_truncation).to(device) input_lengths = tokenized_inputs['attention_mask'].sum(1) max_input_length = input_lengths.max() decoder_input_ids = torch.tensor(tokenizer.bos_token_id, device=device).repeat(len(pb.requests)).view(-1, 1) for _ in pb.requests: prefix_offsets.append(0) read_offsets.append(1) all_decoder_input_ids = decoder_input_ids.view(-1).split(1) top_n_tokens_tensor = torch.tensor(top_n_tokens, device=device, dtype=torch.int64) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls(batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=tokenized_inputs['input_ids'], attention_mask=tokenized_inputs['attention_mask'], decoder_input_ids=decoder_input_ids, all_decoder_input_ids=list(all_decoder_input_ids), decoder_attention_mask=None, encoder_last_hidden_state=None, past_key_values=None, input_lengths=input_lengths.tolist(), decoder_input_lengths=decoder_input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), max_decoder_input_length=1, padding_right_offset=padding_right_offset, max_tokens=max_tokens) @tracer.start_as_current_span('filter') def filter(self, request_ids: List[int]) -> Optional['Seq2SeqLMBatch']: if len(request_ids) == 0: raise ValueError('Batch must have at least one request') if len(request_ids) == len(self): return self keep_indices = [] requests_idx_mapping = {} requests = [] input_lengths = [] decoder_input_lengths = [] prefix_offsets = [] read_offsets = [] all_decoder_input_ids = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_input_length = 0 max_decoder_input_length = 0 padding_right_offset = 0 total_remaining_decode_tokens = 0 for (i, request_id) in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_decoder_input_ids.append(self.all_decoder_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) request_decoder_input_length = self.decoder_input_lengths[idx] decoder_input_lengths.append(request_decoder_input_length) max_decoder_input_length = max(max_decoder_input_length, request_decoder_input_length) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) 
top_n_tokens.append(self.top_n_tokens[idx]) remaining_decode_tokens = stopping_criteria.max_new_tokens - stopping_criteria.current_tokens total_remaining_decode_tokens += remaining_decode_tokens padding_right_offset = max(padding_right_offset, remaining_decode_tokens) self.decoder_input_ids = self.decoder_input_ids[keep_indices] self.attention_mask = self.attention_mask[keep_indices, -max_input_length:] if self.decoder_attention_mask is not None: self.decoder_attention_mask = self.decoder_attention_mask[keep_indices, -(self.padding_right_offset + max_decoder_input_length):self.decoder_attention_mask.shape[1] - self.padding_right_offset + padding_right_offset] self.encoder_last_hidden_state = self.encoder_last_hidden_state[keep_indices, -max_input_length:] if type(self.past_key_values[0]) is tuple: self.past_key_values = [[t for t in layer] for layer in self.past_key_values] decoder_past_seq_len = max_decoder_input_length - 1 for layer in self.past_key_values: layer[0] = layer[0][keep_indices, :, -decoder_past_seq_len:] layer[1] = layer[1][keep_indices, :, -decoder_past_seq_len:] layer[2] = layer[2][keep_indices, :, -max_input_length:] layer[3] = layer[3][keep_indices, :, -max_input_length:] top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] max_tokens = len(request_ids) * (max_input_length + max_decoder_input_length) + remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = None self.all_decoder_input_ids = all_decoder_input_ids self.input_lengths = input_lengths self.decoder_input_lengths = decoder_input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.top_n_tokens = top_n_tokens self.top_n_tokens_tensor = top_n_tokens_tensor self.max_input_length = max_input_length self.max_decoder_input_length = max_decoder_input_length self.padding_right_offset = padding_right_offset self.max_tokens = max_tokens return self @classmethod @tracer.start_as_current_span('concatenate') def concatenate(cls, batches: List['Seq2SeqLMBatch']) -> 'Seq2SeqLMBatch': total_batch_size = 0 max_input_length = 0 max_decoder_input_length = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) max_decoder_input_length = max(max_decoder_input_length, batch.max_decoder_input_length) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) requests = [] requests_idx_mapping = {} all_decoder_input_ids = [] input_lengths = [] decoder_input_lengths = [] prefix_offsets = [] read_offsets = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_tokens = 0 attention_mask = None decoder_input_ids = None decoder_attention_mask = None encoder_last_hidden_state = None top_n_tokens_tensor = None past_key_values = [] start_index = 0 for (i, batch) in enumerate(batches): requests.extend(batch.requests) all_decoder_input_ids.extend(batch.all_decoder_input_ids) input_lengths.extend(batch.input_lengths) decoder_input_lengths.extend(batch.decoder_input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: for (k, v) in batch.requests_idx_mapping.items(): 
requests_idx_mapping[k] = v + start_index end_index = start_index + len(batch) if batch.encoder_last_hidden_state is None: raise ValueError('Batch encoder_last_hidden_state cannot be None') if attention_mask is None: attention_mask = batch.attention_mask.new_zeros((total_batch_size, max_input_length)) attention_mask[start_index:end_index, -batch.max_input_length:] = batch.attention_mask[:, -batch.max_input_length:] if decoder_input_ids is None: decoder_input_ids = batch.decoder_input_ids.new_zeros((total_batch_size, 1)) decoder_input_ids[start_index:end_index] = batch.decoder_input_ids if decoder_attention_mask is None: decoder_attention_mask = batch.attention_mask.new_zeros((total_batch_size, max_decoder_input_length + padding_right_offset)) left_offset = max_decoder_input_length - batch.max_decoder_input_length if batch.decoder_attention_mask is None: decoder_attention_mask[start_index:end_index, left_offset:-padding_right_offset] = 1 else: batch_left_offset = batch.decoder_attention_mask.shape[1] - batch.max_decoder_input_length - batch.padding_right_offset decoder_attention_mask[start_index:end_index, left_offset:-padding_right_offset] = batch.decoder_attention_mask[:, batch_left_offset:-batch.padding_right_offset] if encoder_last_hidden_state is None: encoder_last_hidden_state = batch.encoder_last_hidden_state.new_zeros((total_batch_size, max_input_length, batch.encoder_last_hidden_state.shape[-1])) if top_n_tokens_tensor is None: top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(total_batch_size) top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor encoder_last_hidden_state[start_index:end_index, -batch.max_input_length:, :] = batch.encoder_last_hidden_state[:, -batch.max_input_length:, :] batch.encoder_last_hidden_state = None if isinstance(batch.past_key_values[0], tuple): batch.past_key_values = [[t for t in layer] for layer in batch.past_key_values] max_tokens += batch.max_tokens + (max_input_length - batch.max_input_length + max_decoder_input_length - batch.max_decoder_input_length) * len(batch) start_index = end_index first_past_kvs = batches[0].past_key_values (_, num_heads, _, head_dim) = first_past_kvs[0][0].shape padded_dec_t_shape = (total_batch_size, num_heads, max_decoder_input_length - 1, head_dim) padded_enc_t_shape = (total_batch_size, num_heads, max_input_length, head_dim) for j in range(len(first_past_kvs)): past_key_values.append([]) for k in range(0, 2): padded_past_values = first_past_kvs[j][k].new_zeros(padded_dec_t_shape) past_key_values[j].append(padded_past_values) start_index = 0 for batch in batches: t = batch.past_key_values[j][k] batch.past_key_values[j][k] = None end_index = start_index + len(batch) past_seq_len = batch.max_decoder_input_length - 1 padded_past_values[start_index:end_index, :, -past_seq_len:, :] = t[:, :, -past_seq_len:, :] del t start_index = end_index for k in range(2, 4): padded_past_values = first_past_kvs[j][k].new_zeros(padded_enc_t_shape) past_key_values[j].append(padded_past_values) start_index = 0 for batch in batches: t = batch.past_key_values[j][k] batch.past_key_values[j][k] = None end_index = start_index + len(batch) padded_past_values[start_index:end_index, :, -batch.max_input_length:, :] = t[:, :, -batch.max_input_length:, :] del t start_index = end_index return cls(batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=None, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, all_decoder_input_ids=all_decoder_input_ids, 
decoder_attention_mask=decoder_attention_mask, encoder_last_hidden_state=encoder_last_hidden_state, past_key_values=past_key_values, input_lengths=input_lengths, decoder_input_lengths=decoder_input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length, max_decoder_input_length=max_decoder_input_length, padding_right_offset=padding_right_offset, max_tokens=max_tokens) def __len__(self): return len(self.requests) class Seq2SeqLM(Model): def __init__(self, model_id: str, model_class, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, default_dtype=torch.float16, trust_remote_code: bool=False, config_class=AutoConfig, tokenizer_class=AutoTokenizer, aliases=None): self.quantize = quantize (self.process_group, rank, world_size) = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f'cuda:{rank}') dtype = default_dtype if dtype is None else dtype elif SYSTEM == 'ipex': if hasattr(torch, 'xpu') and torch.xpu.is_available(): device = torch.device(f'xpu:{rank}') dtype = default_dtype if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.bfloat16 if dtype is None else dtype else: device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype config = config_class.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code) config.quantize = quantize config.speculator = speculator tokenizer = tokenizer_class.from_pretrained(model_id, revision=revision, padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) tokenizer.bos_token_id = config.decoder_start_token_id weights_loader = get_loader(quantize=quantize, model_id=model_id, revision=revision) torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension='.safetensors') weights = Weights(filenames, device=device, dtype=dtype, process_group=self.process_group, aliases=aliases, weights_loader=weights_loader) if config.quantize in ['awq', 'exl2', 'gptq', 'marlin']: weights._set_gptq_params(model_id, revision) model = model_class(config, weights) torch.distributed.barrier(group=self.process_group) super().__init__(model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size) @classmethod def fallback(cls, model_id: str, revision: Optional[str]=None, quantize: Optional[str]=None, speculator: Optional[str]=None, dtype: Optional[torch.dtype]=None, trust_remote_code: bool=False): if speculator: raise RuntimeError('Speculator decoding is not enabled for AutoModel') if torch.cuda.is_available(): device = torch.device('cuda') dtype = torch.float16 if dtype is None else dtype else: if quantize: raise ValueError('quantization is not available on CPU') device = torch.device('cpu') dtype = torch.float32 if dtype is None else dtype model = AutoModelForSeq2SeqLM.from_pretrained(model_id, revision=revision, torch_dtype=dtype, device_map='auto' if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, load_in_8bit=quantize == 'bitsandbytes', trust_remote_code=trust_remote_code) if torch.cuda.is_available() and torch.cuda.device_count() == 1: model = model.cuda() tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, 
padding_side='left', truncation_side='left', trust_remote_code=trust_remote_code) tokenizer.bos_token_id = model.config.decoder_start_token_id self = cls.__new__(cls) super().__init__(self, model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device) self.quantize = quantize return self @property def batch_type(self) -> Type[Seq2SeqLMBatch]: return Seq2SeqLMBatch def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask: Optional, encoder_last_hidden_state: Optional, past_key_values: Optional=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]]: outputs = self.model.forward(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_last_hidden_state, past_key_values=past_key_values, use_cache=True) if isinstance(outputs, tuple): (outputs, speculative_logits) = outputs else: speculative_logits = None return (outputs.logits, speculative_logits, outputs.encoder_last_hidden_state, outputs.past_key_values) @tracer.start_as_current_span('generate_token') def generate_token(self, batch: Seq2SeqLMBatch) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch], Tuple[int, int]]: start = time.time_ns() if batch.decoder_attention_mask is not None: decoder_attention_mask = batch.decoder_attention_mask[:, :-batch.padding_right_offset] else: decoder_attention_mask = None if batch.encoder_last_hidden_state is not None: encoder_last_hidden_state = [batch.encoder_last_hidden_state] else: encoder_last_hidden_state = None (logits, speculative_logits, encoder_last_hidden_state, past) = self.forward(batch.input_ids, batch.attention_mask, batch.decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, batch.past_key_values) accepted_ids = torch.ones_like(batch.decoder_input_ids)[:, 0] (batch_top_token_ids, batch_top_token_logprobs) = batch_top_tokens(batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), accepted_ids) start_decode = time.time_ns() generations: List[Generation] = [] stopped = True iterator = zip(batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, batch.decoder_input_lengths, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_decoder_input_ids, batch.top_n_tokens, batch_top_token_ids, batch_top_token_logprobs) for (i, (request, input_length, prefix_offset, read_offset, decoder_input_length, logits, next_token_chooser, stopping_criteria, all_decoder_input_ids, top_n_tokens, top_token_ids, top_token_logprobs)) in enumerate(iterator): (next_token_id, logprobs) = next_token_chooser(all_decoder_input_ids.view(1, -1), logits[-1:, :]) all_decoder_input_ids = torch.cat([all_decoder_input_ids, next_token_id.squeeze(1)]) new_decoder_input_length = decoder_input_length + 1 next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() (next_token_text, prefix_offset, read_offset) = self.decode_token(all_decoder_input_ids, prefix_offset, read_offset) (stop, reason) = stopping_criteria(next_token_id, next_token_text) if not stop: stopped = False if i % self.world_size == self.rank: if stop: (output_text, _, _) = self.decode_token(all_decoder_input_ids, prefix_offset=len(all_decoder_input_ids) - decoder_input_length - 1, read_offset=len(all_decoder_input_ids) - decoder_input_length, skip_special_tokens=True) if 
isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed) else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: prefill_tokens = Tokens([self.tokenizer.bos_token_id], [float('nan')], [self.tokenizer.bos_token], [False]) else: prefill_tokens = None if top_n_tokens > 0: all_top_tokens = [] for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): toptoken_texts = self.tokenizer.batch_decode(top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False) special_toptokens = [token_id in self.all_special_ids for token_id in top_token_ids] top_tokens = Tokens(top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens) all_top_tokens.append(top_tokens) top_tokens = all_top_tokens else: top_tokens = None generation = Generation(request.id, prefill_tokens, Tokens([next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids]), generated_text, top_tokens) generations.append(generation) batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(next_token_id_squeezed.item()) batch.decoder_input_ids[i] = next_token_id batch.all_decoder_input_ids[i] = all_decoder_input_ids batch.input_lengths[i] = input_length batch.decoder_input_lengths[i] = new_decoder_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, input_length) batch.max_decoder_input_length = max(batch.max_decoder_input_length, new_decoder_input_length) if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, None, (forward_ns, decode_ns)) batch.input_ids = None batch.encoder_last_hidden_state = encoder_last_hidden_state batch.past_key_values = past if batch.decoder_attention_mask is not None: batch.decoder_attention_mask[:, -batch.padding_right_offset] = 1 batch.padding_right_offset -= 1 forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return (generations, batch, (forward_ns, decode_ns)) # File: text-generation-inference-main/server/text_generation_server/models/types.py import torch from abc import ABC, abstractmethod from dataclasses import dataclass from typing import List, Optional from transformers import PreTrainedTokenizerBase from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason class Batch(ABC): @abstractmethod def to_pb(self) -> generate_pb2.CachedBatch: raise NotImplementedError @classmethod @abstractmethod def from_pb(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device) -> 'Batch': raise NotImplementedError @abstractmethod def filter(self, request_ids: List[int]) -> 'Batch': raise NotImplementedError @classmethod @abstractmethod def concatenate(cls, batches: List['Batch']) -> 'Batch': raise NotImplementedError @abstractmethod def __len__(self): raise NotImplementedError @dataclass class GeneratedText: text: str generated_tokens: int finish_reason: FinishReason seed: Optional[int] def to_pb(self) -> generate_pb2.GeneratedText: return generate_pb2.GeneratedText(text=self.text, generated_tokens=self.generated_tokens, finish_reason=self.finish_reason, seed=self.seed) @dataclass class Tokens: token_ids: List[int] logprobs: List[float] texts: List[str] 
is_special: List[bool] def to_pb(self) -> generate_pb2.Tokens: return generate_pb2.Tokens(ids=self.token_ids, logprobs=self.logprobs, texts=self.texts, is_special=self.is_special) def __len__(self): return len(self.token_ids) @dataclass class Generation: request_id: int prefill_tokens: Optional[Tokens] tokens: Tokens generated_text: Optional[GeneratedText] top_tokens: Optional[List[Tokens]] def to_pb(self) -> generate_pb2.Generation: return generate_pb2.Generation(request_id=self.request_id, prefill_tokens=self.prefill_tokens.to_pb() if self.prefill_tokens is not None else None, tokens=self.tokens.to_pb(), generated_text=self.generated_text.to_pb() if self.generated_text is not None else None, top_tokens=[top_tokens.to_pb() for top_tokens in self.top_tokens] if self.top_tokens is not None else None) # File: text-generation-inference-main/server/text_generation_server/models/vlm_causal_lm.py import torch from PIL import Image from io import BytesIO from opentelemetry import trace from typing import Iterable, Optional, Tuple, List, Type, Dict from transformers import PreTrainedTokenizerBase from transformers.image_processing_utils import select_best_resolution from text_generation_server.pb import generate_pb2 from text_generation_server.models.flash_causal_lm import FlashCausalLMBatch, FlashCausalLM, block_tables_to_ragged from text_generation_server.models.globals import PREFIX_CACHING, ATTENTION from text_generation_server.utils.log import log_master from transformers import AutoProcessor from text_generation_server.layers.attention import Seqlen tracer = trace.get_tracer(__name__) IDEFICS2_FAKE_TOKEN = '<fake_token_around_image>' IDEFICS2_IMAGE_TOKEN = '<image>' def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): if not isinstance(grid_pinpoints, list): raise ValueError('grid_pinpoints should be a list of tuples or lists') (height, width) = select_best_resolution(image_size, grid_pinpoints) return (height // patch_size, width // patch_size) def image_text_replacement(processor, image_input, config, image_id: int) -> str: if config.model_type == 'idefics2': image_seq_len = 64 image_str = f'{IDEFICS2_FAKE_TOKEN}{IDEFICS2_IMAGE_TOKEN * image_seq_len}{IDEFICS2_FAKE_TOKEN}' if processor.image_processor.do_image_splitting: image_str *= 5 return image_str elif config.model_type == 'llava_next': (height, width) = image_input['image_sizes'][image_id] num_features = get_number_of_features(height, width, config) from loguru import logger log_master(logger.info, f'Found {num_features} features in image of resolution {height}x{width}') return '<image>' * num_features elif config.model_type == 'paligemma': return '<image>' * config.text_config.num_image_tokens else: raise RuntimeError(f'Unknown config {config.model_type} for multimodal') def image_text_replacement_fixup(config, text: str) -> str: if config.model_type == 'idefics2': return text.replace(f'{IDEFICS2_FAKE_TOKEN}{IDEFICS2_FAKE_TOKEN}', IDEFICS2_FAKE_TOKEN) return text def get_unpadded_features(original_height: int, original_width: int, npatches: int, num_patch_height: int, num_patch_width: int) -> Tuple[int, int]: current_height = npatches * num_patch_height current_width = npatches * num_patch_width aspect_ratio: float = original_width / original_height current_aspect_ratio: float = current_width / current_height if aspect_ratio > current_aspect_ratio: new_height = original_height * current_width // original_width padding = (current_height - new_height) // 2 current_height = current_height - 2 * padding else: new_width = original_width * current_height //
original_height padding = (current_width - new_width) // 2 current_width = current_width - 2 * padding unpadded_features = current_height * current_width newline_features = current_height return (unpadded_features, newline_features) def get_number_of_features(height: int, width: int, config) -> int: image_grid_pinpoints = config.image_grid_pinpoints image_size = config.vision_config.image_size patch_size = config.vision_config.patch_size assert image_size % patch_size == 0 npatches = image_size // patch_size (num_patch_width, num_patch_height) = get_anyres_image_grid_shape([height, width], image_grid_pinpoints, image_size) (unpadded_features, newline_features) = get_unpadded_features(height, width, npatches, num_patch_height, num_patch_width) base_features = npatches ** 2 return unpadded_features + newline_features + base_features class VlmCausalLMBatch(FlashCausalLMBatch): pixel_values: Optional[List[torch.Tensor]] pixel_attention_mask: Optional[List[torch.Tensor]] image_sizes: Optional[List[Tuple[int, int]]] @classmethod @tracer.start_as_current_span('concatenate') def concatenate(cls, batches): batch = super(VlmCausalLMBatch, cls).concatenate(batches) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None return batch @tracer.start_as_current_span('filter') def filter(self, request_ids: List[int]): batch = super().filter(request_ids) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None return batch @classmethod def batch_tokenized_inputs(cls, requests: Iterable[generate_pb2.Request], tokenizer, processor, config): images = [] for r in requests: for chunk in r.input_chunks.chunks: chunk_type = chunk.WhichOneof('chunk') if chunk_type == 'text': pass elif chunk_type == 'image': image = Image.open(BytesIO(chunk.image.data)) if config.model_type == 'llava_next': images.append(image) else: images.append([image]) else: raise RuntimeError(f'Invalid chunk type {chunk_type}') if images: image_inputs = processor.image_processor(images, return_tensors='pt') else: image_inputs = None batch_inputs = [] max_truncation = 0 image_id = 0 for r in requests: full_text = '' for chunk in r.input_chunks.chunks: chunk_type = chunk.WhichOneof('chunk') if chunk_type == 'text': full_text += chunk.text elif chunk_type == 'image': full_text += image_text_replacement(processor, image_inputs, config, image_id) image_id += 1 full_text = image_text_replacement_fixup(config, full_text) batch_inputs.append(full_text) max_truncation = max(max_truncation, r.truncate) batch_tokenized_inputs = tokenizer(batch_inputs, truncation=True, max_length=max_truncation, add_special_tokens=not config.model_type == 'paligemma')['input_ids'] return (batch_tokenized_inputs, image_inputs) @classmethod def from_pb_processor(cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, processor, config, dtype: torch.dtype, device: torch.device) -> 'VlmCausalLMBatch': (batch_tokenized_inputs, image_inputs) = cls.batch_tokenized_inputs(pb.requests, tokenizer, processor, config) batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device) if image_inputs is not None: batch.pixel_values = image_inputs['pixel_values'].to(device=device) if 'pixel_attention_mask' in image_inputs: batch.pixel_attention_mask = image_inputs['pixel_attention_mask'].to(device=device) else: batch.pixel_attention_mask = None if 'image_sizes' in image_inputs: batch.image_sizes = image_inputs['image_sizes'].to(device=device) else: batch.image_sizes = None else: batch.pixel_values = None 
batch.pixel_attention_mask = None batch.image_sizes = None return batch class VlmCausalLM(FlashCausalLM): def __init__(self, model_id: str, *, processor_class=AutoProcessor, processor_kwargs=None, batch_class=VlmCausalLMBatch, revision, trust_remote_code: bool, **kwargs): if PREFIX_CACHING: raise NotImplementedError('Vlm do not work with prefix caching yet') if processor_kwargs is None: processor_kwargs = {} self.processor = processor_class.from_pretrained(model_id, revision=revision, trust_remote_code=trust_remote_code, **processor_kwargs) self.batch_class = batch_class super().__init__(model_id=model_id, revision=revision, trust_remote_code=trust_remote_code, **kwargs) @property def batch_type(self) -> Type[VlmCausalLMBatch]: return self.batch_class def max_past(self) -> Optional[int]: return getattr(self.model.text_model, 'max_past', None) def forward(self, batch: VlmCausalLMBatch, adapter_data: Optional[Dict[str, torch.Tensor]]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids (B, speculative_length) = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) prefix_lens_tensor = batch.prefix_lens_tensor.unsqueeze(-1).expand(B, new_length).reshape(-1) block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B * new_length, -1).contiguous() max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor prefix_lens_tensor = batch.prefix_lens_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices if cu_seqlen_prefill is None and self.max_past() is not None: max_s = min(self.max_past(), max_s) bs = input_ids.shape[0] bs = input_ids.shape[0] sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs]) if sorted_padded_bs: cuda_graph = self.cuda_graphs[sorted_padded_bs[0]] else: cuda_graph = None if cu_seqlen_prefill is not None or cuda_graph is None: input_lengths = input_lengths + prefix_lens_tensor if PREFIX_CACHING: block_tables = block_tables_to_ragged(block_tables=block_tables, input_lengths=batch.input_lengths, prefix_lens=batch.prefix_lens) with self._forward_context(block_tables=block_tables, cu_seqlen_prefill=cu_seqlen_prefill, input_lengths_tensor=input_lengths, prefix_lens_tensor=prefix_lens_tensor): max_k = (input_lengths + prefix_lens_tensor).max().item() seqlen = Seqlen(input_lengths=input_lengths, prefix_lengths=prefix_lens_tensor, cu_seqlen_q=cu_seqlen_prefill, max_q=max_s, max_k=max_k) 
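# Note on the eager (non CUDA-graph) path at this point: the attention forward context is entered
# with the per-request query lengths plus any cached prefix lengths, and Seqlen simply bundles that
# metadata (cu_seqlen_q for the ragged query layout, max_q/max_k as padding bounds) for the
# attention kernels. The multimodal tensors (pixel_values, pixel_attention_mask, image_sizes) are
# only passed to model.forward on this prefill call and are cleared from the batch right afterwards,
# so subsequent decode steps run on the KV cache alone.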
(logits, speculative_logits) = self.model.forward(input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=batch.prefill_cache_indices, lm_head_indices=lm_head_indices, pixel_values=batch.pixel_values, pixel_attention_mask=batch.pixel_attention_mask, image_sizes=batch.image_sizes) if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None if batch.pixel_values is not None: batch.pixel_values = None if batch.pixel_attention_mask is not None: batch.pixel_attention_mask = None if batch.image_sizes is not None: batch.image_sizes = None return (logits, speculative_logits) cuda_graph['input_ids'][:input_ids.shape[0]] = input_ids cuda_graph['position_ids'][:position_ids.shape[0]] = position_ids if ATTENTION == 'flashinfer': block_tables = block_tables_to_ragged(block_tables=block_tables, input_lengths=batch.input_lengths, prefix_lens=batch.prefix_lens) cuda_graph['block_tables'][:block_tables.shape[0]] = block_tables else: cuda_graph['block_tables'][:block_tables.shape[0], :block_tables.shape[1]] = block_tables cuda_graph['slots'].fill_(-1) cuda_graph['slots'][:slots.shape[0]] = slots cuda_graph['input_lengths'].zero_() cuda_graph['input_lengths'][:input_lengths.shape[0]] = input_lengths + prefix_lens_tensor cuda_graph['graph'].replay() speculative_logits = cuda_graph['speculative_logits'][:bs] if cuda_graph['speculative_logits'] is not None else None logits = cuda_graph['logits'][:bs] return (logits, speculative_logits) # File: text-generation-inference-main/server/text_generation_server/server.py import asyncio import os import torch import time import signal from grpc import aio from loguru import logger from grpc_reflection.v1alpha import reflection from pathlib import Path from typing import List, Optional from text_generation_server.cache import Cache from text_generation_server.interceptor import ExceptionInterceptor from text_generation_server.models import Model, get_model_with_lora_adapters from text_generation_server.utils.adapter import AdapterInfo try: from text_generation_server.models.pali_gemma import PaliGemmaBatch from text_generation_server.models.vlm_causal_lm import VlmCausalLMBatch from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch VLM_BATCH_TYPES = {PaliGemmaBatch, VlmCausalLMBatch, IdeficsCausalLMBatch} except (ImportError, NotImplementedError): VLM_BATCH_TYPES = set() from text_generation_server.pb import generate_pb2_grpc, generate_pb2 from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor from text_generation_server.models.globals import set_adapter_to_index class SignalHandler: KEEP_PROCESSING = True def __init__(self): signal.signal(signal.SIGINT, self.exit_gracefully) signal.signal(signal.SIGTERM, self.exit_gracefully) def exit_gracefully(self, signum, frame): print(f'Exiting gracefully: Signal {signum}') self.KEEP_PROCESSING = False class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): def __init__(self, model: Model, cache: Cache, server_urls: List[str]): self.cache = cache self.model = model self.quantize = model.quantize self.server_urls = server_urls if model.device.type == 'cuda': self._inference_mode_raii_guard = torch._C._InferenceMode(True) async def Info(self, request, context): return self.model.info async def Health(self, request, context): if self.model.device.type == 'cuda': torch.zeros((2, 2)).cuda() return 
generate_pb2.HealthResponse() async def ServiceDiscovery(self, request, context): return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls) async def ClearCache(self, request, context): if request.HasField('id'): self.cache.delete(request.id) else: self.cache.clear() return generate_pb2.ClearCacheResponse() async def FilterBatch(self, request, context): batch = self.cache.pop(request.batch_id) if batch is None: raise ValueError(f'Batch ID {request.batch_id} not found in cache.') filtered_batch = batch.filter(request.request_ids) self.cache.set(filtered_batch) return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb()) async def Warmup(self, request, context): if self.quantize in {'exl2', 'gptq'}: try: from text_generation_server.layers.gptq import create_exllama_buffers, set_device set_device(self.model.device) create_exllama_buffers(request.max_prefill_tokens) except ImportError: pass if self.model.batch_type in VLM_BATCH_TYPES: batch = self.model.batch_type.from_pb_processor(request.batch, self.model.tokenizer, self.model.processor, self.model.model.config, self.model.dtype, self.model.device) else: batch = self.model.batch_type.from_pb(request.batch, self.model.tokenizer, self.model.dtype, self.model.device) max_supported_total_tokens = self.model.warmup(batch) return generate_pb2.WarmupResponse(max_supported_total_tokens=max_supported_total_tokens) async def Prefill(self, request, context): start = time.time_ns() if self.model.batch_type in VLM_BATCH_TYPES: batch = self.model.batch_type.from_pb_processor(request.batch, self.model.tokenizer, self.model.processor, self.model.model.config, self.model.dtype, self.model.device) else: batch = self.model.batch_type.from_pb(request.batch, self.model.tokenizer, self.model.dtype, self.model.device) (generations, next_batch, timings) = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.PrefillResponse(generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start) async def Decode(self, request, context): start = time.time_ns() if len(request.batches) == 0: raise ValueError('Must provide at least one batch') batches = [] for batch_pb in request.batches: batch = self.cache.pop(batch_pb.id) if batch is None: raise ValueError(f'Batch ID {batch_pb.id} not found in cache.') batches.append(batch) if len(batches) == 0: raise ValueError('All batches are empty') if len(batches) > 1: start_concat = time.time_ns() batch = self.model.batch_type.concatenate(batches) concat_ns = time.time_ns() - start_concat else: batch = batches[0] concat_ns = None (generations, next_batch, timings) = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.DecodeResponse(generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, concat_ns=concat_ns, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start) def serve(model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, uds_path: Path, max_input_tokens: int): async def serve_inner(model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool=False, quantize: Optional[str]=None, speculate: Optional[int]=None, dtype: Optional[str]=None, trust_remote_code: bool=False): 
unix_socket_template = 'unix://{}-{}' adapter_to_index = {} if sharded: server_urls = [unix_socket_template.format(uds_path, rank) for rank in range(int(os.environ['WORLD_SIZE']))] local_url = server_urls[int(os.environ['RANK'])] else: local_url = unix_socket_template.format(uds_path, 0) server_urls = [local_url] try: model = get_model_with_lora_adapters(model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, trust_remote_code, max_input_tokens, adapter_to_index) except Exception: logger.exception('Error when initializing model') raise set_adapter_to_index(adapter_to_index) server = aio.server(interceptors=[ExceptionInterceptor(), UDSOpenTelemetryAioServerInterceptor()], options=[('grpc.max_receive_message_length', (1 << 31) - 1)]) generate_pb2_grpc.add_TextGenerationServiceServicer_to_server(TextGenerationService(model, Cache(), server_urls), server) SERVICE_NAMES = (generate_pb2.DESCRIPTOR.services_by_name['TextGenerationService'].full_name, reflection.SERVICE_NAME) reflection.enable_server_reflection(SERVICE_NAMES, server) server.add_insecure_port(local_url) await server.start() logger.info('Server started at {}'.format(local_url)) signal_handler = SignalHandler() while signal_handler.KEEP_PROCESSING: await asyncio.sleep(0.5) asyncio.run(serve_inner(model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, trust_remote_code)) # File: text-generation-inference-main/server/text_generation_server/tracing.py import grpc from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.instrumentation.grpc._aio_server import OpenTelemetryAioServerInterceptor from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor class UDSOpenTelemetryAioServerInterceptor(OpenTelemetryAioServerInterceptor): def __init__(self): super().__init__(trace.get_tracer(__name__)) def _start_span(self, handler_call_details, context, set_status_on_exception=False): attributes = {SpanAttributes.RPC_SYSTEM: 'grpc', SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0]} if handler_call_details.method: (service, method) = handler_call_details.method.lstrip('/').split('/', 1) attributes.update({SpanAttributes.RPC_METHOD: method, SpanAttributes.RPC_SERVICE: service}) metadata = dict(context.invocation_metadata()) if 'user-agent' in metadata: attributes['rpc.user_agent'] = metadata['user-agent'] attributes.update({SpanAttributes.NET_TRANSPORT: 'unix'}) return self._tracer.start_as_current_span(name=handler_call_details.method, kind=trace.SpanKind.SERVER, attributes=attributes, set_status_on_exception=set_status_on_exception) def setup_tracing(otlp_service_name: str, otlp_endpoint: str): resource = Resource.create(attributes={'service.name': otlp_service_name}) span_exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True) span_processor = BatchSpanProcessor(span_exporter) trace.set_tracer_provider(TracerProvider(resource=resource)) trace.get_tracer_provider().add_span_processor(span_processor) # File: text-generation-inference-main/update_doc.py import subprocess import argparse import ast import json import os TEMPLATE = '\n# Supported Models and Hardware\n\nText Generation Inference enables serving optimized models on specific hardware for the highest performance. 
The following sections list which models (VLMs & LLMs) are supported.\n\n## Supported Models\n\nSUPPORTED_MODELS\n\nIf the above list lacks the model you would like to serve, depending on the model\'s pipeline type, you can try to initialize and serve the model anyways to see how well it performs, but performance isn\'t guaranteed for non-optimized models:\n\n```python\n# for causal LMs/text-generation models\nAutoModelForCausalLM.from_pretrained(<model>, device_map="auto")\n# or, for text-to-text generation models\nAutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")\n```\n\nIf you wish to serve a supported model that already exists on a local folder, just point to the local folder.\n\n```bash\ntext-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>\n```\n' def check_cli(check: bool): output = subprocess.check_output(['text-generation-launcher', '--help']).decode('utf-8') wrap_code_blocks_flag = '<!-- WRAP CODE BLOCKS -->' final_doc = f'# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n' lines = output.split('\n') header = '' block = [] for line in lines: if line.startswith('  -') or line.startswith('      -'): rendered_block = '\n'.join(block) if header: final_doc += f'## {header}\n```shell\n{rendered_block}\n```\n' else: final_doc += f'```shell\n{rendered_block}\n```\n' block = [] tokens = line.split('<') if len(tokens) > 1: header = tokens[-1][:-1] else: header = line.split('--')[-1] header = header.upper().replace('-', '_') block.append(line) rendered_block = '\n'.join(block) final_doc += f'## {header}\n```shell\n{rendered_block}\n```\n' block = [] filename = 'docs/source/reference/launcher.md' if check: with open(filename, 'r') as f: doc = f.read() if doc != final_doc: tmp = 'launcher.md' with open(tmp, 'w') as g: g.write(final_doc) diff = subprocess.run(['diff', tmp, filename], capture_output=True).stdout.decode('utf-8') print(diff) raise Exception('Cli arguments Doc is not up-to-date, run `python update_doc.py` in order to update it') else: with open(filename, 'w') as f: f.write(final_doc) def check_supported_models(check: bool): filename = 'server/text_generation_server/models/__init__.py' with open(filename, 'r') as f: tree = ast.parse(f.read()) enum_def = [x for x in tree.body if isinstance(x, ast.ClassDef) and x.name == 'ModelType'][0] _locals = {} _globals = {} exec(f'import enum\n{ast.unparse(enum_def)}', _globals, _locals) ModelType = _locals['ModelType'] list_string = '' for data in ModelType: list_string += f"- [{data.value['name']}]({data.value['url']})" if data.value.get('multimodal', None): list_string += ' (Multimodal)' list_string += '\n' final_doc = TEMPLATE.replace('SUPPORTED_MODELS', list_string) filename = 'docs/source/supported_models.md' if check: with open(filename, 'r') as f: doc = f.read() if doc != final_doc: tmp = 'supported.md' with open(tmp, 'w') as g: g.write(final_doc) diff = subprocess.run(['diff', tmp, filename], capture_output=True).stdout.decode('utf-8') print(diff) raise Exception('Supported models is not up-to-date, run `python update_doc.py` in order to update it') else: with open(filename, 'w') as f: f.write(final_doc) def get_openapi_schema(): try: output = subprocess.check_output(['text-generation-router', 'print-schema']) return json.loads(output) except subprocess.CalledProcessError as e: print(f'Error running text-generation-router print-schema: {e}') raise SystemExit(1) except json.JSONDecodeError: print('Error: Invalid JSON received from text-generation-router print-schema') raise SystemExit(1) def check_openapi(check: bool): new_openapi_data = get_openapi_schema()
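# --- Illustrative sketch (not from the upstream sources): the ast + exec trick used by
# check_supported_models above to read the ModelType enum without importing the heavy server
# package. The module source is parsed, the class definition is unparsed back to code, and only
# that snippet is executed in an isolated namespace. SOURCE below is a made-up stand-in for the
# real server/text_generation_server/models/__init__.py contents.
import ast

SOURCE = '''
class ModelType(enum.Enum):
    LLAMA = {"type": "llama", "name": "Llama", "url": "https://example.invalid/llama"}
    T5 = {"type": "t5", "name": "T5", "url": "https://example.invalid/t5", "multimodal": False}
'''

tree = ast.parse(SOURCE)
enum_def = next(node for node in tree.body if isinstance(node, ast.ClassDef) and node.name == 'ModelType')
namespace = {}
# Re-execute just the enum definition; `import enum` is prepended so the base class resolves.
exec('import enum\n' + ast.unparse(enum_def), namespace, namespace)
for member in namespace['ModelType']:
    line = f"- [{member.value['name']}]({member.value['url']})"
    if member.value.get('multimodal'):
        line += ' (Multimodal)'
    print(line)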
filename = 'docs/openapi.json' tmp_filename = 'openapi_tmp.json' with open(tmp_filename, 'w') as f: json.dump(new_openapi_data, f, indent=2) if check: diff = subprocess.run(['diff', '--ignore-trailing-space', tmp_filename, filename], capture_output=True).stdout.decode('utf-8') os.remove(tmp_filename) if diff: print(diff) raise Exception('OpenAPI documentation is not up-to-date, run `python update_doc.py` in order to update it') else: os.rename(tmp_filename, filename) print('OpenAPI documentation updated.') p = subprocess.run(['redocly', 'lint', filename], capture_output=True) errors = p.stderr.decode('utf-8') print(errors) if p.returncode != 0: print(errors) raise Exception(f'OpenAPI documentation is invalid, `redocly lint {filename}` showed some error:\n {errors}') return True def main(): parser = argparse.ArgumentParser() parser.add_argument('--check', action='store_true') args = parser.parse_args() check_cli(args.check) check_supported_models(args.check) check_openapi(args.check) if __name__ == '__main__': main()
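# --- Illustrative sketch (not from the upstream sources): the "regenerate and compare" pattern
# that check_cli, check_supported_models and check_openapi above all follow. A doc file is
# rebuilt from its source of truth and either written in place or, in --check mode, diffed
# against the committed copy so CI fails when the two drift apart. The file path and generated
# content below are placeholders.
import difflib
from pathlib import Path


def sync_generated_doc(generated: str, path: Path, check: bool) -> None:
    if check:
        current = path.read_text() if path.exists() else ''
        if current != generated:
            diff = '\n'.join(difflib.unified_diff(current.splitlines(), generated.splitlines(), fromfile=str(path), tofile='generated'))
            print(diff)
            raise Exception(f'{path} is not up-to-date, run `python update_doc.py` in order to update it')
    else:
        path.write_text(generated)


if __name__ == '__main__':
    sync_generated_doc('# Example generated doc\n', Path('example_generated_doc.md'), check=False)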