# Code adapted from https://huggingface.co/kaiokendev/superhot-13b-8k-no-rlhf-test/blob/main/llama_rope_scaled_monkey_patch.py

from functools import partial

import torch
import transformers
import transformers.models.llama.modeling_llama


class CondenseRotaryEmbedding(torch.nn.Module):
    def __init__(
        self, dim, ratio, max_position_embeddings=2048, base=10000, device=None
    ):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Build here to make `torch.jit.trace` work.
        self.ratio = ratio
        max_position_embeddings *= ratio
        self.max_seq_len_cached = max_position_embeddings
        # print(f"Monkey Patching condense ratio {ratio}")
        # Position interpolation: divide the position indices by `ratio` so a
        # context `ratio` times longer maps back onto the trained position range.
        t = (
            torch.arange(
                self.max_seq_len_cached,
                device=self.inv_freq.device,
                dtype=self.inv_freq.dtype,
            )
            / ratio
        )
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        dtype = torch.get_default_dtype()
        self.register_buffer(
            "cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False
        )
        self.register_buffer(
            "sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False
        )

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = (
                torch.arange(
                    self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
                )
                / self.ratio
            )
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer(
                "cos_cached", emb.cos()[None, None, :, :].to(x.dtype), persistent=False
            )
            self.register_buffer(
                "sin_cached", emb.sin()[None, None, :, :].to(x.dtype), persistent=False
            )
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
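

# A minimal, hypothetical sketch (not part of the original patch) of what the
# condense ratio does: with ratio=4, an 8192-token input is evaluated at
# fractional positions 0, 0.25, 0.5, ... and so never leaves the 0..2047
# position range the base model was trained on. dim=128 matches LLaMA's
# per-head dimension.
def _demo_condense(ratio=4, dim=128):
    rope = CondenseRotaryEmbedding(
        dim, ratio, max_position_embeddings=2048, device="cpu"
    )
    x = torch.zeros(1, 1, 2048 * ratio, dim)  # [bs, heads, seq_len, head_size]
    cos, sin = rope(x, seq_len=2048 * ratio)
    print(cos.shape, sin.shape)  # both torch.Size([1, 1, 8192, 128])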


def replace_llama_with_condense(ratio):
    # Reassign the class attribute so every LlamaRotaryEmbedding constructed
    # from this point on is the condensed version; must run before the model
    # is instantiated.
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = partial(
        CondenseRotaryEmbedding, ratio=ratio
    )
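

# Usage sketch, hedged: the checkpoint path below is a placeholder, and this
# patch targets the older transformers rotary-embedding API (forward(x,
# seq_len) returning [1, 1, seq, dim] caches); newer transformers releases
# changed that interface, so the patch may not apply there.
#
#   from transformers import AutoModelForCausalLM
#
#   replace_llama_with_condense(ratio=4)  # patch BEFORE loading the model
#   model = AutoModelForCausalLM.from_pretrained("path/to/llama-checkpoint")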