import math

from .LMConfig import LMConfig
from typing import Optional, Tuple, List, Union
import torch
import torch.nn.functional as F
from torch import nn
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

def exists(val):
    return val is not None


# RMSNorm normalizes each token vector by its root-mean-square and applies a learned per-dimension scale.
class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        return self.weight * self._norm(x.float()).type_as(x)

# precompute_pos_cis precomputes the complex exponentials (cis) used for rotary position embeddings.
def precompute_pos_cis(dim: int, end: int = int(32 * 1024), theta: float = 1e6):
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)  # type: ignore
    freqs = torch.outer(t, freqs).float()  # type: ignore
    pos_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
    return pos_cis
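
# A minimal shape sketch (illustrative, not executed): for head_dim=64,
# precompute_pos_cis(dim=64, end=1024) returns a complex64 tensor of shape
# (1024, 32) -- one unit-modulus rotation factor per position and per
# frequency pair, consumed by apply_rotary_emb below.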

# apply_rotary_emb applies rotary position embeddings (RoPE) to the query and key tensors.
def apply_rotary_emb(xq, xk, pos_cis):
    def unite_shape(pos_cis, x):
        ndim = x.ndim
        assert 0 <= 1 < ndim
        assert pos_cis.shape == (x.shape[1], x.shape[-1])
        shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return pos_cis.view(*shape)

    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
    pos_cis = unite_shape(pos_cis, xq_)
    xq_out = torch.view_as_real(xq_ * pos_cis).flatten(3)
    xk_out = torch.view_as_real(xk_ * pos_cis).flatten(3)
    return xq_out.type_as(xq), xk_out.type_as(xk)
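
# In math terms: each feature pair (x_{2i}, x_{2i+1}) is read as a complex
# number and multiplied by pos_cis[m, i] = exp(j*m*theta_i), a position-
# dependent rotation; relative positions then appear as phase differences in
# the subsequent q.k dot products.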

# repeat_kv repeats each key/value head n_rep times so grouped-query KV heads match the query heads.
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
    bs, slen, n_kv_heads, head_dim = x.shape
    if n_rep == 1:
        return x
    return (
        x[:, :, :, None, :]
        .expand(bs, slen, n_kv_heads, n_rep, head_dim)
        .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
    )
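
# Illustrative example (not executed): x of shape (2, 128, 4, 64) with n_rep=2
# becomes (2, 128, 8, 64); input head i maps to output heads 2*i and 2*i+1.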


class Attention(nn.Module):
    def __init__(self, args: LMConfig):
        super().__init__()
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        assert args.n_heads % self.n_kv_heads == 0
        self.n_local_heads = args.n_heads
        self.n_local_kv_heads = self.n_kv_heads
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads
        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
        self.attn_dropout = nn.Dropout(args.dropout)
        self.resid_dropout = nn.Dropout(args.dropout)
        self.dropout = args.dropout
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
        mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float("-inf"))
        mask = torch.triu(mask, diagonal=1)
        self.register_buffer("mask", mask, persistent=False)

    def forward(self,
                x: torch.Tensor,
                pos_cis: torch.Tensor,
                past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache=True,
                db_value=None):
        bsz, seq_len, _ = x.shape  # batch size, sequence length, hidden dim
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)  # project x into queries, keys and values
        xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)

        # apply rotary position embeddings
        xq, xk = apply_rotary_emb(xq, xk, pos_cis)
        # KV cache: prepend cached keys/values along the sequence dimension
        if past_key_value is not None:
            xk = torch.cat([past_key_value[0], xk], dim=1)
            xv = torch.cat([past_key_value[1], xv], dim=1)
        past_kv = (xk, xv) if use_cache else None

        # repeat KV heads to match the number of query heads
        xq, xk, xv = (
            xq.transpose(1, 2),
            repeat_kv(xk, self.n_rep).transpose(1, 2),
            repeat_kv(xv, self.n_rep).transpose(1, 2)
        )
        
        # If db_value is provided, reshape it to match xv and fuse the two.
        if db_value is not None:
            # Expect db_value of shape [B, N, H, D]; move heads before sequence.
            if db_value.ndim == 4:  # [B, N, H, D]
                db_value = db_value.transpose(1, 2)  # -> [B, H, N, D]

                # Adjust the last dimension if it differs from xv's.
                if db_value.shape[-1] != xv.shape[-1]:
                    # A learned projection would be cleaner; here we simply
                    # mean-pool (to shrink) or repeat (to grow) the dimension.
                    if db_value.shape[-1] > xv.shape[-1]:
                        # shrink by mean-pooling over groups of size `factor`
                        factor = db_value.shape[-1] // xv.shape[-1]
                        db_value = db_value.view(bsz, self.n_local_heads, seq_len, factor, xv.shape[-1])
                        db_value = db_value.mean(dim=3)
                    else:
                        # grow by repeating each element `factor` times
                        factor = xv.shape[-1] // db_value.shape[-1]
                        db_value = db_value.unsqueeze(-1).repeat(1, 1, 1, 1, factor)
                        db_value = db_value.view(bsz, self.n_local_heads, seq_len, xv.shape[-1])

                # Fuse db_value into the values; simple addition here, though
                # other fusion strategies are possible.
                xv = xv + db_value
        
        # use PyTorch's fused scaled_dot_product_attention when available
        if self.flash and seq_len != 1:
            dropout_p = self.dropout if self.training else 0.0
            output = F.scaled_dot_product_attention(
                xq, xk, xv,
                attn_mask=None,
                dropout_p=dropout_p,
                is_causal=True
            )
        else:
            scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
            scores += self.mask[:, :, :seq_len, :seq_len]
            scores = F.softmax(scores.float(), dim=-1).type_as(xq)
            scores = self.attn_dropout(scores)
            output = scores @ xv

        output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
        output = self.resid_dropout(self.wo(output))
        return output, past_kv
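
    # Illustrative shape walk-through (not executed): with dim=512, n_heads=8,
    # n_kv_heads=4 and head_dim=64, x of shape (B, T, 512) yields xq (B, T, 8, 64)
    # and xk/xv (B, T, 4, 64); repeat_kv brings KV up to 8 heads, attention runs
    # on (B, 8, T, 64), and wo projects the merged heads back to (B, T, 512).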


class CrossAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_heads = 8  # fixed head count for the cross-attention module
        self.head_dim = self.config.dim // self.num_heads
        self.to_q = nn.Linear(self.config.dim, self.config.dim, bias=False)
        self.to_k = nn.Linear(self.config.dim, self.config.dim, bias=False)
        self.to_v = nn.Linear(self.config.dim, self.config.dim, bias=False)
        
        self.to_out = nn.Linear(self.config.dim, self.config.dim, bias=False)
    
    def forward(self, x, db, context_mask=None, pos_emb=None):
        batch_size = x.size(0)
        
        # split into multiple heads
        q = self.to_q(x).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.to_k(db).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.to_v(db).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
        
        if pos_emb is not None:
            pos_emb = pos_emb.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
            q = q + pos_emb
            k = k + pos_emb
            v = v + pos_emb
        
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        
        if context_mask is not None:
            expanded_mask = context_mask.unsqueeze(1).expand(-1, self.num_heads, -1, -1)
            attn_scores = attn_scores.masked_fill(expanded_mask == 0, -1e10)
        
        attn_weights = F.softmax(attn_scores, dim=-1)

        context = torch.matmul(attn_weights, v)
        
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.dim)

        context = self.to_out(context)
        
        return context

class FeedForward(nn.Module):
    def __init__(self, config: LMConfig):
        super().__init__()
        if config.hidden_dim is None:
            # SwiGLU sizing: 2/3 of the usual 4*dim, rounded up to a multiple of `multiple_of`
            hidden_dim = 4 * config.dim
            hidden_dim = int(2 * hidden_dim / 3)
            config.hidden_dim = config.multiple_of * ((hidden_dim + config.multiple_of - 1) // config.multiple_of)
        self.w1 = nn.Linear(config.dim, config.hidden_dim, bias=False)
        self.w2 = nn.Linear(config.hidden_dim, config.dim, bias=False)
        self.w3 = nn.Linear(config.dim, config.hidden_dim, bias=False)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
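
    # Worked sizing example (not executed): dim=512, multiple_of=64 gives
    # int(2*(4*512)/3) = 1365, rounded up to 64*ceil(1365/64) = 1408, so
    # FFN(x) = Dropout(w2(SiLU(w1 x) * w3 x)) maps 512 -> 1408 -> 512.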


class MoEGate(nn.Module):
    def __init__(self, config: LMConfig):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.n_routed_experts = config.n_routed_experts

        self.scoring_func = config.scoring_func
        self.alpha = config.aux_loss_alpha
        self.seq_aux = config.seq_aux

        self.norm_topk_prob = config.norm_topk_prob
        self.gating_dim = config.dim
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        import torch.nn.init as init
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, hidden_states):
        bsz, seq_len, h = hidden_states.shape
        hidden_states = hidden_states.view(-1, h)
        logits = F.linear(hidden_states, self.weight, None)
        if self.scoring_func == 'softmax':
            scores = logits.softmax(dim=-1)
        else:
            raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')

        topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)

        if self.top_k > 1 and self.norm_topk_prob:
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator

        if self.training and self.alpha > 0.0:
            scores_for_aux = scores
            aux_topk = self.top_k
            topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
            if self.seq_aux:
                scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
                ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
                ce.scatter_add_(1, topk_idx_for_aux_loss,
                                torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
                    seq_len * aux_topk / self.n_routed_experts)
                aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
            else:
                mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
                ce = mask_ce.float().mean(0)
                Pi = scores_for_aux.mean(0)
                fi = ce * self.n_routed_experts
                aux_loss = (Pi * fi).sum() * self.alpha
        else:
            aux_loss = 0
        return topk_idx, topk_weight, aux_loss
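
    # Illustrative example (not executed): with n_routed_experts=8 and top_k=2,
    # an input of shape (bsz, seq_len, dim) yields topk_idx and topk_weight of
    # shape (bsz*seq_len, 2) -- the chosen expert ids and their (optionally
    # renormalized) gate probabilities -- plus a scalar aux_loss that nudges
    # routing toward a uniform expert load during training.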


class MOEFeedForward(nn.Module):
    def __init__(self, config: LMConfig):
        super().__init__()
        self.config = config
        self.experts = nn.ModuleList([
            FeedForward(config)
            for _ in range(config.n_routed_experts)
        ])
        self.gate = MoEGate(config)
        if config.n_shared_experts is not None:
            self.shared_experts = FeedForward(config)

    def forward(self, x):
        identity = x
        orig_shape = x.shape
        bsz, seq_len, _ = x.shape
        # route each token to its top-k experts via the gate
        topk_idx, topk_weight, aux_loss = self.gate(x)
        x = x.view(-1, x.shape[-1])
        flat_topk_idx = topk_idx.view(-1)
        if self.training:
            x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
            y = torch.empty_like(x, dtype=torch.float16)
            for i, expert in enumerate(self.experts):
                y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(y.dtype)  # keep dtypes consistent
            y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
            y = y.view(*orig_shape)
        else:
            y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
        if self.config.n_shared_experts is not None:
            y = y + self.shared_experts(identity)
        self.aux_loss = aux_loss
        return y

    @torch.no_grad()
    def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
        expert_cache = torch.zeros_like(x)
        idxs = flat_expert_indices.argsort()
        tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
        token_idxs = idxs // self.config.num_experts_per_tok
        # Example: if tokens_per_expert = [6, 15, 20, 26], tokens_per_expert.shape[0] is the
        # number of experts (4 here), and with token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12, ...]
        # the slice token_idxs[:6] -> [3, 7, 19, 21, 24, 25] gives the tokens handled by expert 0
        # (a token may be routed to several experts, depending on num_experts_per_tok);
        # the next nine positions token_idxs[6:15] -> [4, 5, 6, 10, 11, 12, ...] belong to expert 1, and so on.
        for i, end_idx in enumerate(tokens_per_expert):
            start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
            if start_idx == end_idx:
                continue
            expert = self.experts[i]
            exp_token_idx = token_idxs[start_idx:end_idx]
            expert_tokens = x[exp_token_idx]
            expert_out = expert(expert_tokens).to(expert_cache.dtype)
            expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
            expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)

        return expert_cache


class MiniMindBlock(nn.Module):
    def __init__(self, layer_id: int, config: LMConfig):
        super().__init__()
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.head_dim = config.dim // config.n_heads
        self.attention = Attention(config)
        self.cross_att = CrossAttention(config)
        
        self.layer_id = layer_id
        self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
        self.feed_forward = FeedForward(config) if not config.use_moe else MOEFeedForward(config)
    def forward(self, x, db_value, pos_cis, past_key_value=None, use_cache=True):
        # self-attention over the normalized input, with db_value fused into the values
        h_attn, past_kv = self.attention(
            self.attention_norm(x),
            pos_cis,
            past_key_value=past_key_value,
            use_cache=use_cache,
            db_value=db_value
        )

        h_attn = self.cross_att(h_attn, db_value)

        # residual connection
        h = x + h_attn

        # feed-forward network
        out = h + self.feed_forward(self.ffn_norm(h))
        return out, past_kv
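
    # Per-block dataflow (summary): x -> RMSNorm -> self-attention (db_value
    # added into V) -> cross-attention against db_value -> residual add with x
    # -> RMSNorm -> feed-forward -> residual add.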

class ExtractDB(nn.Module):
    def __init__(self, params):
        super().__init__()
        self.batch_size = None
        self.dim = params.dim
        self.dim_key = self.dim // 2
        # the expert count must be a perfect square so it factorizes into two key sub-spaces
        self.num_experts = 10 * 10  # 100 experts
        self.head_dim = params.dim // params.n_heads
        # each knowledge entry is stored flattened as 8*dim values and reshaped to [-1, dim] on retrieval
        self.knowledge_dim = 8 * params.dim

        # use register_buffer instead of nn.Parameter so the table receives no gradients
        self.register_buffer('weight_down_embed', torch.randn(self.num_experts, self.knowledge_dim) * 0.02)

        self.num_keys = int(math.sqrt(self.num_experts)) if self.num_experts > 0 else 0
        self.product_key_topk = min(16, self.num_keys)
        self.keys = nn.Parameter(torch.randn(self.num_keys, 2, self.dim_key) * 0.02)
        self.num_experts_per_head_topk = 1
        self.to_queries = nn.Sequential(
            nn.Linear(params.dim, self.dim_key * 2, bias=False),
        )
    
    def q_to_k(self, x):
        # 1. generate queries
        self.batch_size, seq_len, dim = x.shape

        # collapse the sequence dimension by averaging
        x_flat = x.mean(dim=1)  # [batch_size, dim]

        queries = self.to_queries(x_flat)  # [batch_size, 2*dim_key]
        queries = queries.reshape(self.batch_size, 2, self.dim_key)  # [batch_size, 2, dim_key]
        queries = queries.permute(1, 0, 2)  # [2, batch_size, dim_key]

        # 2. similarity between queries and keys
        sim = torch.einsum('p b d, k p d -> p b k', queries, self.keys)

        # 3. top-k within each of the two sub-spaces
        scores_and_indices = [sim[p].topk(self.product_key_topk, dim=-1) for p in range(2)]
        scores_x, scores_y = scores_and_indices[0][0], scores_and_indices[1][0]
        indices_x, indices_y = scores_and_indices[0][1], scores_and_indices[1][1]

        # 4. combine scores and indices from the two sub-spaces
        all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
        all_scores = all_scores.view(*all_scores.shape[:-2], -1)

        all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
        all_indices = all_indices.view(*all_indices.shape[:-2], -1)

        # 5. final top-k selection
        scores, pk_indices = all_scores.topk(self.num_experts_per_head_topk, dim=-1)
        indices = all_indices.gather(-1, pk_indices)
        flat_indices = indices.view(-1)
        return flat_indices
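
    # Illustrative example (not executed): with num_experts=100 and num_keys=10,
    # each half-query scores its own 10 sub-keys; combining the two top-k lists
    # gives candidate pairs whose index indices_x * 10 + indices_y addresses one
    # of the 100 rows of weight_down_embed, so the table is searched with
    # roughly 2*10 comparisons instead of 100.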
    
    def get_data(self, index):
        # fetch the embeddings directly from the buffer (stays on the current device)
        db_values = self.weight_down_embed[index]
        db_value = db_values.view(self.batch_size, -1, self.dim)
        return db_value
    
    @torch.no_grad()
    def update_value(self, k, v):
        # overwrite buffer rows in place (no gradients required)
        v_reshaped = v.view(v.size(0), -1)
        # match the buffer's dtype
        v_reshaped = v_reshaped.to(dtype=self.weight_down_embed.dtype)
        self.weight_down_embed[k] = v_reshaped


class MiniMindLM(PreTrainedModel):
    config_class = LMConfig

    def __init__(self, params: LMConfig = None):
        params = params or LMConfig()
        self.params = params
        super().__init__(self.params)
        self.vocab_size, self.n_layers = params.vocab_size, params.n_layers
        self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)
        self.dropout = nn.Dropout(params.dropout)
        self.extract_db = ExtractDB(self.params)

        self.layers = nn.ModuleList([MiniMindBlock(l, params) for l in range(self.n_layers)])
        self.norm = RMSNorm(params.dim, eps=params.norm_eps)
        self.output = nn.Linear(params.dim, params.vocab_size, bias=False)
        self.tok_embeddings.weight = self.output.weight  # tie input and output embedding weights
        
        # Calculate input dimension
        input_dim = (self.params.max_seq_len-1)*self.params.n_layers
        # Use a bottleneck architecture to reduce parameters
        bottleneck_dim = 256  # Significantly smaller bottleneck dimension
        
        # Factorized shared downsampling using two smaller convolutions
        self.shared_downsample = nn.Sequential(
            # First reduce input dimension to bottleneck
            nn.Conv1d(input_dim, bottleneck_dim, kernel_size=1, padding='same'),
            nn.ReLU(),  # Non-linearity to improve representation capacity
            # Then expand to target dimension
            nn.Conv1d(bottleneck_dim, 128*8, kernel_size=1, padding='same')
        )
        
        # Specific layers for v path
        self.downsample_v_specific = nn.Sequential(
            nn.Conv1d(128*8, 128, kernel_size=1, padding='same'),
            nn.Conv1d(128, 8, kernel_size=1, padding='same')
        )
        
        # Specific layers for q path
        self.downsample_q_specific = nn.Sequential(
            nn.Conv1d(128*8, 512, kernel_size=1, padding='same')
        )
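
        # Channel flow (illustrative): (max_seq_len-1)*n_layers -> 256 -> 1024
        # shared channels, then 1024 -> 128 -> 8 on the value path and
        # 1024 -> 512 on the query path; every convolution is 1x1, so only the
        # channel dimension changes.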
        self.register_buffer("pos_cis",
                             precompute_pos_cis(dim=params.dim // params.n_heads, theta=params.rope_theta),
                             persistent=False)

    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
                use_cache: bool = False,
                logits_to_keep: Union[int, torch.Tensor] = 0,
                **args):
        past_key_values = past_key_values or [None] * len(self.layers)
        start_pos = args.get('start_pos', 0)
        h = self.dropout(self.tok_embeddings(input_ids))
        pos_cis = self.pos_cis[start_pos:start_pos + input_ids.size(1)]
        past_kvs = []
        h_list = []

        for l, layer in enumerate(self.layers):
            # with the database disabled, replace the lookup with a constant
            # tensor of shape [batch_size, n_layers, dim] filled with 1e-4
            if self.params.disable_db:
                batch_size = h.size(0)
                db_value = torch.full((batch_size, self.n_layers, self.params.dim), 1e-4,
                                      dtype=h.dtype, device=h.device)
            else:
                # normal mode: query the knowledge database
                index = self.extract_db.q_to_k(h)
                db_value = self.extract_db.get_data(index)
            
            h, past_kv = layer(
                h, db_value, pos_cis,
                past_key_value=past_key_values[l],
                use_cache=use_cache
            )

            past_kvs.append(past_kv)
            h_list.append(h.unsqueeze(0))
        
        h_tensor = torch.cat(h_list, dim=0).permute(1, 0, 2, 3)
        
        # run the database update only when the database is enabled
        if not self.params.disable_db:
            # detach from the computation graph to avoid backpropagating twice
            h_tensor_detached = h_tensor.detach()
            h_tensor_detached = h_tensor_detached.reshape(h_tensor_detached.shape[0], -1, self.params.dim)

            # the update is kept separate from the main computation graph
            with torch.no_grad():
                # compute the shared downsampling features once
                shared_features = self.shared_downsample(h_tensor_detached)
                z_v = self.downsample_v_specific(shared_features)
                z_q = self.downsample_q_specific(shared_features)
                z_k = self.extract_db.q_to_k(z_q)
                self.extract_db.update_value(z_k, z_v)
        
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.output(self.norm(h)[:, slice_indices, :])
        aux_loss = sum(l.feed_forward.aux_loss for l in self.layers if isinstance(l.feed_forward, MOEFeedForward))
        # keep only the core output fields; extra attributes are attached below
        output = CausalLMOutputWithPast(
            logits=logits,
            past_key_values=past_kvs,
        )
        output.hidden_states = h
        output.aux_loss = aux_loss

        return output

    @torch.inference_mode()
    def generate(self, input_ids, eos_token_id=2, max_new_tokens=1024, temperature=0.75, top_p=0.90,
                 stream=False, rp=1., use_cache=True, pad_token_id=0, num_return_sequences=1, **args):
        # streaming generation
        if stream:
            return self._stream(input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)

        # non-streaming generation: decode each (padding-stripped) sequence to completion
        generated = []
        for i in range(input_ids.size(0)):
            non_pad = input_ids[i][input_ids[i] != pad_token_id].unsqueeze(0)
            for _ in range(num_return_sequences):
                out = self._stream(non_pad, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)
                tokens_list = [tokens[:, -1:] for tokens in out]
                gen = torch.cat(tokens_list, dim=-1) if tokens_list else non_pad
                full_sequence = torch.cat([non_pad, gen], dim=-1)
                generated.append(full_sequence)

        max_length = max(seq.size(1) for seq in generated)
        generated = [
            torch.cat(
                [seq, torch.full((1, max_length - seq.size(1)), pad_token_id, dtype=seq.dtype, device=seq.device)],
                dim=-1)
            for seq in generated
        ]
        output = torch.cat(generated, dim=0)
        res = output.view(input_ids.size(0) * num_return_sequences, -1)
        return res

    def _stream(self, input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args):
        start, first_seq, past_kvs = input_ids.shape[1], True, None
        while input_ids.shape[1] < max_new_tokens - 1:
            if first_seq or not use_cache:
                out, first_seq = self(input_ids, past_key_values=past_kvs, use_cache=use_cache, **args), False
            else:
                out = self(input_ids[:, -1:], past_key_values=past_kvs, use_cache=use_cache,
                           start_pos=input_ids.shape[1] - 1, **args)
            logits, past_kvs = out.logits[:, -1, :], out.past_key_values
            # repetition penalty (assumes batch size 1) followed by temperature scaling
            logits[:, list(set(input_ids.tolist()[0]))] /= rp
            logits /= (temperature + 1e-9)
            # nucleus (top-p) filtering
            if top_p is not None and top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
                sorted_probs = F.softmax(sorted_logits, dim=-1)
                cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
                sorted_indices_to_remove[:, 0] = False
                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                logits[indices_to_remove] = -float('Inf')
            input_ids_next = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
            input_ids = torch.cat((input_ids, input_ids_next), dim=1)
            yield input_ids[:, start:]
            if input_ids_next.item() == eos_token_id:
                break
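

# A minimal smoke-test sketch. Assumptions (not confirmed by this file): LMConfig
# accepts the keyword arguments below, provides a default vocab_size, and exposes
# a disable_db flag; adjust to the real config before running.
if __name__ == "__main__":
    cfg = LMConfig(dim=64, n_layers=2, n_heads=8, max_seq_len=64, disable_db=True)  # hypothetical kwargs
    model = MiniMindLM(cfg)
    ids = torch.randint(0, cfg.vocab_size, (1, 16))
    out = model(ids)
    print(out.logits.shape)   # expected: (1, 16, vocab_size) with logits_to_keep=0
    gen = model.generate(ids, max_new_tokens=32, stream=False)
    print(gen.shape)          # padded batch of generated sequences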