+import inspect
+
+import torch.nn as nn
+
+from llmc.utils.registry_factory import MODEL_REGISTRY
+
+from .base_model import BaseModel
+
+
+@MODEL_REGISTRY
+class ChatGLM(BaseModel):
+    def __init__(self, config, device_map=None, use_cache=False):
+        super().__init__(config, device_map, use_cache)
+
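+    # ChatGLM keeps its decoder blocks under transformer.encoder.layers
+    # (rather than model.layers as in Llama-style models).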
+    def find_blocks(self, modality='language'):
+        self.blocks = self.model.transformer.encoder.layers
+
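+    # Besides the word embedding, ChatGLM exposes a transformer-level
+    # rotary_pos_emb module, which is tracked here as well.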
+    def find_embed_layers(self):
+        self.embedding = self.model.transformer.embedding
+        self.rotary_pos_emb = self.model.transformer.rotary_pos_emb
+
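+    # Prefix used to address blocks by their full module name,
+    # e.g. 'transformer.encoder.layers.0'.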
+    def find_block_name(self):
+        self.block_name_prefix = 'transformer.encoder.layers'
+
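+    # Accessors for the embedding and rotary-embedding modules found above.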
+    def get_embed_layers(self):
+        return [self.embedding]
+
+    def get_attention_rotary_layers(self):
+        return [self.rotary_pos_emb]
+
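+    # ChatGLM names its LM head 'output_layer'; the final layernorm sits
+    # on the encoder as 'final_layernorm'.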
+    def get_head_layers(self):
+        return [self.model.transformer.output_layer]
+
+    def get_pre_head_layernorm_layers(self):
+        return [self.model.transformer.encoder.final_layernorm]
+
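+    # Modules that live outside the repeated decoder blocks.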
+    def get_layers_except_blocks(self):
+        return [
+            self.embedding,
+            self.rotary_pos_emb,
+            self.model.transformer.output_layer,
+            self.model.transformer.encoder.final_layernorm,
+        ]
+
+    def skip_layer_name(self):
+        return ['final_layernorm']
+
+    def has_bias(self):
+        return False
+
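+    # Each ChatGLM block carries two norms: one before self-attention and
+    # one before the MLP.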
+    def get_layernorms_in_block(self, block):
+        return {
+            'input_layernorm': block.input_layernorm,
+            'post_attention_layernorm': block.post_attention_layernorm,
+        }
+
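+    # Each subset pairs linear layers with the op that feeds them ('prev_op'),
+    # the input name(s) to capture, and the module whose forward is inspected
+    # ('inspect'). The first subset is the fused QKV projection.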
+    def get_subsets_in_block(self, block):
+        return [
+            {
+                'layers': {
+                    'self_attention.query_key_value': block.self_attention.query_key_value
+                },
+                'prev_op': [block.input_layernorm],
+                'input': ['self_attention.query_key_value'],
+                'inspect': block.self_attention,
+                'has_kwargs': True,
+            },
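+            # Attention output projection, fed by the fused QKV projection.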
+            {
+                'layers': {'self_attention.dense': block.self_attention.dense},
+                'prev_op': [block.self_attention.query_key_value],
+                'input': ['self_attention.dense'],
+                'inspect': block.self_attention.dense,
+                'has_kwargs': False,
+            },
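+            # MLP up projection (dense_h_to_4h), fed by post_attention_layernorm.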
+            {
+                'layers': {
+                    'mlp.dense_h_to_4h': block.mlp.dense_h_to_4h
+                },
+                'prev_op': [block.post_attention_layernorm],
+                'input': ['mlp.dense_h_to_4h'],
+                'inspect': block.mlp,
+                'has_kwargs': False,
+                'is_mlp': True,
+            },
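+            # MLP down projection (dense_4h_to_h), fed by dense_h_to_4h.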
+            {
+                'layers': {'mlp.dense_4h_to_h': block.mlp.dense_4h_to_h},
+                'prev_op': [block.mlp.dense_h_to_4h],
+                'input': ['mlp.dense_4h_to_h'],
+                'inspect': block.mlp.dense_4h_to_h,
+                'has_kwargs': False,
+                'is_mlp': True,
+            },
+        ]