6 files changed, +4 −19 lines changed.
File 1 of 6:

@@ -22,13 +22,11 @@
 import torch
 from vllm.distributed.kv_transfer.kv_lookup_buffer.base import \
     KVLookupBufferBase
-from vllm.logger import init_logger
+from vllm.logger import logger

 from vllm_ascend.distributed.kv_transfer.simple_pipe import SimplePipe
 from vllm_ascend.distributed.kv_transfer.utils import TORCH_DTYPE_TO_NPU_DTYPE

-logger = init_logger(__name__)
-

 # Hash a string into a int32 value.
 def int32_hash(data):
File 2 of 6:

@@ -26,13 +26,11 @@
 import torchair  # type: ignore
 import zmq  # type: ignore
 from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.utils import get_ip

 import vllm_ascend.envs as envs
 from vllm_ascend.distributed.kv_transfer.utils import NPU_DTYPE_TO_TORCH_DTYPE

-logger = init_logger(__name__)
-

 class SimplePipe(KVPipeBase):
File 3 of 6:

@@ -33,7 +33,7 @@
 from vllm.distributed.parallel_state import (get_dp_group, get_ep_group,
                                              get_tp_group, get_world_group)
 from vllm.forward_context import get_forward_context
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.model_executor.layers.activation import SiluAndMul
 from vllm.model_executor.layers.fused_moe import FusedMoE
 from vllm.model_executor.layers.layernorm import RMSNorm
@@ -60,7 +60,5 @@
 from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, is_310p

-logger = init_logger(__name__)
-
 _ROUTER_SCALE = None

File 4 of 6:

@@ -1,10 +1,6 @@
-from vllm.logger import init_logger
-
 from .context import (get_multistream_layer_context,
                       get_multistream_microbatch_context)

-logger = init_logger(__name__)
-

 # vllm v1 use get_forward_context to get the attn_metadata,
 # we can use this decorator to update the attn metadata
File 5 of 6:

@@ -4,14 +4,11 @@
 import torch
 import torch.nn as nn
 import vllm.v1.sample.rejection_sampler as rs
-from vllm.logger import init_logger
 from vllm.v1.sample.metadata import SamplingMetadata
 from vllm.v1.sample.rejection_sampler import (RejectionSampler, compute_probs,
                                               generate_uniform_probs)
 from vllm.v1.spec_decode.metadata import SpecDecodeMetadata

-logger = init_logger(__name__)
-
 PLACEHOLDER_TOKEN_ID = -1
 GREEDY_TEMPERATURE = -1
 # Maximum number of speculative draft tokens allowed per request in a single
File 6 of 6:

@@ -7,7 +7,7 @@
 from vllm.config import (CompilationLevel, VllmConfig,
                          get_layers_from_vllm_config)
 from vllm.distributed.parallel_state import get_pp_group
-from vllm.logger import init_logger
+from vllm.logger import logger
 from vllm.model_executor.model_loader import get_model
 from vllm.model_executor.models import supports_multimodal
 from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
@@ -17,7 +17,5 @@
 from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
 from vllm_ascend.attention.attention_v1 import AscendAttentionState

-logger = init_logger(__name__)
-
 PADDING_SLOT_ID = -1

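For context, a minimal sketch of the two logging patterns this diff trades between. It assumes only what the import lines above show: vllm.logger provides an init_logger() factory and also exposes a preconfigured module-level logger; the info() calls are illustrative, not from the change itself.

# Old pattern (removed here): each module builds its own logger instance.
from vllm.logger import init_logger

logger = init_logger(__name__)
logger.info("message via a per-module logger")

# New pattern (adopted here): import the shared vllm logger directly.
from vllm.logger import logger

logger.info("message via the shared vllm logger")

Note that in two of the six files no replacement import is added, presumably because those modules never used the logger they created.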