Skip to content

Commit 34cfdf5

Browse files
authored
[Misc] Fix logger bug (vllm-project#2024)
1. Remove useless logger 2. Fix logger bug, same problem as vllm-project#515 - vLLM version: v0.10.0 - vLLM main: vllm-project/vllm@18cc33d Signed-off-by: wangxiyuan <[email protected]>
1 parent 3ad582c commit 34cfdf5

File tree

6 files changed

+4
-19
lines changed

6 files changed

+4
-19
lines changed

vllm_ascend/distributed/kv_transfer/simple_buffer.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,11 @@
2222
import torch
2323
from vllm.distributed.kv_transfer.kv_lookup_buffer.base import \
2424
KVLookupBufferBase
25-
from vllm.logger import init_logger
25+
from vllm.logger import logger
2626

2727
from vllm_ascend.distributed.kv_transfer.simple_pipe import SimplePipe
2828
from vllm_ascend.distributed.kv_transfer.utils import TORCH_DTYPE_TO_NPU_DTYPE
2929

30-
logger = init_logger(__name__)
31-
3230

3331
# Hash a string into a int32 value.
3432
def int32_hash(data):

vllm_ascend/distributed/kv_transfer/simple_pipe.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -26,14 +26,12 @@
2626
import torchair # type: ignore
2727
import zmq # type: ignore
2828
from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase
29-
from vllm.logger import init_logger
29+
from vllm.logger import logger
3030
from vllm.utils import get_ip
3131

3232
import vllm_ascend.envs as envs
3333
from vllm_ascend.distributed.kv_transfer.utils import NPU_DTYPE_TO_TORCH_DTYPE
3434

35-
logger = init_logger(__name__)
36-
3735

3836
class SimplePipe(KVPipeBase):
3937

vllm_ascend/models/pangu_moe.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
from vllm.distributed.parallel_state import (get_dp_group, get_ep_group,
3434
get_tp_group, get_world_group)
3535
from vllm.forward_context import get_forward_context
36-
from vllm.logger import init_logger
36+
from vllm.logger import logger
3737
from vllm.model_executor.layers.activation import SiluAndMul
3838
from vllm.model_executor.layers.fused_moe import FusedMoE
3939
from vllm.model_executor.layers.layernorm import RMSNorm
@@ -60,8 +60,6 @@
6060
from vllm_ascend.ascend_config import get_ascend_config
6161
from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, is_310p
6262

63-
logger = init_logger(__name__)
64-
6563
_ROUTER_SCALE = None
6664

6765

vllm_ascend/multistream/decorator.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,6 @@
1-
from vllm.logger import init_logger
2-
31
from .context import (get_multistream_layer_context,
42
get_multistream_microbatch_context)
53

6-
logger = init_logger(__name__)
7-
84

95
# vllm v1 use get_forward_context to get the attn_metadata,
106
# we can use this decorator to update the attn metadata

vllm_ascend/sample/rejection_sampler.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,14 +4,11 @@
44
import torch
55
import torch.nn as nn
66
import vllm.v1.sample.rejection_sampler as rs
7-
from vllm.logger import init_logger
87
from vllm.v1.sample.metadata import SamplingMetadata
98
from vllm.v1.sample.rejection_sampler import (RejectionSampler, compute_probs,
109
generate_uniform_probs)
1110
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
1211

13-
logger = init_logger(__name__)
14-
1512
PLACEHOLDER_TOKEN_ID = -1
1613
GREEDY_TEMPERATURE = -1
1714
# Maximum number of speculative draft tokens allowed per request in a single

vllm_ascend/worker/eagle_proposer_v1.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from vllm.config import (CompilationLevel, VllmConfig,
88
get_layers_from_vllm_config)
99
from vllm.distributed.parallel_state import get_pp_group
10-
from vllm.logger import init_logger
10+
from vllm.logger import logger
1111
from vllm.model_executor.model_loader import get_model
1212
from vllm.model_executor.models import supports_multimodal
1313
from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
@@ -17,8 +17,6 @@
1717
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
1818
from vllm_ascend.attention.attention_v1 import AscendAttentionState
1919

20-
logger = init_logger(__name__)
21-
2220
PADDING_SLOT_ID = -1
2321

2422

0 commit comments

Comments (0)