
Commit 24fa443

test steering
1 parent c304749 commit 24fa443

2 files changed: +288 −98 lines changed

optillm/autothink/processor.py

Lines changed: 13 additions & 2 deletions
@@ -135,10 +135,10 @@ def _setup_steering(self):
                 self.tokenizer
             )
 
-            logger.info(f"Set up steering with {len(self.steering_hooks)} hooks")
+            logger.info(f"STEERING: Set up steering with {len(self.steering_hooks)} hooks")
 
         except Exception as e:
-            logger.error(f"Error setting up steering: {e}")
+            logger.error(f"STEERING: Error setting up steering: {e}")
             self.steering_manager = None
             self.steering_hooks = []
 
@@ -147,6 +147,7 @@ def _cleanup_steering(self):
         if self.steering_hooks:
             remove_steering_hooks(self.steering_hooks)
             self.steering_hooks = []
+            logger.info("STEERING: Hooks removed successfully")
 
     def classify_complexity(self, query: str) -> Tuple[str, float]:
         """
@@ -338,6 +339,16 @@ def process(self, messages: List[Dict[str, str]]) -> str:
                 response_chunks.append(next_str)
                 if not seen_end_think:
                     n_thinking_tokens += 1
+
+                # Update steering hooks with new token
+                if self.steering_hooks:
+                    for hook, _ in self.steering_hooks:
+                        # Update token history with the new token
+                        hook.update_token_history([next_token])
+                        # Check for matches occasionally during generation
+                        if random.random() < 0.1:  # 10% chance per token
+                            hook.try_match()
+
                 tokens = torch.tensor([[next_token]]).to(tokens.device)
 
         # Clean up steering hooks