-
Notifications
You must be signed in to change notification settings - Fork 15.1k
Revert "Reland "[lldb-dap] Improving consistency of tests by removing concurrency." (#165688)" #165892
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Revert "Reland "[lldb-dap] Improving consistency of tests by removing concurrency." (#165688)" #165892
Conversation
… concurrency." (llvm#165688)"" This reverts commit 17dbd86. This was causing timeouts on the premerge runners. Reverting for now until the timeouts trigger within lit and/or we have a better testing strategy for this.
|
@llvm/pr-subscribers-lldb Author: Aiden Grossman (boomanaiden154) Changes… concurrency." (#165688)"" This reverts commit 17dbd86. This was causing timeouts on the premerge runners. Reverting for now until the timeouts trigger within lit and/or we have a better testing strategy for this. Patch is 24.82 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/165892.diff 8 Files Affected:
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
index 8f3652172dfdf..d892c01f0bc71 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
@@ -10,8 +10,8 @@
import subprocess
import signal
import sys
+import threading
import warnings
-import selectors
import time
from typing import (
Any,
@@ -139,6 +139,35 @@ def dump_memory(base_addr, data, num_per_line, outfile):
outfile.write("\n")
+def read_packet(
+ f: IO[bytes], trace_file: Optional[IO[str]] = None
+) -> Optional[ProtocolMessage]:
+ """Decode a JSON packet that starts with the content length and is
+ followed by the JSON bytes from a file 'f'. Returns None on EOF.
+ """
+ line = f.readline().decode("utf-8")
+ if len(line) == 0:
+ return None # EOF.
+
+ # Watch for line that starts with the prefix
+ prefix = "Content-Length: "
+ if line.startswith(prefix):
+ # Decode length of JSON bytes
+ length = int(line[len(prefix) :])
+ # Skip empty line
+ separator = f.readline().decode()
+ if separator != "":
+ Exception("malformed DAP content header, unexpected line: " + separator)
+ # Read JSON bytes
+ json_str = f.read(length).decode()
+ if trace_file:
+ trace_file.write("from adapter:\n%s\n" % (json_str))
+ # Decode the JSON bytes into a python dictionary
+ return json.loads(json_str)
+
+ raise Exception("unexpected malformed message from lldb-dap: " + line)
+
+
def packet_type_is(packet, packet_type):
return "type" in packet and packet["type"] == packet_type
@@ -170,8 +199,16 @@ def __init__(
self.log_file = log_file
self.send = send
self.recv = recv
- self.selector = selectors.DefaultSelector()
- self.selector.register(recv, selectors.EVENT_READ)
+
+ # Packets that have been received and processed but have not yet been
+ # requested by a test case.
+ self._pending_packets: List[Optional[ProtocolMessage]] = []
+ # Received packets that have not yet been processed.
+ self._recv_packets: List[Optional[ProtocolMessage]] = []
+ # Used as a mutex for _recv_packets and for notify when _recv_packets
+ # changes.
+ self._recv_condition = threading.Condition()
+ self._recv_thread = threading.Thread(target=self._read_packet_thread)
# session state
self.init_commands = init_commands
@@ -197,6 +234,9 @@ def __init__(
# keyed by breakpoint id
self.resolved_breakpoints: dict[str, Breakpoint] = {}
+ # trigger enqueue thread
+ self._recv_thread.start()
+
@classmethod
def encode_content(cls, s: str) -> bytes:
return ("Content-Length: %u\r\n\r\n%s" % (len(s), s)).encode("utf-8")
@@ -212,46 +252,17 @@ def validate_response(cls, command, response):
f"seq mismatch in response {command['seq']} != {response['request_seq']}"
)
- def _read_packet(
- self,
- timeout: float = DEFAULT_TIMEOUT,
- ) -> Optional[ProtocolMessage]:
- """Decode a JSON packet that starts with the content length and is
- followed by the JSON bytes from self.recv. Returns None on EOF.
- """
-
- ready = self.selector.select(timeout)
- if not ready:
- warnings.warn(
- "timeout occurred waiting for a packet, check if the test has a"
- " negative assertion and see if it can be inverted.",
- stacklevel=4,
- )
- return None # timeout
-
- line = self.recv.readline().decode("utf-8")
- if len(line) == 0:
- return None # EOF.
-
- # Watch for line that starts with the prefix
- prefix = "Content-Length: "
- if line.startswith(prefix):
- # Decode length of JSON bytes
- length = int(line[len(prefix) :])
- # Skip empty line
- separator = self.recv.readline().decode()
- if separator != "":
- Exception("malformed DAP content header, unexpected line: " + separator)
- # Read JSON bytes
- json_str = self.recv.read(length).decode()
- if self.trace_file:
- self.trace_file.write(
- "%s from adapter:\n%s\n" % (time.time(), json_str)
- )
- # Decode the JSON bytes into a python dictionary
- return json.loads(json_str)
-
- raise Exception("unexpected malformed message from lldb-dap: " + line)
+ def _read_packet_thread(self):
+ try:
+ while True:
+ packet = read_packet(self.recv, trace_file=self.trace_file)
+ # `packet` will be `None` on EOF. We want to pass it down to
+ # handle_recv_packet anyway so the main thread can handle unexpected
+ # termination of lldb-dap and stop waiting for new packets.
+ if not self._handle_recv_packet(packet):
+ break
+ finally:
+ dump_dap_log(self.log_file)
def get_modules(
self, start_module: Optional[int] = None, module_count: Optional[int] = None
@@ -299,6 +310,34 @@ def collect_output(
output += self.get_output(category, clear=clear)
return output
+ def _enqueue_recv_packet(self, packet: Optional[ProtocolMessage]):
+ with self.recv_condition:
+ self.recv_packets.append(packet)
+ self.recv_condition.notify()
+
+ def _handle_recv_packet(self, packet: Optional[ProtocolMessage]) -> bool:
+ """Handles an incoming packet.
+
+ Called by the read thread that is waiting for all incoming packets
+ to store the incoming packet in "self._recv_packets" in a thread safe
+ way. This function will then signal the "self._recv_condition" to
+ indicate a new packet is available.
+
+ Args:
+ packet: A new packet to store.
+
+ Returns:
+ True if the caller should keep calling this function for more
+ packets.
+ """
+ with self._recv_condition:
+ self._recv_packets.append(packet)
+ self._recv_condition.notify()
+ # packet is None on EOF
+ return packet is not None and not (
+ packet["type"] == "response" and packet["command"] == "disconnect"
+ )
+
def _recv_packet(
self,
*,
@@ -322,34 +361,46 @@ def _recv_packet(
The first matching packet for the given predicate, if specified,
otherwise None.
"""
- deadline = time.time() + timeout
-
- while time.time() < deadline:
- packet = self._read_packet(timeout=deadline - time.time())
- if packet is None:
- return None
- self._process_recv_packet(packet)
- if not predicate or predicate(packet):
- return packet
-
- def _process_recv_packet(self, packet) -> None:
+ assert (
+ threading.current_thread != self._recv_thread
+ ), "Must not be called from the _recv_thread"
+
+ def process_until_match():
+ self._process_recv_packets()
+ for i, packet in enumerate(self._pending_packets):
+ if packet is None:
+ # We need to return a truthy value to break out of the
+ # wait_for, use `EOFError` as an indicator of EOF.
+ return EOFError()
+ if predicate and predicate(packet):
+ self._pending_packets.pop(i)
+ return packet
+
+ with self._recv_condition:
+ packet = self._recv_condition.wait_for(process_until_match, timeout)
+ return None if isinstance(packet, EOFError) else packet
+
+ def _process_recv_packets(self) -> None:
"""Process received packets, updating the session state."""
- if packet and ("seq" not in packet or packet["seq"] == 0):
- warnings.warn(
- f"received a malformed packet, expected 'seq != 0' for {packet!r}"
- )
- # Handle events that may modify any stateful properties of
- # the DAP session.
- if packet and packet["type"] == "event":
- self._handle_event(packet)
- elif packet and packet["type"] == "request":
- # Handle reverse requests and keep processing.
- self._handle_reverse_request(packet)
+ with self._recv_condition:
+ for packet in self._recv_packets:
+ if packet and ("seq" not in packet or packet["seq"] == 0):
+ warnings.warn(
+ f"received a malformed packet, expected 'seq != 0' for {packet!r}"
+ )
+ # Handle events that may modify any stateful properties of
+ # the DAP session.
+ if packet and packet["type"] == "event":
+ self._handle_event(packet)
+ elif packet and packet["type"] == "request":
+ # Handle reverse requests and keep processing.
+ self._handle_reverse_request(packet)
+ # Move the packet to the pending queue.
+ self._pending_packets.append(packet)
+ self._recv_packets.clear()
def _handle_event(self, packet: Event) -> None:
"""Handle any events that modify debug session state we track."""
- self.events.append(packet)
-
event = packet["event"]
body: Optional[Dict] = packet.get("body", None)
@@ -402,8 +453,6 @@ def _handle_event(self, packet: Event) -> None:
self.invalidated_event = packet
elif event == "memory":
self.memory_event = packet
- elif event == "module":
- self.module_events.append(packet)
def _handle_reverse_request(self, request: Request) -> None:
if request in self.reverse_requests:
@@ -472,14 +521,18 @@ def send_packet(self, packet: ProtocolMessage) -> int:
Returns the seq number of the request.
"""
- packet["seq"] = self.sequence
- self.sequence += 1
+ # Set the seq for requests.
+ if packet["type"] == "request":
+ packet["seq"] = self.sequence
+ self.sequence += 1
+ else:
+ packet["seq"] = 0
# Encode our command dictionary as a JSON string
json_str = json.dumps(packet, separators=(",", ":"))
if self.trace_file:
- self.trace_file.write("%s to adapter:\n%s\n" % (time.time(), json_str))
+ self.trace_file.write("to adapter:\n%s\n" % (json_str))
length = len(json_str)
if length > 0:
@@ -860,8 +913,6 @@ def request_restart(self, restartArguments=None):
if restartArguments:
command_dict["arguments"] = restartArguments
- # Clear state, the process is about to restart...
- self._process_continued(True)
response = self._send_recv(command_dict)
# Caller must still call wait_for_stopped.
return response
@@ -1428,10 +1479,8 @@ def request_testGetTargetBreakpoints(self):
def terminate(self):
self.send.close()
- self.recv.close()
- self.selector.close()
- if self.log_file:
- dump_dap_log(self.log_file)
+ if self._recv_thread.is_alive():
+ self._recv_thread.join()
def request_setInstructionBreakpoints(self, memory_reference=[]):
breakpoints = []
@@ -1528,7 +1577,6 @@ def launch(
stdout=subprocess.PIPE,
stderr=sys.stderr,
env=adapter_env,
- bufsize=0,
)
if connection is None:
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
index 405e91fc2dc36..29935bb8046ff 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
@@ -15,8 +15,6 @@
# DAP tests as a whole have been flakey on the Windows on Arm bot. See:
# https://github.com/llvm/llvm-project/issues/137660
@skipIf(oslist=["windows"], archs=["aarch64"])
-# The Arm Linux bot needs stable resources before it can run these tests reliably.
-@skipIf(oslist=["linux"], archs=["arm$"])
class DAPTestCaseBase(TestBase):
# set timeout based on whether ASAN was enabled or not. Increase
# timeout by a factor of 10 if ASAN is enabled.
@@ -418,7 +416,7 @@ def continue_to_next_stop(self):
return self.dap_server.wait_for_stopped()
def continue_to_breakpoint(self, breakpoint_id: str):
- self.continue_to_breakpoints([breakpoint_id])
+ self.continue_to_breakpoints((breakpoint_id))
def continue_to_breakpoints(self, breakpoint_ids):
self.do_continue()
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint-events/TestDAP_breakpointEvents.py b/lldb/test/API/tools/lldb-dap/breakpoint-events/TestDAP_breakpointEvents.py
index 7b78541fb4f8e..beab4d6c1f5a6 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint-events/TestDAP_breakpointEvents.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint-events/TestDAP_breakpointEvents.py
@@ -81,20 +81,24 @@ def test_breakpoint_events(self):
breakpoint["verified"], "expect foo breakpoint to not be verified"
)
+ # Flush the breakpoint events.
+ self.dap_server.wait_for_breakpoint_events()
+
# Continue to the breakpoint
- self.continue_to_breakpoint(foo_bp_id)
- self.continue_to_next_stop() # foo_bp2
- self.continue_to_breakpoint(main_bp_id)
- self.continue_to_exit()
+ self.continue_to_breakpoints(dap_breakpoint_ids)
- bp_events = [e for e in self.dap_server.events if e["event"] == "breakpoint"]
+ verified_breakpoint_ids = []
+ unverified_breakpoint_ids = []
+ for breakpoint_event in self.dap_server.wait_for_breakpoint_events():
+ breakpoint = breakpoint_event["body"]["breakpoint"]
+ id = breakpoint["id"]
+ if breakpoint["verified"]:
+ verified_breakpoint_ids.append(id)
+ else:
+ unverified_breakpoint_ids.append(id)
- main_bp_events = [
- e for e in bp_events if e["body"]["breakpoint"]["id"] == main_bp_id
- ]
- foo_bp_events = [
- e for e in bp_events if e["body"]["breakpoint"]["id"] == foo_bp_id
- ]
+ self.assertIn(main_bp_id, unverified_breakpoint_ids)
+ self.assertIn(foo_bp_id, unverified_breakpoint_ids)
- self.assertTrue(main_bp_events)
- self.assertTrue(foo_bp_events)
+ self.assertIn(main_bp_id, verified_breakpoint_ids)
+ self.assertIn(foo_bp_id, verified_breakpoint_ids)
diff --git a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
index 09b13223e0a78..ca881f1d817c5 100644
--- a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
+++ b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
@@ -156,7 +156,6 @@ def test_debuggerRoot(self):
self.build_and_launch(
program, debuggerRoot=program_parent_dir, initCommands=commands
)
- self.continue_to_exit()
output = self.get_console()
self.assertTrue(output and len(output) > 0, "expect console output")
lines = output.splitlines()
@@ -172,6 +171,7 @@ def test_debuggerRoot(self):
% (program_parent_dir, line[len(prefix) :]),
)
self.assertTrue(found, "verified lldb-dap working directory")
+ self.continue_to_exit()
def test_sourcePath(self):
"""
diff --git a/lldb/test/API/tools/lldb-dap/module-event/TestDAP_module_event.py b/lldb/test/API/tools/lldb-dap/module-event/TestDAP_module_event.py
index 9d1d17b704f76..1f4afabbd161e 100644
--- a/lldb/test/API/tools/lldb-dap/module-event/TestDAP_module_event.py
+++ b/lldb/test/API/tools/lldb-dap/module-event/TestDAP_module_event.py
@@ -1,58 +1,58 @@
-"""
-Test 'module' events for dynamically loaded libraries.
-"""
-
+import dap_server
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
import lldbdap_testcase
+import re
class TestDAP_module_event(lldbdap_testcase.DAPTestCaseBase):
- def lookup_module_id(self, name):
- """Returns the identifier for the first module event starting with the given name."""
- for event in self.dap_server.module_events:
- if self.get_dict_value(event, ["body", "module", "name"]).startswith(name):
- return self.get_dict_value(event, ["body", "module", "id"])
- self.fail(f"No module events matching name={name}")
-
- def module_events(self, id):
- """Finds all module events by identifier."""
- return [
- event
- for event in self.dap_server.module_events
- if self.get_dict_value(event, ["body", "module", "id"]) == id
- ]
-
- def module_reasons(self, events):
- """Returns the list of 'reason' values from the given events."""
- return [event["body"]["reason"] for event in events]
-
@skipIfWindows
def test_module_event(self):
- """
- Test that module events are fired on target load and when the list of
- dynamic libraries updates while running.
- """
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
- # We can analyze the order of events after the process exits.
- self.continue_to_exit()
- a_out_id = self.lookup_module_id("a.out")
- a_out_events = self.module_events(id=a_out_id)
+ source = "main.cpp"
+ breakpoint1_line = line_number(source, "// breakpoint 1")
+ breakpoint2_line = line_number(source, "// breakpoint 2")
+ breakpoint3_line = line_number(source, "// breakpoint 3")
- self.assertIn(
- "new",
- self.module_reasons(a_out_events),
- "Expected a.out to load during the debug session.",
+ breakpoint_ids = self.set_source_breakpoints(
+ source, [breakpoint1_line, breakpoint2_line, breakpoint3_line]
)
+ self.continue_to_breakpoints(breakpoint_ids)
+
+ # We're now stopped at breakpoint 1 before the dlopen. Flush all the module events.
+ event = self.dap_server.wait_for_event(["module"])
+ while event is not None:
+ event = self.dap_server.wait_for_event(["module"])
+
+ # Continue to the second breakpoint, before the dlclose.
+ self.continue_to_breakpoints(breakpoint_ids)
+
+ # Make sure we got a module event for libother.
+ event = self.dap_server.wait_for_event(["module"])
+ self.assertIsNotNone(event, "didn't get a module event")
+ module_name = event["body"]["module"]["name"]
+ module_id = event["body"]["module"]["id"]
+ self.assertEqual(event["body"]["reason"], "new")
+ self.assertIn("libother", module_name)
+
+ # Continue to the third breakpoint, after the dlclose.
+ self.continue_to_breakpoints(breakpoint_ids)
+
+ # Make sure we got a module event for libother.
+ event = self.dap_server.wait_for_event(["module"])
+ self.assertIsNotNone(event, "didn't get a module event")
+ reason = event["body"]["reason"]
+ self.assertEqual(reason, "removed")
+ self.assertEqual(event["body"]["module"]["id"], module_id)
+
+ # The removed module event should omit everything but the module id and name
+ # as they are required fields.
+ module_data = event["body"]["module"]
+ required_keys = ["id", "name"]
+ self.assertListEqual(list(module_data.keys()), required_keys)
+ self.assertEqual(module_data["name"], "", "expects empty name.")
- libother_id = self.lookup_module_id(
- "libother." # libother.so or libother.dylib based on OS.
- )
- libother_events = self.mod...
[truncated]
|
llvm#165892) … concurrency." (llvm#165688)"" This reverts commit 17dbd86. This was causing timeouts on the premerge runners. Reverting for now until the timeouts trigger within lit and/or we have a better testing strategy for this.
llvm#165892) … concurrency." (llvm#165688)"" This reverts commit 17dbd86. This was causing timeouts on the premerge runners. Reverting for now until the timeouts trigger within lit and/or we have a better testing strategy for this.
… concurrency." (#165688)""
This reverts commit 17dbd86.
This was causing timeouts on the premerge runners. Reverting for now until the timeouts trigger within lit and/or we have a better testing strategy for this.