|
9 | 9 | [lsp4clj.protocols.endpoint :as protocols.endpoint]
|
10 | 10 | [lsp4clj.trace :as trace]
|
11 | 11 | [promesa.core :as p]
|
12 |
| - [promesa.protocols]) |
| 12 | + [promesa.protocols :as p.protocols]) |
13 | 13 | (:import
|
14 | 14 | (java.util.concurrent CancellationException)))
|
15 | 15 |
|
|
28 | 28 | `(binding [*out* null-output-stream-writer]
|
29 | 29 | ~@body))
|
30 | 30 |
|
(defn- resolve-ex-data
  ;; Returns a promise like `p`, except that a rejection whose exception
  ;; carries `ex-data` (e.g. an error response wrapped in ex-info) is
  ;; converted into a resolution with that data map. Rejections without
  ;; ex-data are re-raised unchanged.
  [p]
  (p/catch p
           (fn [cause]
             (if-let [data (ex-data cause)]
               data
               (p/rejected cause)))))
;; Extends the notion of a blocking, timed deref with cancellation: if the
;; deref times out, the underlying operation is cancelled before the
;; timeout value is returned. Implemented by PendingRequest below.
(defprotocol IBlockingDerefOrCancel
  (deref-or-cancel [this timeout-ms timeout-val]))
|
34 |
;; Wraps a promesa promise `p` that will hold the client's eventual response
;; to a request the server sent (`id`/`method`/`started` identify and trace
;; the request). Implements the usual JVM deref/Future idioms so callers can
;; block, poll, time out, or cancel. Cancellation notifies the client via a
;; `$/cancelRequest` handler chained onto `p` in `pending-request`.
(defrecord PendingRequest [p id method started]
  clojure.lang.IDeref
  ;; Unbounded blocking deref; settles to the response, or throws if `p`
  ;; was rejected or cancelled.
  (deref [_] (deref p))
  clojure.lang.IBlockingDeref
  ;; Timed deref. `resolve-ex-data` converts a rejection carrying ex-data
  ;; (an error response) into that data map, so callers receive the error
  ;; body as a value rather than a thrown exception.
  (deref [_ timeout-ms timeout-val]
    (deref (resolve-ex-data p) timeout-ms timeout-val))
  IBlockingDerefOrCancel
  (deref-or-cancel [_ timeout-ms timeout-val]
    (let [result (deref (resolve-ex-data p) timeout-ms ::timeout)]
      ;; ::timeout is a private sentinel the promise itself can never
      ;; produce, so `identical?` is a safe test for "timed out".
      (if (identical? ::timeout result)
        (do (p/cancel! p)
            timeout-val)
        result)))
  clojure.lang.IPending
  (isRealized [_] (p/done? p))
  java.util.concurrent.Future
  (get [_] (deref p))
  (get [_ timeout unit]
    (let [result (deref p (.toMillis unit timeout) ::timeout)]
      (if (identical? ::timeout result)
        (throw (java.util.concurrent.TimeoutException.))
        result)))
  ;; Cancellation state lives in the promise itself; no extra atom needed
  ;; (an earlier version of this record carried a `cancelled?` atom and a
  ;; `server` field for this).
  (isCancelled [_] (p/cancelled? p))
  (isDone [_] (p/done? p))
  ;; Returns whether the promise ended up cancelled.
  ;; NOTE(review): assumes p/cancel! is a no-op on an already-settled
  ;; promise, so cancelling a completed request yields false — confirm
  ;; against promesa semantics.
  (cancel [_ _interrupt?]
    (p/cancel! p)
    (p/cancelled? p))
  ;; Lets the record be used directly with promesa combinators
  ;; (p/then, p/catch, ...) by exposing the underlying promise.
  p.protocols/IPromiseFactory
  (-promise [_] p))
72 | 66 |
|
73 | 67 | ;; Avoid error: java.lang.IllegalArgumentException: Multiple methods in multimethod 'simple-dispatch' match dispatch value: class lsp4clj.server.PendingRequest -> interface clojure.lang.IPersistentMap and interface clojure.lang.IDeref, and neither is preferred
|
74 | 68 | ;; Only when CIDER is running? See https://github.com/thi-ng/color/issues/10
|
|
96 | 90 | Sends `$/cancelRequest` only once, though `lsp4clj.server/deref-or-cancel` or
|
97 | 91 | `future-cancel` can be called multiple times."
|
98 | 92 | [id method started server]
|
99 |
| - (map->PendingRequest {:p (promise) |
100 |
| - :cancelled? (atom false) |
101 |
| - :id id |
102 |
| - :method method |
103 |
| - :started started |
104 |
| - :server server})) |
| 93 | + (let [p (p/deferred)] |
| 94 | + ;; Set up a side-effect so that when the Request is cancelled, we inform the |
| 95 | + ;; client. This cannot be `(-> (p/deferred) (p/catch))` because that returns |
| 96 | + ;; a promise which, when cancelled, does nothing because there's no |
| 97 | + ;; exception handler chained onto it. Instead, we must cancel the |
| 98 | + ;; `(p/deferred)` promise itself. |
| 99 | + (p/catch p CancellationException |
| 100 | + (fn [_] |
| 101 | + (protocols.endpoint/send-notification server "$/cancelRequest" {:id id}))) |
| 102 | + (map->PendingRequest {:p p |
| 103 | + :id id |
| 104 | + :method method |
| 105 | + :started started}))) |
105 | 106 |
|
106 | 107 | (defn ^:private format-error-code [description error-code]
|
107 | 108 | (let [{:keys [code message]} (lsp.errors/by-key error-code)]
|
|
122 | 123 | (recur)))))
|
123 | 124 | ch))
|
124 | 125 |
|
125 |
| -(defn ^:private dispatch-input |
126 |
| - "Dispatches messages received on the input-ch based on message type. Returns a |
127 |
| - channel which will close after the input-ch is closed." |
128 |
| - [server context input-ch] |
129 |
| - (let [;; In order to process some requests and (all) notifications in series, |
130 |
| - ;; the language server sometimes needs to block client-initiated input. |
131 |
| - ;; If the language server sends requests during that time, it needs to |
132 |
| - ;; receive responses, even though it's blocking other input. Otherwise, |
133 |
| - ;; it will end up in a deadlock, where it's waiting to receive a |
134 |
| - ;; response off the input-ch and the input-ch isn't being read from |
135 |
| - ;; because the server is blocking input. See |
136 |
| - ;; https://github.com/clojure-lsp/clojure-lsp/issues/1500. |
137 |
| - |
138 |
| - ;; The messages all arrive in order on the input-ch so to get to the |
139 |
| - ;; client's response, we have to queue whatever other messages it's |
140 |
| - ;; sent. We do that by storing them in a sliding buffer. Because of the |
141 |
| - ;; sliding buffer: |
142 |
| - ;; * if the client sends a message which causes the language server to |
143 |
| - ;; block, and |
144 |
| - ;; * if the language server sends a request during that time, and |
145 |
| - ;; * if the client sends more than 100 other messages between when the |
146 |
| - ;; language server started blocking and when the client responds to |
147 |
| - ;; the language server's request, |
148 |
| - ;; * then the client's earliest messages will be dropped. |
149 |
| - ;; The same is true in reverse. |
150 |
| - |
151 |
| - ;; We process the client- and language-server-initiated messages in |
152 |
| - ;; separate threads. |
153 |
| - ;; * Threads, so the language server can use >!! and so that we can use |
154 |
| - ;; (>!! output-ch) to respect back pressure from clients that are slow |
155 |
| - ;; to read. |
156 |
| - ;; * Separate, so one can continue while the other is blocked. |
157 |
| - |
158 |
| - ;; (Jacob Maine): 100 is picked out of thin air. I have no idea how to |
159 |
| - ;; estimate how big the buffer should be to avoid dropping messages. LSP |
160 |
| - ;; communication tends to be very quiet, then very chatty, so it depends |
161 |
| - ;; a lot on what the client and server are doing. I also don't know how |
162 |
| - ;; many messages we could store without running into memory problems, |
163 |
| - ;; since this is dependent on so many variables, not just the size of |
164 |
| - ;; the JVM's memory, but also the size of the messages, which can be |
165 |
| - ;; anywhere from a few bytes to megabytes. |
166 |
| - server-initiated-in-ch (thread-loop |
167 |
| - (async/sliding-buffer 100) |
168 |
| - (fn [response] |
169 |
| - (protocols.endpoint/receive-response server response))) |
170 |
| - client-initiated-in-ch (thread-loop |
171 |
| - (async/sliding-buffer 100) |
172 |
| - (fn [[message-type message]] |
173 |
| - (if (identical? :request message-type) |
174 |
| - (protocols.endpoint/receive-request server context message) |
175 |
| - (protocols.endpoint/receive-notification server context message))))] |
176 |
| - (async/go-loop [] |
177 |
| - (if-let [message (async/<! input-ch)] |
178 |
| - (let [message-type (coercer/input-message-type message)] |
179 |
| - (case message-type |
180 |
| - (:parse-error :invalid-request) |
181 |
| - (protocols.endpoint/log server :error (format-error-code "Error reading message" message-type)) |
182 |
| - (:response.result :response.error) |
183 |
| - (async/>! server-initiated-in-ch message) |
184 |
| - (:request :notification) |
185 |
| - (async/>! client-initiated-in-ch [message-type message])) |
186 |
| - (recur)) |
187 |
| - (do |
188 |
| - (async/close! server-initiated-in-ch) |
189 |
| - (async/close! client-initiated-in-ch)))))) |
(def input-buffer-size
  ;; Capacity of the buffer that holds inbound client requests and
  ;; notifications while the server is busy or blocked waiting for a
  ;; response to one of its own requests; when full, pending sent requests
  ;; are rejected before parking (see `start`).
  ;; (Jacob Maine): This number is picked out of thin air. I have no idea how to
  ;; estimate how big the buffer could or should be. LSP communication tends to
  ;; be very quiet, then very chatty, so it depends a lot on what the client and
  ;; server are doing. I also don't know how many messages we could store
  ;; without running into memory problems, since this is dependent on so many
  ;; variables, not just the size of the JVM's memory, but also the size of the
  ;; messages, which can be anywhere from a few bytes to several megabytes.
  1024)
190 | 135 |
|
191 | 136 | ;; Expose endpoint methods to language servers
|
192 | 137 |
|
|
222 | 167 | (async/put! trace-ch [:debug trace-body])))
|
223 | 168 |
|
224 | 169 | (defrecord PendingReceivedRequest [result-promise cancelled?]
|
225 |
| - promesa.protocols/ICancellable |
| 170 | + p.protocols/ICancellable |
226 | 171 | (-cancel! [_]
|
227 | 172 | (p/cancel! result-promise)
|
228 | 173 | (reset! cancelled? true))
|
|
259 | 204 | protocols.endpoint/IEndpoint
|
260 | 205 | (start [this context]
|
261 | 206 | ;; Start receiving messages.
|
262 |
| - (let [pipeline (dispatch-input this context input-ch)] |
263 |
| - ;; Wait to stop receiving messages. |
| 207 | + (let [;; The language server sometimes needs to stop processing inbound |
| 208 | + ;; requests and notifications. |
| 209 | + |
| 210 | + ;; We do this automatically whenever we receive a notification, so |
| 211 | + ;; that we are sure the language server processes didChange before |
| 212 | + ;; moving on to other requests. But the language server can also |
| 213 | + ;; decide to do it itself, even in a request, by processing everything |
| 214 | + ;; synchronously instead of returning a future. |
| 215 | + |
| 216 | + ;; In a request (or even in a notification) the language server can |
| 217 | + ;; send a request to the client and block waiting for a response. |
| 218 | + |
| 219 | + ;; It's possible -- even likely -- that the client will send other |
| 220 | + ;; messages between the time that the server sent its request and the |
| 221 | + ;; time that the client responds. |
| 222 | + |
| 223 | + ;; All inbound messages -- requests, notifications, and responses -- |
| 224 | + ;; come in on a single stream. |
| 225 | + |
| 226 | + ;; If the server blocks waiting for a response, we have to set aside |
| 227 | + ;; the other inbound requests and notifications, so that we can get to |
| 228 | + ;; the response. That is, while the server is blocking we cannot stop |
| 229 | + ;; accepting input. Otherwise, the server will end up in a deadlock, |
| 230 | + ;; where it's waiting to receive a response, but the response is |
| 231 | + ;; waiting to be accepted. |
| 232 | + |
| 233 | + ;; To accomplish this we process inbound requests and notifications |
| 234 | + ;; separately from inbound responses. If the server starts blocking |
| 235 | + ;; waiting for a response, we buffer the inbound requests and |
| 236 | + ;; notifications until the server is prepared to process them. |
| 237 | + |
| 238 | + ;; If the buffer becomes full, we assume that the server isn't |
| 239 | + ;; handling inbound requests and notifications because it's waiting for |
| 240 | + ;; a response. So, following our assumption, we reject the server |
| 241 | + ;; requests so that the server will stop waiting for the response. |
| 242 | + ;; It's possible that this won't work -- our assumption might have |
| 243 | + ;; been wrong and the server might have stalled for some other reason. |
| 244 | + ;; So after rejecting, we park trying to add the latest inbound |
| 245 | + ;; request or notification to the buffer. |
| 246 | + |
| 247 | + ;; This ensures we don't drop any client messages, though we could |
| 248 | + ;; stop reading them if the server keeps blocking. If we're lucky |
| 249 | + ;; either the language server will unblock, or the client will decide |
| 250 | + ;; to stop sending messages because it's failed to receive a server |
| 251 | + ;; response (i.e., we will have managed to apply backpressure to the |
| 252 | + ;; client). If we're unlucky, the server could keep blocking forever. |
| 253 | + ;; In any case, this scenario -- where we stop reading messages -- is |
| 254 | + ;; presumed to be both very rare and indicative of a problem that can |
| 255 | + ;; be solved only in the client or the language server. |
| 256 | + client-initiated-in-ch (thread-loop |
| 257 | + input-buffer-size |
| 258 | + (fn [[message-type message]] |
| 259 | + (if (identical? :request message-type) |
| 260 | + (protocols.endpoint/receive-request this context message) |
| 261 | + (protocols.endpoint/receive-notification this context message)))) |
| 262 | + reject-pending-sent-requests (fn [exception] |
| 263 | + (doseq [pending-request (vals @pending-sent-requests*)] |
| 264 | + (p/reject! (:p pending-request) |
| 265 | + exception))) |
| 266 | + pipeline (async/go-loop [] |
| 267 | + (if-let [message (async/<! input-ch)] |
| 268 | + (let [message-type (coercer/input-message-type message)] |
| 269 | + (case message-type |
| 270 | + (:parse-error :invalid-request) |
| 271 | + (protocols.endpoint/log this :error (format-error-code "Error reading message" message-type)) |
| 272 | + (:response.result :response.error) |
| 273 | + (protocols.endpoint/receive-response this message) |
| 274 | + (:request :notification) |
| 275 | + (when-not (async/offer! client-initiated-in-ch [message-type message]) |
| 276 | + ;; Buffers full. Fail any waiting pending requests and... |
| 277 | + (reject-pending-sent-requests |
| 278 | + (ex-info "Buffer of client messages exhausted." {})) |
| 279 | + ;; ... try again, but park this time. |
| 280 | + (async/>! client-initiated-in-ch [message-type message]))) |
| 281 | + (recur)) |
| 282 | + (async/close! client-initiated-in-ch)))] |
264 | 283 | (async/go
|
265 |
| - ;; When pipeline closes, it indicates input-ch has closed. We're done |
266 |
| - ;; receiving. |
| 284 | + ;; Wait to stop receiving messages. |
267 | 285 | (async/<! pipeline)
|
268 |
| - ;; Do cleanup. |
| 286 | + ;; The pipeline has closed, indicating input-ch has closed. We're done |
| 287 | + ;; receiving. Do cleanup. |
| 288 | + |
| 289 | + (reject-pending-sent-requests (ex-info "Server shutting down. Input is closed so no response is possible." {})) |
269 | 290 |
|
270 | 291 | ;; The [docs](https://clojuredocs.org/clojure.core.async/close!) for
|
271 | 292 | ;; `close!` say A) "The channel will no longer accept any puts", B)
|
|
310 | 331 | req (lsp.requests/request id method body)
|
311 | 332 | pending-request (pending-request id method now this)]
|
312 | 333 | (trace this trace/sending-request req now)
|
313 |
| - ;; Important: record request before sending it, so it is sure to be |
| 334 | + ;; Important: record request before sending it, so it's sure to be |
314 | 335 | ;; available during receive-response.
|
315 | 336 | (swap! pending-sent-requests* assoc id pending-request)
|
316 | 337 | ;; respect back pressure from clients that are slow to read; (go (>!)) will not suffice
|
|
330 | 351 | (if-let [{:keys [p started] :as req} (get pending-requests id)]
|
331 | 352 | (do
|
332 | 353 | (trace this trace/received-response req resp started now)
|
333 |
| - (deliver p (if error resp result))) |
| 354 | + (if error |
| 355 | + (p/reject! p (ex-info "Received error response" resp)) |
| 356 | + (p/resolve! p result))) |
334 | 357 | (trace this trace/received-unmatched-response resp now)))
|
335 | 358 | (catch Throwable e
|
336 | 359 | (log-error-receiving this e resp))))
|
|
0 commit comments