Skip to content

Commit 4196c8f

Browse files
authored
Merge pull request #318 from Accenture/300-async-service-send-response-directly
make it possible to return cached response
2 parents 0603642 + e8b86c8 commit 4196c8f

File tree

3 files changed

+163
-12
lines changed

3 files changed

+163
-12
lines changed

docs/api-gateway.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ The endpoint expects the following request format:
180180
### Wait for response
181181
182182
Sometimes it makes sense to provide a simple request-response API to something that runs asynchronously on the backend. For example, let's say there's a ticket reservation process that takes 10 seconds in total and involves three different services that communicate via message passing. For an external client, it may be simpler to wait 10 seconds for the response instead of polling for a response every other second.
183-
A behavior like this can be configured using an endpoints' `response_from` property. When set to `kafka`, the response to the request is not taken from the `target` (e.g., for `target` = `http` this means the backend's HTTP response is ignored), but instead it's read from a Kafka topic. In order to enable RIG to correlate the response from the topic with the original request, RIG adds a correlation ID to the request (using a query parameter in case of `target` = `http`, or backed into the produced CloudEvent otherwise). **Backend services that work with the request need to include that correlation ID in their response; otherwise, RIG won't be able to forward it to the client (and times out).**
183+
A behavior like this can be configured using an endpoint's `response_from` property. When set to `kafka`, the response to the request is not necessarily taken from the `target` — e.g., for `target` = `http` this means the backend's HTTP response might be ignored. It is the backend service's responsibility to decide where the response is read from: if the backend responds with the HTTP status code `202 Accepted`, the response is read from a Kafka topic; if the backend responds with any other status code (`200`, `400`, or whatever makes sense), the response is read synchronously from the HTTP target directly (this allows the backend to return cached responses). In order to enable RIG to correlate the response from the Kafka topic with the original request, RIG adds a correlation ID to the request (using a query parameter in case of `target` = `http`, or baked into the produced CloudEvent otherwise). Backend services that work with the request need to include that correlation ID in their response; otherwise, RIG won't be able to forward it to the client (and times out).
184184
185185
Configuration of such API endpoint might look like this:
186186

lib/rig_inbound_gateway/api_proxy/handler/http.ex

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -85,10 +85,19 @@ defmodule RigInboundGateway.ApiProxy.Handler.Http do
8585
defp handle_response(conn, res, response_from)
8686

8787
defp handle_response(conn, res, "http"),
88-
do: send_or_chunk_response(conn, res)
88+
do: send_or_chunk_response(conn, res, "http")
8989

90-
defp handle_response(conn, _, response_from),
91-
do: wait_for_response(conn, response_from)
90+
defp handle_response(conn, res, response_from),
91+
do: respond_directly_or_wait_for_response(conn, res, response_from)
92+
93+
# ---
94+
95+
defp respond_directly_or_wait_for_response(conn, res, response_from) do
96+
case res.status_code do
97+
202 -> wait_for_response(conn, response_from)
98+
_ -> send_or_chunk_response(conn, res, response_from)
99+
end
100+
end
92101

93102
# ---
94103

@@ -192,14 +201,15 @@ defmodule RigInboundGateway.ApiProxy.Handler.Http do
192201

193202
# ---
194203

195-
@spec send_or_chunk_response(Plug.Conn.t(), HTTPoison.Response.t()) :: Plug.Conn.t()
204+
@spec send_or_chunk_response(Plug.Conn.t(), HTTPoison.Response.t(), String.t()) :: Plug.Conn.t()
196205
defp send_or_chunk_response(
197206
conn,
198207
%HTTPoison.Response{
199208
headers: headers,
200209
status_code: status_code,
201210
body: body
202-
}
211+
},
212+
response_from
203213
) do
204214
headers =
205215
headers
@@ -208,8 +218,7 @@ defmodule RigInboundGateway.ApiProxy.Handler.Http do
208218

209219
conn = %{conn | resp_headers: headers}
210220

211-
# only possibility for "response_from" = "http", therefore hardcoded here
212-
ProxyMetrics.count_proxy_request(conn.method, conn.request_path, "http", "http", "ok")
221+
ProxyMetrics.count_proxy_request(conn.method, conn.request_path, "http", response_from, "ok")
213222

214223
headers
215224
|> Map.new()

test/rig_tests/proxy/response_from/async_http_test.exs

Lines changed: 146 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,10 @@ defmodule RigTests.Proxy.ResponseFrom.AsyncHttpTest do
2727
end)
2828
end
2929

30-
test_with_server "Given response_from is set to http_async and response is in binary mode, the http response is taken from the internal HTTP endpoint." do
31-
test_name = "proxy-http-response-from-http-internal-binary"
30+
# Given response_from=http_async, when the backend responds with 202, RIG doesn't forward that 202-response.
31+
# Instead, RIG expects the actual response to be submitted to its API, identified by the correlation ID RIG has added to the forwarded request.
32+
test_with_server "Given response_from=http_async, when the backend responds with 202, the http response is taken from the internal HTTP endpoint" do
33+
test_name = "proxy-http-response-from-http-internal"
3234

3335
api_id = "mock-#{test_name}-api"
3436
endpoint_id = "mock-#{test_name}-endpoint"
@@ -43,7 +45,7 @@ defmodule RigTests.Proxy.ResponseFrom.AsyncHttpTest do
4345
|> put_req_header("content-type", "application/json;charset=utf-8")
4446
|> post("/v3/responses", Jason.encode!(async_response))
4547

46-
Response.ok!(sync_response, %{"content-type" => "application/json"})
48+
Response.accepted!(sync_response, %{"content-type" => "application/json"})
4749
end)
4850

4951
# We register the endpoint with the proxy:
@@ -112,7 +114,7 @@ defmodule RigTests.Proxy.ResponseFrom.AsyncHttpTest do
112114
assert res_status == 400
113115
assert res_body == "Failed to parse request body: {:error, {:not_an_integer, \"abc201\"}}"
114116

115-
Response.ok!(sync_response, %{"content-type" => "application/json"})
117+
Response.accepted!(sync_response, %{"content-type" => "application/json"})
116118
end)
117119

118120
# We register the endpoint with the proxy:
@@ -153,4 +155,144 @@ defmodule RigTests.Proxy.ResponseFrom.AsyncHttpTest do
153155
# ...the fake backend service has been called:
154156
assert FakeServer.hits() == 1
155157
end
158+
159+
test_with_server "Given response_from=http_async, when the backend responds with 200, RIG forwards this 200-response (and does not expect an asynchronous response for this request)." do
160+
test_name = "proxy-http-response-synchronous"
161+
162+
api_id = "mock-#{test_name}-api"
163+
endpoint_id = "mock-#{test_name}-endpoint"
164+
endpoint_path = "/#{endpoint_id}"
165+
sync_response = %{"message" => "this is the sync response that reaches the client"}
166+
async_response = %{"this response" => "the client never sees this response"}
167+
168+
route(endpoint_path, fn %{query: %{"correlation" => correlation_id}} ->
169+
event =
170+
Jason.encode!(%{
171+
specversion: "0.2",
172+
type: "rig.async-response",
173+
source: "fake-service",
174+
id: "1",
175+
rig: %{correlation: correlation_id},
176+
data: async_response
177+
})
178+
179+
build_conn()
180+
|> put_req_header("content-type", "application/json;charset=utf-8")
181+
|> post("/v2/responses", event)
182+
183+
Response.ok!(sync_response, %{"content-type" => "application/json"})
184+
end)
185+
186+
# We register the endpoint with the proxy:
187+
rig_api_url = "http://localhost:#{@api_port}/v2/apis"
188+
rig_proxy_url = "http://localhost:#{@proxy_port}"
189+
190+
body =
191+
Jason.encode!(%{
192+
id: api_id,
193+
name: "Mock API",
194+
version_data: %{
195+
default: %{
196+
endpoints: [
197+
%{
198+
id: endpoint_id,
199+
type: "http",
200+
method: "GET",
201+
path: endpoint_path,
202+
response_from: "http_async"
203+
}
204+
]
205+
}
206+
},
207+
proxy: %{
208+
target_url: "localhost",
209+
port: FakeServer.port()
210+
}
211+
})
212+
213+
headers = [{"content-type", "application/json"}]
214+
HTTPoison.post!(rig_api_url, body, headers)
215+
216+
# The client calls the proxy endpoint:
217+
request_url = rig_proxy_url <> endpoint_path
218+
%HTTPoison.Response{status_code: res_status, body: res_body} = HTTPoison.get!(request_url)
219+
220+
# Now we can assert that...
221+
# ...the fake backend service has been called:
222+
assert FakeServer.hits() == 1
223+
# ...the connection is closed and the status is OK:
224+
assert res_status == 200
225+
# ...the client receives the synchronous http response:
226+
assert Jason.decode!(res_body) == sync_response
227+
end
228+
229+
test_with_server "Given response_from=http_async, when the backend responds with 400, RIG forwards this 400-response (and does not expect an asynchronous response for this request)." do
230+
test_name = "proxy-http-no-response"
231+
232+
api_id = "mock-#{test_name}-api"
233+
endpoint_id = "mock-#{test_name}-endpoint"
234+
endpoint_path = "/#{endpoint_id}"
235+
sync_response = "Bad request from the test endpoint"
236+
async_response = %{"this response" => "the client never sees this response"}
237+
238+
route(endpoint_path, fn %{query: %{"correlation" => correlation_id}} ->
239+
event =
240+
Jason.encode!(%{
241+
specversion: "0.2",
242+
type: "rig.async-response",
243+
source: "fake-service",
244+
id: "1",
245+
rig: %{correlation: correlation_id},
246+
data: async_response
247+
})
248+
249+
build_conn()
250+
|> put_req_header("content-type", "application/json;charset=utf-8")
251+
|> post("/v2/responses", event)
252+
253+
Response.bad_request!(sync_response)
254+
end)
255+
256+
# We register the endpoint with the proxy:
257+
rig_api_url = "http://localhost:#{@api_port}/v2/apis"
258+
rig_proxy_url = "http://localhost:#{@proxy_port}"
259+
260+
body =
261+
Jason.encode!(%{
262+
id: api_id,
263+
name: "Mock API",
264+
version_data: %{
265+
default: %{
266+
endpoints: [
267+
%{
268+
id: endpoint_id,
269+
type: "http",
270+
method: "GET",
271+
path: endpoint_path,
272+
response_from: "http_async"
273+
}
274+
]
275+
}
276+
},
277+
proxy: %{
278+
target_url: "localhost",
279+
port: FakeServer.port()
280+
}
281+
})
282+
283+
headers = [{"content-type", "application/json"}]
284+
HTTPoison.post!(rig_api_url, body, headers)
285+
286+
# The client calls the proxy endpoint:
287+
request_url = rig_proxy_url <> endpoint_path
288+
%HTTPoison.Response{status_code: res_status, body: res_body} = HTTPoison.get!(request_url)
289+
290+
# Now we can assert that...
291+
# ...the fake backend service has been called:
292+
assert FakeServer.hits() == 1
293+
# ...the connection is closed and the status is 400 Bad Request:
294+
assert res_status == 400
295+
# ...the client receives the backend's synchronous error response (no async response is awaited):
296+
assert res_body == sync_response
297+
end
156298
end

0 commit comments

Comments
 (0)