@@ -13,19 +13,20 @@ defmodule Zebra.Workers.JobRequestFactory.Cache do
   # Overall, if cache system is down, we ignore every issue.
   #
 
-  def find(nil) do
+  def find(nil, _repo_proxy) do
     # We don't fail any jobs due to a missing cache connection,
     # but we should make sure we are aware of any issues in this area.
     Watchman.increment("external.cachehub.describe.failed")
 
     {:ok, nil}
   end
 
-  def find(cache_id) do
+  def find(cache_id, repo_proxy) do
     Watchman.benchmark("external.cachehub.describe", fn ->
       req = Request.new(cache_id: cache_id)
 
-      with {:ok, endpoint} <- Application.fetch_env(:zebra, :cachehub_api_endpoint),
+      with false <- forked_pr?(repo_proxy),
+           {:ok, endpoint} <- Application.fetch_env(:zebra, :cachehub_api_endpoint),
            {:ok, channel} <- GRPC.Stub.connect(endpoint),
            {:ok, response} <- Stub.describe(channel, req, timeout: 30_000) do
         if response.status.code == InternalApi.ResponseStatus.Code.value(:OK) do
@@ -38,6 +39,10 @@ defmodule Zebra.Workers.JobRequestFactory.Cache do
           {:ok, nil}
         end
       else
+        true ->
+          Logger.info("Skipping fetching of the cache as the job is part of a forked PR build.")
+          {:ok, nil}
+
         e ->
           Watchman.increment("external.cachehub.describe.failed")
           Logger.info("Failed to fetch info from cachehub #{cache_id}, #{inspect(e)}")
@@ -82,4 +87,12 @@ defmodule Zebra.Workers.JobRequestFactory.Cache do
       {:ok, vars}
     end
   end
+
+  defp forked_pr?(_repo = %{pr_slug: ""}), do: false
+
+  defp forked_pr?(repo) do
+    [pr_repo | _rest] = repo.pr_slug |> String.split("/")
+    [base_repo | _rest] = repo.repo_slug |> String.split("/")
+    pr_repo != base_repo
+  end
 end
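For context on the new private helper (this is not part of the diff itself): `forked_pr?/1` treats an empty `pr_slug` as a non-PR job and otherwise compares the owner segment of the PR slug against the owner segment of the base repository slug. A minimal standalone sketch, assuming `repo_proxy` exposes plain `"owner/name"` strings in `pr_slug` and `repo_slug`; the module name and the example slugs below are illustrative only:

```elixir
defmodule ForkCheckSketch do
  # Empty pr_slug means the job is not a PR build at all, so it cannot be a fork.
  def forked_pr?(%{pr_slug: ""}), do: false

  # A PR is considered forked when the owner of the PR's source repository
  # differs from the owner of the base repository.
  def forked_pr?(%{pr_slug: pr_slug, repo_slug: repo_slug}) do
    [pr_owner | _] = String.split(pr_slug, "/")
    [base_owner | _] = String.split(repo_slug, "/")
    pr_owner != base_owner
  end
end

ForkCheckSketch.forked_pr?(%{pr_slug: "", repo_slug: "acme/app"})
# => false (not a PR build)

ForkCheckSketch.forked_pr?(%{pr_slug: "acme/app", repo_slug: "acme/app"})
# => false (same-repo PR)

ForkCheckSketch.forked_pr?(%{pr_slug: "contributor/app", repo_slug: "acme/app"})
# => true (forked PR)
```

With this check as the first clause of the `with`, `find/2` short-circuits into the new `true ->` branch for forked-PR jobs and returns `{:ok, nil}` before any cachehub call is made.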