From 3d77e12522e7de5dece8479b4078c3f9deaf2ee6 Mon Sep 17 00:00:00 2001
From: "xiong.jiang"
Date: Thu, 30 Nov 2023 15:21:29 +0800
Subject: [PATCH 1/2] Fix tensors being placed on two different devices when
 running in a Flask environment

---
 spacy_curated_transformers/models/architectures.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/spacy_curated_transformers/models/architectures.py b/spacy_curated_transformers/models/architectures.py
index 51c2619..cae83d6 100644
--- a/spacy_curated_transformers/models/architectures.py
+++ b/spacy_curated_transformers/models/architectures.py
@@ -734,7 +734,8 @@ def _convert_inputs(
         span = X[i]
         span_len = span.shape[0]
         Xt[i, :span_len] = span
-    Xt = xp2torch(Xt)
+    device = model.shims[0].device
+    Xt = xp2torch(Xt, device=device)
 
     def convert_from_torch_backward(d_inputs: Any):
         # No gradients for the inputs.

From d0fb8f6e62756895d70398af3af9cd8ffa78c7f3 Mon Sep 17 00:00:00 2001
From: "xiong.jiang"
Date: Thu, 30 Nov 2023 18:51:16 +0800
Subject: [PATCH 2/2] Fix tensors being placed on two different devices when
 running in a Flask environment

---
 spacy_curated_transformers/models/architectures.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/spacy_curated_transformers/models/architectures.py b/spacy_curated_transformers/models/architectures.py
index cae83d6..5306d6b 100644
--- a/spacy_curated_transformers/models/architectures.py
+++ b/spacy_curated_transformers/models/architectures.py
@@ -734,8 +734,11 @@ def _convert_inputs(
         span = X[i]
         span_len = span.shape[0]
         Xt[i, :span_len] = span
-    device = model.shims[0].device
-    Xt = xp2torch(Xt, device=device)
+    if ops.device_type == 'gpu':
+        device = torch.device(f"cuda:{ops.device_id}")
+        Xt = xp2torch(Xt, device=device)
+    else:
+        Xt = xp2torch(Xt)
 
     def convert_from_torch_backward(d_inputs: Any):
         # No gradients for the inputs.
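
The two patches take different routes to the same goal: PATCH 1/2 reads the
target device from the PyTorch shim (`model.shims[0].device`), while
PATCH 2/2 derives it from the Thinc ops and falls back to `xp2torch`'s
default placement on CPU. The snippet below is a minimal standalone sketch
of the ops-based logic from PATCH 2/2, not code from the patch itself; it
assumes `thinc.api` exports `get_current_ops` and `xp2torch`, that the
installed Thinc version's `xp2torch` accepts a `device` keyword (as the
patched code relies on), and the helper name `to_torch_on_active_device`
is hypothetical.

import torch
from thinc.api import get_current_ops, xp2torch


def to_torch_on_active_device(Xt, ops=None):
    # Hypothetical helper, for illustration only: convert an xp array to a
    # torch tensor placed on the device of the active Thinc ops.
    ops = ops if ops is not None else get_current_ops()
    if ops.device_type == "gpu":
        # Thinc identifies GPU backends by device_type/device_id; map the
        # id onto the corresponding CUDA device so the converted tensor
        # lands on the same device as the model's weights.
        device = torch.device(f"cuda:{ops.device_id}")
        return xp2torch(Xt, device=device)
    # On CPU ops, keep xp2torch's default placement.
    return xp2torch(Xt)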