Skip to content

Commit d20a837

Browse files
committed
fixing reformatted files
1 parent 64541a8 commit d20a837

File tree

6 files changed

+57
-44
lines changed

6 files changed

+57
-44
lines changed

supporting-blog-content/building-a-recipe-search-with-elasticsearch/elasticsearch_connection.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,7 @@ def __init__(self, config_file="config.yml"):
88
with open(config_file, "r") as f:
99
config = yaml.safe_load(f)
1010
self.client = Elasticsearch(
11-
cloud_id=config["cloud_id"],
12-
api_key=config["api_key"]
11+
cloud_id=config["cloud_id"], api_key=config["api_key"]
1312
)
1413

1514
def get_client(self):
@@ -21,5 +20,6 @@ def get_async_client(self):
2120
self.client = AsyncElasticsearch(
2221
cloud_id=config["cloud_id"],
2322
api_key=config["api_key"],
24-
request_timeout=240)
25-
return self.client;
23+
request_timeout=240,
24+
)
25+
return self.client

supporting-blog-content/building-a-recipe-search-with-elasticsearch/infra.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,10 @@ def create_index_embedding():
1717
"unit": {"type": "keyword"},
1818
"description_embedding": {
1919
"type": "semantic_text",
20-
"inference_id": "elser_embeddings"
21-
}
20+
"inference_id": "elser_embeddings",
21+
},
2222
}
23-
}
23+
},
2424
)
2525
print(response)
2626

@@ -31,8 +31,9 @@ def create_inference():
3131
task_type="sparse_embedding",
3232
body={
3333
"service": "elser",
34-
"service_settings": {"num_allocations": 1, "num_threads": 1}
35-
})
34+
"service_settings": {"num_allocations": 1, "num_threads": 1},
35+
},
36+
)
3637
print(response)
3738

3839

supporting-blog-content/building-a-recipe-search-with-elasticsearch/infra_lexical_index.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ def create_index():
1414
"category": {"type": "keyword"},
1515
"brand": {"type": "keyword"},
1616
"price": {"type": "float"},
17-
"unit": {"type": "keyword"}
17+
"unit": {"type": "keyword"},
1818
}
19-
}
19+
},
2020
)
2121
print(response)
2222

supporting-blog-content/building-a-recipe-search-with-elasticsearch/ingestion.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010

1111
def partition_list(lst, chunk_size):
12-
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
12+
return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]
1313

1414

1515
async def index_data():
@@ -34,7 +34,9 @@ async def index_data():
3434

3535
async def async_bulk_indexing(client, documents):
3636
success, failed = await helpers.async_bulk(client, documents)
37-
print(f"Successfully indexed {success} documents. Failed to index {failed} documents.")
37+
print(
38+
f"Successfully indexed {success} documents. Failed to index {failed} documents."
39+
)
3840

3941

4042
async def main():

supporting-blog-content/building-a-recipe-search-with-elasticsearch/ingestion_lexical_index.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010

1111
def partition_list(lst, chunk_size):
12-
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
12+
return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]
1313

1414

1515
async def index_data():
@@ -34,7 +34,9 @@ async def index_data():
3434

3535
async def async_bulk_indexing(client, documents):
3636
success, failed = await helpers.async_bulk(client, documents)
37-
print(f"Successfully indexed {success} documents. Failed to index {failed} documents.")
37+
print(
38+
f"Successfully indexed {success} documents. Failed to index {failed} documents."
39+
)
3840

3941

4042
async def main():

supporting-blog-content/building-a-recipe-search-with-elasticsearch/search.py

Lines changed: 37 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -22,24 +22,21 @@ def search_semantic(term):
2222
index="grocery-catalog-elser",
2323
size=size,
2424
source_excludes="description_embedding",
25-
query={
26-
"semantic": {
27-
"field": "description_embedding",
28-
"query": term
29-
30-
}
31-
})
25+
query={"semantic": {"field": "description_embedding", "query": term}},
26+
)
3227

3328
for hit in response["hits"]["hits"]:
3429
score = hit["_score"]
3530
name = format_text(hit["_source"]["name"], line_length=10)
3631
description = hit["_source"]["description"]
3732
formatted_description = format_text(description)
38-
result.append({
39-
"score": score,
40-
"name": name,
41-
"description": formatted_description,
42-
})
33+
result.append(
34+
{
35+
"score": score,
36+
"name": name,
37+
"description": formatted_description,
38+
}
39+
)
4340
return result
4441

4542

@@ -49,38 +46,49 @@ def search_lexical(term):
4946
index="grocery-catalog-elser",
5047
size=size,
5148
source_excludes="description_embedding",
52-
query={
53-
"multi_match": {
54-
"query": term,
55-
"fields": [
56-
"name",
57-
"description"]
58-
}
59-
}
49+
query={"multi_match": {"query": term, "fields": ["name", "description"]}},
6050
)
6151

6252
for hit in response["hits"]["hits"]:
6353
score = hit["_score"]
6454
name = format_text(hit["_source"]["name"], line_length=10)
6555
description = hit["_source"]["description"]
66-
result.append({
67-
"score": score,
68-
"name": name,
69-
"description": description,
70-
})
56+
result.append(
57+
{
58+
"score": score,
59+
"name": name,
60+
"description": description,
61+
}
62+
)
7163
return result
7264

7365

7466
if __name__ == "__main__":
7567
rs1 = search_semantic(term)
7668
rs2 = search_lexical(term)
7769

78-
df1 = pd.DataFrame(rs1)[["name", "score"]] if rs1 else pd.DataFrame(columns=["name", "score"])
79-
df2 = pd.DataFrame(rs2)[["name", "score"]] if rs2 else pd.DataFrame(columns=["name", "score"])
80-
df1 = pd.DataFrame(rs1)[["name", "score"]] if rs1 else pd.DataFrame(columns=["name", "score"])
70+
df1 = (
71+
pd.DataFrame(rs1)[["name", "score"]]
72+
if rs1
73+
else pd.DataFrame(columns=["name", "score"])
74+
)
75+
df2 = (
76+
pd.DataFrame(rs2)[["name", "score"]]
77+
if rs2
78+
else pd.DataFrame(columns=["name", "score"])
79+
)
80+
df1 = (
81+
pd.DataFrame(rs1)[["name", "score"]]
82+
if rs1
83+
else pd.DataFrame(columns=["name", "score"])
84+
)
8185
df1["Search Type"] = "Semantic"
8286

83-
df2 = pd.DataFrame(rs2)[["name", "score"]] if rs2 else pd.DataFrame(columns(["name", "score"]))
87+
df2 = (
88+
pd.DataFrame(rs2)[["name", "score"]]
89+
if rs2
90+
else pd.DataFrame(columns(["name", "score"]))
91+
)
8492
df2["Search Type"] = "Lexical"
8593

8694
tabela = pd.concat([df1, df2], axis=0).reset_index(drop=True)

0 commit comments

Comments (0)