Skip to content

Commit 59bc596 — "remove None union types when default values are set"
(parent commit: 4d52051)

File tree

8 files changed: +19 lines, -19 lines

graphrag/config/models/basic_search_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,15 +22,15 @@ class BasicSearchConfig(BaseModel):
2222
description="The conversation history maximum turns.",
2323
default=defs.BASIC_SEARCH_CONVERSATION_HISTORY_MAX_TURNS,
2424
)
25-
temperature: float | None = Field(
25+
temperature: float = Field(
2626
description="The temperature to use for token generation.",
2727
default=defs.BASIC_SEARCH_LLM_TEMPERATURE,
2828
)
29-
top_p: float | None = Field(
29+
top_p: float = Field(
3030
description="The top-p value to use for token generation.",
3131
default=defs.BASIC_SEARCH_LLM_TOP_P,
3232
)
33-
n: int | None = Field(
33+
n: int = Field(
3434
description="The number of completions to generate.",
3535
default=defs.BASIC_SEARCH_LLM_N,
3636
)

graphrag/config/models/cluster_graph_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class ClusterGraphConfig(BaseModel):
1818
description="Whether to use the largest connected component.",
1919
default=defs.USE_LCC,
2020
)
21-
seed: int | None = Field(
21+
seed: int = Field(
2222
description="The seed to use for the clustering.",
2323
default=defs.CLUSTER_GRAPH_SEED,
2424
)

graphrag/config/models/global_search_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,15 +20,15 @@ class GlobalSearchConfig(BaseModel):
2020
knowledge_prompt: str | None = Field(
2121
description="The global search general prompt to use.", default=None
2222
)
23-
temperature: float | None = Field(
23+
temperature: float = Field(
2424
description="The temperature to use for token generation.",
2525
default=defs.GLOBAL_SEARCH_LLM_TEMPERATURE,
2626
)
27-
top_p: float | None = Field(
27+
top_p: float = Field(
2828
description="The top-p value to use for token generation.",
2929
default=defs.GLOBAL_SEARCH_LLM_TOP_P,
3030
)
31-
n: int | None = Field(
31+
n: int = Field(
3232
description="The number of completions to generate.",
3333
default=defs.GLOBAL_SEARCH_LLM_N,
3434
)

graphrag/config/models/input_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ class InputConfig(BaseModel):
3030
container_name: str | None = Field(
3131
description="The azure blob storage container name to use.", default=None
3232
)
33-
encoding: str | None = Field(
33+
encoding: str = Field(
3434
description="The input file encoding to use.",
3535
default=defs.INPUT_FILE_ENCODING,
3636
)

graphrag/config/models/language_model_config.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -76,27 +76,27 @@ def _validate_encoding_model(self) -> None:
7676
if self.encoding_model.strip() == "":
7777
self.encoding_model = tiktoken.encoding_name_for_model(self.model)
7878

79-
max_tokens: int | None = Field(
79+
max_tokens: int = Field(
8080
description="The maximum number of tokens to generate.",
8181
default=defs.LLM_MAX_TOKENS,
8282
)
83-
temperature: float | None = Field(
83+
temperature: float = Field(
8484
description="The temperature to use for token generation.",
8585
default=defs.LLM_TEMPERATURE,
8686
)
87-
top_p: float | None = Field(
87+
top_p: float = Field(
8888
description="The top-p value to use for token generation.",
8989
default=defs.LLM_TOP_P,
9090
)
91-
n: int | None = Field(
91+
n: int = Field(
9292
description="The number of completions to generate.",
9393
default=defs.LLM_N,
9494
)
95-
frequency_penalty: float | None = Field(
95+
frequency_penalty: float = Field(
9696
description="The frequency penalty to use for token generation.",
9797
default=defs.LLM_FREQUENCY_PENALTY,
9898
)
99-
presence_penalty: float | None = Field(
99+
presence_penalty: float = Field(
100100
description="The presence penalty to use for token generation.",
101101
default=defs.LLM_PRESENCE_PENALTY,
102102
)

graphrag/config/models/local_search_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,15 +34,15 @@ class LocalSearchConfig(BaseModel):
3434
description="The top k mapped relations.",
3535
default=defs.LOCAL_SEARCH_TOP_K_RELATIONSHIPS,
3636
)
37-
temperature: float | None = Field(
37+
temperature: float = Field(
3838
description="The temperature to use for token generation.",
3939
default=defs.LOCAL_SEARCH_LLM_TEMPERATURE,
4040
)
41-
top_p: float | None = Field(
41+
top_p: float = Field(
4242
description="The top-p value to use for token generation.",
4343
default=defs.LOCAL_SEARCH_LLM_TOP_P,
4444
)
45-
n: int | None = Field(
45+
n: int = Field(
4646
description="The number of completions to generate.",
4747
default=defs.LOCAL_SEARCH_LLM_N,
4848
)

graphrag/config/models/text_embedding_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ class TextEmbeddingConfig(BaseModel):
2727
names: list[str] = Field(
2828
description="The specific embeddings to perform.", default=[]
2929
)
30-
vector_store: dict | None = Field(
30+
vector_store: dict = Field(
3131
description="The vector storage configuration", default=defs.VECTOR_STORE_DICT
3232
)
3333
strategy: dict | None = Field(

graphrag/index/input/csv.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ async def load_file(path: str, group: dict | None) -> pd.DataFrame:
3333
if group is None:
3434
group = {}
3535
buffer = BytesIO(await storage.get(path, as_bytes=True))
36-
data = pd.read_csv(buffer, encoding=config.encoding or "latin-1")
36+
data = pd.read_csv(buffer, encoding=config.encoding)
3737
additional_keys = group.keys()
3838
if len(additional_keys) > 0:
3939
data[[*additional_keys]] = data.apply(

Comments (0)