@@ -229,8 +229,8 @@ class BERTUnfactoredDisambiguator(Disambiguator):
 use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
     Defaults to True.
 batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
-ranking_cache (:obj:`LFUCache`, optional): The cache dictionary of
-    pre-computed scored analyses. Defaults to `None`.
+ranking_cache (:obj:`LFUCache`, optional): The cache of pre-computed
+    scored analyses. Defaults to `None`.
 ranking_cache_size (:obj:`int`, optional): The number of unique word
     disambiguations to cache. If 0, no ranked analyses will be cached.
     The cache uses a least-frequently-used eviction policy.
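
The ranking cache described above maps a word to its pre-computed scored analyses and evicts the least-frequently-used entry once the cache is full. A minimal sketch of that eviction behaviour, assuming the LFUCache referenced in the docstring is cachetools.LFUCache (the keys and cached values below are placeholders, not real analyses):

# Minimal sketch of LFU eviction, assuming the docstring's LFUCache is
# cachetools.LFUCache. Keys and cached analyses here are placeholders.
from cachetools import LFUCache

ranking_cache = LFUCache(maxsize=2)
ranking_cache['word_a'] = ['scored analysis 1', 'scored analysis 2']
ranking_cache['word_b'] = ['scored analysis 3']

# Reading 'word_a' bumps its use count above 'word_b'.
_ = ranking_cache['word_a']

# Adding a third entry evicts the least-frequently-used key ('word_b').
ranking_cache['word_c'] = ['scored analysis 4']
print(sorted(ranking_cache.keys()))  # ['word_a', 'word_c']
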
@@ -289,6 +289,7 @@ def pretrained(model_name='msa', top=1, use_gpu=True, batch_size=32,
 ranking_cache_size (:obj:`int`, optional): The number of unique
     word disambiguations to cache. If 0, no ranked analyses will be
     cached. The cache uses a least-frequently-used eviction policy.
+    This argument is ignored if pretrained_cache is True.
     Defaults to 100000.

 Returns:
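
A hypothetical usage sketch of the cache-related arguments documented above, assuming pretrained() accepts pretrained_cache and ranking_cache_size as keyword arguments as this diff describes: with the default pretrained_cache=True a pre-computed ranking cache is loaded and ranking_cache_size has no effect, so a custom size only matters when the pretrained cache is disabled.

# Hypothetical usage based on the docstring above; pretrained_cache and
# ranking_cache_size are assumed to be accepted keyword arguments.
from camel_tools.disambig.bert import BERTUnfactoredDisambiguator

# Default: a pre-computed ranking cache is loaded; ranking_cache_size is ignored.
bert = BERTUnfactoredDisambiguator.pretrained('msa')

# To size the LFU cache yourself, disable the pretrained cache first.
bert_custom = BERTUnfactoredDisambiguator.pretrained(
    'msa',
    pretrained_cache=False,
    ranking_cache_size=50000,
)
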
@@ -328,7 +329,7 @@ def pretrained(model_name='msa', top=1, use_gpu=True, batch_size=32,
             ranking_cache_size=ranking_cache_size)

     @staticmethod
-    def pretrained_from_config(config, top=1, use_gpu=True, batch_size=32,
+    def _pretrained_from_config(config, top=1, use_gpu=True, batch_size=32,
                                cache_size=10000, pretrained_cache=True,
                                ranking_cache_size=100000):
         """Load a pre-trained model from a config file.
@@ -351,6 +352,7 @@ def pretrained_from_config(config, top=1, use_gpu=True, batch_size=32,
 ranking_cache_size (:obj:`int`, optional): The number of unique
     word disambiguations to cache. If 0, no ranked analyses will be
     cached. The cache uses a least-frequently-used eviction policy.
+    This argument is ignored if pretrained_cache is True.
     Defaults to 100000.

 Returns: