@@ -958,7 +958,6 @@ def create_collection(self,
958958 key_generator = 'traditional' ,
959959 shard_fields = None ,
960960 shard_count = None ,
961- index_bucket_count = None ,
962961 replication_factor = None ,
963962 shard_like = None ,
964963 sync_replication = None ,
@@ -1000,14 +999,6 @@ def create_collection(self,
1000999 :type shard_fields: [str | unicode]
10011000 :param shard_count: Number of shards to create.
10021001 :type shard_count: int
1003- :param index_bucket_count: Number of buckets into which indexes using
1004- hash tables are split. The default is 16, and this number has to be
1005- a power of 2 and less than or equal to 1024. For large collections,
1006- one should increase this to avoid long pauses when the hash table
1007- has to be initially built or re-sized, since buckets are re-sized
1008- individually and can be initially built in parallel. For instance,
1009- 64 may be a sensible value for 100 million documents.
1010- :type index_bucket_count: int
10111002 :param replication_factor: Number of copies of each shard on different
10121003 servers in a cluster. Allowed values are 1 (only one copy is kept
10131004 and no synchronous replication), and n (n-1 replicas are kept and
@@ -1075,8 +1066,6 @@ def create_collection(self,
10751066 data ['numberOfShards' ] = shard_count
10761067 if shard_fields is not None :
10771068 data ['shardKeys' ] = shard_fields
1078- if index_bucket_count is not None :
1079- data ['indexBuckets' ] = index_bucket_count
10801069 if replication_factor is not None :
10811070 data ['replicationFactor' ] = replication_factor
10821071 if shard_like is not None :