5 changes: 1 addition & 4 deletions benchmarks/callback_full_pipeline.py
@@ -49,10 +49,7 @@ def insert_next(self, previous_result=sentinel):
def run(self):
self.start_profile()

if self.protocol_version >= 3:
concurrency = 1000
else:
concurrency = 100
concurrency = 1000

for _ in range(min(concurrency, self.num_queries)):
self.insert_next()
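Note: with only protocol v3 and later supported, the benchmark primes a flat 1000 in-flight inserts instead of branching on protocol_version. As an aside, applications wanting the same kind of bounded concurrency without a hand-rolled callback pipeline can use the driver's execute_concurrent_with_args helper; the sketch below assumes a placeholder contact point, keyspace and table.

```python
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args

cluster = Cluster(['127.0.0.1'])              # placeholder contact point
session = cluster.connect('ks')               # placeholder keyspace
insert = session.prepare("INSERT INTO tbl (k, v) VALUES (?, ?)")  # placeholder table

params = [(i, str(i)) for i in range(10000)]
# concurrency here plays the same role as the benchmark's priming loop above
results = execute_concurrent_with_args(session, insert, params, concurrency=1000)
```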
12 changes: 1 addition & 11 deletions cassandra/__init__.py
@@ -135,16 +135,6 @@ class ProtocolVersion(object):
"""
Defines native protocol versions supported by this driver.
"""
V1 = 1
"""
v1, supported in Cassandra 1.2-->2.2
"""

V2 = 2
"""
v2, supported in Cassandra 2.0-->2.2;
added support for lightweight transactions, batch operations, and automatic query paging.
"""

V3 = 3
"""
@@ -180,7 +170,7 @@ class ProtocolVersion(object):
DSE private protocol v2, supported in DSE 6.0+
"""

SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V6, V5, V4, V3, V2, V1)
SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V6, V5, V4, V3)
"""
A tuple of all supported protocol versions
"""
71 changes: 4 additions & 67 deletions cassandra/cluster.py
@@ -75,7 +75,7 @@
NoSpeculativeExecutionPolicy, DefaultLoadBalancingPolicy,
NeverRetryPolicy)
from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler,
HostConnectionPool, HostConnection,
HostConnection,
NoConnectionsAvailable)
from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement,
BatchStatement, bind_params, QueryTrace, TraceUnavailable,
@@ -731,9 +731,6 @@
be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
such as :class:`~.PlainTextAuthProvider`.

When :attr:`~.Cluster.protocol_version` is 1, this should be
a function that accepts one argument, the IP address of a node,
and returns a dict of credentials for that node.

When not using authentication, this should be left as :const:`None`.
"""
@@ -1452,18 +1449,6 @@

self._user_types = defaultdict(dict)

self._min_requests_per_connection = {
HostDistance.LOCAL_RACK: DEFAULT_MIN_REQUESTS,
HostDistance.LOCAL: DEFAULT_MIN_REQUESTS,
HostDistance.REMOTE: DEFAULT_MIN_REQUESTS
}

self._max_requests_per_connection = {
HostDistance.LOCAL_RACK: DEFAULT_MAX_REQUESTS,
HostDistance.LOCAL: DEFAULT_MAX_REQUESTS,
HostDistance.REMOTE: DEFAULT_MAX_REQUESTS
}

self._core_connections_per_host = {
HostDistance.LOCAL_RACK: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST,
HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST,
@@ -1666,48 +1651,6 @@
if not_done:
raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout.")

def get_min_requests_per_connection(self, host_distance):
return self._min_requests_per_connection[host_distance]

def set_min_requests_per_connection(self, host_distance, min_requests):
"""
Sets a threshold for concurrent requests per connection, below which
connections will be considered for disposal (down to core connections;
see :meth:`~Cluster.set_core_connections_per_host`).

Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_min_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if min_requests < 0 or min_requests > 126 or \
min_requests >= self._max_requests_per_connection[host_distance]:
raise ValueError("min_requests must be 0-126 and less than the max_requests for this host_distance (%d)" %
(self._min_requests_per_connection[host_distance],))
self._min_requests_per_connection[host_distance] = min_requests

def get_max_requests_per_connection(self, host_distance):
return self._max_requests_per_connection[host_distance]

def set_max_requests_per_connection(self, host_distance, max_requests):
"""
Sets a threshold for concurrent requests per connection, above which new
connections will be created to a host (up to max connections;
see :meth:`~Cluster.set_max_connections_per_host`).

Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if max_requests < 1 or max_requests > 127 or \
max_requests <= self._min_requests_per_connection[host_distance]:
raise ValueError("max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)" %
(self._min_requests_per_connection[host_distance],))
self._max_requests_per_connection[host_distance] = max_requests

def get_core_connections_per_host(self, host_distance):
"""
Gets the minimum number of connections per Session that will be opened
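Note: the deleted setters only made sense for v1/v2 connection pooling, where a signed one-byte stream id capped each connection at 127 concurrent requests (hence the 0-126 and 1-127 bounds above). A small arithmetic sketch of why the thresholds disappear with the two-byte stream ids of v3 and later:

```python
# Per-connection concurrency is bounded by the stream-id width of the framing.
v1_v2_in_flight_limit = (2 ** 7) - 1    # 127: why the removed setters were capped at 126/127
v3_in_flight_limit = (2 ** 15) - 1      # 32767: large enough that request-count pooling is moot
print(v1_v2_in_flight_limit, v3_in_flight_limit)
```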
@@ -3101,13 +3044,11 @@
spec_exec_policy = execution_profile.speculative_execution_policy

fetch_size = query.fetch_size
if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
if fetch_size is FETCH_SIZE_UNSET:
fetch_size = self.default_fetch_size
elif self._protocol_version == 1:
fetch_size = None

start_time = time.time()
if self._protocol_version >= 3 and self.use_client_timestamp:
if self.use_client_timestamp:
timestamp = self.cluster.timestamp_generator()
else:
timestamp = None
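Note: with v1 gone there is no protocol left that lacks paging, so an unset fetch_size always falls back to the session default, and client-side timestamps depend only on use_client_timestamp. A short usage sketch (default_fetch_size and SimpleStatement are standard driver API; keyspace and table names are placeholders):

```python
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(['127.0.0.1'])      # placeholder contact point
session = cluster.connect()

session.default_fetch_size = 5000     # used whenever a statement leaves fetch_size unset
stmt = SimpleStatement("SELECT * FROM ks.tbl")   # placeholder keyspace/table

# The write timestamp comes from cluster.timestamp_generator whenever
# use_client_timestamp is enabled; there is no protocol_version gate any more.
rows = session.execute(stmt)
```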
@@ -3378,11 +3319,7 @@

def run_add_or_renew_pool():
try:
if self._protocol_version >= 3:
new_pool = HostConnection(host, distance, self)
else:
# TODO remove host pool again ???
new_pool = HostConnectionPool(host, distance, self)
new_pool = HostConnection(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), endpoint=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
@@ -4489,7 +4426,7 @@
self._scheduled_tasks.discard(task)
fn, args, kwargs = task
kwargs = dict(kwargs)
future = self._executor.submit(fn, *args, **kwargs)

Check failure on line 4429 in cassandra/cluster.py (GitHub Actions: test asyncore (3.9), test asyncio (3.9), test libev (3.11), test libev (3.12)): cannot schedule new futures after shutdown
future.add_done_callback(self._log_if_failed)
else:
self._queue.put_nowait((run_at, i, task))
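Note: the CI annotations condensed above report a RuntimeError from the scheduler's executor ("cannot schedule new futures after shutdown"). Purely as a hypothetical illustration of that failure mode, and not the fix adopted in this change, a submission can be guarded like this:

```python
from concurrent.futures import ThreadPoolExecutor

def safe_submit(executor, fn, *args, **kwargs):
    # ThreadPoolExecutor.submit() raises RuntimeError once shutdown() has been
    # called; swallowing it here simply drops tasks scheduled during teardown.
    try:
        return executor.submit(fn, *args, **kwargs)
    except RuntimeError:
        return None
```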
23 changes: 8 additions & 15 deletions cassandra/connection.py
@@ -123,7 +123,6 @@ def decompress(byts):
DEFAULT_LOCAL_PORT_LOW = 49152
DEFAULT_LOCAL_PORT_HIGH = 65535

frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')


@@ -817,17 +816,12 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
if not self.ssl_context and self.ssl_options:
self.ssl_context = self._build_ssl_context_from_options()

if protocol_version >= 3:
self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1)
# Don't fill the deque with 2**15 items right away. Start with some and add
# more if needed.
initial_size = min(300, self.max_in_flight)
self.request_ids = deque(range(initial_size))
self.highest_request_id = initial_size - 1
else:
self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1)
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1)
# Don't fill the deque with 2**15 items right away. Start with some and add
# more if needed.
initial_size = min(300, self.max_in_flight)
self.request_ids = deque(range(initial_size))
self.highest_request_id = initial_size - 1

self.lock = RLock()
self.connected_event = Event()
@@ -1205,11 +1199,10 @@ def _read_frame_header(self):
version = buf[0] & PROTOCOL_VERSION_MASK
if version not in ProtocolVersion.SUPPORTED_VERSIONS:
raise ProtocolError("This version of the driver does not support protocol version %d" % version)
frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2
# this frame header struct is everything after the version byte
header_size = frame_header.size + 1
header_size = frame_header_v3.size + 1
if pos >= header_size:
flags, stream, op, body_len = frame_header.unpack_from(buf, 1)
flags, stream, op, body_len = frame_header_v3.unpack_from(buf, 1)
if body_len < 0:
raise ProtocolError("Received negative body length: %r" % body_len)
self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size)
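Note: with the v1/v2 struct removed, every header after the version byte is the fixed v3 layout: flags (1 byte), stream id (signed 2 bytes), opcode (1 byte) and body length (4 bytes), i.e. struct format '>BhBi'. A self-contained sketch of parsing one such header (the sample bytes are fabricated for illustration):

```python
import struct

frame_header_v3 = struct.Struct('>BhBi')   # flags, stream id, opcode, body length

# Fabricated 9-byte response header: version byte 0x84 (v4 response), then
# flags=0, stream=1, opcode=0x08 (RESULT), body length=4.
buf = bytes([0x84]) + frame_header_v3.pack(0, 1, 0x08, 4)

version = buf[0] & 0x7F                    # PROTOCOL_VERSION_MASK
flags, stream, op, body_len = frame_header_v3.unpack_from(buf, 1)
header_size = frame_header_v3.size + 1     # the struct covers everything after the version byte
print(version, flags, stream, op, body_len, header_size)   # 4 0 1 8 4 9
```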
42 changes: 15 additions & 27 deletions cassandra/cqltypes.py
@@ -812,18 +812,13 @@ class _SimpleParameterizedType(_ParameterizedType):
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
length = 4
numelements = int32_unpack(byts[:length])
p = length
result = []
inner_proto = max(3, protocol_version)
for _ in range(numelements):
itemlen = unpack(byts[p:p + length])
itemlen = int32_unpack(byts[p:p + length])
p += length
if itemlen < 0:
result.append(None)
@@ -839,16 +834,15 @@ def serialize_safe(cls, items, protocol_version):
raise TypeError("Received a string for a type that expects a sequence")

subtype, = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(items)))
buf.write(int32_pack(len(items)))
inner_proto = max(3, protocol_version)
for item in items:
if item is None:
buf.write(pack(-1))
buf.write(int32_pack(-1))
else:
itembytes = subtype.to_binary(item, inner_proto)
buf.write(pack(len(itembytes)))
buf.write(int32_pack(len(itembytes)))
buf.write(itembytes)
return buf.getvalue()

@@ -872,18 +866,13 @@ class MapType(_ParameterizedType):
@classmethod
def deserialize_safe(cls, byts, protocol_version):
key_type, value_type = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
length = 4
numelements = int32_unpack(byts[:length])
p = length
themap = util.OrderedMapSerializedKey(key_type, protocol_version)
inner_proto = max(3, protocol_version)
for _ in range(numelements):
key_len = unpack(byts[p:p + length])
key_len = int32_unpack(byts[p:p + length])
p += length
if key_len < 0:
keybytes = None
@@ -893,7 +882,7 @@ def deserialize_safe(cls, byts, protocol_version):
p += key_len
key = key_type.from_binary(keybytes, inner_proto)

val_len = unpack(byts[p:p + length])
val_len = int32_unpack(byts[p:p + length])
p += length
if val_len < 0:
val = None
@@ -908,9 +897,8 @@ def deserialize_safe(cls, byts, protocol_version):
@classmethod
def serialize_safe(cls, themap, protocol_version):
key_type, value_type = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(themap)))
buf.write(int32_pack(len(themap)))
try:
items = themap.items()
except AttributeError:
@@ -919,16 +907,16 @@ def serialize_safe(cls, themap, protocol_version):
for key, val in items:
if key is not None:
keybytes = key_type.to_binary(key, inner_proto)
buf.write(pack(len(keybytes)))
buf.write(int32_pack(len(keybytes)))
buf.write(keybytes)
else:
buf.write(pack(-1))
buf.write(int32_pack(-1))
if val is not None:
valbytes = value_type.to_binary(val, inner_proto)
buf.write(pack(len(valbytes)))
buf.write(int32_pack(len(valbytes)))
buf.write(valbytes)
else:
buf.write(pack(-1))
buf.write(int32_pack(-1))
return buf.getvalue()
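Note: list, set and map values are now always framed with signed 32-bit counts and lengths (the v3+ collection format) instead of falling back to 16-bit lengths for v1/v2. A standalone sketch of that byte layout for a list of ints, hand-rolled here purely to show the framing the serializers above produce:

```python
import struct

int32_pack = struct.Struct('>i').pack

def serialize_int_list(items):
    # [element count][len][bytes][len][bytes]..., all lengths signed 32-bit,
    # with -1 marking a NULL element, mirroring serialize_safe above.
    out = [int32_pack(len(items))]
    for item in items:
        if item is None:
            out.append(int32_pack(-1))
        else:
            body = struct.pack('>i', item)          # CQL 'int' is a 4-byte big-endian value
            out.append(int32_pack(len(body)))
            out.append(body)
    return b''.join(out)

print(serialize_int_list([1, None, 3]).hex())
# 00000003 00000004 00000001 ffffffff 00000004 00000003
```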


5 changes: 0 additions & 5 deletions cassandra/marshal.py
@@ -33,11 +33,6 @@ def _make_packer(format_string):
float_pack, float_unpack = _make_packer('>f')
double_pack, double_unpack = _make_packer('>d')

# Special case for cassandra header
header_struct = struct.Struct('>BBbB')
header_pack = header_struct.pack
header_unpack = header_struct.unpack

# in protocol version 3 and higher, the stream ID is two bytes
v3_header_struct = struct.Struct('>BBhB')
v3_header_pack = v3_header_struct.pack
7 changes: 1 addition & 6 deletions cassandra/metadata.py
@@ -153,12 +153,7 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, fetch
meta = parse_method(self.keyspaces, **kwargs)
if meta:
update_method = getattr(self, '_update_' + tt_lower)
if tt_lower == 'keyspace' and connection.protocol_version < 3:
# we didn't have 'type' target in legacy protocol versions, so we need to query those too
user_types = parser.get_types_map(self.keyspaces, **kwargs)
self._update_keyspace(meta, user_types)
else:
update_method(meta)
update_method(meta)
else:
drop_method = getattr(self, '_drop_' + tt_lower)
drop_method(**kwargs)
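Note: the keyspace refresh no longer needs the legacy branch that re-queried user types, because every supported protocol exposes a 'type' schema target. For context, a brief sketch of how schema refreshes are normally driven through the public Cluster API (contact point and keyspace are placeholders):

```python
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])          # placeholder contact point
session = cluster.connect()

cluster.refresh_schema_metadata()                  # full refresh, goes through refresh() above
cluster.refresh_keyspace_metadata('my_keyspace')   # targeted keyspace refresh; placeholder name
```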