diff --git a/CMakeLists.txt b/CMakeLists.txt
index 407d675e1..611ca120c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,12 +42,10 @@ option(CASS_INSTALL_HEADER_IN_SUBDIR "Install header file under 'include/cassand
 option(CASS_INSTALL_PKG_CONFIG "Install pkg-config file(s)" ON)
 option(CASS_MULTICORE_COMPILATION "Enable multicore compilation" ON)
 option(CASS_USE_BOOST_ATOMIC "Use Boost atomics library" OFF)
-option(CASS_USE_KERBEROS "Use Kerberos" OFF)
 option(CASS_USE_LIBSSH2 "Use libssh2 for integration tests" OFF)
 option(CASS_USE_OPENSSL "Use OpenSSL" ON)
 option(CASS_USE_STATIC_LIBS "Link static libraries when building executables" OFF)
 option(CASS_USE_STD_ATOMIC "Use std::atomic library" ON)
-option(CASS_USE_ZLIB "Use zlib" OFF)
 option(CASS_USE_TIMERFD "Use timerfd (Linux only)" ON)
 option(CASS_USE_LIBUV "Use libuv" OFF)
 
@@ -65,7 +63,6 @@ endif()
 
 if(CASS_BUILD_INTEGRATION_TESTS OR CASS_BUILD_UNIT_TESTS)
   set(CASS_USE_OPENSSL ON) # Required for tests
-  set(CASS_USE_KERBEROS ON) # Required for tests
   set(CASS_USE_LIBUV ON)
 endif()
diff --git a/Doxyfile.in b/Doxyfile.in
new file mode 100644
index 000000000..82ff8b491
--- /dev/null
+++ b/Doxyfile.in
@@ -0,0 +1,1864 @@
+# Doxyfile 1.8.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME = "ScyllaDB CPP-Rust Driver"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 2.0
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "Library for the Apache Cassandra native binary protocol, optimised for ScyllaDB"
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started.
If left blank the current directory will be used. + +OUTPUT_DIRECTORY = "doxygen" + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. 
The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = cassandra{1}="Requires Apache Cassandra: \1" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. 
Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented classes, +# or namespaces to their corresponding documentation. Such a link can be +# prevented in individual cases by by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. 
Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields will be shown inline in the documentation +# of the scope in which they are defined (i.e. file, namespace, or group +# documentation), provided this scope is documented. If set to NO (the default), +# structs, classes, and unions are shown on a separate page (for HTML and Man +# pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = YES + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. +# +#SYMBOL_CACHE_SIZE = 0 + +# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be +# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given +# their name and scope. 
Since this can be an expensive process and often the +# same symbol appear multiple times in the code, doxygen keeps a cache of +# pre-resolved symbols. If the cache is too small doxygen will become slower. +# If the cache is too large, memory is wasted. The cache size is given by this +# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. 
+ +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. 
By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST = YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. 
The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = NO + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = include + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. 
+ +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.h *.hpp + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = XX* CASS_ERROR CASS_ERROR_MAP CASS_ERROR_LAST_ENTRY CASS_LOG_LEVEL_MAP CASS_LOG_LAST_ENTRY CASS_DEPRECATED + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. 
+ +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# non of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page (index.html). +# This can be useful if you have a project on for instance GitHub and want reuse +# the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. 
+ +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = NO + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. 
+ +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). 
+# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. 
+ +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider to set DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. 
+ +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# thA MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and +# SVG. The default value is HTML-CSS, which is slower, but has the best +# compatibility. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. +# However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. +# There are two flavours of web server based search depending on the +# EXTERNAL_SEARCH setting. 
When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching needs to be provided by external tools. +# See the manual for details. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain +# the search results. Doxygen ships with an example indexer (doxyindexer) and +# search engine (doxysearch.cgi) which are based on the open source search engine +# library Xapian. See the manual for configuration details. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will returned the search results when EXTERNAL_SEARCH is enabled. +# Doxygen ships with an example search engine (doxysearch) which is based on +# the open source search engine library Xapian. See the manual for configuration +# details. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. + +SEARCHDATA_FILE = searchdata.xml + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through other +# doxygen projects that are not otherwise connected via tags files, but are +# all added to the same search index. Each project needs to have a tag file set +# via GENERATE_TAGFILE. The search mapping then maps the name of the tag file +# to a relative location where the documentation can be found, +# similar to the +# TAGFILES option but without actually processing the tag file. +# The format is: EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... + +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4wide will be used. 
+ +PAPER_TYPE = a4 + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. 
+ +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = YES + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. +# +#XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. +# +#XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. 
+ +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. 
+ +PREDEFINED = "CASS_EXPORT=" + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. 
+ +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# managable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. 
+ +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. 
+ +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/README.md b/README.md index be955a430..d7fb3e64f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ScyllaDB Cpp-Rust Driver ___ -Wrapper around ScyllaDB's rust-driver compatible with Datastax cpp-driver. +Wrapper around [ScyllaDB's Rust Driver](https://cpp-rust-driver.docs.scylladb.com/stable), which is API-compatible with both ScyllaDB and Datastax C/C++ Driver and may be considered a drop-in replacement (with some minor limitations, see [Limitations](#limitations)). #### Note: It is work in progress, bug reports and pull requests are welcome! 
@@ -137,7 +137,7 @@ int main() {
 ___
 The driver inherits almost all the features of C/C++ and Rust drivers, such as:
- * [Asynchronous API](http://datastax.github.io/cpp-driver/topics/#futures)
+ * [Asynchronous API](https://cpp-rust-driver.docs.scylladb.com/stable/topics/#futures)
 * Shard-aware routing
 * Simple, Prepared and Batch statements
 * Query paging
@@ -146,9 +146,9 @@ The driver inherits almost all the features of C/C++ and Rust drivers, such as:
 * Retry policies
 * SSL
 * Authentication
- * [Tuples](http://datastax.github.io/cpp-driver/topics/basics/tuples/) and [UDTs](http://datastax.github.io/cpp-driver/topics/basics/user_defined_types/)
- * [Nested collections](http://datastax.github.io/cpp-driver/topics/basics/binding_parameters/#nested-collections)
- * [Data types](http://datastax.github.io/cpp-driver/topics/basics/data_types/)
+ * [Tuples](https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/data-types/tuples/) and [UDTs](https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/data-types/user-defined-types/)
+ * [Nested collections](https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/binding-parameters/#nested-collections)
+ * [Data types](https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/data-types/)
 * Schema metadata (keyspace metadata, materialized views, etc.)
 # Limitations
@@ -395,7 +395,7 @@ Some tests are added to GitHub Actions workflows and are used to test every pull
 To build and run the integration tests several requirements need to be met:
-* Install `libuv`, `openssl` and `kerberos` on your system:
+* Install `libuv` and `openssl` on your system:
 ```shell
 # On Ubuntu
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index a7e6c70a8..584eccc21 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -107,51 +107,6 @@ if(CASS_USE_OPENSSL)
 set(CASS_LIBS ${CASS_LIBS} ${OPENSSL_LIBRARIES})
 endif()
-#------------------------
-# ZLIB
-#------------------------
-
-if(CASS_USE_ZLIB)
- if(NOT ZLIB_LIBRARY_NAME)
- # Setup the root directory for zlib
- set(ZLIB_ROOT "${PROJECT_SOURCE_DIR}/lib/zlib/"
- "${PROJECT_SOURCE_DIR}/build/libs/zlib/")
- set(ZLIB_ROOT ${ZLIB_ROOT} ${ZLIB_ROOT_DIR} $ENV{ZLIB_ROOT_DIR})
-
- # Ensure zlib was found (assign zlib include/libraries or present warning)
- find_package(ZLIB)
- if(ZLIB_FOUND)
- # Determine if the static library needs to be used for Windows
- if(WIN32 AND CASS_USE_STATIC_LIBS)
- string(REPLACE "zlib.lib" "zlibstatic.lib" ZLIB_LIBRARIES "${ZLIB_LIBRARIES}")
- endif()
-
- # Assign zlib properties
- set(CASS_INCLUDES ${CASS_INCLUDES} ${ZLIB_INCLUDE_DIRS})
- set(CASS_LIBS ${CASS_LIBS} ${ZLIB_LIBRARIES})
- set(HAVE_ZLIB On)
- else()
- message(WARNING "Could not find zlib, try to set the path to zlib root folder in the system variable ZLIB_ROOT_DIR")
- message(WARNING "zlib libraries will not be linked into build")
- endif()
- else()
- # Assign zlib properties
- set(CASS_INCLUDES ${CASS_INCLUDES} ${ZLIB_INCLUDE_DIRS})
- set(CASS_LIBS ${CASS_LIBS} ${ZLIB_LIBRARIES})
- endif()
-endif()
-
-#------------------------
-# Kerberos
-#------------------------
-
-if(CASS_USE_KERBEROS)
- # Discover Kerberos and assign Kerberos include and libraries
- find_package(Kerberos REQUIRED)
- set(CASS_INCLUDES ${CASS_INCLUDES} ${KERBEROS_INCLUDE_DIR})
- set(CASS_LIBS ${CASS_LIBS} ${KERBEROS_LIBRARIES})
-endif()
-
 #------------------------
 # Boost
 #------------------------
diff --git a/cmake/ExternalProject-OpenSSL.cmake b/cmake/ExternalProject-OpenSSL.cmake index
e20c00642..f49c91155 100644 --- a/cmake/ExternalProject-OpenSSL.cmake +++ b/cmake/ExternalProject-OpenSSL.cmake @@ -25,9 +25,6 @@ if(NOT OPENSSL_VERSION) set(OPENSSL_VERSION "1.0.2s") endif() option(OPENSSL_INSTALL_PREFIX "OpenSSL installation prefix location") -if(CASS_USE_ZLIB) - include(ExternalProject-zlib) -endif() set(OPENSSL_VERSION ${OPENSSL_VERSION} CACHE STRING "OpenSSL version to build and install" FORCE) # Determine the major and minor version of OpenSSL used @@ -91,19 +88,6 @@ else() endif() set(OPENSSL_ROOT_DIR "${OPENSSL_INSTALL_DIR}" CACHE STRING "OpenSSL root directory" FORCE) -# Create build options for the platform build scripts -if(BUILD_SHARED_LIBS) - if(CASS_USE_ZLIB) - set(OPENSSL_ZLIB_CONFIGURE_ARGUMENT "zlib-dynamic") - set(ZLIB_LIB zlib.lib) - endif() -else() - if(CASS_USE_ZLIB) - set(OPENSSL_ZLIB_CONFIGURE_ARGUMENT "no-zlib-dynamic") - set(ZLIB_LIB zlibstatic.lib) - endif() -endif() - # Determine if shared or static library should be built set(OPENSSL_CONFIGURE_COMPILER "no-asm no-ssl2") if(BUILD_SHARED_LIBS) @@ -273,16 +257,6 @@ file(APPEND ${OPENSSL_CONFIGURE_SCRIPT} "POPD\r\n" "SET PATH=${PERL_PATH};%PATH%\r\n" "CALL :SHORTENPATH \"${OPENSSL_NATIVE_INSTALL_DIR}\" SHORTENED_OPENSSL_INSTALL_DIR\r\n") -if(CASS_USE_ZLIB) - # OpenSSL requires zlib paths to be relative (otherwise build errors may occur) - externalproject_get_property(${OPENSSL_LIBRARY_NAME} SOURCE_DIR) - file(RELATIVE_PATH ZLIB_INCLUDE_RELATIVE_DIR ${SOURCE_DIR} ${ZLIB_INCLUDE_DIR}) - file(TO_NATIVE_PATH ${ZLIB_INCLUDE_RELATIVE_DIR} ZLIB_NATIVE_INCLUDE_RELATIVE_DIR) - file(RELATIVE_PATH ZLIB_LIBRARY_RELATIVE_DIR ${SOURCE_DIR} ${ZLIB_LIBRARY_DIR}) - file(TO_NATIVE_PATH ${ZLIB_LIBRARY_RELATIVE_DIR} ZLIB_NATIVE_LIBRARY_RELATIVE_DIR) - set(OPENSSL_WITH_ZLIB_ARGUMENT "zlib ${OPENSSL_ZLIB_CONFIGURE_ARGUMENT} --with-zlib-include=\"${ZLIB_NATIVE_INCLUDE_RELATIVE_DIR}\" --with-zlib-lib=\"${ZLIB_NATIVE_LIBRARY_RELATIVE_DIR}\\${ZLIB_LIB}\"") - set(OPENSSL_WITH_ZLIB_ARGUMENT "zlib ${OPENSSL_ZLIB_CONFIGURE_ARGUMENT} --with-zlib-include=\"${ZLIB_INCLUDE_RELATIVE_DIR}\" --with-zlib-lib=\"${ZLIB_LIBRARY_RELATIVE_DIR}\\${ZLIB_LIB}\"") -endif() file(APPEND ${OPENSSL_CONFIGURE_SCRIPT} "perl Configure ${OPENSSL_WITH_ZLIB_ARGUMENT} --openssldir=!SHORTENED_OPENSSL_INSTALL_DIR! --prefix=!SHORTENED_OPENSSL_INSTALL_DIR! ${OPENSSL_CONFIGURE_COMPILER}\r\n" "IF NOT %ERRORLEVEL% EQU 0 (\r\n" @@ -317,10 +291,5 @@ file(APPEND ${OPENSSL_CONFIGURE_SCRIPT} " FOR %%A IN (\"%~1\") DO SET %~2=%%~SA\r\n" " EXIT /B\r\n") -# Determine if zlib should be added as a dependency -if(CASS_USE_ZLIB) - add_dependencies(${OPENSSL_LIBRARY_NAME} ${ZLIB_LIBRARY_NAME}) -endif() - # Update the include directory to use OpenSSL include_directories(${OPENSSL_INCLUDE_DIR}) diff --git a/cmake/ExternalProject-zlib.cmake b/cmake/ExternalProject-zlib.cmake deleted file mode 100644 index 5ccdb5b8f..000000000 --- a/cmake/ExternalProject-zlib.cmake +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-cmake_minimum_required(VERSION 3.15 FATAL_ERROR) -include(ExternalProject) -include(Windows-Environment) - -# zlib related CMake options -option(ZLIB_VERSION "zlib version to build and install") -if(NOT ZLIB_VERSION) - set(ZLIB_VERSION "1.2.11") -endif() -option(ZLIB_INSTALL_PREFIX "zlib installation prefix location") -if(NOT ZLIB_INSTALL_PREFIX) - set(ZLIB_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/libs/zlib" CACHE STRING "zlib installation prefix" FORCE) -endif() -set(ZLIB_VERSION ${ZLIB_VERSION} CACHE STRING "zlib version to build and install" FORCE) - -# Determine the zlib archive name to download -set(ZLIB_ARCHIVE_NAME v${ZLIB_VERSION} CACHE STRING "zlib archive name" FORCE) - -# zlib external project variables -set(ZLIB_LIBRARY_NAME "zlib-library") -set(ZLIB_PROJECT_PREFIX ${CMAKE_BINARY_DIR}/external/zlib) -set(ZLIB_ARCHIVE_URL_PREFIX "https://github.com/madler/zlib/archive/") -set(ZLIB_ARCHIVE_URL_SUFFIX ".tar.gz") -set(ZLIB_ARCHIVE_URL "${ZLIB_ARCHIVE_URL_PREFIX}${ZLIB_ARCHIVE_NAME}${ZLIB_ARCHIVE_URL_SUFFIX}") - -# Make sure Visual Studio is available -if(NOT MSVC) - message(FATAL_ERROR "Visual Studio is required to build zlib") -endif() -message(STATUS "zlib: v${ZLIB_VERSION}") - -# zlib library configuration variables -set(ZLIB_INSTALL_DIR "${ZLIB_INSTALL_PREFIX}" CACHE STRING "zlib installation directory" FORCE) -set(ZLIB_BINARY_DIR "${ZLIB_INSTALL_DIR}/bin" CACHE STRING "zlib binary directory" FORCE) -set(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE STRING "zlib include directory" FORCE) -set(ZLIB_INCLUDE_DIRS "${ZLIB_INCLUDE_DIR}" CACHE STRING "zlib include directory" FORCE) # Alias to stay consistent with FindZlib -set(ZLIB_LIBRARY_DIR "${ZLIB_INSTALL_DIR}/lib" CACHE STRING "zlib library directory" FORCE) -if(CASS_USE_ZLIB) - set(ZLIB_LIBRARIES ${ZLIB_LIBRARY_DIR}/zlib.lib CACHE STRING "zlib libraries" FORCE) - if(CASS_USE_STATIC_LIBS) - set(ZLIB_LIBRARIES ${ZLIB_LIBRARY_DIR}/zlibstatic.lib CACHE STRING "zlib libraries" FORCE) - endif() -endif() -set(ZLIB_ROOT_DIR "${ZLIB_INSTALL_DIR}" CACHE STRING "zlib root directory" FORCE) - -# Create a package name for the binaries -set(ZLIB_PACKAGE_NAME "zlib-${ZLIB_VERSION}-${PACKAGE_ARCH_TYPE}-msvc${VS_INTERNAL_VERSION}.zip" CACHE STRING "zlib package name" FORCE) - -# Create an additional install script step for zlib -file(TO_NATIVE_PATH ${ZLIB_LIBRARY_DIR} ZLIB_NATIVE_LIBRARY_DIR) -set(ZLIB_INSTALL_EXTRAS_SCRIPT "${ZLIB_PROJECT_PREFIX}/scripts/install_zlib_extras.bat") -file(REMOVE ${ZLIB_INSTALL_EXTRAS_SCRIPT}) -file(WRITE ${ZLIB_INSTALL_EXTRAS_SCRIPT} - "@REM Generated install script for zlib\r\n" - "@ECHO OFF\r\n" - "IF EXIST zlibstatic.dir\\RelWithDebInfo\\*.pdb (\r\n" - " COPY /Y zlibstatic.dir\\RelWithDebInfo\\*.pdb \"${ZLIB_NATIVE_LIBRARY_DIR}\"\r\n" - " IF NOT %ERRORLEVEL% EQU 0 (\r\n" - " EXIT /B 1\r\n" - " )\r\n" - ")\r\n" - "EXIT /B\r\n") -set(ZLIB_INSTALL_EXTRAS_COMMAND "${ZLIB_INSTALL_EXTRAS_SCRIPT}") - -# Add zlib as an external project -externalproject_add(${ZLIB_LIBRARY_NAME} - PREFIX ${ZLIB_PROJECT_PREFIX} - URL ${ZLIB_ARCHIVE_URL} - DOWNLOAD_DIR ${ZLIB_PROJECT_PREFIX} - INSTALL_DIR ${ZLIB_INSTALL_DIR} - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR} - -DBUILD_SHARED_LIBS=On - -DASM686=Off # Disable assembly compiling (does not build on all compilers) - -DASM64=Off # Disable assembly compiling (does not build on all compilers) - BUILD_COMMAND ${CMAKE_COMMAND} --build --config RelWithDebInfo - INSTALL_COMMAND ${CMAKE_COMMAND} --build --config RelWithDebInfo --target install - COMMAND ${CMAKE_COMMAND} -E 
copy /README ${ZLIB_INSTALL_DIR} - COMMAND ${CMAKE_COMMAND} -E copy RelWithDebInfo/zlib.pdb ${ZLIB_BINARY_DIR} - COMMAND ${ZLIB_INSTALL_EXTRAS_COMMAND} - LOG_DOWNLOAD 1 - LOG_CONFIGURE 1 - LOG_BUILD 1 - LOG_INSTALL 1) - -# Update the include directory to use zlib -include_directories(${ZLIB_INCLUDE_DIR}) diff --git a/cmake/FindKerberos.cmake b/cmake/FindKerberos.cmake deleted file mode 100644 index 9aeaef10e..000000000 --- a/cmake/FindKerberos.cmake +++ /dev/null @@ -1,81 +0,0 @@ -include(FindPackageHandleStandardArgs) - -# Utilize pkg-config if available to check for modules -find_package(PkgConfig QUIET) -if(PKG_CONFIG_FOUND) - pkg_check_modules(GSSAPI QUIET krb5-gssapi) - pkg_check_modules(KERBEROS QUIET krb5) -endif() - -# Use pkg-config or attempt to manually locate -if(GSSAPI_FOUND AND KERBEROS_FOUND) - set(GSSAPI_INCLUDE_DIR ${GSSAPI_INCLUDEDIR}) - mark_as_advanced(GSSAPI_INCLUDEDIR GSSAPI_INCLUDE_DIR GSSAPI_LIBRARIES) - set(KERBEROS_INCLUDE_DIR ${KERBEROS_INCLUDEDIR}) - mark_as_advanced(KERBEROS_INCLUDEDIR KERBEROS_INCLUDE_DIR KERBEROS_LIBRARIES) -else() - # Setup the hints and patch for the Kerberos SDK location - set(_KERBEROS_ROOT_PATHS "${PROJECT_SOURCE_DIR}/lib/kerberos/") - set(_KERBEROS_ROOT_HINTS ${KERBEROS_ROOT_DIR} $ENV{KERBEROS_ROOT_DIR} - $ENV{KERBEROS_ROOT_DIR}/MIT/Kerberos) - if(WIN32) - if(CMAKE_CL_64) - set(_KERBEROS_SDK_PROGRAMFILES "$ENV{PROGRAMW6432}") - else() - set(_PF86 "PROGRAMFILES(X86)") - set(_KERBEROS_SDK_PROGRAMFILES "$ENV{${_PF86}}") - endif() - set(_KERBEROS_SDK "MIT/Kerberos") - set(_KERBEROS_ROOT_PATHS "${_KERBEROS_ROOT_PATHS}" - "${_KERBEROS_SDK_PROGRAMFILES}/${_KERBEROS_SDK}") - endif() - set(_KERBEROS_ROOT_HINTS_AND_PATHS - HINTS ${_KERBEROS_ROOT_HINTS} - PATHS ${_KERBEROS_ROOT_PATHS}) - - # Locate GSSAPI - find_path(GSSAPI_INCLUDE_DIR - NAMES gssapi/gssapi.h - HINTS ${_KERBEROS_INCLUDEDIR} ${_KERBEROS_ROOT_HINTS_AND_PATHS} - PATH_SUFFIXES include) - find_library(GSSAPI_LIBRARIES - NAMES gssapi_krb5 gssapi32 gssapi64 - HINTS ${_KERBEROS_LIBDIR} ${_KERBEROS_ROOT_HINTS_AND_PATHS} - PATH_SUFFIXES lib lib/i386 lib/amd64) - mark_as_advanced(GSSAPI_INCLUDE_DIR GSSAPI_LIBRARIES) - - # Locate Kerberos - find_path(KERBEROS_INCLUDE_DIR - NAMES krb5.h - HINTS ${_KERBEROS_INCLUDEDIR} ${_KERBEROS_ROOT_HINTS_AND_PATHS} - PATH_SUFFIXES include) - find_library(KERBEROS_LIBRARIES - NAMES krb5 libkrb5 krb5_32 krb5_64 - HINTS ${_KERBEROS_LIBDIR} ${_KERBEROS_ROOT_HINTS_AND_PATHS} - PATH_SUFFIXES lib lib/i386 lib/amd64) - mark_as_advanced(KERBEROS_INCLUDE_DIR KERBEROS_LIBRARIES) -endif() - -# Set the fail message appropriately for OS -if(NOT WIN32) - set(_GSSAPI_LIBRARY "gssapi_krb5") - set(_KERBEROS_LIBRARY "krb5") -else() - if(CMAKE_CL_64) - set(_GSSAPI_LIBRARY "gssapi64") - set(_KERBEROS_LIBRARY "krb5_64") - else() - set(_GSSAPI_LIBRARY "gssapi32") - set(_KERBEROS_LIBRARY "krb5_32") - endif() -endif() -set(KERBERBOS_FAIL_MESSAGE "Could NOT find ${_GSSAPI_LIBRARY} and/or ${_KERBEROS_LIBRARY}, try to set the path to the Kerberos root folder in the system variable KERBEROS_ROOT_DIR") - -# Determine if Kerberos was fully located (GSSAPI dependent) -set(KERBEROS_INCLUDE_DIR ${KERBEROS_INCLUDE_DIR} ${GSSAPI_INCLUDE_DIR}) -set(KERBEROS_LIBRARIES ${KERBEROS_LIBRARIES} ${GSSAPI_LIBRARIES}) -message(STATUS "Kerberos: ${KERBEROS_INCLUDE_DIR} ${KERBEROS_LIBRARIES}") -find_package_handle_standard_args(Kerberos - ${KERBERBOS_FAIL_MESSAGE} - KERBEROS_LIBRARIES - KERBEROS_INCLUDE_DIR) diff --git a/dist/redhat/scylla-cpp-rust-driver.spec b/dist/redhat/scylla-cpp-rust-driver.spec 
index ecf65aa6c..950bb4720 100644 --- a/dist/redhat/scylla-cpp-rust-driver.spec +++ b/dist/redhat/scylla-cpp-rust-driver.spec @@ -17,7 +17,7 @@ Conflicts: scylla-cpp-driver API-compatible rewrite of https://github.com/scylladb/cpp-driver as a wrapper for Rust driver. %package devel -Summary: Development libraries for ${name} +Summary: Development libraries for %{name} Group: Development/Tools Requires: %{name} = %{version}-%{release} Requires: pkgconfig diff --git a/docs/Makefile b/docs/Makefile index 4c82ad3be..9d241e1d5 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -5,7 +5,7 @@ SPHINXOPTS = -j auto SPHINXBUILD = $(POETRY) run sphinx-build PAPER = BUILDDIR = _build -SOURCEDIR = source +SOURCEDIR = _source # Internal variables PAPEROPT_a4 = -D latex_paper_size=a4 @@ -20,10 +20,16 @@ all: dirhtml .PHONY: setupenv setupenv: pip install -q poetry + sudo apt-get install doxygen .PHONY: setup setup: $(POETRY) install + $(POETRY) update + cd .. && doxygen Doxyfile.in + @if [ ! -d "$(SOURCEDIR)" ]; then mkdir -p "$(SOURCEDIR)"; fi + cp -RL source/* $(SOURCEDIR) + # cd $(SOURCEDIR) && find . -name README.md -execdir mv '{}' index.md ';' .PHONY: update update: diff --git a/docs/poetry.lock b/docs/poetry.lock index 0800c868c..c3e6ea877 100644 --- a/docs/poetry.lock +++ b/docs/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "alabaster" @@ -6,7 +6,6 @@ version = "0.7.16" description = "A light, configurable Sphinx theme" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, @@ -14,14 +13,13 @@ files = [ [[package]] name = "anyio" -version = "4.9.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" +version = "4.10.0" +description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, - {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, + {file = "anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1"}, + {file = "anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6"}, ] [package.dependencies] @@ -31,8 +29,6 @@ sniffio = ">=1.1" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] [[package]] @@ -41,32 +37,30 @@ version = "2.17.0" description = "Internationalization utilities" optional = false python-versions = 
">=3.8" -groups = ["main"] files = [ {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, ] [package.extras] -dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] +dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"] [[package]] name = "beartype" -version = "0.20.2" +version = "0.21.0" description = "Unbearably fast near-real-time hybrid runtime-static type-checking in pure Python." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "beartype-0.20.2-py3-none-any.whl", hash = "sha256:5171a91ecf01438a59884f0cde37d2d5da2c992198b53d6ba31db3940f47ff04"}, - {file = "beartype-0.20.2.tar.gz", hash = "sha256:38c60c065ad99364a8c767e8a0e71ba8263d467b91414ed5dcffb7758a2e8079"}, + {file = "beartype-0.21.0-py3-none-any.whl", hash = "sha256:b6a1bd56c72f31b0a496a36cc55df6e2f475db166ad07fa4acc7e74f4c7f34c0"}, + {file = "beartype-0.21.0.tar.gz", hash = "sha256:f9a5078f5ce87261c2d22851d19b050b64f6a805439e8793aecf01ce660d3244"}, ] [package.extras] -dev = ["autoapi (>=0.9.0)", "click", "coverage (>=5.5)", "equinox ; sys_platform == \"linux\"", "jax[cpu] ; sys_platform == \"linux\"", "jaxtyping ; sys_platform == \"linux\"", "langchain", "mypy (>=0.800) ; platform_python_implementation != \"PyPy\"", "nuitka (>=1.2.6) ; sys_platform == \"linux\"", "numba ; python_version < \"3.13.0\"", "numpy ; sys_platform != \"darwin\" and platform_python_implementation != \"PyPy\"", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)", "xarray"] +dev = ["autoapi (>=0.9.0)", "click", "coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "langchain", "mypy (>=0.800)", "nuitka (>=1.2.6)", "numba", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "sqlalchemy", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)", "xarray"] doc-rtd = ["autoapi (>=0.9.0)", "pydata-sphinx-theme (<=0.7.2)", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)"] -test = ["click", "coverage (>=5.5)", "equinox ; sys_platform == \"linux\"", "jax[cpu] ; sys_platform == \"linux\"", "jaxtyping ; sys_platform == \"linux\"", "langchain", "mypy (>=0.800) ; platform_python_implementation != \"PyPy\"", "nuitka (>=1.2.6) ; sys_platform == \"linux\"", "numba ; python_version < \"3.13.0\"", "numpy ; sys_platform != \"darwin\" and platform_python_implementation != \"PyPy\"", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)", "xarray"] -test-tox = ["click", "equinox ; sys_platform == \"linux\"", "jax[cpu] ; sys_platform == \"linux\"", "jaxtyping ; sys_platform == \"linux\"", "langchain", "mypy (>=0.800) ; platform_python_implementation != \"PyPy\"", "nuitka (>=1.2.6) ; sys_platform == \"linux\"", "numba ; python_version < \"3.13.0\"", "numpy ; sys_platform != \"darwin\" and platform_python_implementation != \"PyPy\"", 
"pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "typing-extensions (>=3.10.0.0)", "xarray"] +test = ["click", "coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "langchain", "mypy (>=0.800)", "nuitka (>=1.2.6)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "sqlalchemy", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)", "xarray"] +test-tox = ["click", "equinox", "jax[cpu]", "jaxtyping", "langchain", "mypy (>=0.800)", "nuitka (>=1.2.6)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "rich-click", "sphinx", "sqlalchemy", "typing-extensions (>=3.10.0.0)", "xarray"] test-tox-coverage = ["coverage (>=5.5)"] [[package]] @@ -75,7 +69,6 @@ version = "4.13.4" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" -groups = ["main"] files = [ {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, @@ -92,130 +85,129 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "breathe" +version = "4.35.0" +description = "Sphinx Doxygen renderer" +optional = false +python-versions = "*" +files = [ + {file = "breathe-4.35.0-py3-none-any.whl", hash = "sha256:52c581f42ca4310737f9e435e3851c3d1f15446205a85fbc272f1f97ed74f5be"}, + {file = "breathe-4.35.0.tar.gz", hash = "sha256:5165541c3c67b6c7adde8b3ecfe895c6f7844783c4076b6d8d287e4f33d62386"}, +] + +[package.dependencies] +docutils = ">=0.12" +Sphinx = ">=4.0,<5.0.0 || >5.0.0" + [[package]] name = "certifi" -version = "2025.1.31" +version = "2025.8.3" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" -groups = ["main"] +python-versions = ">=3.7" files = [ - {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, - {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, + {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, + {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, ] [[package]] name = "charset-normalizer" -version = "3.4.1" +version = "3.4.3" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" -groups = ["main"] files = [ - {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, - {file = 
"charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = 
"sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, - {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, - {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"}, + {file = 
"charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = 
"sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"}, + {file = 
"charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, ] [[package]] name = "click" -version = "8.1.8" +version = "8.2.1" description = "Composable command line interface toolkit" optional = false -python-versions = ">=3.7" -groups = ["main"] +python-versions = ">=3.10" files = [ - {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, - {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, ] [package.dependencies] @@ -227,19 +219,31 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "commonmark" +version = "0.9.1" +description = "Python parser for the CommonMark Markdown spec" +optional = false +python-versions = "*" +files = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] + +[package.extras] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] + [[package]] name = "docutils" version = "0.21.2" description = "Docutils -- Python Documentation Utilities" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, @@ -247,17 +251,18 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.2" +version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["main"] -markers = "python_version < \"3.11\"" files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = 
"sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + [package.extras] test = ["pytest (>=6)"] @@ -267,7 +272,6 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -279,7 +283,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -294,7 +297,6 @@ version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["main"] files = [ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, @@ -306,7 +308,6 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -318,13 +319,27 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "markdown" +version = "3.8.2" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.9" +files = [ + {file = "markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24"}, + {file = "markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45"}, +] + +[package.extras] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + [[package]] name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -349,7 +364,6 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -416,18 +430,17 @@ files = [ [[package]] name = "mdit-py-plugins" -version = "0.4.2" +version = "0.5.0" description = "Collection of plugins for markdown-it-py" optional = false -python-versions = ">=3.8" -groups = ["main"] +python-versions = ">=3.10" files = [ - {file = "mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636"}, - {file = "mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5"}, + {file = "mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f"}, + {file = "mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6"}, ] [package.dependencies] -markdown-it-py = ">=1.0.0,<4.0.0" +markdown-it-py = ">=2.0.0,<5.0.0" [package.extras] code-style = ["pre-commit"] @@ -440,7 +453,6 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -452,7 +464,6 @@ version = "3.0.1" description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1"}, {file = "myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87"}, @@ -479,7 +490,6 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -487,14 +497,13 @@ files = [ [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ - {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, - {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, ] [package.extras] @@ -506,7 +515,6 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -563,13 +571,28 @@ files = [ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "recommonmark" +version = "0.7.1" +description = "A docutils-compatibility bridge to CommonMark, enabling you to write CommonMark inside of Docutils & Sphinx projects." +optional = false +python-versions = "*" +files = [ + {file = "recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f"}, + {file = "recommonmark-0.7.1.tar.gz", hash = "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67"}, +] + +[package.dependencies] +commonmark = ">=0.8.1" +docutils = ">=0.11" +sphinx = ">=1.3.1" + [[package]] name = "redirects-cli" version = "0.1.3" description = "Generates static redirections from a YAML file." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "redirects_cli-0.1.3-py3-none-any.whl", hash = "sha256:8a7a548d5f45b98db7d110fd8affbbb44b966cf250e35b5f4c9bd6541622272d"}, {file = "redirects_cli-0.1.3.tar.gz", hash = "sha256:0cc6f35ae372d087d56bc03cfc639d6e2eac0771454c3c173ac6f3dc233969bc"}, @@ -584,19 +607,18 @@ test = ["pre-commit", "pytest"] [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -606,44 +628,41 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" -version = "14.0.0" +version = "14.1.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" -groups = ["main"] files = [ - {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, - {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, + {file = "rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f"}, + {file = "rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "setuptools" -version = "78.1.1" +version = "79.0.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "setuptools-78.1.1-py3-none-any.whl", hash = "sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561"}, - {file = "setuptools-78.1.1.tar.gz", hash = "sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d"}, + {file = "setuptools-79.0.1-py3-none-any.whl", hash = "sha256:e147c0549f27767ba362f9da434eab9c5dc0045d5304feb602a0af001089fc51"}, + {file = "setuptools-79.0.1.tar.gz", hash = "sha256:128ce7b8f33c3079fd1b067ecbb4051a66e8526e7b65f6cec075dfc650ddfa88"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] -core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] +core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier 
(<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "shellingham" @@ -651,7 +670,6 @@ version = "1.5.4" description = "Tool to Detect Surrounding Shell" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -663,7 +681,6 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -671,14 +688,13 @@ files = [ [[package]] name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +version = "3.0.1" +description = "This package provides 32 stemmers for 30 languages generated from Snowball algorithms." optional = false -python-versions = "*" -groups = ["main"] +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*" files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, + {file = "snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064"}, + {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, ] [[package]] @@ -687,7 +703,6 @@ version = "2.7" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, @@ -699,7 +714,6 @@ version = "7.4.7" description = "Python documentation generator" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, @@ -735,7 +749,6 @@ version = "2024.10.3" description = "Rebuild Sphinx documentation on changes, with hot reloading in the browser." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa"}, {file = "sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1"}, @@ -758,7 +771,6 @@ version = "0.1.3" description = "Collapse extension for Sphinx." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "sphinx_collapse-0.1.3-py3-none-any.whl", hash = "sha256:85fadb2ec8769b93fd04276538668fa96239ef60c20c4a9eaa3e480387a6e65b"}, {file = "sphinx_collapse-0.1.3.tar.gz", hash = "sha256:cae141e6f03ecd52ed246a305a69e1b0d5d05e6cdf3fe803d40d583ad6ad895a"}, @@ -777,7 +789,6 @@ version = "0.5.2" description = "Add a copy button to each of your code cells." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, @@ -790,13 +801,40 @@ sphinx = ">=1.8" code-style = ["pre-commit (==2.12.1)"] rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] +[[package]] +name = "sphinx-last-updated-by-git" +version = "0.3.8" +description = "Get the \"last updated\" time for each Sphinx page from Git" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sphinx_last_updated_by_git-0.3.8-py3-none-any.whl", hash = "sha256:6382c8285ac1f222483a58569b78c0371af5e55f7fbf9c01e5e8a72d6fdfa499"}, + {file = "sphinx_last_updated_by_git-0.3.8.tar.gz", hash = "sha256:c145011f4609d841805b69a9300099fc02fed8f5bb9e5bcef77d97aea97b7761"}, +] + +[package.dependencies] +sphinx = ">=1.8" + +[[package]] +name = "sphinx-markdown-tables" +version = "0.0.17" +description = "A Sphinx extension for rendering tables written in markdown" +optional = false +python-versions = "*" +files = [ + {file = "sphinx-markdown-tables-0.0.17.tar.gz", hash = "sha256:6bc6d3d400eaccfeebd288446bc08dd83083367c58b85d40fe6c12d77ef592f1"}, + {file = "sphinx_markdown_tables-0.0.17-py3-none-any.whl", hash = "sha256:2bd0c30779653e4dd120300cbd9ca412c480738cc2241f6dea477a883f299e04"}, +] + +[package.dependencies] +markdown = ">=3.4" + [[package]] name = "sphinx-multiversion-scylla" version = "0.3.2" description = "Add support for multiple versions to sphinx" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "sphinx_multiversion_scylla-0.3.2.tar.gz", hash = 
"sha256:f415311273228f4f766c36256503da8e2ce01f9d13423f3fcee3160d6284852b"}, ] @@ -810,7 +848,6 @@ version = "1.1.0" description = "Sphinx extension to build a 404 page with absolute URLs" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "sphinx_notfound_page-1.1.0-py3-none-any.whl", hash = "sha256:835dc76ff7914577a1f58d80a2c8418fb6138c0932c8da8adce4d9096fbcd389"}, {file = "sphinx_notfound_page-1.1.0.tar.gz", hash = "sha256:913e1754370bb3db201d9300d458a8b8b5fb22e9246a816643a819a9ea2b8067"}, @@ -823,13 +860,28 @@ sphinx = ">=5" doc = ["sphinx-autoapi", "sphinx-rtd-theme", "sphinx-tabs", "sphinxemoji"] test = ["tox"] +[[package]] +name = "sphinx-scylladb-markdown" +version = "0.1.3" +description = "Sphinx extension for ScyllaDB documentation with enhanced Markdown support through MystParser and recommonmark." +optional = false +python-versions = "*" +files = [ + {file = "sphinx_scylladb_markdown-0.1.3-py3-none-any.whl", hash = "sha256:f20160b4aadf4c8cf95637f0a544121954b792914ab6ec05b67cae75e20a5566"}, +] + +[package.dependencies] +myst-parser = ">=2.0.0" +recommonmark = "0.7.1" +sphinx = ">=2.1" +sphinx-markdown-tables = "0.0.17" + [[package]] name = "sphinx-scylladb-theme" version = "1.8.7" description = "A Sphinx Theme for ScyllaDB documentation projects" optional = false python-versions = "<4.0,>=3.10" -groups = ["main"] files = [ {file = "sphinx_scylladb_theme-1.8.7-py3-none-any.whl", hash = "sha256:64c86e86737e16d8bbdbec492622865ec1e9c0c3a5915d747a9c109fd69145f1"}, {file = "sphinx_scylladb_theme-1.8.7.tar.gz", hash = "sha256:7b84fc99e1156ebf14149f5c1f88b61b5ea852e367fb3940eb99f514db0a6c41"}, @@ -848,21 +900,20 @@ sphinxcontrib-mermaid = ">=1.0.0,<2.0.0" [[package]] name = "sphinx-sitemap" -version = "2.6.0" +version = "2.8.0" description = "Sitemap generator for Sphinx" optional = false python-versions = "*" -groups = ["main"] files = [ - {file = "sphinx_sitemap-2.6.0-py3-none-any.whl", hash = "sha256:7478e417d141f99c9af27ccd635f44c03a471a08b20e778a0f9daef7ace1d30b"}, - {file = "sphinx_sitemap-2.6.0.tar.gz", hash = "sha256:5e0c66b9f2e371ede80c659866a9eaad337d46ab02802f9c7e5f7bc5893c28d2"}, + {file = "sphinx_sitemap-2.8.0-py3-none-any.whl", hash = "sha256:332042cd5b9385f61ec2861dfd550d9bccbdfcff86f6b68c7072cf40c9f16363"}, + {file = "sphinx_sitemap-2.8.0.tar.gz", hash = "sha256:749d7184a0c7b73d486a232b54b5c1b38a0e2d6f18cf19fb1b033b8162b44a82"}, ] [package.dependencies] -sphinx = ">=1.2" +sphinx-last-updated-by-git = "*" [package.extras] -dev = ["build", "flake8", "pre-commit", "pytest", "sphinx", "tox"] +dev = ["build", "flake8", "pre-commit", "pytest", "sphinx", "sphinx-last-updated-by-git", "tox"] [[package]] name = "sphinx-substitution-extensions" @@ -870,7 +921,6 @@ version = "2025.1.2" description = "Extensions for Sphinx which allow for substitutions." 
optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "sphinx_substitution_extensions-2025.1.2-py2.py3-none-any.whl", hash = "sha256:ff14f40e4393bd7434a196badb8d47983355d9755af884b902e3023fb456b958"}, {file = "sphinx_substitution_extensions-2025.1.2.tar.gz", hash = "sha256:53b8d394d5098a09aef36bc687fa310aeb28466319d2c750e996e46400fb2474"}, @@ -891,7 +941,6 @@ version = "3.4.7" description = "Tabbed views for Sphinx" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "sphinx-tabs-3.4.7.tar.gz", hash = "sha256:991ad4a424ff54119799ba1491701aa8130dd43509474aef45a81c42d889784d"}, {file = "sphinx_tabs-3.4.7-py3-none-any.whl", hash = "sha256:c12d7a36fd413b369e9e9967a0a4015781b71a9c393575419834f19204bd1915"}, @@ -912,7 +961,6 @@ version = "2.0.0" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, @@ -929,7 +977,6 @@ version = "2.0.0" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, @@ -946,7 +993,6 @@ version = "2.1.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, @@ -963,7 +1009,6 @@ version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" optional = false python-versions = ">=3.5" -groups = ["main"] files = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, @@ -978,7 +1023,6 @@ version = "1.0.0" description = "Mermaid diagrams in yours Sphinx powered docs" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "sphinxcontrib_mermaid-1.0.0-py3-none-any.whl", hash = "sha256:60b72710ea02087f212028feb09711225fbc2e343a10d34822fe787510e1caa3"}, {file = "sphinxcontrib_mermaid-1.0.0.tar.gz", hash = "sha256:2e8ab67d3e1e2816663f9347d026a8dee4a858acdd4ad32dd1c808893db88146"}, @@ -997,7 +1041,6 @@ version = "2.0.0" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = 
"sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, @@ -1014,7 +1057,6 @@ version = "2.0.0" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, @@ -1027,18 +1069,18 @@ test = ["pytest"] [[package]] name = "starlette" -version = "0.46.2" +version = "0.47.2" description = "The little ASGI library that shines." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, - {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, + {file = "starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b"}, + {file = "starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8"}, ] [package.dependencies] anyio = ">=3.6.2,<5" +typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""} [package.extras] full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] @@ -1049,8 +1091,6 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -1088,14 +1128,13 @@ files = [ [[package]] name = "typer" -version = "0.15.2" +version = "0.16.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.7" -groups = ["main"] files = [ - {file = "typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc"}, - {file = "typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5"}, + {file = "typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855"}, + {file = "typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b"}, ] [package.dependencies] @@ -1106,44 +1145,41 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "typing-extensions" -version = "4.13.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.14.1" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = ">=3.8" -groups = ["main"] +python-versions = ">=3.9" files = [ - {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, - {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, ] [[package]] name = "urllib3" -version = "2.4.0" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, - {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.34.2" +version = "0.35.0" description = "The lightning-fast ASGI server." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, - {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, + {file = "uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a"}, + {file = "uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01"}, ] [package.dependencies] @@ -1152,87 +1188,121 @@ h11 = ">=0.8" typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "watchfiles" -version = "1.0.5" +version = "1.1.0" description = "Simple, modern and high performance file watching and code reload in python." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ - {file = "watchfiles-1.0.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40"}, - {file = "watchfiles-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b551d4fb482fc57d852b4541f911ba28957d051c8776e79c3b4a51eb5e2a1b11"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:830aa432ba5c491d52a15b51526c29e4a4b92bf4f92253787f9726fe01519487"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a16512051a822a416b0d477d5f8c0e67b67c1a20d9acecb0aafa3aa4d6e7d256"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe0cbc787770e52a96c6fda6726ace75be7f840cb327e1b08d7d54eadc3bc85"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d363152c5e16b29d66cbde8fa614f9e313e6f94a8204eaab268db52231fe5358"}, - {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ee32c9a9bee4d0b7bd7cbeb53cb185cf0b622ac761efaa2eba84006c3b3a614"}, - {file = "watchfiles-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29c7fd632ccaf5517c16a5188e36f6612d6472ccf55382db6c7fe3fcccb7f59f"}, - {file = "watchfiles-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e637810586e6fe380c8bc1b3910accd7f1d3a9a7262c8a78d4c8fb3ba6a2b3d"}, - {file = "watchfiles-1.0.5-cp310-cp310-win32.whl", hash = "sha256:cd47d063fbeabd4c6cae1d4bcaa38f0902f8dc5ed168072874ea11d0c7afc1ff"}, - {file = "watchfiles-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:86c0df05b47a79d80351cd179893f2f9c1b1cae49d96e8b3290c7f4bd0ca0a92"}, - {file = "watchfiles-1.0.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827"}, - {file = 
"watchfiles-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6"}, - {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25"}, - {file = "watchfiles-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5"}, - {file = "watchfiles-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01"}, - {file = "watchfiles-1.0.5-cp311-cp311-win32.whl", hash = "sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246"}, - {file = "watchfiles-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096"}, - {file = "watchfiles-1.0.5-cp311-cp311-win_arm64.whl", hash = "sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed"}, - {file = "watchfiles-1.0.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2"}, - {file = "watchfiles-1.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6"}, - {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234"}, - {file = "watchfiles-1.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2"}, - {file = "watchfiles-1.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663"}, - {file = 
"watchfiles-1.0.5-cp312-cp312-win32.whl", hash = "sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249"}, - {file = "watchfiles-1.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705"}, - {file = "watchfiles-1.0.5-cp312-cp312-win_arm64.whl", hash = "sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417"}, - {file = "watchfiles-1.0.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d"}, - {file = "watchfiles-1.0.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a"}, - {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827"}, - {file = "watchfiles-1.0.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a"}, - {file = "watchfiles-1.0.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936"}, - {file = "watchfiles-1.0.5-cp313-cp313-win32.whl", hash = "sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc"}, - {file = "watchfiles-1.0.5-cp313-cp313-win_amd64.whl", hash = "sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11"}, - {file = "watchfiles-1.0.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2cfb371be97d4db374cba381b9f911dd35bb5f4c58faa7b8b7106c8853e5d225"}, - {file = "watchfiles-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a3904d88955fda461ea2531fcf6ef73584ca921415d5cfa44457a225f4a42bc1"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7a21715fb12274a71d335cff6c71fe7f676b293d322722fe708a9ec81d91f5"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dfd6ae1c385ab481766b3c61c44aca2b3cd775f6f7c0fa93d979ddec853d29d5"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b659576b950865fdad31fa491d31d37cf78b27113a7671d39f919828587b429b"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1909e0a9cd95251b15bff4261de5dd7550885bd172e3536824bf1cf6b121e200"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:832ccc221927c860e7286c55c9b6ebcc0265d5e072f49c7f6456c7798d2b39aa"}, - {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:85fbb6102b3296926d0c62cfc9347f6237fb9400aecd0ba6bbda94cae15f2b3b"}, - {file = "watchfiles-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:15ac96dd567ad6c71c71f7b2c658cb22b7734901546cd50a475128ab557593ca"}, - {file = "watchfiles-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b6227351e11c57ae997d222e13f5b6f1f0700d84b8c52304e8675d33a808382"}, - {file = "watchfiles-1.0.5-cp39-cp39-win32.whl", hash = "sha256:974866e0db748ebf1eccab17862bc0f0303807ed9cda465d1324625b81293a18"}, - {file = "watchfiles-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:9848b21ae152fe79c10dd0197304ada8f7b586d3ebc3f27f43c506e5a52a863c"}, - {file = "watchfiles-1.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f59b870db1f1ae5a9ac28245707d955c8721dd6565e7f411024fa374b5362d1d"}, - {file = "watchfiles-1.0.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9475b0093767e1475095f2aeb1d219fb9664081d403d1dff81342df8cd707034"}, - {file = "watchfiles-1.0.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc533aa50664ebd6c628b2f30591956519462f5d27f951ed03d6c82b2dfd9965"}, - {file = "watchfiles-1.0.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed1cd825158dcaae36acce7b2db33dcbfd12b30c34317a88b8ed80f0541cc57"}, - {file = "watchfiles-1.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:554389562c29c2c182e3908b149095051f81d28c2fec79ad6c8997d7d63e0009"}, - {file = "watchfiles-1.0.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a74add8d7727e6404d5dc4dcd7fac65d4d82f95928bbee0cf5414c900e86773e"}, - {file = "watchfiles-1.0.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb1489f25b051a89fae574505cc26360c8e95e227a9500182a7fe0afcc500ce0"}, - {file = "watchfiles-1.0.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0901429650652d3f0da90bad42bdafc1f9143ff3605633c455c999a2d786cac"}, - {file = "watchfiles-1.0.5.tar.gz", hash = "sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9"}, + {file = "watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9"}, + {file = "watchfiles-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72"}, + {file = "watchfiles-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-win32.whl", hash = "sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587"}, + {file = "watchfiles-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82"}, + {file = "watchfiles-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2"}, + {file = "watchfiles-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f"}, + {file = "watchfiles-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4"}, + {file = "watchfiles-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d"}, + {file = "watchfiles-1.1.0-cp311-cp311-win32.whl", hash = "sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2"}, + {file = "watchfiles-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12"}, + {file = "watchfiles-1.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a"}, + {file = "watchfiles-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179"}, + {file = "watchfiles-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee"}, + {file = 
"watchfiles-1.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f"}, + {file = "watchfiles-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4"}, + {file = "watchfiles-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f"}, + {file = "watchfiles-1.1.0-cp312-cp312-win32.whl", hash = "sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd"}, + {file = "watchfiles-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47"}, + {file = "watchfiles-1.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6"}, + {file = "watchfiles-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30"}, + {file = "watchfiles-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c"}, + {file = "watchfiles-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b"}, + {file = "watchfiles-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb"}, + {file = "watchfiles-1.1.0-cp313-cp313-win32.whl", hash = "sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9"}, + {file = "watchfiles-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7"}, + {file = "watchfiles-1.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5"}, + {file = "watchfiles-1.1.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1"}, + {file = "watchfiles-1.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20"}, + {file = "watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef"}, + {file = "watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb"}, + {file = "watchfiles-1.1.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297"}, + {file = "watchfiles-1.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e"}, + {file = "watchfiles-1.1.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b"}, + {file = "watchfiles-1.1.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259"}, + {file = "watchfiles-1.1.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f"}, + {file = "watchfiles-1.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8"}, + {file = 
"watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147"}, + {file = "watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8"}, + {file = "watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db"}, + {file = "watchfiles-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa"}, + {file = "watchfiles-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e"}, + {file = "watchfiles-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86"}, + {file = "watchfiles-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f"}, + {file = "watchfiles-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267"}, + {file = "watchfiles-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6"}, + {file = 
"watchfiles-1.1.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277"}, + {file = "watchfiles-1.1.0.tar.gz", hash = "sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575"}, ] [package.dependencies] @@ -1244,7 +1314,6 @@ version = "15.0.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, @@ -1318,6 +1387,6 @@ files = [ ] [metadata] -lock-version = "2.1" +lock-version = "2.0" python-versions = "^3.10" -content-hash = "4db924267bcc466f893e5d4d306e13231f8a5f866996cd2496b8c973a8220d6d" +content-hash = "9848f748229decb817b16158a9905c3d5d4fdc86813ce00a6036816c91a93b7b" diff --git a/docs/pyproject.toml b/docs/pyproject.toml index a83c33473..8eff382e5 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -14,7 +14,9 @@ sphinx-autobuild = "^2024.4.19" Sphinx = "^7.3.7" sphinx-multiversion-scylla = "^0.3.1" sphinx-sitemap = "^2.6.0" -redirects_cli ="^0.1.3" +redirects_cli = "^0.1.3" +breathe = "4.35.0" +sphinx-scylladb-markdown = "^0.1.2" [build-system] requires = ["poetry>=1.8.0"] diff --git a/docs/sample-page.rst b/docs/sample-page.rst deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst new file mode 100644 index 000000000..2c085634d --- /dev/null +++ b/docs/source/api/index.rst @@ -0,0 +1,10 @@ +API Documentation +================= + +Classes manifested in the C API of the CPP-over-Rust Driver for ScyllaDB. + +.. toctree:: + :maxdepth: 2 + :glob: + + * diff --git a/docs/source/conf.py b/docs/source/conf.py index 163202e3c..91aaf705c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- import os +import re import sys import warnings from datetime import date @@ -37,13 +38,15 @@ "sphinx_scylladb_theme", "sphinx_multiversion", # optional "myst_parser", # optional + 'breathe', + 'sphinx_scylladb_markdown', ] # The suffix(es) of source filenames. 
source_suffix = [".rst", ".md"] # The master toctree document. -master_doc = "index" +master_doc = "contents" # General information about the project. project = "ScyllaDB CPP-Rust Driver" @@ -95,6 +98,43 @@ smv_outputdir_format = "{ref.name}" # -- Options for HTML output ---------------------------------------- + +# -- Options for Doxygen (API Reference) +breathe_projects = { + 'API': "../../doxygen/xml/" +} +breathe_default_project = 'API' +breathe_default_members = ('members', 'undoc-members') + +# Autogenerate API reference +def _generate_structs(outdir, structs, project): + """Write structs docs in the designated outdir folder""" + for obj in structs: + with open(outdir + '/struct.' + obj + '.rst', 'w') as t_file: + t_file.write(obj + "\n" + "=" * len(obj) + "\n\n" + ".. doxygenstruct:: " + obj +" \n :project: " + project) + +def _generate_doxygen_rst(xmldir, outdir): + """Autogenerate doxygen docs in the designated outdir folder""" + structs = [] + files = os.listdir(os.path.join(os.path.dirname(__file__), xmldir)) + for file_name in files: + if 'struct' in file_name and '__' not in file_name: + structs.append(file_name + .replace('struct_', '') + .replace('_', ' ') + .replace('.xml','') + .title() + .replace(' ', '')) + _generate_structs(outdir, structs, breathe_default_project) + +def generate_doxygen(app): + DOXYGEN_XML_DIR = breathe_projects[breathe_default_project] + _generate_doxygen_rst(DOXYGEN_XML_DIR, app.builder.srcdir + '/api') + +# -- Options for sitemap extension + +sitemap_url_scheme = '/stable/{link}' + # The theme to use for pages. html_theme = "sphinx_scylladb_theme" html_theme_path = ["../.."] @@ -108,7 +148,7 @@ "hide_feedback_buttons": "false", "github_issues_repository": "scylladb/cpp-rust-driver", "github_repository": "scylladb/cpp-rust-driver", - "site_description": "API-compatible rewrite of scylladb/cpp-driver as a wrapper for Rust driver.", + "site_description": "API-compatible rewrite of scylladb/cpp-driver as a wrapper over ScyllaDB Rust driver.", "hide_version_dropdown": [], "zendesk_tag": "gq6ltsh3nfex3cnwfy4aj9", "versions_unstable": UNSTABLE_VERSIONS, @@ -132,6 +172,12 @@ # -- Initialize Sphinx ---------------------------------------------- +def replace_relative_links(app, docname, source): + result = source[0] + for item in app.config.replacements: + for key, value in item.items(): + result = re.sub(key, value, result) + source[0] = result def setup(sphinx): warnings.filterwarnings( @@ -139,3 +185,16 @@ def setup(sphinx): category=UserWarning, message=r".*Container node skipped.*", ) + + # Workaround to replace DataStax links + # FIXME(David Garcia) - Make this workaround work correctly. + replacements = [ + {"https://cpp-rust-driver.docs.scylladb.com/stable/api/cassandra.h/": "https://cpp-rust-driver.docs.scylladb.com/" + smv_latest_version + "/api"}, + {"http://datastax.github.io/cpp-driver": "https://cpp-rust-driver.docs.scylladb.com/" + smv_latest_version}, + {"http://docs.datastax.com/en/developer/cpp-driver/latest": "https://cpp-rust-driver.docs.scylladb.com/" + smv_latest_version}, + ] + sphinx.add_config_value('replacements', replacements, True) + sphinx.connect('source-read', replace_relative_links) + + # Autogenerate API Reference + sphinx.connect("builder-inited", generate_doxygen) diff --git a/docs/source/contents.rst b/docs/source/contents.rst new file mode 100644 index 000000000..9601f95ec --- /dev/null +++ b/docs/source/contents.rst @@ -0,0 +1,20 @@ +======================== +ScyllaDB CPP-Rust Driver +======================== + +.. 
toctree::
+   :hidden:
+   :glob:
+   :titlesonly:
+
+   index
+   api/index
+   topics/getting-started
+   topics/architecture-overview
+   topics/installation
+   topics/building
+   topics/testing
+   topics/using/index
+   topics/configuration/index
+   topics/security/index
+   topics/observability/index
diff --git a/docs/source/index.md b/docs/source/index.md
new file mode 100644
index 000000000..759f8b0ff
--- /dev/null
+++ b/docs/source/index.md
@@ -0,0 +1,23 @@
+# CPP-over-Rust Driver
+This book contains documentation for [cpp-rust-driver](https://github.com/scylladb/cpp-rust-driver) - an API-compatible rewrite of [cpp-driver](https://github.com/scylladb/cpp-driver) as a wrapper over [ScyllaDB Rust Driver](https://github.com/scylladb/scylla-rust-driver).
+Although optimized for ScyllaDB, the driver is also compatible with [Apache Cassandra®](https://cassandra.apache.org/).
+
+## Other documentation
+* [Examples](https://github.com/scylladb/cpp-rust-driver/tree/master/examples)
+* ScyllaDB CPP Driver lessons - [part 1](https://university.scylladb.com/courses/using-scylla-drivers/lessons/cpp-driver-part-1/)
+  and [part 2](https://university.scylladb.com/courses/using-scylla-drivers/lessons/cpp-driver-part-2-prepared-statements/) at Scylla University.
+* [ScyllaDB documentation](https://docs.scylladb.com)
+* [Cassandra® documentation](https://cassandra.apache.org/doc/latest/)
+
+
+## Contents
+* {doc}`API Reference <api/index>` - Listing of the whole Driver API.
+* [Getting Started](topics/getting-started.md) - Getting the driver built, executing CQL statements, and examining query results.
+* [Architecture Overview](topics/architecture-overview.md) - Grasping the driver's main concepts.
+* [Installation](topics/installation.md) - How to install the driver.
+* [Building](topics/building.md) - How to build the driver from source.
+* [Testing](topics/testing.md) - Integration testing architecture of the driver.
+* [Using The Driver](topics/using/index.md) - How to use the driver in your application.
+* [Configuration](topics/configuration/index.md) - Various configuration options and performance tips for the driver.
+* [Security](topics/security/index.md) - Security features, such as authentication and encryption.
+* [Observability](topics/observability/index.md) - Ways to observe the driver's activities and performance.
diff --git a/docs/source/index.rst b/docs/source/index.rst
deleted file mode 100644
index 65490e884..000000000
--- a/docs/source/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-========================
-ScyllaDB CPP-Rust Driver
-========================
-
-Lorem ipsum.
-
-.. toctree::
-
-   sample-page
\ No newline at end of file
diff --git a/docs/source/sample-page.rst b/docs/source/sample-page.rst
deleted file mode 100644
index a607f9c35..000000000
--- a/docs/source/sample-page.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-===========
-Sample page
-===========
-
-Lorem ipsum.
\ No newline at end of file
diff --git a/docs/source/topics/architecture-overview.md b/docs/source/topics/architecture-overview.md
new file mode 100644
index 000000000..7c3f83879
--- /dev/null
+++ b/docs/source/topics/architecture-overview.md
@@ -0,0 +1,70 @@
+# Architecture Overview
+
+## Cluster
+
+The [`CassCluster`] object describes a ScyllaDB/Cassandra cluster's configuration.
+The default cluster object is good for most clusters and only requires one or
+more contact points in order to establish a session connection.
+Once a session is connected using a cluster object, its configuration is
+constant. Modifying the cluster object's configuration after a session is
+established does not alter the session's configuration.
+
+## Session
+
+The [`CassSession`] object is used for query execution. Internally, a session
+object also manages a pool of client connections to ScyllaDB/Cassandra and uses
+a load balancing policy to distribute requests across those connections. An
+application should create a single session object per keyspace. A session
+object is designed to be created once, reused, and shared by multiple threads
+within the application. The throughput of a session can be scaled by
+increasing the number of I/O threads. An I/O thread is used to drive the inner
+driver machinery, which, among other things, sends requests to ScyllaDB/Cassandra
+and handles responses. The number of I/O threads defaults to one per CPU core, but it can be
+configured using [`cass_cluster_set_num_threads_io()`]. It's generally much better
+to create a single session with more I/O threads than multiple sessions with
+a smaller number of I/O threads, especially since a session is a heavyweight
+object: it keeps the connection pool and up-to-date cluster metadata.
+
+## Asynchronous I/O
+
+Each session maintains a number of connections for each node in the cluster.
+This number can be controlled by `cass_cluster_set_core_connections_per_host()`.
+In the case of ScyllaDB, it is possible to specify the number of connections per **shard**
+instead of per node by calling `cass_cluster_set_core_connections_per_shard()`,
+which is the recommended way to configure the driver for ScyllaDB.
+
+Each of those connections can handle several simultaneous requests using
+pipelining. Asynchronous I/O and pipelining together allow each connection
+to handle several (up to 32k) in-flight requests concurrently.
+This significantly reduces the number of connections required to be open to
+ScyllaDB/Cassandra and allows the driver to batch requests destined for the
+same node.
+
+## Thread safety
+
+A [`CassSession`] is designed to be used concurrently from multiple threads.
+[`CassFuture`] is also thread safe. Other than these exceptions, in general,
+functions that might modify an object's state are **NOT** thread safe. Objects
+that are immutable (marked 'const') can be read safely by multiple threads.
+
+**NOTE:** The object/resource free-ing functions (e.g. `cass_cluster_free`,
+`cass_session_free`, ... `cass_*_free`) cannot be called concurrently on the
+same instance of an object.
+
+## Memory handling
+
+Values such as strings (`const char*`), bytes and decimals
+(`const cass_bytes_t*`) point to memory held by the result object. These
+values are valid only as long as the result object isn't freed, and they
+**must** be copied into application memory if they need to live
+longer than the result object's lifetime. Primitive types such as
+[`cass_int32_t`] are copied by the driver because it can be done cheaply
+without incurring extra allocations.
+
+**NOTE:** Advancing an iterator invalidates the value it previously returned.
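+
+To make the ownership rule concrete, here is a minimal sketch (it assumes a
+`query_future` obtained from an earlier `cass_session_execute()` call and a text
+column at index 0; includes and error checks are omitted):
+
+```c
+const CassResult* result = cass_future_get_result(query_future);
+const CassRow* row = cass_result_first_row(result);
+
+/* `name` points into memory owned by `result` */
+const char* name;
+size_t name_length;
+cass_value_get_string(cass_row_get_column(row, 0), &name, &name_length);
+
+/* Copy the value out before the result object is freed */
+char* name_copy = (char*)malloc(name_length + 1);
+memcpy(name_copy, name, name_length);
+name_copy[name_length] = '\0';
+
+cass_result_free(result);
+/* `name` is now dangling; only `name_copy` remains valid */
+
+free(name_copy);
+```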
+
+[`cass_int32_t`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/cassandra.h#cass-int32-t
+[`cass_cluster_set_num_threads_io()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster#function-cass_cluster_set_num_threads_io
+[`CassCluster`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster
+[`CassFuture`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassFuture
+[`CassSession`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSession
diff --git a/docs/source/topics/building.md b/docs/source/topics/building.md
new file mode 100644
index 000000000..2d071e7fe
--- /dev/null
+++ b/docs/source/topics/building.md
@@ -0,0 +1,176 @@
+# Building
+
+The ScyllaDB C/C++ Driver will build on most standard Unix-like platforms.
+Packages are available for the platforms listed in [Installation](installation.md).
+
+These packages can be successfully installed on other, compatible systems, but
+we do not support such configurations and recommend building from sources
+instead.
+
+## Compatibility
+
+* Compilers:
+  - rustc 1.82+ (as part of the Rust toolchain, available at [rustup.rs](https://rustup.rs));
+  - any reasonable C/C++ compiler, such as GCC or Clang (for tests & examples).
+
+## Dependencies
+
+The C/C++ driver depends on the following software:
+
+* [CMake] v3.15+
+* [libuv] 1.x (only for tests and examples, not required for the driver itself)
+* [OpenSSL] versions 1.0.1 through 3.x.x are supported (optional, only if you want to enable TLS support in the driver)
+
+__\*__ Use the `CASS_USE_OPENSSL` CMake option to enable/disable OpenSSL
+       support. Disabling this option will disable SSL/TLS protocol support
+       within the driver; defaults to `On`.
+
+Note that only `CMake` is mandatory for building the driver itself:
+- `libuv` is only required for building tests and some of the examples, not the driver itself.
+- `OpenSSL` is only required if you want to enable TLS support in the driver.
+
+## Linux/MacOS
+
+### Installing dependencies
+
+#### Initial environment setup
+
+##### RHEL/Rocky (dnf)
+
+```bash
+dnf install automake cmake gcc-c++ git libtool
+```
+
+##### Ubuntu (APT)
+
+```bash
+apt update
+apt install build-essential cmake git
+```
+
+##### Mac OS (Brew)
+
+[Homebrew][Homebrew] (or brew) is a free and open-source software package
+management system that simplifies the installation of software on the Mac OS
+operating system. Ensure [Homebrew is installed][Homebrew] before proceeding.
+
+```bash
+brew update
+brew upgrade
+brew install autoconf automake cmake libtool
+```
+
+#### libuv
+
+**libuv is required only for tests and examples, not for the driver itself.**
+libuv v1.x is recommended. When using a package manager for your operating system,
+make sure you install v1.x.
+
+##### Ubuntu
+
+```bash
+sudo apt update
+sudo apt install libuv-dev
+```
+
+##### RHEL/Rocky
+
+```bash
+sudo dnf install libuv-devel
+```
+
+##### Mac OS (Brew)
+
+```bash
+brew install libuv
+```
+
+##### Manually build and install
+
+_The following procedures should be performed if packages are not available for
+your system._
+
+Browse <https://dist.libuv.org/dist/> and download the newest stable version available.
+Follow the instructions in the downloaded package to build and install it.
+
+#### OpenSSL
+
+##### RHEL/Rocky (dnf)
+
+```bash
+dnf install openssl-devel
+```
+
+##### Ubuntu (APT)
+
+```bash
+apt install libssl-dev
+```
+
+##### Mac OS (Brew)
+
+```bash
+brew install openssl
+```
+
+__Note__: For Mac OS X, a link needs to be created in order to make OpenSSL
+          available to the build:
+
+```bash
+brew link --force openssl
+```
+
+##### Manually build and install
+
+Browse <https://openssl-library.org/source/> and download the newest stable version available.
+Follow the instructions in the downloaded package to build and install it.
+
+### Building and installing the C/C++ driver
+
+```bash
+mkdir build
+pushd build
+cmake ..
+make
+make install
+popd
+```
+
+#### Building examples (optional)
+
+Examples are not built by default and need to be enabled. Update your [CMake]
+line to build examples.
+
+```bash
+cmake -DCASS_BUILD_EXAMPLES=On ..
+```
+
+#### Building tests (optional)
+
+Tests (integration and unit) are not built by default and need to be enabled.
+
+##### All tests
+
+```bash
+cmake -DCASS_BUILD_TESTS=On ..
+```
+
+__Note__: This will build both the integration and unit tests.
+
+##### Integration tests
+
+```bash
+cmake -DCASS_BUILD_INTEGRATION_TESTS=On ..
+```
+
+##### Unit tests
+
+```bash
+cmake -DCASS_BUILD_UNIT_TESTS=On ..
+```
+
+[download server]: https://github.com/scylladb/cpp-rust-driver/releases
+[Homebrew]: https://brew.sh
+[CMake]: http://www.cmake.org/download
+[libuv]: http://libuv.org
+[OpenSSL]: https://www.openssl.org
diff --git a/docs/source/topics/configuration/client-identity.md b/docs/source/topics/configuration/client-identity.md
new file mode 100644
index 000000000..cf0589e0d
--- /dev/null
+++ b/docs/source/topics/configuration/client-identity.md
@@ -0,0 +1,48 @@
+# Client Configuration
+
+Client configuration allows an application to provide additional metadata to
+the cluster, which can be useful for troubleshooting and performing diagnostics.
+In addition to the optional application metadata, the cluster will automatically
+be provided with the driver's name, the driver's version, and a unique session
+identifier.
+
+## Application Options (Optional)
+
+Application name and version metadata can be provided to the cluster during
+configuration. This information can be used to isolate specific applications on
+the server-side when troubleshooting or performing diagnostics on clusters that
+support multiple applications.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Assign a name for the application connecting to the cluster */
+cass_cluster_set_application_name(cluster, "Application Name");
+
+/* Assign a version for the application connecting to the cluster */
+cass_cluster_set_application_version(cluster, "1.0.0");
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+## Client Identification
+
+Each session is assigned a unique identifier (UUID) which can be used to
+identify specific client connections server-side. The identifier can also be
+retrieved client-side using the following function:
+
+```c
+CassSession* session = cass_session_new();
+
+/* Retrieve the session's unique identifier */
+CassUuid client_id = cass_session_get_client_id(session);
+
+/* ... */
+
+cass_session_free(session);
+```
+
+**Note**: A session's unique identifier is constant for its lifetime and does
+          not change when re-establishing a connection to a cluster.
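+
+If the identifier needs to be logged or compared as text, it can be converted with
+the UUID string helper from `cassandra.h` (a minimal sketch; `printf` requires
+`<stdio.h>`, and error handling is omitted):
+
+```c
+CassSession* session = cass_session_new();
+
+/* Convert the session's unique identifier into its printable string form */
+CassUuid client_id = cass_session_get_client_id(session);
+char client_id_str[CASS_UUID_STRING_LENGTH];
+cass_uuid_string(client_id, client_id_str);
+printf("Session client id: %s\n", client_id_str);
+
+/* ... */
+
+cass_session_free(session);
+```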
diff --git a/docs/source/topics/configuration/connection.md b/docs/source/topics/configuration/connection.md
new file mode 100644
index 000000000..b2e002ea9
--- /dev/null
+++ b/docs/source/topics/configuration/connection.md
@@ -0,0 +1,43 @@
+# Connection
+
+## Heartbeats
+
+To prevent intermediate network devices (routers, switches, etc.) from
+disconnecting pooled connections, the driver periodically sends a lightweight
+heartbeat request (using an [`OPTIONS`] protocol request). By default, the
+driver sends a heartbeat every 30 seconds. This can be changed or disabled
+(0-second interval) using the following:
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Change the heartbeat interval to 1 minute */
+cass_cluster_set_connection_heartbeat_interval(cluster, 60);
+
+/* Disable heartbeat requests */
+cass_cluster_set_connection_heartbeat_interval(cluster, 0);
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+Heartbeats are also used to detect unresponsive connections. An idle timeout
+setting controls the amount of time a connection is allowed to be without a
+successful heartbeat before being terminated and scheduled for reconnection. This
+interval can be changed from the default of 60 seconds:
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Change the idle timeout to 2 minutes */
+cass_cluster_set_connection_idle_timeout(cluster, 120);
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+It can be disabled by setting the value to a very long timeout or by disabling
+heartbeats.
+
+[`OPTIONS`]: https://github.com/apache/cassandra/blob/a39f3b066f010d465a1be1038d5e06f1e31b0391/doc/native_protocol_v4.spec#L330
diff --git a/docs/source/topics/configuration/execution-profiles.md b/docs/source/topics/configuration/execution-profiles.md
new file mode 100644
index 000000000..36599ef34
--- /dev/null
+++ b/docs/source/topics/configuration/execution-profiles.md
@@ -0,0 +1,106 @@
+# Execution Profiles
+
+Execution profiles provide a mechanism to group together a set of configuration
+options and reuse them across different query executions. These options include:
+
+* Request timeout
+* Consistency and serial consistency level
+* Load balancing policy (*)
+* Retry policy
+* Speculative execution policy
+
+__*__ - Load balancing policies are disabled by default and must be explicitly
+        enabled for child policy settings to be applied (e.g. token-aware,
+        latency-aware, and filtering options)
+
+Execution profiles are being introduced to help deal with the exploding number
+of configuration options, especially as the database platform evolves into more
+complex workloads. The number of options being introduced with the execution
+profiles is limited and may be expanded based on feedback from the community.
+
+## Creating Execution Profiles
+
+An execution profile must be associated with a cluster and will be made
+available for that session connection to attach to any statement before query
+execution.
+
+```c
+/* Create a cluster object */
+CassCluster* cluster = cass_cluster_new();
+
+/* Create a new execution profile */
+CassExecProfile* exec_profile = cass_execution_profile_new();
+
+/* Set execution profile options */
+cass_execution_profile_set_request_timeout(exec_profile,
+                                           120000); /* 2 min timeout */
+cass_execution_profile_set_consistency(exec_profile,
+                                       CASS_CONSISTENCY_ALL);
+
+/* Associate the execution profile with the cluster configuration */
+cass_cluster_set_execution_profile(cluster,
+                                   "long_query",
+                                   exec_profile);
+
+/* Execution profile may be freed once added to cluster configuration */
+cass_execution_profile_free(exec_profile);
+
+/* Provide the cluster object as configuration to connect the session */
+```
+
+The cluster configuration options will be used in place of any unassigned
+options after a connection is established. Once the execution profile is added
+to a cluster configuration, it is immutable, and any changes made will require
+the execution profile to be re-added before a session is connected in order for
+those settings to be available during query execution.
+
+__Note__: There is no limit on how many execution profiles can be associated
+          with a cluster/session; however, the control connection may require
+          more time to update the additional load balancing policies.
+
+## Using Execution Profiles
+
+Execution profiles are copied from the cluster object to the session object
+during the session connection process. To use an execution profile, its name
+must be assigned to a statement.
+
+```c
+void execute_with_a_profile(CassSession* session) {
+  CassStatement* statement = cass_statement_new("SELECT * FROM ...", 0);
+
+  /* OR create a prepared statement */
+
+  /* Assign the execution profile to the statement */
+  cass_statement_set_execution_profile(statement, "long_query");
+
+  /* Execute the statement */
+  CassFuture* query_future = cass_session_execute(session, statement);
+
+  /* ... */
+
+  cass_future_free(query_future);
+  cass_statement_free(statement);
+}
+```
+
+__Note__: Use `cass_batch_set_execution_profile(batch, "name")` for batch
+          statements.
+
+## Using the Default Cluster Configuration Options
+
+For statements that do not have an assigned execution profile, the default
+cluster configuration options will be used. Those statements that have already
+defined an execution profile and are being re-used can pass a `NULL` or empty
+string `""` when assigning the execution profile.
+
+```c
+CassStatement* statement = cass_statement_new("SELECT * FROM ...", 0);
+
+/* Remove the assigned execution profile */
+cass_statement_set_execution_profile(statement, NULL);
+
+/* ... */
+
+cass_statement_free(statement);
+```
diff --git a/docs/source/topics/configuration/index.md b/docs/source/topics/configuration/index.md
new file mode 100644
index 000000000..fa433e02f
--- /dev/null
+++ b/docs/source/topics/configuration/index.md
@@ -0,0 +1,24 @@
+# Configuration
+
+The following sections describe various configuration options and performance tips for the driver.
+
+* [Load Balancing](load-balancing.md) - How the driver distributes requests across nodes in a cluster to optimize performance and availability.
+* [Retries](retry-policies.md) - Handling transient errors and ensuring query reliability.
+* [Speculative Execution](speculative-execution.md) - Executing requests speculatively to improve performance and reduce latency.
+* [Connection](connection.md) - Connection heartbeats and idle connection timeouts.
+* [Execution Profiles](execution-profiles.md) - Grouping the most common configuration settings for different execution scenarios.
+* [Performance Tips](performance-tips.md) - Best practices for efficiency and how to tune the driver for optimal performance.
+* [Client Identity](client-identity.md) - Configuring the client identity visible to the server.
+
+```{eval-rst}
+.. toctree::
+   :hidden:
+   :glob:
+
+   load-balancing
+   retry-policies
+   speculative-execution
+   connection
+   execution-profiles
+   performance-tips
+   client-identity
+```
diff --git a/docs/source/topics/configuration/load-balancing.md b/docs/source/topics/configuration/load-balancing.md
new file mode 100644
index 000000000..e19bdc11b
--- /dev/null
+++ b/docs/source/topics/configuration/load-balancing.md
@@ -0,0 +1,268 @@
+# Load balancing
+
+Load balancing controls how queries are distributed to nodes in a ScyllaDB/Cassandra
+cluster.
+
+Without additional configuration, the C/C++ driver defaults to using datacenter-unaware
+load balancing with token-aware routing. This means that the driver will use the primary key
+of queries to route them directly to the nodes where the corresponding data is located.
+It will not consider datacenter locality when distributing queries, because it does not
+know if there exists a datacenter that should be preferred over others, and, if so,
+which one.
+
+## Round-robin Load Balancing
+
+This load balancing policy equally distributes queries across the cluster without
+consideration of datacenter locality. This policy should normally be
+accompanied by token-aware routing to ensure that queries are sent directly
+to the nodes where the data is located.
+
+## Datacenter-aware Load Balancing
+
+This load balancing policy equally distributes queries to nodes in the local
+datacenter. Nodes in remote datacenters are only used when all local nodes are
+unavailable. Additionally, remote nodes are only considered when non-local
+consistency levels are used or if the driver is configured to use remote nodes
+with the [`allow_remote_dcs_for_local_cl`] setting.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+const char* local_dc = "dc1"; /* Local datacenter name */
+
+/*
+ * Use up to 2 remote datacenter nodes for remote consistency levels
+ * or when `allow_remote_dcs_for_local_cl` is enabled.
+ */
+unsigned used_hosts_per_remote_dc = 2;
+
+/* Don't use remote datacenter nodes for local consistency levels */
+cass_bool_t allow_remote_dcs_for_local_cl = cass_false;
+
+cass_cluster_set_load_balance_dc_aware(cluster,
+                                       local_dc,
+                                       used_hosts_per_remote_dc,
+                                       allow_remote_dcs_for_local_cl);
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+## Token-aware Routing
+
+Token-aware routing uses the primary key of queries to route requests directly to
+the ScyllaDB/Cassandra nodes where the data is located. Using this policy avoids having
+to route requests through an extra coordinator node in the ScyllaDB/Cassandra cluster. This
+can improve query latency and reduce load on the ScyllaDB/Cassandra nodes. It can be used
+in conjunction with other load balancing and routing policies.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Enable token-aware routing (this is the default setting) */
+cass_cluster_set_token_aware_routing(cluster, cass_true);
+
+/* Disable token-aware routing */
+cass_cluster_set_token_aware_routing(cluster, cass_false);
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+## Latency-aware Routing
+
+Latency-aware routing tracks the latency of queries to avoid sending new queries
+to poorly performing ScyllaDB/Cassandra nodes. It can be used in conjunction with other
+load balancing and routing policies.
+The way latency-aware routing interacts with other policies is tricky and may even worsen
+the performance of your application. It is recommended to use it only after
+comparing the performance of your application with and without it.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Disable latency-aware routing (this is the default setting) */
+cass_cluster_set_latency_aware_routing(cluster, cass_false);
+
+/* Enable latency-aware routing */
+cass_cluster_set_latency_aware_routing(cluster, cass_true);
+
+/*
+ * Configure latency-aware routing settings
+ */
+
+/* Up to 2 times the best performing latency is okay */
+cass_double_t exclusion_threshold = 2.0;
+
+/* Use the default scale */
+cass_uint64_t scale_ms = 100;
+
+/* Retry a node after 10 seconds even if it was performing poorly before */
+cass_uint64_t retry_period_ms = 10000;
+
+/* Find the best performing latency every 100 milliseconds */
+cass_uint64_t update_rate_ms = 100;
+
+/* Only consider the average latency of a node after it's been queried 50 times */
+cass_uint64_t min_measured = 50;
+
+cass_cluster_set_latency_aware_routing_settings(cluster,
+                                                exclusion_threshold,
+                                                scale_ms,
+                                                retry_period_ms,
+                                                update_rate_ms,
+                                                min_measured);
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+## Filtering policies
+
+### Whitelist
+
+This policy ensures that only hosts from the provided whitelist filter will
+ever be used. Any host that is not contained within the whitelist will be
+ignored and a connection will not be established. It can be used in
+conjunction with other load balancing and routing policies.
+
+NOTE: Using this policy to limit the connections of the driver to a predefined
+      set of hosts will defeat the auto-detection features of the driver. If
+      the goal is to limit connections to hosts in a local datacenter, use the
+      datacenter-aware policy in conjunction with the round-robin load balancing policy.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Set the list of predefined hosts the driver is allowed to connect to */
+cass_cluster_set_whitelist_filtering(cluster,
+                                     "127.0.0.1, 127.0.0.3, 127.0.0.5");
+
+/* The whitelist can be cleared (and disabled) by using an empty string */
+cass_cluster_set_whitelist_filtering(cluster, "");
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+### Blacklist
+
+This policy is the inverse of the whitelist policy: hosts provided in the
+blacklist filter will be ignored and a connection will not be established.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Set the list of predefined hosts the driver is NOT allowed to connect to */
+cass_cluster_set_blacklist_filtering(cluster,
+                                     "127.0.0.1, 127.0.0.3, 127.0.0.5");
+
+/* The blacklist can be cleared (and disabled) by using an empty string */
+cass_cluster_set_blacklist_filtering(cluster, "");
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+### Datacenter
+
+Filtering can also be performed on all hosts in a datacenter or multiple
+datacenters when using the whitelist/blacklist datacenter filtering policies.
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Set the list of predefined datacenters the driver is allowed to connect to */
+cass_cluster_set_whitelist_dc_filtering(cluster, "dc2, dc4");
+
+/* The datacenter whitelist can be cleared/disabled by using an empty string */
+cass_cluster_set_whitelist_dc_filtering(cluster, "");
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+```c
+CassCluster* cluster = cass_cluster_new();
+
+/* Set the list of predefined datacenters the driver is NOT allowed to connect to */
+cass_cluster_set_blacklist_dc_filtering(cluster, "dc2, dc4");
+
+/* The datacenter blacklist can be cleared/disabled by using an empty string */
+cass_cluster_set_blacklist_dc_filtering(cluster, "");
+
+/* ... */
+
+cass_cluster_free(cluster);
+```
+
+## Shard-Awareness
+
+ScyllaDB is built around the concept of a *sharded architecture*. What it means for
+clients is that each piece of data is bound to specific CPU(s) on specific
+node(s). The ability of the driver to query a specific shard (CPU) is called
+"shard-awareness".
+
+One can think of shard-awareness as token-awareness brought to a higher level.
+Token-aware drivers execute the queries on specific node(s) - where the data
+of interest "belongs". This eliminates the network traffic between the
+coordinator node and the "data owning node" and thus leads to performance
+improvements. That idea can be taken further: the driver can open a separate
+connection to every CPU on the target node and use the *right connection* to
+query the *right CPU on the right node*. This eliminates the cross-CPU traffic
+on that node and results in even greater speedups.
+
+**NOTE:** Only prepared statements benefit from shard-awareness.
+
+### "Basic" shard-awareness
+
+Through extensions to the CQL protocol, a ScyllaDB node informs the incoming CQL
+connection about:
+
+1. the total number of shards within the node;
+2. the ID of the specific shard that handles this connection.
+
+The driver opens new connections until it reaches or exceeds the number specified
+by `cass_cluster_set_core_connections_per_shard()`. No particular action is needed to
+achieve shard-awareness this way, as this is the default behavior
+of the driver.
+
+### "Advanced" shard-awareness
+
+Since ScyllaDB 4.3, however, drivers can use a new, more powerful method of
+establishing per-shard connection pools. This is the recommended usage pattern,
+commonly referred to as "advanced" shard-awareness. The idea behind it is that
+ScyllaDB listens for CQL connections on an additional port, by default 19042.
+Connections incoming to that port are routed to the shard determined by the
+*client-side (ephemeral) port number*. Precisely, if a client socket has local
+port number `P`, then that connection lands on shard `P % shard_count`. The
+function of the usual port 9042 (`native_transport_port`) is unchanged and
+non-shard-aware drivers should continue using it.
+
+Advanced shard-awareness is the preferred mode because it reduces load on
+the cluster while building connection pools. The reason is that with basic
+shard-awareness, the driver keeps opening CQL connections until it randomly reaches
+each shard, often ending up with some excess connections being established and
+discarded soon after. In advanced mode, the driver opens only as many connections
+as needed.
+
+**NOTE:** It's important to unblock `native_shard_aware_transport_port` and/or
+`native_shard_aware_transport_port_ssl` in the firewall rules, if applicable.
+
+**NOTE:** If the client app runs behind a NAT (e.g. on a desktop in the office
+network) while the ScyllaDB cluster is hosted somewhere else (e.g. on Azure or
+AWS) then, most likely, the router at the office alters the client-side port
+numbers. In this case port-based ("advanced") shard selection will not work and
+will fall back to the "basic" mode.
+ +The advanced mode is also supported by the driver by default. + + +[`allow_remote_dcs_for_local_cl`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster#1a46b9816129aaa5ab61a1363489dccfd0 diff --git a/docs/source/topics/configuration/nonsupported.md b/docs/source/topics/configuration/nonsupported.md new file mode 100644 index 000000000..49348207d --- /dev/null +++ b/docs/source/topics/configuration/nonsupported.md @@ -0,0 +1,82 @@ + + +```{eval-rst} +:orphan: +``` + +# Nonsupported configuration features + +## Not implemented in the CPP-Rust Driver + +### Host State Changes + +The status and membership of a node can change within the life-cycle of the +cluster. A host listener callback can be used to detect these changes. + +**Important**: The driver runs the host listener callback on a thread that is + different from the application. Any data accessed in the + callback must be immutable or synchronized with a mutex, + semaphore, etc. + +```c +void on_host_listener(CassHostListenerEvent event, CassInet inet, void* data) { + /* Get the string representation of the inet address */ + char address[CASS_INET_STRING_LENGTH]; + cass_inet_string(inet, address); + + /* Perform application logic for host listener event */ + if (event == CASS_HOST_LISTENER_EVENT_ADD) { + printf("Host %s has been ADDED\n", address); + } else if (event == CASS_HOST_LISTENER_EVENT_REMOVE) { + printf("Host %s has been REMOVED\n", address); + } else if (event == CASS_HOST_LISTENER_EVENT_UP) { + printf("Host %s is UP\n", address); + } else if (event == CASS_HOST_LISTENER_EVENT_DOWN) { + printf("Host %s is DOWN\n", address); + } +} + +int main() { + CassCluster* cluster = cass_cluster_new(); + + /* Register the host listener callback */ + cass_cluster_set_host_listener_callback(cluster, on_host_listener, NULL); + + /* ... */ + + cass_cluster_free(cluster); +} +``` + +**Note**: Expensive (e.g. slow) operations should not be performed in host + listener callbacks. Performing expensive operations in a callback + will block or slow the driver's normal operation. + +### Reconnection Policy + +The reconnection policy controls the interval between each attempt for a given +connection. + +#### Exponential Reconnection Policy + +The exponential reconnection policy is the default reconnection policy. It +starts by using a base delay in milliseconds which is then exponentially +increased (doubled) during each reconnection attempt; up to the defined maximum +delay. + +**Note**: Once the connection is re-established, this policy will restart using + base delay if a reconnection occurs. + +#### Constant Reconnection Policy + +The constant reconnection policy is a fixed delay for each reconnection +attempt. diff --git a/docs/source/topics/configuration/performance-tips.md b/docs/source/topics/configuration/performance-tips.md new file mode 100644 index 000000000..b53d4d3ef --- /dev/null +++ b/docs/source/topics/configuration/performance-tips.md @@ -0,0 +1,92 @@ +# Performance Tips + +## General Tips + +### Use a single persistent session + +Sessions are expensive objects to create in both time and resources because they +maintain a pool of connections to your ScyllaDB/Cassandra cluster. An application should +create a minimal number of sessions and maintain them for the lifetime of an +application. + +### Use token-aware and latency-aware policies + +The token-aware load balancing can reduce the latency of requests by avoiding an +extra network hop through a coordinator node. 
When using the token-aware policy, requests are sent to one of the nodes where the data will be retrieved or stored, instead of routing the request through a proxy node (the coordinator node).

The latency-aware load balancing policy can also reduce the latency of requests by routing requests to nodes that have historically performed with the lowest latency. This can prevent requests from being sent to nodes that are underperforming.

Both [latency-aware] and [token-aware] can be used together to obtain the benefits of both.

### Use [paging] when retrieving large result sets

Using a large page size or a very high `LIMIT` clause can cause your application to wait a long time for each individual request. The driver's paging mechanism can be used to decrease the latency of individual requests.

### Choose a lower consistency level

Ultimately, choosing a consistency level is a trade-off between consistency and availability. Performance should not be a large deciding factor when choosing a consistency level. However, it can affect high-percentile latency numbers, because consistency levels greater than `ONE` require the coordinator node to wait for one or more replicas to respond before a request can complete. In multi-datacenter configurations, consistency levels such as `EACH_QUORUM` can cause a request to wait for replication across a slower cross-datacenter network link. More information about setting the consistency level can be found [here](https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/consistency/).

## Driver Tuning

Beyond the performance tips and best practices covered in the previous section, your application might benefit from tuning the finer-grained driver settings in this section to achieve optimal performance for its specific workload.

### Increasing core connections

In some workloads, throughput can be increased by increasing the number of core connections. By default, the driver uses a single core connection per host. It's recommended that you try increasing the core connections to two and slowly increase this number while doing performance testing. Two core connections is often a good setting, and increasing the core connections too high will decrease performance because having multiple connections to a single host inhibits the driver's ability to coalesce multiple requests into fewer system calls.

### Coalesce delay

The coalesce delay is an optimization to reduce the number of system calls required to process requests. This setting controls how long the driver's I/O threads wait for requests to accumulate before flushing them on to the wire. Larger values for the coalesce delay are preferred for throughput-based workloads because they can significantly reduce the number of system calls required to process requests.

In general, the coalesce delay should be increased for throughput-based workloads and can be decreased for latency-based workloads. Most importantly, the delay should consider the responsiveness guarantees of your application.

Note: Single, sporadic requests are generally not affected by this delay and are processed immediately.

### New request ratio

The new request ratio controls how much time an I/O thread spends processing new requests versus handling outstanding requests. This value is a percentage (with a value from 1 to 100), where larger values will dedicate more time to processing new requests and less time to outstanding requests. The goal of this setting is to balance the time spent processing new/outstanding requests and prevent either from fully monopolizing the I/O thread's processing time. It's recommended that your application decrease this value if computationally expensive or long-running future callbacks are used (via `cass_future_set_callback()`); otherwise this can be left unchanged.
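As a concrete illustration of the tuning knobs above, the sketch below raises the per-shard connection count using `cass_cluster_set_core_connections_per_shard()`, the setter mentioned in the shard-awareness section. The call shape (cluster plus a connection count) is assumed from its name, and the value of two is only a starting point to validate with your own performance testing.

```c
CassCluster* cluster = cass_cluster_new();

/* Start with two connections per shard and adjust based on benchmarks */
cass_cluster_set_core_connections_per_shard(cluster, 2);

/* ... configure contact points and connect a session ... */

cass_cluster_free(cluster);
```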
[token-aware]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/configuration/load-balancing#token-aware-routing
[latency-aware]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/configuration/load-balancing#latency-aware-routing
[paging]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/handling-results#paging

diff --git a/docs/source/topics/configuration/retry-policies.md b/docs/source/topics/configuration/retry-policies.md new file mode 100644 index 000000000..5bec39113 --- /dev/null +++ b/docs/source/topics/configuration/retry-policies.md @@ -0,0 +1,149 @@

# Retry policies

Retry policies allow the driver to automatically handle server-side failures when ScyllaDB/Cassandra is unable to fulfill the consistency requirement of a request.

**Important**: Retry policies do not handle client-side failures such as client-side timeouts or client-side connection issues. In these cases application code must handle the failure and retry the request. The driver will automatically recover requests that haven't been written, but once a request is written, the driver will only recover the request if the statement is marked as idempotent (see [`cass_statement_set_is_idempotent()`] and [`cass_batch_set_is_idempotent()`]). Otherwise, it will return an error and will not try to automatically recover. This is done because not all operations are idempotent and the driver is unable to distinguish which requests can be automatically retried without side effects. It's up to application code to make this distinction.

## Setting Retry Policy

By default, the driver uses the `default retry policy` for all requests unless it is overridden. The retry policy can be set globally using [`cass_cluster_set_retry_policy()`] or it can be set per statement or batch using [`cass_statement_set_retry_policy()`] or [`cass_batch_set_retry_policy()`], respectively.
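As a brief sketch of both scopes, the snippet below sets a global policy on the cluster and then overrides it for a single statement. It only uses the functions linked above plus `cass_retry_policy_fallthrough_new()` from the section below, and omits error handling for clarity.

```c
CassCluster* cluster = cass_cluster_new();

/* Use the default retry policy for all requests by default */
CassRetryPolicy* default_policy = cass_retry_policy_default_new();
cass_cluster_set_retry_policy(cluster, default_policy);

/* Override the policy for one specific statement */
CassStatement* statement = cass_statement_new("SELECT * FROM table1", 0);
CassRetryPolicy* fallthrough_policy = cass_retry_policy_fallthrough_new();
cass_statement_set_retry_policy(statement, fallthrough_policy);

/* ... connect a session and execute the statement ... */

cass_statement_free(statement);
cass_retry_policy_free(fallthrough_policy);
cass_retry_policy_free(default_policy);
cass_cluster_free(cluster);
```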
## Default Retry Policy

The default retry policy will only retry a request when it is safe to do so while preserving the consistency level of the request and it is likely to succeed. In all other cases, this policy will return an error.

| Failure Type | Action |
|--------------|--------|
| Read Timeout | Retry if the number of received responses is greater than or equal to the number of required responses, but the data was not received. Returns an error in all other cases. |
| Write Timeout | Retry only if the request is a logged batch request and the request failed to write the batch log. Returns an error in all other cases. |
| Unavailable | Retries the request using the next host in the query plan. |

```c
CassRetryPolicy* default_policy = cass_retry_policy_default_new();

/* ... */

/* Retry policies must be freed */
cass_retry_policy_free(default_policy);
```

## Downgrading Consistency Retry Policy

**Deprecated:** Please do not use this policy in new applications. The use of this policy can lead to unexpected behavior. Application errors can happen when the consistency level is unexpectedly changed because the cluster is in a degraded state. The assumptions made at the normal operating consistency level may no longer apply when the consistency level is downgraded. Instead, applications should always use the lowest consistency level that can be tolerated by a specific use case. The consistency level can be set per cluster using `cass_cluster_set_consistency()`, per execution profile using `cass_execution_profile_set_consistency()`, or per statement using `cass_statement_set_consistency()`.

## Fallthrough Retry Policy

This policy never retries or ignores server-side failures. Errors are always returned. This policy is useful for applications that want to handle retries directly.
| Failure Type | Action |
|--------------|--------|
| Read Timeout | Return error |
| Write Timeout | Return error |
| Unavailable | Return error |

```c
CassRetryPolicy* fallthrough_policy = cass_retry_policy_fallthrough_new();

/* ... */

/* Retry policies must be freed */
cass_retry_policy_free(fallthrough_policy);
```

## Logging

This policy can be added as a parent policy to all the other policies. It logs the retry decision of its child policy. The log messages created by this policy use the [`CASS_LOG_INFO`] level.

```c
CassCluster* cluster = cass_cluster_new();

CassRetryPolicy* default_policy = cass_retry_policy_default_new();
CassRetryPolicy* logging_policy = cass_retry_policy_logging_new(default_policy);

cass_cluster_set_retry_policy(cluster, logging_policy);

/* ... */

/* Retry policies must be freed */
cass_retry_policy_free(default_policy);
cass_retry_policy_free(logging_policy);

cass_cluster_free(cluster);
```

[`cass_cluster_set_retry_policy()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster#cass-cluster-set-retry-policy
[`cass_statement_set_retry_policy()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement#cass-statement-set-retry-policy
[`cass_statement_set_is_idempotent()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement#cass-statement-set-is-idempotent
[`cass_batch_set_retry_policy()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassBatch#cass-batch-set-retry-policy
[`cass_batch_set_is_idempotent()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassBatch#cass-batch-set-is-idempotent
[`CASS_LOG_INFO`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/cassandra.h#cass-log-level

diff --git a/docs/source/topics/configuration/speculative-execution.md b/docs/source/topics/configuration/speculative-execution.md new file mode 100644 index 000000000..e3042de0a --- /dev/null +++ b/docs/source/topics/configuration/speculative-execution.md @@ -0,0 +1,59 @@

# Speculative Execution

For certain applications it is of the utmost importance to minimize latency. Speculative execution is a way to minimize latency by preemptively executing several instances of the same query against different nodes. The fastest response is then returned to the client application and the other requests are cancelled. Speculative execution is disabled by default.

## Query Idempotence

Speculative execution will result in executing the same query several times. Therefore, it is important that queries are idempotent, i.e. a query can be applied multiple times without changing the result beyond the initial application. Queries that are not explicitly marked as idempotent will not be scheduled for speculative executions.

The following types of queries are not idempotent:

* Mutation of `counter` columns
* Prepending or appending to a `list` column
* Use of non-idempotent CQL functions, e.g. `now()` or `uuid()`

The driver is unable to determine if a query is idempotent, therefore it is up to the application to explicitly mark a statement as being idempotent.

```c
CassStatement* statement = cass_statement_new("SELECT * FROM table1", 0);

/* Make the statement idempotent */
cass_statement_set_is_idempotent(statement, cass_true);

cass_statement_free(statement);
```

## Enabling speculative execution

Speculative execution is enabled by connecting a `CassSession` with a `CassCluster` that has a speculative execution policy enabled. The driver currently only supports a constant policy, but may support more in the future.
### Constant speculative execution policy

The following will start up to 2 more executions after the initial execution, with the subsequent executions being created 500 milliseconds apart.

```c
CassCluster* cluster = cass_cluster_new();

cass_int64_t constant_delay_ms = 500; /* Delay before a new execution is created */
int max_speculative_executions = 2;   /* Number of executions */

cass_cluster_set_constant_speculative_execution_policy(cluster,
                                                       constant_delay_ms,
                                                       max_speculative_executions);

/* ... */

cass_cluster_free(cluster);
```

diff --git a/docs/source/topics/getting-started.md b/docs/source/topics/getting-started.md new file mode 100644 index 000000000..de9b5b388 --- /dev/null +++ b/docs/source/topics/getting-started.md @@ -0,0 +1,227 @@

# Getting Started

## Installation

### Dependencies

Packages for the dependencies, libuv (1.x) and OpenSSL, can be installed from your distribution's repositories.

```bash
# Example: Ubuntu/Debian:
sudo apt update
sudo apt install -y libuv1 openssl libssl

# Example: Rocky/RedHat:
sudo dnf install -y libuv openssl
```

The driver can also be [built from source], in which case the dependencies need to be installed in their `-dev` or `-devel` versions.

### Driver

Packages are available for some platforms - see the [Installation section](installation.md) for a list.

They are available for download from the [Releases][cpp-rust-driver-releases] section.

NOTE: If you have the DataStax or ScyllaDB C/C++ Driver installed, you need to remove it first:

```bash
# Ubuntu/Debian:
sudo apt remove cassandra-cpp-driver
sudo apt remove scylla-cpp-driver

# Rocky/RedHat:
sudo dnf remove cassandra-cpp-driver
sudo dnf remove scylla-cpp-driver
```

```bash
# Example: Ubuntu/Debian:
wget https://github.com/scylladb/cpp-rust-driver/releases/download//libscylla-cpp-driver-dev_.deb
sudo apt update
sudo apt install -y ./libscylla-cpp-driver-dev_.deb

# Example: Rocky/RedHat:
wget https://github.com/scylladb/cpp-rust-driver/releases/download//scylla-cpp-rust-driver-.rpm https://github.com/scylladb/cpp-rust-driver/releases/download//scylla-cpp-rust-driver-devel-.rpm
sudo dnf install -y ./scylla-cpp-rust-driver-.rpm ./scylla-cpp-rust-driver-devel-.rpm
```

## Connecting

```c
#include <cassandra.h>
#include <stdio.h>

int main() {
  /* Setup and connect to cluster */
  CassFuture* connect_future = NULL;
  CassCluster* cluster = cass_cluster_new();
  CassSession* session = cass_session_new();

  /* Add contact points */
  cass_cluster_set_contact_points(cluster, "127.0.0.1");

  /* Shard-awareness (ScyllaDB only): choose the local (ephemeral) port range */
  cass_cluster_set_local_port_range(cluster, 49152, 65535);

  /* Provide the cluster object as configuration to connect the session */
  connect_future = cass_session_connect(session, cluster);

  /* This operation will block until the result is ready */
  CassError rc = cass_future_error_code(connect_future);

  if (rc != CASS_OK) {
    /* Display connection error message */
    const char* message;
    size_t message_length;
    cass_future_error_message(connect_future, &message, &message_length);
    fprintf(stderr, "Connect error: '%.*s'\n", (int)message_length, message);
  }

  /* Run queries... */

  cass_future_free(connect_future);
  cass_session_free(session);
  cass_cluster_free(cluster);

  return 0;
}
```

To connect a session, a [`CassCluster`] object will need to be created and configured. The minimal configuration needed to connect is a list of contact points. The contact points are used to initialize the driver and it will automatically discover the rest of the nodes in your cluster.

**Performance Tip:** Include more than one contact point to be robust against node failures.

## Futures

The driver is designed so that no operation will force an application to block. Operations that would normally cause the application to block, such as connecting to a cluster or running a query, instead return a [`CassFuture`] object that can be waited on, polled, or used to register a callback.

**NOTE:** The API can also be used synchronously by waiting on or immediately attempting to get the result from a future.
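For instance, instead of blocking on `cass_future_error_code()`, the connect future from the example above can trigger a callback. The sketch below relies on `cass_future_set_callback()` (referenced in the Performance Tips section); the two-argument callback shape (the future plus a user data pointer) is an assumption to verify against `cassandra.h`.

```c
#include <cassandra.h>
#include <stdio.h>

/* Called by a driver thread once the future is set */
void on_connected(CassFuture* future, void* data) {
  CassError rc = cass_future_error_code(future);
  printf("Connect result: %s\n", cass_error_desc(rc));
}

void connect_async(CassSession* session, const CassCluster* cluster) {
  CassFuture* connect_future = cass_session_connect(session, cluster);

  /* Register the callback instead of waiting on the future */
  cass_future_set_callback(connect_future, on_connected, NULL);

  cass_future_free(connect_future);
}
```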
## Executing Queries

Queries are executed using [`CassStatement`] objects. Statements encapsulate the query string and the query parameters.

```c
void execute_query(CassSession* session) {
  /* Create a statement with zero parameters */
  CassStatement* statement
    = cass_statement_new("INSERT INTO example (key, value) VALUES ('abc', 123)", 0);

  CassFuture* query_future = cass_session_execute(session, statement);

  /* Statement objects can be freed immediately after being executed */
  cass_statement_free(statement);

  /* This will block until the query has finished */
  CassError rc = cass_future_error_code(query_future);

  printf("Query result: %s\n", cass_error_desc(rc));

  cass_future_free(query_future);
}
```

## Parameterized Queries (Positional)

Parameterized queries allow the same query string to be executed multiple times with different values, avoiding string manipulation in your application.

**Performance Tip:** If the same query is being reused multiple times, [prepared statements] should be used to optimize performance.

```c
void execute_parametrized_query(CassSession* session) {
  /* There are two bind variables in the query string */
  CassStatement* statement
    = cass_statement_new("INSERT INTO example (key, value) VALUES (?, ?)", 2);

  /* Bind the values using the indices of the bind variables */
  cass_statement_bind_string(statement, 0, "abc");
  cass_statement_bind_int32(statement, 1, 123);

  CassFuture* query_future = cass_session_execute(session, statement);

  /* Statement objects can be freed immediately after being executed */
  cass_statement_free(statement);

  /* This will block until the query has finished */
  CassError rc = cass_future_error_code(query_future);

  printf("Query result: %s\n", cass_error_desc(rc));

  cass_future_free(query_future);
}
```

## Handling Query Results

A single row can be retrieved using the convenience function [`cass_result_first_row()`] to get the first row. A [`CassIterator`] object may also be used to iterate over the returned row(s).
+ +```c +void handle_query_result(CassFuture* future) { + /* This will also block until the query returns */ + const CassResult* result = cass_future_get_result(future); + + /* If there was an error then the result won't be available */ + if (result == NULL) { + + /* Handle error */ + + cass_future_free(future); + return; + } + + /* The future can be freed immediately after getting the result object */ + cass_future_free(future); + + /* This can be used to retrieve the first row of the result */ + const CassRow* row = cass_result_first_row(result); + + /* Now we can retrieve the column values from the row */ + const char* key; + size_t key_length; + /* Get the column value of "key" by name */ + cass_value_get_string(cass_row_get_column_by_name(row, "key"), &key, &key_length); + + cass_int32_t value; + /* Get the column value of "value" by name */ + cass_value_get_int32(cass_row_get_column_by_name(row, "value"), &value); + + + /* This will free the result as well as the string pointed to by 'key' */ + cass_result_free(result); +} +``` + +[cpp-rust-driver-releases]: https://github.com/scylladb/cpp-rust-driver/releases + +[built from source]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/building/ +[prepared statements]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/basics/prepared-statements/ + +[`cass_int32_t`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/cassandra.h#cass-int32-t +[`cass_result_first_row()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult#cass-result-first-row +[`CassCluster`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster +[`CassSession`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSession +[`CassStatement`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement +[`CassFuture`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassFuture +[`CassIterator`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassIterator diff --git a/docs/source/topics/installation.md b/docs/source/topics/installation.md new file mode 100644 index 000000000..a285745f3 --- /dev/null +++ b/docs/source/topics/installation.md @@ -0,0 +1,52 @@ +# Installation + +## Packages + +Pre-built packages are available for: +- Rocky Linux 9, +- Fedora 41 and 42, +- Ubuntu 22.04 LTS and 24.04 LTS. + +### RHEL/Rocky + +To install the dependencies: + +```bash +sudo dnf -y install libuv openssl +``` + +Install the runtime library. Replace `` with the version+platform string: + +```bash +sudo dnf install -y scylla-cpp-driver-.rpm +``` + +When developing against the driver you'll also want to install the development +package and the debug symbols: + +```bash +sudo dnf install -y scylla-cpp-driver-devel-.rpm scylla-cpp-driver-debuginfo-.rpm +``` + +### Ubuntu/Debian + +Ubuntu's apt-get will resolve and install the dependencies by itself. +Replace `` with the appropriate version+platform string: + +```bash +sudo apt-get update +sudo apt-get install -y ./scylla-cpp-driver_.deb +``` + +When developing against the driver you'll also want to install the development +package and the debug symbols: + +```bash +sudo apt-get install -y ./scylla-cpp-driver-dev_.deb ./scylla-cpp-driver-dbg_.deb +``` + +## Building + +If pre-built packages are not available for your platform or architecture, +you will need to build the driver from source. Directions for building and +installing the ScyllaDB C/C++ Driver can be found [here](building.md). 
diff --git a/docs/source/topics/observability/index.md b/docs/source/topics/observability/index.md new file mode 100644 index 000000000..9a3bcfb5d --- /dev/null +++ b/docs/source/topics/observability/index.md @@ -0,0 +1,17 @@ +# Observability + +Various techniques and tools to monitor and understand the behavior of the driver and the ScyllaDB cluster it interacts with. + +* [Logging](logging.md) - Emitting logs for debugging and monitoring. +* [Tracing](tracing.md) - Viewing detailed execution traces of server-side operations. +* [Metrics](metrics.md) - Collecting and exposing metrics about the driver's performance and behavior. + +```{eval-rst} +.. toctree:: + :hidden: + :glob: + + logging + tracing + metrics +``` diff --git a/docs/source/topics/observability/logging.md b/docs/source/topics/observability/logging.md new file mode 100644 index 000000000..17931ffa8 --- /dev/null +++ b/docs/source/topics/observability/logging.md @@ -0,0 +1,34 @@ +# Logging + +The driver's logging system uses `stderr` by default and the log level `CASS_LOG_WARN`. Both of these settings can be changed using the driver's `cass_log_*()` configuration functions. + +**Important**: Logging configuration must be done before calling any other driver function. + +## Log Level + +To update the log level use `cass_log_set_level()`. + +```c +cass_log_set_level(CASS_LOG_INFO); + +/* Create cluster and connect session */ +``` + +## Custom Logging Callback + +The use of a logging callback allows an application to log messages to a file, syslog, or any other logging mechanism. This callback must be thread-safe because it is possible for it to be called from multiple threads concurrently. The `data` parameter allows custom resources to be passed to the logging callback. + +```c +void on_log(const CassLogMessage* message, void* data) { + /* Handle logging */ +} + +int main() { + void* log_data = NULL /* Custom log resource */; + cass_log_set_callback(on_log, log_data); + cass_log_set_level(CASS_LOG_INFO); + + /* Create cluster and connect session */ + +} +``` diff --git a/docs/source/topics/observability/metrics.md b/docs/source/topics/observability/metrics.md new file mode 100644 index 000000000..e21a8dd6a --- /dev/null +++ b/docs/source/topics/observability/metrics.md @@ -0,0 +1,45 @@ +# Metrics + +Performance metrics and diagnostic information can be obtained from the C/C++ +driver using [`cass_session_get_metrics()`]. The resulting [`CassMetrics`] object +contains several useful metrics for accessing request performance and/or +debugging issues. + +```c +CassSession* session = cass_session_new(); + +/* Connect session */ + +CassMetrics metrics; + +/* Get a snapshot of the driver's metrics */ +cass_session_get_metrics(session, &metrics); + +/* Run queries */ + +cass_session_free(session); +``` + +## Request metrics + +The [`requests`] field contains information about request latency and +throughput. All latency times are in microseconds and throughput +numbers are in requests per seconds. + +## Statistics + +The [`stats`] field contains information about the total number of connections. + +## Errors + +The [`errors`] field contains information about the +occurrence of requests and connection timeouts. Request timeouts occur when +a request fails to get a timely response. +Connection timeouts occur when the process of establishing new connections is +unresponsive. 
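To make the snapshot above actionable, the sketch below prints a few values from the metrics struct. The `requests`, `stats`, and `errors` field names come from the sections above; the exact member names inside them (such as `mean`, `total_connections`, and `request_timeouts`) are assumptions to check against `cassandra.h`.

```c
#include <cassandra.h>
#include <stdio.h>

void report_metrics(CassSession* session) {
  CassMetrics metrics;
  cass_session_get_metrics(session, &metrics);

  /* Latency numbers are in microseconds, throughput in requests per second */
  printf("mean latency (us): %llu\n",
         (unsigned long long)metrics.requests.mean);
  printf("open connections:  %llu\n",
         (unsigned long long)metrics.stats.total_connections);
  printf("request timeouts:  %llu\n",
         (unsigned long long)metrics.errors.request_timeouts);
}
```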
+ +[`cass_session_get_metrics()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSession#1ab3773670c98c00290bad48a6df0f9eae +[`CassMetrics`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassMetrics +[`requests`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassMetrics#attribute-requests +[`stats`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassMetrics#attribute-stats +[`errors`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassMetrics#attribute-errors diff --git a/docs/source/topics/observability/tracing.md b/docs/source/topics/observability/tracing.md new file mode 100644 index 000000000..fbcb5b028 --- /dev/null +++ b/docs/source/topics/observability/tracing.md @@ -0,0 +1,76 @@ +# Tracing + +Tracing can be used to troubleshoot query performance problems and can be +enabled per request. When enabled, it provides detailed request information +related to internal, server-side operations. Those operations are stored in +tables under the `system_traces` keyspace. + +## Enabling + +Tracing can be enabled per request for both statements (`CassStatement`) and +batches (`CassBatch`). + +### Enable Tracing on a Statement (`CassStatement`) + +```c +const char* query = "SELECT * FROM keyspace1.table1"; +CassStatement* statement = cass_statement_new(query, 0); + +/* Enable tracing on the statement */ +cass_statement_set_tracing(statement, cass_true); + +/* ... */ +``` + +### Enable Tracing on a Batch (`CassBatch`) + +```c +CassBatch* batch = cass_batch_new(CASS_BATCH_TYPE_UNLOGGED); + +/* Enable tracing on the batch */ +cass_batch_set_tracing(batch, cass_true); + +/* ... */ +``` + +## Tracing Identifier + +When tracing is enabled, a request's future (`CassFuture`) will provide a unique +tracing identifier. This tracing identifier can be used by an application to +query tables in the `system_traces` keyspace. + +```c +CassUuid tracing_id; +if (cass_future_tracing_id(future, &tracing_id) == CASS_OK) { + /* Use `tracing_id` to query tables in the `system_trace` keyspace */ +} else { + /* Handle error. If this happens then either a request error occurred or the + * request type for the future does not support tracing. + */ +} +``` + +**Note**: The driver does not return the actual tracing data for the request. The +application itself must use the returned tracing identifier to query the tables. + +## Configuration + +By default, when tracing is enabled, the driver will wait for the query's tracing +data to become available in the server-side tables before setting the request's +future. The amount of time it will wait, retry, and the consistency level of the +tracing data can be controlled by setting `CassCluster` configuration options. + +```c +CassCluster* cluster = cass_cluster_new(); + +/* Wait a maximum of 15 milliseconds for tracing data to become available */ +cass_cluster_set_tracing_max_wait_time(cluster, 15); + +/* Wait 3 milliseconds before rechecking for the tracing data */ +cass_cluster_set_tracing_retry_wait_time(cluster, 3); + +/* Check the tracing data tables using consistency level ONE */ +cass_cluster_set_tracing_consistency(cluster, CASS_CONSISTENCY_ONE); + +/* ... 
*/ +``` diff --git a/docs/source/topics/security/authentication.md b/docs/source/topics/security/authentication.md new file mode 100644 index 000000000..62b94bcea --- /dev/null +++ b/docs/source/topics/security/authentication.md @@ -0,0 +1,24 @@ +# Authentication + +## Plain text + +Credentials are provided using the [`cass_cluster_set_credentials()`] function. + +```c +CassCluster* cluster = cass_cluster_new(); + +const char* username = "cassandra"; +const char* password = "cassandra"; + +cass_cluster_set_credentials(cluster, username, password); + +/* Connect session object */ + +cass_cluster_free(cluster); +``` + +**Important**: The credentials are sent in plain text to the server. For this +reason, it is highly recommended that this be used in conjunction with +client-to-node encryption (TLS), or in a trusted network environment. + +[`cass_cluster_set_credentials()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCluster#function-cass_cluster_set_credentials_n diff --git a/docs/source/topics/security/index.md b/docs/source/topics/security/index.md new file mode 100644 index 000000000..6fa29c58b --- /dev/null +++ b/docs/source/topics/security/index.md @@ -0,0 +1,14 @@ +# Security + +The driver currently supports: +- [authentication](authentication.md) using plain text, +- [TLS](tls.md) (via OpenSSL). + +```{eval-rst} +.. toctree:: + :hidden: + :glob: + + authentication + tls +``` diff --git a/docs/source/topics/security/nonsupported.md b/docs/source/topics/security/nonsupported.md new file mode 100644 index 000000000..ec0bb878c --- /dev/null +++ b/docs/source/topics/security/nonsupported.md @@ -0,0 +1,127 @@ + + +```{eval-rst} +:orphan: +``` + +# Nonsupported security features + +## Authentication + +### Custom + +A custom authentication implementation can be set using +`cass_cluster_set_authenticator_callbacks()`. This is useful for integrating +with more complex authentication systems such as Kerberos. + +```c +typedef struct Credentials_ { + const char* password; + const char* username; +} Credentials; + +void on_auth_initial(CassAuthenticator* auth, void* data) { + /* + * This callback is used to initiate a request to begin an authentication + * exchange. Required resources can be acquired and initialized here. + * + * Resources required for this specific exchange can be stored in the + * auth->data field and will be available in the subsequent challenge + * and success phases of the exchange. The cleanup callback should be used to + * free these resources. + */ + + /* + * The data parameter contains the credentials passed in when the + * authentication callbacks were set and is available to all + * authentication exchanges. + */ + const Credentials* credentials = (const Credentials *)data; + + size_t username_size = strlen(credentials->username); + size_t password_size = strlen(credentials->password); + size_t size = username_size + password_size + 2; + + /* Allocate a response token */ + char* response = cass_authenticator_response(auth, size); + + /* Credentials are prefixed with '\0' */ + response[0] = '\0'; + memcpy(response + 1, credentials->username, username_size); + + response[username_size + 1] = '\0'; + memcpy(response + username_size + 2, credentials->password, password_size); +} + +void on_auth_challenge(CassAuthenticator* auth, void* data, + const char* token, size_t token_size) { + /* + * This is used for handling an authentication challenge initiated + * by the server. 
The information contained in the token parameter is
   * authentication protocol specific. It may be NULL or empty.
   */
}

void on_auth_success(CassAuthenticator* auth, void* data,
                     const char* token, size_t token_size) {
  /*
   * This is to be used for handling the success phase of an exchange. The
   * token parameter contains information that may be used to finalize
   * the request. The information contained in the token parameter is
   * authentication protocol specific. It may be NULL or empty.
   */
}

void on_auth_cleanup(CassAuthenticator* auth, void* data) {
  /*
   * This is used to clean up resources acquired during the authentication
   * exchange.
   */
}

int main() {
  CassCluster* cluster = cass_cluster_new();

  /* ... */

  /* Setup authentication callbacks and credentials */
  CassAuthenticatorCallbacks auth_callbacks = {
    on_auth_initial,
    on_auth_challenge,
    on_auth_success,
    on_auth_cleanup
  };

  /*
   * The `credentials` argument passed into `cass_cluster_set_authenticator_callbacks()`
   * is passed as the `data` parameter into the authentication callbacks.
   * Callbacks will be called by multiple threads concurrently, so it is important
   * to make sure this data is either immutable or its access is serialized. The
   * `data` parameter can be cleaned up by passing a `CassAuthenticatorDataCleanupCallback`
   * to `cass_cluster_set_authenticator_callbacks()`.
   */
  Credentials credentials = {
    "cassandra",
    "cassandra"
  };

  /* Set custom authentication callbacks and credentials */
  cass_cluster_set_authenticator_callbacks(cluster,
                                           &auth_callbacks,
                                           NULL, /* No cleanup callback required */
                                           &credentials);

  /* ... */

  cass_cluster_free(cluster);
}
```

diff --git a/docs/source/topics/security/tls.md b/docs/source/topics/security/tls.md new file mode 100644 index 000000000..5a1046267 --- /dev/null +++ b/docs/source/topics/security/tls.md @@ -0,0 +1,302 @@

# TLS

This is a guide to setting up TLS using the C/C++ driver. This guide will use self-signed certificates, but most steps will be similar for certificates generated by a certificate authority (CA). The first step is to generate a public and private key pair for all ScyllaDB/Cassandra nodes and configure them to use the generated certificate.

Some notes on this guide:

- Keystore and truststore might be used interchangeably. These can and often are the same file. This guide uses the same file for both (`keystore.jks`). The difference is that keystores generally hold private keys, and truststores hold public keys/certificate chains.
- Angle bracket fields (e.g. ``) in examples need to be replaced with values specific to your environment.
- [keytool](https://docs.oracle.com/javase/6/docs/technotes/tools/solaris/keytool.html) is an application included with Java 6+

## Prerequisites

### Generating the ScyllaDB/Cassandra Public and Private Keys

The most secure method of setting up TLS is to verify that the DNS name or IP address used to connect to the server matches identity information found in the TLS certificate. This helps to prevent man-in-the-middle attacks. ScyllaDB/Cassandra uses IP addresses internally, so those can be used directly for verification, or a domain name can be used via reverse DNS (PTR record). That means that the IP address or domain name of the ScyllaDB/Cassandra server where the certificate is installed needs to be present in either the certificate's common name (CN) or one of its subject alternative names (SANs).
It's possible to create the certficate without either, but then it will not be possible to verify the server's identity. Although this is not as secure, it eases the deployment of TLS by allowing the same certficate to be deployed across the entire ScyllaDB/Cassandra cluster. + +To generate a public/private key pair with the IP address in the CN field use the following: + +```bash +keytool -genkeypair -noprompt -keyalg RSA -validity 36500 \ + -alias node \ + -keystore keystore.jks \ + -storepass \ + -keypass \ + -dname "CN=, OU=Drivers and Tools, O=DataStax Inc., L=Santa Clara, ST=California, C=US" +``` + +If SAN is preferred use this command: + +```bash +keytool -genkeypair -noprompt -keyalg RSA -validity 36500 \ + -alias node \ + -keystore keystore.jks \ + -storepass \ + -keypass \ + -ext SAN="" \ + -dname "CN=node1.datastax.com, OU=Drivers and Tools, O=DataStax Inc., L=Santa Clara, ST=California, C=US" +``` + +**NOTE:** If an IP address SAN is present then it overrides checking the CN. + +### Enabling `client-to-node` Encryption on ScyllaDB/Cassandra + +The generated keystore from the previous step will need to be copied to all ScyllaDB/Cassandra node(s) and an update of the `cassandra.yaml` configuration file will need to be performed. + +```bash +client_encryption_options: + enabled: true + keystore: /keystore.jks + keystore_password: ## The password used when generating the keystore. + truststore: /keystore.jks + truststore_password: + require_client_auth: +``` + +**NOTE:** In this example keystore and truststore are identical. + +The following [guide](http://www.datastax.com/documentation/cassandra/2.1/cassandra/security/secureSSLClientToNode_t.html) has more information related to configuring TLS on ScyllaDB/Cassandra. + +## Setting up the C/C++ Driver to Use TLS + +A [`CassSsl`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSsl) object is required and must be configured: + +```c +#include + +void setup_ssl(CassCluster* cluster) { + CassSsl* ssl = cass_ssl_new(); + + /* Configure TLS object... */ + + /* To enable TLS attach it to the cluster object */ + cass_cluster_set_ssl(cluster, ssl); + + /* You can detach your reference to this object once it's + * added to the cluster object + */ + cass_ssl_free(ssl); +} +``` + +### Enable TLS without initializing the underlying library (e.g. OpenSSL) + +This is useful for integrating with applications that have already initialized +the underlying TLS library. + +```c +#include + +void setup_ssl_no_lib_init(CassCluster* cluster) { + /* The underlying TLS implemenation should be initialized */ + + /* Enable TLS */ + CassSsl* ssl = cass_ssl_new_no_lib_init(); /* Don't reinitialize the library */ + + /* Configure TLS object... */ + + /* To enable TLS attach it to the cluster object */ + cass_cluster_set_ssl(cluster, ssl); + + /* You can detach your reference to this object once it's + * added to the cluster object + */ + cass_ssl_free(ssl); +} +``` + +#### Exporting and Loading the ScyllaDB/Cassandra Public Key + +The default setting of the driver is to verify the certificate sent during the TLS handshake. For the driver to properly verify the ScyllaDB/Cassandra certificate the driver needs either the public key from the self-signed public key or the CA certificate chain used to sign the public key. To have this work, extract the public key from the ScyllaDB/Cassandra keystore generated in the previous steps. 
This exports a [PEM formatted](https://en.wikipedia.org/wiki/Privacy-enhanced_Electronic_Mail) certificate which is required by the C/C++ driver. + +```bash +keytool -exportcert -rfc -noprompt \ + -alias node \ + -keystore keystore.jks \ + -storepass \ + -file cassandra.pem +``` + +The trusted certificate can then be loaded using the following code: + +```c +int load_trusted_cert_file(const char* file, CassSsl* ssl) { + CassError rc; + char* cert; + long cert_size; + + FILE *in = fopen(file, "rb"); + if (in == NULL) { + fprintf(stderr, "Error loading certificate file '%s'\n", file); + return 0; + } + + fseek(in, 0, SEEK_END); + cert_size = ftell(in); + rewind(in); + + cert = (char*)malloc(cert_size); + fread(cert, sizeof(char), cert_size, in); + fclose(in); + + // Add the trusted certificate (or chain) to the driver + rc = cass_ssl_add_trusted_cert_n(ssl, cert, cert_size); + if (rc != CASS_OK) { + fprintf(stderr, "Error loading TLS certificate: %s\n", cass_error_desc(rc)); + free(cert); + return 0; + } + + free(cert); + return 1; +} +``` + +It is possible to load multiple self-signed certificates or CA certificate chains. This will be required in cases when self-signed certificates with unique IP addresses are being used. It is possible to disable the certificate verification process, but it is not recommended. + +```c +CassSsl* ssl = cass_ssl_new(); + +// Disable certifcate verifcation +cass_ssl_set_verify_flags(ssl, CASS_SSL_VERIFY_NONE); + +/* ... */ + +cass_ssl_free(ssl); +``` + +#### Enabling ScyllaDB/Cassandra identity verification + +If a unique certificate has been generated for each ScyllaDB/Cassandra node with the IP address or domain name in the CN or SAN fields, you also need to enable identity verification. + +**NOTE:** This is disabled by default. + +```c +CassSsl* ssl = cass_ssl_new(); + +// Add identity verification flag: CASS_SSL_VERIFY_PEER_IDENTITY (IP address) +cass_ssl_set_verify_flags(ssl, CASS_SSL_VERIFY_PEER_CERT | CASS_SSL_VERIFY_PEER_IDENTITY); + +// Or use: CASS_SSL_VERIFY_PEER_IDENTITY_DNS (domain name) +cass_ssl_set_verify_flags(ssl, CASS_SSL_VERIFY_PEER_CERT | CASS_SSL_VERIFY_PEER_IDENTITY_DNS); +``` + +If using a domain name to verify the peer's identity then hostname resolution +(reverse DNS) needs to be enabled: + +**NOTE:** This is also disabled by default. + +```c +CassCluster* cluster = cass_cluster_new(); + +// Enable reverse DNS +cass_cluster_set_use_hostname_resolution(cluster, cass_true); + +/* ... */ + +cass_cluster_free(cluster); +``` + +### Using ScyllaDB/Cassandra and the C/C++ driver with client-side certificates + +Client-side certificates allow ScyllaDB/Cassandra to authenticate the client using public key cryptography and chains of trust. This is same process as above but in reverse. The client has a public and private key and the ScyllaDB/Cassandra node has a copy of the private key or the CA chain used to generate the pair. + +#### Generating and loading the client-side certificate + +A new public/private key pair needs to be generated for client authentication. + +```bash +keytool -genkeypair -noprompt -keyalg RSA -validity 36500 \ + -alias driver \ + -keystore keystore-driver.jks \ + -storepass \ + -keypass +``` + +The public and private key then need to be extracted and converted to the PEM format. 
+ +To extract the public: + +```bash +keytool -exportcert -rfc -noprompt \ + -alias driver \ + -keystore keystore-driver.jks \ + -storepass \ + -file driver.pem +``` + +To extract and convert the private key: + +```bash +keytool -importkeystore -noprompt -srcalias certificatekey -deststoretype PKCS12 \ + -srcalias driver \ + -srckeystore keystore-driver.jks \ + -srcstorepass \ + -storepass \ + -destkeystore keystore-driver.p12 + +openssl pkcs12 -nomacver -nocerts \ + -in keystore-driver.p12 \ + -password pass: \ + -passout pass: \ + -out driver-private.pem +``` + +Now PEM formatted public and private key can be loaded. These files can be loaded using the same code from above in load_trusted_cert_file(). + +```c +CassError rc = CASS_OK; +CassSsl* ssl = cass_ssl_new(); + +char* cert = NULL; +size_t cert_size = 0; + +// Load PEM-formatted certificate data and size into cert and cert_size... + +rc = cass_ssl_set_cert_n(ssl, cert, cert_size); +if (rc != CASS_OK) { + // Handle error +} + +char* key = NULL; +size_t key_size = 0; + +// A password is required when the private key is encrypted. If the private key +// is NOT password protected use NULL. +const char* key_password = ""; + +// Load PEM-formatted private key data and size into key and key_size... + +rc = cass_ssl_set_private_key_n(ssl, key, key_size, key_password, strlen(key_password)); +if (rc != CASS_OK) { + // Handle error +} + +cass_ssl_free(ssl); +``` + +#### Setting up client authentication with ScyllaDB/Cassandra + +The driver's public key or the CA chain used to sign the driver's certificate will need to be added to ScyllaDB/Cassandra's truststore. If using self-signed certificate then the public key will need to be extracted from the driver's keystore generated in the previous steps. + +Extract the public key from the driver's keystore and add it to ScyllaDB/Cassandra's truststore. + +```bash +keytool -exportcert -noprompt \ + -alias driver \ + -keystore keystore-driver.jks \ + -storepass cassandra \ + -file cassandra-driver.crt + +keytool -import -noprompt \ + -alias truststore \ + -keystore keystore.jks \ + -storepass cassandra \ + -file cassandra-driver.crt +``` + +You also need to enable client authentication in `cassandra.yaml`: + +```yaml +require_client_auth: true +``` diff --git a/docs/source/topics/testing.md b/docs/source/topics/testing.md new file mode 100644 index 000000000..9a97f0c7f --- /dev/null +++ b/docs/source/topics/testing.md @@ -0,0 +1,119 @@ +# Testing +Before proceeding ensure the tests were built using the [build procedures]. + +Integration tests rely on Cassandra Cluster Manager (CCM) to be installed. + +Each test performs a [setup](#setup-cassandra), [execute](#execute-test), and +[teardown](#teardown-cassandra). This ensures that each test has a clean and +consistent run against the Apache Cassandra instance during the execution +phase. Cluster instances are maintained for the entire duration of the test +unless the test is chaotic at which point the cluster will be destroyed at the +end. + +Most of the tests performed will utilize a single node cluster; however a +cluster may be as large as nine nodes depending on the test being performed. 
+ +## Execution Sequences +### Setup Cassandra +```ditaa +/------------------\ /------------\ /-------------\ /----------\ +| Integration Test | | CCM Bridge | | CCM Machine | | CCM Tool | +| cYEL | | cBLK | | cBLU | |cBLK | +\---------+--------/ \------+-----/ \-------+-----/ \-----+----/ + : : : : + : Create and Start Cluster : : : + +++---------------------------->+++ Establish SSH Connection : : + | | | |----------------------------->+++ : + | | | | Connection Established | | : + | | | |<-----------------------------| | : + | | | | Create N-Node Cluster | | : + | | | |----------------------------->| | Execute Create Cluster : + | | | | | |--------------------------->+++ + | | | | | | Download Cassandra | | + | | | | | |<---------------------------| | + | | | | | | Build Cassandra | | + | | | | | |<---------------------------| | + | | | | | | Start Cluster | | + | | | | | |<---------------------------+++ + | | | | Cassandra Cluster Ready | | : + | | Cassandra Cluster is UP | |<-----------------------------+++ : + +++<----------------------------+++ : : + : : : : + : : : : + +``` + +#### Execute Test +```ditaa + /-----------\ /------------\ + | Unit Test | Perform Test | C++ Driver | + | cYEL +--------------------------------->| cBLU | + \-----+-----/ \------+-----/ + ^ | + | | + | Validate Results | + +-----------------------------------------------+ + + + + /------------\ + | C++ Driver | +/--+------------+--\ /-------------\ +| Integration Test | Perform Test | CCM Machine +------\ +| cYEL +------------------------------------------------->| cBLU |NODE 1| +\--------+---------/ | +------/ + ^ | +------\ + | | |NODE 2| + | Validate Results | +------/ + +-----------------------------------+------------------------+ +------\ + | | |NODE 3| + | | +------/ + | \-------+-----/ + | | + | | + | | + /---------+----------\ | + | Cassandra Cluster | | + | (or DSE) | Perform Test | + | +<--------------------+ + | | + | {s} | + | cGRE | + \--------------------/ +``` + +#### Teardown Cassandra +```ditaa +/------------------\ /------------\ /-------------\ /----------\ +| Integration Test | | CCM Bridge | | CCM Machine | | CCM Tool | +| cYEL | | cBLK | | cBLU | | cBLK | +\---------+--------/ \------+-----/ \-------+-----/ \-----+----/ + : : : : + : Stop and Destroy Cluster : : : + +++---------------------------->+++ Establish SSH Connection : : + | | | |----------------------------->+++ : + | | | | Connection Established | | : + | | | |<-----------------------------| | : + | | | | Destroy N-Node Cluster | | : + | | | |----------------------------->| | Remove Cluster : + | | | | | |-------------------------->+++ + | | | | | | Stop Cassandra Instances | | + | | | | | |<--------------------------| | + | | | | | | Destroy Cluster | | + | | | | | |<--------------------------+++ + | | | | Cluster Destroyed | | : + | | Cluster Destrored | |<-----------------------------+++ : + +++<----------------------------+++ : : + : : : : + : : : : + +``` + +## TODO +Here are some of the items being scheduled for future enhancements. 
- Incorporate integration tests into the Jenkins environment
- Updates to CCM Bridge
  - Allow files to be copied over the established SSH connection

[build procedures]: https://cpp-rust-driver.docs.scylladb.com/stable/topics/building#test-dependencies-and-building-tests-not-required

diff --git a/docs/source/topics/using/batches.md b/docs/source/topics/using/batches.md new file mode 100644 index 000000000..4e6ca7f13 --- /dev/null +++ b/docs/source/topics/using/batches.md @@ -0,0 +1,51 @@

# Batches

Batches can be used to group multiple mutations (`UPDATE`, `INSERT`, `DELETE`) together into a single statement, either simple or prepared. There are three different types of batches.

* `CASS_BATCH_TYPE_LOGGED` is used to make sure that multiple mutations across multiple partitions happen atomically, that is, all the included mutations will eventually succeed. However, there is a performance penalty imposed by the atomicity guarantee.
* `CASS_BATCH_TYPE_UNLOGGED` is generally used to group mutations for a single partition and does not suffer from the performance penalty imposed by logged batches, but there is no atomicity guarantee for multi-partition updates.
* `CASS_BATCH_TYPE_COUNTER` is used to group counter updates.

**Important**: Be careful when using batches as a [performance optimization](https://forum.scylladb.com/t/batch-performance-considerations-when-grouping-queries-on-the-client-side/2076).

```c
void execute_batch(CassSession* session) {
  /* This logged batch will make sure that all the mutations eventually succeed */
  CassBatch* batch = cass_batch_new(CASS_BATCH_TYPE_LOGGED);

  /* Statements can be immediately freed after being added to the batch */

  {
    CassStatement* statement
      = cass_statement_new("INSERT INTO example1(key, value) VALUES ('a', '1')", 0);
    cass_batch_add_statement(batch, statement);
    cass_statement_free(statement);
  }

  {
    CassStatement* statement
      = cass_statement_new("UPDATE example2 set value = '2' WHERE key = 'b'", 0);
    cass_batch_add_statement(batch, statement);
    cass_statement_free(statement);
  }

  {
    CassStatement* statement
      = cass_statement_new("DELETE FROM example3 WHERE key = 'c'", 0);
    cass_batch_add_statement(batch, statement);
    cass_statement_free(statement);
  }

  CassFuture* batch_future = cass_session_execute_batch(session, batch);

  /* Batch objects can be freed immediately after being executed */
  cass_batch_free(batch);

  /* This will block until the query has finished */
  CassError rc = cass_future_error_code(batch_future);

  printf("Batch result: %s\n", cass_error_desc(rc));

  cass_future_free(batch_future);
}
```

diff --git a/docs/source/topics/using/binding-parameters.md b/docs/source/topics/using/binding-parameters.md new file mode 100644 index 000000000..041c6e831 --- /dev/null +++ b/docs/source/topics/using/binding-parameters.md @@ -0,0 +1,88 @@

# Binding Parameters

The ‘?’ marker is used to denote the bind variables in a query string. This can be used for both regular and prepared parameterized queries. In addition to adding the bind marker to your query string, your application must also provide the number of bind variables to `cass_statement_new()` when constructing a new statement. If a query doesn’t require any bind variables then 0 can be used. `cass_statement_bind_*()` functions are then used to bind values to the statement’s variables. Bind variables can be bound by the marker’s index or by name, and must be supplied for all bound variables.
```c
/* Create a statement with a single parameter */
CassStatement* statement = cass_statement_new("SELECT * FROM table1 WHERE column1 = ?", 1);

cass_statement_bind_string(statement, 0, "abc");

/* Execute statement */

cass_statement_free(statement);
```

Variables can also be bound by name. The name can either be inferred from the query or explicitly specified as `:name`. (Binding by name used to be limited to prepared statements, because the query metadata provided by ScyllaDB/Cassandra is required to map the variable name to the variable's marker index.)

```c
void execute_prepared_statement(const CassPrepared* prepared) {
  /* The prepared query allocates the correct number of parameters automatically */
  CassStatement* statement = cass_prepared_bind(prepared);

  /* The parameter can now be bound by name */
  cass_statement_bind_string_by_name(statement, "column1", "abc");

  /* Execute statement */

  cass_statement_free(statement);
}
```

## Unbound parameters

The driver will send a special `unset` value for unbound parameters (leaving the unbound column unaffected). Calling `cass_statement_reset_parameters()` will unbind (or resize) a statement's parameters.

## Constructing Collections

Collections are supported using [`CassCollection`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCollection) objects, supporting the `list`, `map` and `set` ScyllaDB/Cassandra types. The code below shows how to construct a `list` collection; however, a set can be constructed in a very similar way. The only difference is that the type `CASS_COLLECTION_TYPE_SET` is used to create the collection instead of `CASS_COLLECTION_TYPE_LIST`.

**Important**: Values appended to the collection can be freed immediately afterward because the values are copied.

```c
const char* query = "SELECT * FROM ...";

CassStatement* statement = cass_statement_new(query, 1);

CassCollection* list = cass_collection_new(CASS_COLLECTION_TYPE_LIST, 3);

cass_collection_append_string(list, "123");
cass_collection_append_string(list, "456");
cass_collection_append_string(list, "789");

cass_statement_bind_collection(statement, 0, list);

/* The collection can be freed after binding */
cass_collection_free(list);
```

Maps are built similarly, but the key and value need to be interleaved as they are appended to the collection.

```c
const char* query = "SELECT * FROM ...";

CassStatement* statement = cass_statement_new(query, 1);

CassCollection* map = cass_collection_new(CASS_COLLECTION_TYPE_MAP, 2);

/* map["abc"] = 123 */
cass_collection_append_string(map, "abc");
cass_collection_append_int32(map, 123);

/* map["def"] = 456 */
cass_collection_append_string(map, "def");
cass_collection_append_int32(map, 456);

cass_statement_bind_collection(statement, 0, map);

/* The collection can be freed after binding */
cass_collection_free(map);
```

## Nested Collections

A collection can be added to another collection using [`cass_collection_append_collection()`].

[`cass_collection_append_collection()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassCollection#cass-collection-append-collection
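As a short sketch of the call linked above, the following builds a nested list value by appending one collection to another and binds it to a statement. The column type implied here (a list of lists of ints) is only an assumed example, and error handling is omitted.

```c
CassStatement* statement = cass_statement_new("SELECT * FROM ...", 1);

/* Inner list: [1, 2] */
CassCollection* inner = cass_collection_new(CASS_COLLECTION_TYPE_LIST, 2);
cass_collection_append_int32(inner, 1);
cass_collection_append_int32(inner, 2);

/* Outer list containing the inner list */
CassCollection* outer = cass_collection_new(CASS_COLLECTION_TYPE_LIST, 1);
cass_collection_append_collection(outer, inner);

/* Appended values are copied, so the inner collection can be freed right away */
cass_collection_free(inner);

cass_statement_bind_collection(statement, 0, outer);
cass_collection_free(outer);

cass_statement_free(statement);
```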
That is, values with a +more current timestamp are considered to be the most up-to-date version of that +information. By default, timestamps are assigned by the driver on the +client-side using a monotonically increasing timestamp generator. This behavior +can be overridden by configuring the driver to use a server-side timestamps or +assigning a timestamp directly to a [`CassStatement`] or [`CassBatch`]. + +## Monotonically Increasing Timestamp Generator + +The monotonic timestamp generator guarantees that all writes that share this +generator will be given monotonically increasing timestamps. This generator +produces microsecond timestamps with the sub-millisecond part generated using an +atomic counter. That guarantees that no more than 1000 timestamps will be +generated for a given millisecond clock tick even when shared by multiple +sessions. + +**Warning**: If the rate of 1000 timestamps per millisecond is exceeded, this +generator will produce duplicate timestamps. + +```c +CassCluster* cluster = cass_cluster_new(); + +CassTimestampGen* timestamp_gen = cass_timestamp_gen_monotonic_new(); + +cass_cluster_set_timestamp_gen(cluster, timestamp_gen); + +/* ... */ + +/* Connect sessions */ + +/* Timestamp generators must be freed */ +cass_timestamp_gen_free(timestamp_gen); + +cass_cluster_free(cluster); +``` + +All sessions that connect using this cluster object will share this same +timestamp generator. + + +## Per Statement/Batch timestamps + +Timestamps can also be assigned to individuals [`CassStatement`] or +[`CassBatch`] requests. + +```c +CassStatement* statement = cass_statement_new("INSERT INTO * ...", 2); + +/* Add a timestamp to the statement */ +cass_statement_set_timestamp(statement, 123456789); +``` + +```c +CassBatch* batch = cass_batch_new(CASS_BATCH_TYPE_LOGGED); + +/* Add a timestamp to the batch */ +cass_batch_set_timestamp(batch, 123456789); + +/* Add statements to batch */ +``` + +[`CassStatement`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement/ +[`CassBatch`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassBatch/ diff --git a/docs/source/topics/using/consistency.md b/docs/source/topics/using/consistency.md new file mode 100644 index 000000000..eebbcc0d4 --- /dev/null +++ b/docs/source/topics/using/consistency.md @@ -0,0 +1,101 @@ +# Consistency + +A setting that defines a successful write or read by the number of cluster +replicas that acknowledge the write or respond to the read request, +respectively. + +## Default consistency + +The default consistency is `CASS_CONSISTENCY_LOCAL_ONE`. + +The default serial consistency is `CASS_CONSISTENCY_ANY`, which means no serial consistency specified. +This may result in an error upon execution of a statement that requires serial consistency (Lightweight Transaction, LWT). +In order to avoid this, you should set the serial consistency level to `CASS_CONSISTENCY_SERIAL` or `CASS_CONSISTENCY_LOCAL_SERIAL` +before executing such statement. + +## Consistency Levels + +### Read and Write Consistency Levels + +The consistency level determines the number of replicas on which the read/write +must respond/succeed before returning an acknowledgment to the client +application. Descriptions for each read/write consistency level can be found +[here](https://docs.scylladb.com/manual/stable/cql/consistency.html). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Level | Driver |
|--------------|------------------------------------|
| All | `CASS_CONSISTENCY_ALL` |
| Each Quorum | `CASS_CONSISTENCY_EACH_QUORUM` |
| Quorum | `CASS_CONSISTENCY_QUORUM` |
| Local Quorum | `CASS_CONSISTENCY_LOCAL_QUORUM` |
| One | `CASS_CONSISTENCY_ONE` |
| Two | `CASS_CONSISTENCY_TWO` |
| Three | `CASS_CONSISTENCY_THREE` |
| Local One | `CASS_CONSISTENCY_LOCAL_ONE` |
| Any | `CASS_CONSISTENCY_ANY` |
| Serial | `CASS_CONSISTENCY_SERIAL` |
| Local Serial | `CASS_CONSISTENCY_LOCAL_SERIAL` |
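
For statements that use lightweight transactions, the serial consistency level discussed above under "Default consistency" has to be set on the statement before execution. A minimal sketch (the conditional `INSERT` and table name are placeholders for the example):

```c
/* A lightweight transaction (LWT) statement */
CassStatement* statement =
    cass_statement_new("INSERT INTO table1 (key, value) VALUES ('a', '1') IF NOT EXISTS", 0);

/* Regular consistency applies to the non-serial phase of the write */
cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM);

/* Serial consistency applies to the conditional (serial) phase;
 * CASS_CONSISTENCY_LOCAL_SERIAL is the datacenter-local variant */
cass_statement_set_serial_consistency(statement, CASS_CONSISTENCY_SERIAL);

/* Execute statement */

cass_statement_free(statement);
```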
+ +**NOTE:** Consistency level `CASS_CONSISTENCY_ANY` is only valid for write operation statements. + +## Setting Consistency Level + +A ['CassStatement'](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassFuture/) object +can have its consistency level altered at anytime before the statement is +executed by the session. + +```c +CassStatement* statement = NULL; + +/* Create a simple or prepared statment */ + +/* Ensure the session executed statement has strong consistency */ +cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); + +cass_statement_free(statement); +``` + +**NOTE:** Consistency level is ignored for `USE`, `TRUNCATE`, `CREATE` and `ALTER` +statements, and some, like `CASS_CONSISTENCY_ANY`, aren’t allowed in all situations. diff --git a/docs/source/topics/using/data-types/date-and-time.md b/docs/source/topics/using/data-types/date-and-time.md new file mode 100644 index 000000000..bc400cfa5 --- /dev/null +++ b/docs/source/topics/using/data-types/date-and-time.md @@ -0,0 +1,94 @@ +# The `date` and `time` Types + +The driver currently uses raw types to handle `date` and `time` because date +and time handling is often very application specific in C/C++. It currently +provides methods to handle converting Epoch (January 1, 1970) time in seconds +to and from `date`/`time`. + +The `date` type uses an unsigned 32-bit integer (`cass_uint32_t`) to +represent the number of days with Epoch centered at 2^31. +Because it's centered at Epoch it can be used to represent days before Epoch. +The `time` type uses a signed 64-bit integer (`cass_int64_t`) to +represent the number of nanoseconds since midnight and valid values are in the +range 0 to 86399999999999. + + +The following examples both use this schema: + +```cql +CREATE TABLE date_time (key text PRIMARY KEY, + year_month_day date, + time_of_day time); +``` + +## `INSERT`ing the `date` and `time` Types + +```c +#include + +void insert_date_time(CassSession* session) { + + CassStatement* statement = cass_statement_new("INSERT INTO date_time (key, year_month_day, time_of_day) " + "VALUES (?, ?, ?)", 3); + + time_t now = time(NULL); /* Time in seconds from Epoch */ + + /* Converts the time since the Epoch in seconds to the 'date' type */ + cass_uint32_t year_month_day = cass_date_from_epoch(now); + + /* Converts the time since the Epoch in seconds to the 'time' type */ + cass_int64_t time_of_day = cass_time_from_epoch(now); + + cass_statement_bind_string(statement, 0, "xyz"); + + /* 'date' uses an unsigned 32-bit integer */ + cass_statement_bind_uint32(statement, 1, year_month_day); + + /* 'time' uses a signed 64-bit integer */ + cass_statement_bind_int64(statement, 2, time_of_day); + + CassFuture* future = cass_session_execute(session, statement); + + /* Handle future result */ + + /* CassStatement and CassFuture both need to be freed */ + cass_statement_free(statement); + cass_future_free(future); +} +``` + +## `SELECT`ing the `date` and `time` Types + +```c +#include + +void select_date_time(CassSession* session) { + CassStatement* statement = cass_statement_new("SELECT * FROM date_time WHERE key = ?", 1); + + CassFuture* future = cass_session_execute(session, statement); + + const CassResult* result = cass_future_get_result(future); + /* Make sure there's a valid result */ + if (result != NULL && cass_result_row_count(result) > 0) { + const CassRow* row = cass_result_first_row(result); + + /* Get the value of the "year_month_day" column */ + cass_uint32_t year_month_day; + 
cass_value_get_uint32(cass_row_get_column(row, 1), &year_month_day); + + /* Get the value of the "time_of_day" column */ + cass_int64_t time_of_day; + cass_value_get_int64(cass_row_get_column(row, 2), &time_of_day); + + /* Convert 'date' and 'time' to Epoch time */ + time_t time = (time_t)cass_date_time_to_epoch(year_month_day, time_of_day); + printf("Date and time: %s", asctime(localtime(&time))); + } else { + /* Handle error */ + } + + /* CassStatement and CassFuture both need to be freed */ + cass_statement_free(statement); + cass_future_free(future); +} +``` diff --git a/docs/source/topics/using/data-types/index.md b/docs/source/topics/using/data-types/index.md new file mode 100644 index 000000000..024c45a09 --- /dev/null +++ b/docs/source/topics/using/data-types/index.md @@ -0,0 +1,237 @@ +# Data Types + +## Datatypes Mapping + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| ScyllaDB/Cassandra Type(s) | Driver Type |
|-----------------------------|----------------------------------------------------------|
| int | `cass_int32_t` |
| bigint, counter, timestamp | `cass_int64_t` |
| float | `cass_float_t` |
| double | `cass_double_t` |
| boolean | `cass_bool_t` |
| ascii, text, varchar | `const char*` |
| blob, varint | `const cass_byte_t*` |
| uuid, timeuuid | `CassUuid` |
| inet | `CassInet` |
| decimal | `const cass_byte_t*` (varint) and a `cass_int32_t` (scale) |
| list, map, set | `CassCollection` |
| tuple | `CassTuple` |
| user-defined type | `CassUserType` |
| tinyint | `cass_int8_t` |
| smallint | `cass_int16_t` |
| date | `cass_uint32_t` |
| time | `cass_int64_t` |
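
To illustrate how this mapping is used in practice, here is a minimal sketch that binds several of the listed driver types to a statement; the `example` table and its columns are assumed for the purposes of the example:

```c
/* Assumed schema:
 * CREATE TABLE example (key text PRIMARY KEY, small smallint, big bigint, num int);
 */
CassStatement* statement =
    cass_statement_new("INSERT INTO example (key, small, big, num) VALUES (?, ?, ?, ?)", 4);

cass_statement_bind_string(statement, 0, "abc");                  /* text     -> const char*  */
cass_statement_bind_int16(statement, 1, (cass_int16_t)42);        /* smallint -> cass_int16_t */
cass_statement_bind_int64(statement, 2, (cass_int64_t)123456789); /* bigint   -> cass_int64_t */
cass_statement_bind_int32(statement, 3, 7);                       /* int      -> cass_int32_t */

/* Execute statement */

cass_statement_free(statement);
```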
+ +[`CassDataType`] objects are useful for describing the different values that can +be stored in ScyllaDB/Cassandra, from primitive types to more complex composite types, +such as, UDTs (user-defined types), tuples and collections. Data types can be retrieved from existing +metadata found in schema, results, values or prepared statements, or they can be +constructed programmatically. + +The following code snippets use the following type schema: + +```cql +CREATE TYPE person (name text, + // Street address, zip code, state/province, and country + address frozen>, + // Type and number + phone_numbers frozen>); +``` + +## Retrieving an Existing Data Type + +**Important**: Any `const CassDataType*` object doesn't need to be freed. Its +lifetime is bound to the object it came from. + +UDT data types can be retrieved using a [`CassSchemaMeta`] object. The resulting +data type object can be used to construct a new [`CassUserType`] object using +[`cass_user_type_new_from_data_type()`]. + +```c +void get_person_data_type_from_keyspace(CassSession* session) { + /* Get schema object (this should be cached) */ + const CassSchemaMeta* schema_meta = cass_session_get_schema_meta(session); + + /* Get the keyspace for the user-defined type. It doesn't need to be freed */ + const CassKeyspaceMeta* keyspace_meta = + cass_schema_meta_keyspace_by_name(schema_meta, "examples"); + + /* This data type object doesn't need to be freed */ + const CassDataType* person_data_type = + cass_keyspace_meta_user_type_by_name(keyspace_meta, "person"); + + /* ... */ + + /* Schema object must be freed */ + cass_schema_meta_free(schema_meta); +} +``` + +Data types can also be retrieved from [`CassResult`], [`CassPrepared`], and +[`CassValue`] objects. + +* [`cass_result_column_data_type()`] can be used to get the + data type of a column for a [`CassResult`]. + * [`cass_prepared_parameter_data_type()`] can be used to get the data type of + the parameters for a [`CassPrepared`] object. There are also functions to get + the data type of a prepared parameter by name. +* [`cass_value_data_type()`] can be used to get the data type represented by a + [`CassValue`] object. + +## Building a Data Type Programmatically + +Data types could be constructed programmatically. This is useful for application that may +have schema metadata disabled. + +```c +CassDataType* address_data_type = cass_data_type_new_type(4); +CassDataType* phone_numbers_data_type = cass_data_type_new(2); +CassDataType* person_data_type = cass_data_type_new_udt(3); + +/* Street address, zip code, state/province, and country */ +cass_data_type_add_sub_value_type(address_data_type, CASS_VALUE_TYPE_TEXT); +cass_data_type_add_sub_value_type(address_data_type, CASS_VALUE_TYPE_INT); +cass_data_type_add_sub_value_type(address_data_type, CASS_VALUE_TYPE_TEXT); +cass_data_type_add_sub_value_type(address_data_type, CASS_VALUE_TYPE_TEXT); + +/* Phone type and number*/ +cass_data_type_add_sub_value_type(phone_numbers_data_type, CASS_VALUE_TYPE_TEXT); +cass_data_type_add_sub_value_type(phone_numbers_data_type, CASS_VALUE_TYPE_INT); + +/* Add fields to the person data type */ +cass_data_type_add_sub_value_type_by_name(person_data_type, "name", CASS_VALUE_TYPE_TEXT); +cass_data_type_add_sub_data_type_by_name(person_data_type, "address", address_data_type); +cass_data_type_add_sub_value_type_by_name(person_data_type, "phone_numbers", phone_numbers_data_type); + +/* ... 
*/ + +/* Data types must be freed */ +cass_data_type_free(address_data_type); +cass_data_type_free(phone_numbers_data_type); +cass_data_type_free(person_data_type); +``` + +## Creating UDTs, Tuples and Collections Using Data Types + +After the user type object is retrieved or created manually, it can be used to +construct composite data types. The subtypes of a data type can be used to +construct other nested types. + +```c +CassDataType* person_data_type = NULL; + +/* Construct or lookup data type */ + +/* Construct a new UDT from a data type */ +CassUserType* person = cass_user_type_new_from_data_type(person_data_type); + +/* ... */ + +/* Construct a new tuple from a nested data type */ +CassTuple* address = + cass_tuple_new_from_data_type( + cass_data_type_sub_data_type_by_name(person_data_type, "address")); + +/* ... */ + +/* Construct a new map collection from a nested data type */ +CassCollection* phone_numbers = + cass_collection_new_from_data_type( + cass_data_type_sub_data_type_by_name(person_data_type, "phone_numbers"), 2); + +/* ... */ + +/* Add fields to the UDT */ +cass_user_type_set_string_by_name(person, "name", "Bob"); +cass_user_type_set_user_type_by_name(person, "address", address); +cass_user_type_set_collection_by_name(person, "phone_numbers", phone_numbers); + +/* ... */ + +/* UDT, tuple, and collection objects must be freed */ +cass_user_type_free(person); +cass_tuple_free(address); +cass_collection_free(phone_numbers); +``` + +[`CassDataType`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassDataType +[`CassUserType`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUserType +[`CassPrepared`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassPrepared +[`CassResult`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult +[`CassValue`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassValue +[`CassSchemaMeta`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSchemaMeta +[`cass_user_type_new_from_data_type()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUserType#cass-user-type-new-from-data-type +[`cass_result_column_data_type()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult#cass-result-column-data-type +[`cass_prepared_parameter_data_type()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassPrepared#cass-prepared-parameter-data-type +[`cass_value_data_type()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassValue#cass-value-data-type + +```{eval-rst} +.. toctree:: + :hidden: + :glob: + + date-and-time + tuples + user-defined-types + uuids +``` diff --git a/docs/source/topics/using/data-types/tuples.md b/docs/source/topics/using/data-types/tuples.md new file mode 100644 index 000000000..6f375ad7a --- /dev/null +++ b/docs/source/topics/using/data-types/tuples.md @@ -0,0 +1,95 @@ +# Tuples + +Tuples are fixed-length sets of values. They are similar to UDTs in that they +can contain different types of values, but unlike UDTs tuples can only be +accessed by position and not by name. + +## Creating a Tuple + +Creating a [`CassTuple`] is done by allocating a new tuple object with the +number of items that will be contained in it. Items can the be set in the tuple +using their position. + +```c +/* The number of items must be set properly */ +CassTuple* tuple = cass_tuple_new(2); + +/* Items are set by position */ +cass_tuple_set_string(tuple, 0, "abc"); +cass_tuple_set_int64(tuple, 1, 123); + +/* ... 
*/ + +/* Tuples must be freed */ +cass_tuple_free(tuple); +``` + +## Create a Tuple using a Data Type + +A tuple can also be created using a [`CassDataType`] that comes from schema +metadata or is manually constructed. However, this is not a necessary step as +a tuple can be created without a data type. A typed tuple will not allow invalid +type to be added to it. [`cass_tuple_set_*()`] functions will return an error +code if the incorrect type is added to a position. + +```c +/* Creata new tuple data type */ +CassDataType* data_type = cass_data_type_new_tuple(2); + +/* Add a string at position 0 and an 64-bit integer at position 1 */ +cass_data_type_add_sub_value_type(data_type, CASS_VALUE_TYPE_TEXT); +cass_data_type_add_sub_value_type(data_type, CASS_VALUE_TYPE_BIGINT); + +/* Create a new tuple using data type */ +CassTuple* tuple = cass_tuple_new_from_data_type(data_type); + +/* This will now return an error because the data type of the first item is + * a string not an integer + */ +CassError rc = cass_tuple_set_int32(tuple, 0, 123); + +assert(rc != CASS_OK); + +/* These are the correct types */ +cass_tuple_set_string(tuple, 0, "abc"); +cass_tuple_set_int64(tuple, 1, 123); + +/* ... */ + +/* Constructed data types must be freed */ +cass_data_type_free(data_type); + +/* Tuples must be freed */ +cass_tuple_free(tuple); +``` + +## Consuming values from a Tuple result + +[`CassTuple`]s are consumed using an iterator. + +```c +void iterate_tuple(const CassRow* row) { + /* Retrieve tuple value from column */ + const CassValue* tuple_value = cass_row_get_column_by_name(row, "value1"); + + /* Create an iterator for the UDT value */ + CassIterator* tuple_iterator = cass_iterator_from_tuple(tuple_value); + + /* Iterate over the tuple fields */ + while (cass_iterator_next(tuple_iterator)) { + const char* field_name; + size_t field_name_length; + /* Get tuple value */ + const CassValue* value = cass_iterator_get_value(tuple_iterator); + + /* ... */ + } + + /* The tuple iterator needs to be freed */ + cass_iterator_free(tuple_iterator); +} +``` + +[`CassTuple`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassTuple +[`CassUserType`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUserType +[`cass_tuple_set_*()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassTuple#cass-tuple-set-null diff --git a/docs/source/topics/using/data-types/user-defined-types.md b/docs/source/topics/using/data-types/user-defined-types.md new file mode 100644 index 000000000..eff632b5e --- /dev/null +++ b/docs/source/topics/using/data-types/user-defined-types.md @@ -0,0 +1,96 @@ +# User-Defined Types (UDTs) + +User-defined types (UDT) can be used to create arbitrary user types with fields +that can be accessed by name or position. When used with the driver they can be +created from a previously defined type determined from schema or they can be +created from a manually defined data type. + +## Creating a UDT from Schema + +An [`CassSchemaMeta`] instance can be used to construct a new [`CassUserType`]. The +[`CassSchemaMeta`] instance returns a [`CassDataType`] object which is used to +describe ScyllaDB/Cassandra types including UDTs, tuples, collections and all basic types +(`int`, `bigint`, `uuid`, etc.). 
+ +```c +void get_user_type_from_schema(CassSession* session) { + const CassSchemaMeta* schema_meta = cass_session_get_schema_meta(session); + + const CassKeyspaceMeta* keyspace_meta = + cass_schema_meta_keyspace_by_name(schema_meta, "keyspace"); + + const CassDataType* data_type = + cass_keyspace_meta_user_type_by_name(keyspace_meta, "typename"); + + CassUserType* user_type = cass_user_type_new_from_data_type(data_type); + + /* Bind values to user type fields and bind user type to a statement */ + +} +``` + +## Manually Constructing a UDT Data Type + +If schema metadata updates are disabled it is still possible to create UDTs +from a manually construct [`CassDataType`]. + +```c +CassDataType* data_type = cass_data_type_new_udt(3); + +cass_data_type_add_sub_value_type_by_name(data_type, "field1", +CASS_VALUE_TYPE_INT); +cass_data_type_add_sub_value_type_by_name(data_type, "field2", +CASS_VALUE_TYPE_UUID); +cass_data_type_add_sub_value_type_by_name(data_type, "field3", +CASS_VALUE_TYPE_TEXT); + +CassUserType* user_type = cass_user_type_new_from_data_type(data_type); + +/* Bind values to user type fields */ + +cass_data_type_free(data_type); +``` + +The preceding code is equivalent to defining the following schema and using +[`cass_session_get_schema()`] obtain the data type. + +```cql +CREATE TYPE IF NOT EXISTS udt (field1 int, field2 uuid, field3 text); +``` + +## Consuming values from a UDT result + +A UDT returned from ScyllaDB/Cassandra is consumed by iterating over its fields similar +to the way collections or tuples are consumed. + +```c +void iterate_udt(const CassRow* row) { + /* Retrieve UDT value from column */ + const CassValue* udt_value = cass_row_get_column_by_name(row, "value1"); + + /* Create an iterator for the UDT value */ + CassIterator* udt_iterator = cass_iterator_fields_from_user_type(udt_value); + + /* Iterate over the UDT fields */ + while (cass_iterator_next(udt_iterator)) { + const char* field_name; + size_t field_name_length; + /* Get UDT field name */ + cass_iterator_get_user_type_field_name(udt_iterator, + &field_name, &field_name_length); + + /* Get UDT field value */ + const CassValue* field_value = + cass_iterator_get_user_type_field_value(udt_iterator); + + /* ... */ + } + + /* The UDT iterator needs to be freed */ + cass_iterator_free(udt_iterator); +} +``` +[`CassSchemaMeta`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSchemaMeta +[`CassUserType`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUserType +[`CassDataType`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassDataType +[`cass_session_get_schema()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSession#cass-session-get-schema diff --git a/docs/source/topics/using/data-types/uuids.md b/docs/source/topics/using/data-types/uuids.md new file mode 100644 index 000000000..a02e25571 --- /dev/null +++ b/docs/source/topics/using/data-types/uuids.md @@ -0,0 +1,67 @@ +# UUIDs + +UUIDs are 128-bit identifiers that can be used to uniquely identify information +without requiring central coordination. These are often used in ScyllaDB/Cassandra +for primary and clustering keys. There are two types of UUIDs supported by +the driver (and ScyllaDB/Cassandra), version 1 which is time-based and version 4 which +is randomly generated. Version 1 can be used with ScyllaDB/Cassandra's `timeuuid` type +and can be used as a timestamp for data. 
Timestamp information can be +extracted from the time part of a version 1 UUID using [`cass_uuid_timestamp()`]. +Version 4 can be used with ScyllaDB/Cassandra's `uuid` type for unique identification. + +## Generator + +A UUID generator object is used to create new UUIDs. The [`CassUuidGen`] object +is thread-safe. It should only be created once per application and reused. + +```c +CassUuidGen* uuid_gen = cass_uuid_gen_new(); + +CassUuid uuid; + +/* Generate a version 1 UUID */ +cass_uuid_gen_time(uuid_gen, &uuid); + +/* Generate a version 1 UUID from an existing timestamp */ +cass_uuid_gen_from_time(uuid_gen, 1234, &uuid); + +/* Generate a version 4 UUID */ +cass_uuid_gen_random(uuid_gen, &uuid); + +cass_uuid_gen_free(uuid_gen); +``` + +A [`CassUuidGen`] can also be created with user provided information for the +node part of the UUID. This only affects version 1 UUIDs. + +```c +/* Only the 48 least signficant bits of the node are considered */ +cass_uint64_t node = 0x0000AAAABBBBCCCC; + +CassUuidGen* uuid_gen = cass_uuid_gen_new_with_node(node); + +/* Generate UUIDs */ + +cass_uuid_gen_free(uuid_gen); +``` + +## Extracting information + +Information such as the timestamp (for version 1 only) and the version can be +extracted from UUIDs. They can also be converted to and created from the their +hexadecimal string representation e.g. "550e8400-e29b-41d4-a716-446655440000". + +```c +CassUuid uuid; +cass_uuid_from_string("550e8400-e29b-41d4-a716-446655440000", &uuid); + +/* Extract timestamp and version */ +cass_uint64_t timestamp = cass_uuid_timestamp(uuid); +cass_uint8_t version = cass_uuid_version(uuid); + +/* Get string representation of the UUID */ +char uuid_str[CASS_UUID_STRING_LENGTH]; +cass_uuid_string(uuid, uuid_str); +``` +[`cass_uuid_timestamp()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUuid#1a3980467a0bb6642054ecf37d49aebf1a +[`CassUuidGen`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassUuidGen diff --git a/docs/source/topics/using/futures.md b/docs/source/topics/using/futures.md new file mode 100644 index 000000000..255dd6093 --- /dev/null +++ b/docs/source/topics/using/futures.md @@ -0,0 +1,116 @@ +# Futures + +Futures are returned from any driver call that would result in blocking the client application/thread. This allows the client application to continue processing and/or also submit multiple queries in succession. Although the driver has an asynchronous design, it can be used synchronously by immediately attempting to get result or explicitly waiting on the future. + +## Waiting for Results + +Futures results can be waited on indefinitely by either calling the `cass_future_wait()` method or by attempting to get the result with `cass_future_get_result()`. They can also be waited on for an explicit amount of time (`cass_future_wait_timed()`) or periodically polled (`cass_future_ready()`) without waiting to execute application code. 
+ +### Synchronously Waiting on the Future +```c +CassFuture* future = NULL; + +/* Do some operation to get a future */ + +/* Block until a result or error is set */ +cass_future_wait(future); + +cass_future_free(future); +``` + +### Synchronously Waiting for the Result +```c +void wait_for_future(CassSession* session, CassStatement* statement) { + CassFuture* future = cass_session_execute(session, statement); + + /* Blocks and gets a result */ + const CassResult* result = cass_future_get_result(future); + + /* If there was an error then the result won't be available */ + if (result == NULL) { + /* The error code and message will be set instead */ + CassError error_code = cass_future_error_code(future); + const char* error_message; + size_t error_message_length; + cass_future_error_message(future, &error_message, &error_message_length); + + /* Handle error */ + + cass_future_free(future); + return; + } + + /* The future can be freed immediately after getting the result object */ + cass_future_free(future); + + /* Use the result object */ + + cass_result_free(result); +} +``` + +### Timed Wait +```c +CassFuture* future = NULL; + +/* Do some operation to get a future */ + +cass_duration_t microseconds = 30 * 1000000; /* 30 seconds */ + +/* Block for a fixed amount of time */ +if (cass_future_wait_timed(future, microseconds)) { + /* A result or error was set during the wait call */ +} else { + /* The operation hasn't completed yet */ +} + +cass_future_free(future); +``` + +### Polling + +```c +CassFuture* future = NULL; + +/* Do some operation to get a future */ + +/* Poll to see if the future is ready */ +while (!cass_future_ready(future)) { + /* Run other application logic or wait*/ +} + +/* A result or error was set */ + +cass_future_free(future); +``` + +## Callbacks + +A callback can be set on a future to notify the client application when a request has completed. Using a future callback is the lowest latency method of notification when waiting for several asynchronous operations. + +**Important**: The driver may run the callback on thread that’s different from the application’s calling thread. Any data accessed in the callback must be immutable or synchronized with a mutex, semaphore, etc. + +**BEWARE:**: The callback code must not issue blocking operations, including waiting for a not-yet-ready future to complete. Doing so will make the driver panic, because the asynchronous executor threads (which normally execute the callbacks) must not get blocked. As a notable exception, it is allowed to call otherwise blocking future functions like `cass_future_wait()` or `cass_future_get_result()` from within the callback, but only if the future is guaranteed to be ready at that point. This is the case, among others, when calling the function on a future whose completion has triggered the callback. 
+ +```c +void on_result(CassFuture* future, void* data) { + /* This result will now return immediately */ + CassError rc = cass_future_error_code(future); + printf("%s\n", cass_error_desc(rc)); +} + +int main() { + CassFuture* future = NULL; + + /* Do some operation to get a future */ + + /* Set a callback instead of waiting for the result to be returned */ + cass_future_set_callback(future, on_result, NULL); + + /* The application's reference to the future can be freed immediately */ + cass_future_free(future); + + /* Run other application logic */ + +} +``` diff --git a/docs/source/topics/using/handling-results.md b/docs/source/topics/using/handling-results.md new file mode 100644 index 000000000..c8775eb49 --- /dev/null +++ b/docs/source/topics/using/handling-results.md @@ -0,0 +1,191 @@ +# Handling Results + +The [`CassResult`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult) object +is typically returned for `SELECT` statements. For mutations (`INSERT`, `UPDATE`, +and `DELETE`) only a status code will be present and can be accessed using +`cass_future_error_code()`. However, when using lightweight transactions a +result object will be available to check the status of the transaction. The +result object is obtained from executed statements' future object. + +**Important**: Rows, column values, collections, decimals, strings, and bytes +objects are all invalidated when the result object is freed. All of these +objects point to memory held by the result. This allows the driver to avoid +unnecessarily copying data. + +```c +void process_result(CassFuture* future) { + const CassResult* result = cass_future_get_result(future); + + /* Process result */ + + cass_result_free(result); +} +``` + +*Note*: The result object is immutable and can be accessed by multiple threads concurrently. + +## Rows and Column Values + +The result object represents a collection of rows. The first row, if present, +can be obtained using `cass_result_first_row()`. Multiple rows are accessed +using a [`CassIterator`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassIterator) +object. After a row has been retrieved, the column value(s) can be accessed from +a row by either index or by name. The iterator object can also be used with +enumerated column values. + +```c +void process_first_row(const CassResult* result) { + const CassRow* row = cass_result_first_row(result); + + /* Get the first column value using the index */ + const CassValue* column1 = cass_row_get_column(row, 0); +} +``` + +```c +void process_first_row_by_name(const CassResult* result) { + const CassRow* row = cass_result_first_row(result); + + /* Get the value of the column named "column1" */ + const CassValue* column1 = cass_row_get_column_by_name(row, "column1"); +} +``` + +Once the [`CassValue`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassValue) +has been obtained from the column, the actual value can be retrieved and +assigned into the proper datatype. 
+ +```c +void get_values_from_row(const CassRow* row) { + cass_int32_t int_value; + const CassValue* column1 = cass_row_get_column(row, 0); + cass_value_get_int32(column1, &int_value); + + cass_int64_t timestamp_value; + const CassValue* column2 = cass_row_get_column(row, 1); + cass_value_get_int64(column2, ×tamp_value); + + const char* string_value; + size_t string_value_length; + const CassValue* column3 = cass_row_get_column(row, 2); + cass_value_get_string(column3, &string_value, &string_value_length); +} +``` + +## Iterators + +Iterators can be used to iterate over the rows in a result, the columns in a +row, or the values in a collection. + +**Important**: `cass_iterator_next()` invalidates values retrieved by the +previous iteration. + +```c +void iterate_over_rows(CassFuture* future) { + const CassResult* result = cass_future_get_result(future); + + CassIterator* iterator = cass_iterator_from_result(result); + + while (cass_iterator_next(iterator)) { + const CassRow* row = cass_iterator_get_row(iterator); + /* Retreive and use values from the row */ + } + + cass_iterator_free(iterator); + + cass_result_free(result); +} +``` + +All iterators use the same pattern, but will have different iterator creation +and retrieval functions. Iterating over a map collection is slightly different +because it has two values per entry, but utilizes the same basic pattern. + +```c +/* Execute SELECT query where a map colleciton is returned */ + +void iterator_over_map_value(CassFuture* future) { + const CassResult* result = cass_future_get_result(future); + + const CassRow* row = cass_result_first_row(result); + + const CassValue* map = cass_row_get_column(row, 0); + + CassIterator* iterator = cass_iterator_from_map(map); + + while (cass_iterator_next(iterator)) { + /* A seperate call is used to get the key and the value */ + const CassValue* key = cass_iterator_get_map_key(iterator); + const CassValue* value = cass_iterator_get_map_value(iterator); + + /* Use key/value pair */ + } + + cass_iterator_free(iterator); + + cass_result_free(result); +} +``` + +## Paging + +Large result sets can be divided into multiple pages automatically. The +[`CassResult`](https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult) object +keeps track of the pagination state for the sequence of paging queries. When +paging through the result set, the result object is checked to see if more pages +exist where it is then attached to the statement before re-executing the query +to get the next page. + +```c +void page_results(CassSession* session) { + + CassStatement* statement = cass_statement_new("SELECT * FROM table1", 0); + + /* Return a 100 rows every time this statement is executed */ + cass_statement_set_paging_size(statement, 100); + + cass_bool_t has_more_pages = cass_true; + + while (has_more_pages) { + CassFuture* query_future = cass_session_execute(session, statement); + + const CassResult* result = cass_future_get_result(query_future); + + if (result == NULL) { + + /* Handle error */ + + cass_future_free(query_future); + break; + } + + /* Get values from result... 
*/ + + /* Check to see if there are more pages remaining for this result */ + has_more_pages = cass_result_has_more_pages(result); + + if (has_more_pages) { + /* If there are more pages we need to set the position for the next execute */ + cass_statement_set_paging_state(statement, result); + } + + cass_future_free(query_future); + cass_result_free(result); + } + + cass_statement_free(statement); +} +``` + +The [`cass_statement_set_paging_state()`] function abstracts the actual paging +state token away from the application. The raw paging state token can be +accessed using [`cass_result_paging_state()`] and added to a statement using +[`cass_statement_set_paging_state_token()`]. + +**Warning**: The paging state token should not be exposed to or come from +untrusted environments. That paging state could be spoofed and potentially used +to gain access to other data. + +[`cass_statement_set_paging_state()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement#cass-statement-set-paging-state +[`cass_result_paging_state()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassResult#cass-result-paging-state +[`cass_statement_set_paging_state_token()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassStatement#cass-statement-set-paging-state-token diff --git a/docs/source/topics/using/index.md b/docs/source/topics/using/index.md new file mode 100644 index 000000000..9d4319340 --- /dev/null +++ b/docs/source/topics/using/index.md @@ -0,0 +1,31 @@ +# Using the Driver + +Basic concepts and features of the driver are described in the following sections. + +* [Batches](batches.md) - Grouping multiple statements into a single request for atomicity, and sometimes better performance. +* [Binding Parameters](binding-parameters.md) - How to bind values to parameters in statements. +* [Client-side Timestamps](client-side-timestamps.md) - Using timestamps generated by the client to ensure consistent write order on server. +* [Consistency Levels](consistency.md) - Configuring the consistency level for read and write operations. +* [Data Types](data-types/index.md) - Supported data types (CQL-side and C/C++-side) and how to use them. +* [Futures](futures.md) - Asynchronous programming model using futures to handle results. +* [Handling Results](handling-results.md) - How to process results returned by the server. +* [Keyspaces](keyspaces.md) - Working with keyspaces, the top-level namespace for data. +* [Prepared Statements](prepared-statements.md) - Preparing statements for further execution to improve performance and safety. +* [Schema Metadata](schema-metadata.md) - Accessing and viewing cluster schema metadata. + +```{eval-rst} +.. toctree:: + :hidden: + :glob: + + batches + binding-parameters + client-side-timestamps + consistency + data-types/index + futures + handling-results + keyspaces + prepared-statements + schema-metadata +``` diff --git a/docs/source/topics/using/keyspaces.md b/docs/source/topics/using/keyspaces.md new file mode 100644 index 000000000..6f31ea690 --- /dev/null +++ b/docs/source/topics/using/keyspaces.md @@ -0,0 +1,75 @@ +# Keyspaces + +## Setting the Keyspace at Connection Time + +A session can be initially connected using a supplied keyspace. This is the recommended way to set the keyspace for a session, as it prevents races between setting keyspace and requests being executed. This is particularly important in applications that use a single session object shared by multiple threads. 
+ +**Performance Tip:** An application should create a single session object per keyspace as a session object is designed to be created once, reused, and shared by multiple threads within the application. Even better performance can be achieved by creating a single session object per application, but then if the application interacts with multiple keyspaces, it must fully qualify the table names in its statements. + +```c +CassSession* session = cass_session_new(); +CassCluster* cluster = cass_cluster_new(); + +/* Configure cluster */ + +CassFuture* connect_future + = cass_session_connect_keyspace(session, cluster, "keyspace1"); + +/* Handle connect future */ + +cass_future_free(connect_future); + +cass_session_free(session); +cass_cluster_free(cluster); +``` + +## Changing Keyspaces + +You can specify a keyspace to change to by executing a `USE` statement on a connection session object. + +```c +void use_keyspace(CassSession* session) { + CassStatement* use_statement + = cass_statement_new("USE keyspace1", 0); + + CassFuture* use_future + = cass_session_execute(session, use_statement); + + /* Check future result... */ + + cass_statement_free(use_statement); + cass_future_free(use_future); +} +``` + +Be very careful though: if the session is shared by multiple threads, switching the keyspace at runtime could easily cause unexpected query failures. + +## Single Session and Multiple Keyspaces + +It is possible to interact with multiple keyspaces using a single session object by fully qualifying the table names in your queries e.g. `keyspace_name.table_name`. + +### Examples + +```cql +SELECT * FROM keyspace_name.table_name WHERE ...; +INSERT INTO keyspace_name.table_name (...) VALUES (...); +``` + +## Creating Keyspaces and Tables + +It is also possible to create keyspaces and tables by executing CQL using a session object. + +### Examples + +```cql +CREATE KEYSPACE cpp_driver + WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; +CREATE TABLE cpp_driver.contributors ( + lastname text, + firstname text, + company text, + website text, + since timestamp, + last_activity timestamp, + PRIMARY KEY(lastname)); +``` diff --git a/docs/source/topics/using/prepared-statements.md b/docs/source/topics/using/prepared-statements.md new file mode 100644 index 000000000..d8f0fe776 --- /dev/null +++ b/docs/source/topics/using/prepared-statements.md @@ -0,0 +1,44 @@ +# Prepared Statements + +Prepared statements can be used to improve the performance of frequently executed queries. Preparing the query caches it on the ScyllaDB/Cassandra cluster and only needs to be performed once. Once created, prepared statements should be reused with different bind variables. Prepared queries use the `?` marker to denote bind variables in the query string. You can also specify bind variables as `:name`. 
+ +```c +void prepare_statement(CassSession* session) { + /* Prepare the statement on the ScyllaDB/Cassandra cluster */ + CassFuture* prepare_future + = cass_session_prepare(session, "INSERT INTO example (key, value) VALUES (?, ?)"); + + /* Wait for the statement to prepare and get the result */ + CassError rc = cass_future_error_code(prepare_future); + + printf("Prepare result: %s\n", cass_error_desc(rc)); + + if (rc != CASS_OK) { + /* Handle error */ + cass_future_free(prepare_future); + return; + } + + /* Get the prepared object from the future */ + const CassPrepared* prepared = cass_future_get_prepared(prepare_future); + + /* The future can be freed immediately after getting the prepared object */ + cass_future_free(prepare_future); + + /* The prepared object can now be used to create statements that can be executed */ + CassStatement* statement = cass_prepared_bind(prepared); + + /* Bind variables by name this time (this can only be done with prepared statements)*/ + cass_statement_bind_string_by_name(statement, "key", "abc"); + cass_statement_bind_int32_by_name(statement, "value", 123); + + /* Execute statement - same as the non-prepared code. + Here we'll discard the result. */ + CassFuture* execute_future = cass_session_execute(session, statement); + cass_future_wait(execute_future); + cass_future_free(execute_future); + + /* The prepared object must be freed */ + cass_prepared_free(prepared); +} +``` diff --git a/docs/source/topics/using/schema-metadata.md b/docs/source/topics/using/schema-metadata.md new file mode 100644 index 000000000..576a11c94 --- /dev/null +++ b/docs/source/topics/using/schema-metadata.md @@ -0,0 +1,61 @@ +# Schema Metadata + +The driver provides access to keyspace and table metadata. This schema metadata +is monitored by the control connection and automatically kept up-to-date. + +```c +void get_schema_metadata(CassSession* session) { + /* Get snapshot of the schema */ + const CassSchemaMeta* schema_meta = cass_session_get_schema_meta(session); + + /* Get information about the "keyspace1" schema */ + const CassKeyspaceMeta* keyspace1_meta + = cass_schema_meta_keyspace_by_name(schema_meta, "keyspace1"); + + if (keyspace1_meta == NULL) { + /* Handle error */ + } + + /* Get the key-value filed for "strategy_class" */ + const CassValue* strategy_class_value + = cass_keyspace_meta_field_by_name(keyspace1_meta, "strategy_class"); + + if (strategy_class_value == NULL) { + /* Handle error */ + } + + /* Fields values use the existing cass_value*() API */ + const char* strategy_class; + size_t strategy_class_length; + cass_value_get_string(strategy_class_value, + &strategy_class, + &strategy_class_length); + + /* Do something with strategy_class */ + + /* All values derived from the schema are cleaned up */ + cass_schema_meta_free(schema_meta); +} +``` + +The snapshot obtained by [`cass_session_get_schema_meta()`] will not see schema changes +that happened after the call. A new snapshot needs to be obtained to see +subsequent updates to the schema. + +## Enabling/Disabling Schema Metadata + +Retrieving and updating schema metadata can be enabled or disabled. It is +enabled by default. However, some application might wish to reduce this +overhead. This can be useful to improve the startup performance of the +short-lived sessions or an environment where up-to-date schema metadata is +unnecessary. 
+ +```c +CassCluster* cluster = cass_cluster_new(); + +/* Disable schema metadata */ +cass_cluster_set_use_schema(cluster, cass_false); + +cass_cluster_free(cluster); +``` +[`cass_session_get_schema_meta()`]: https://cpp-rust-driver.docs.scylladb.com/stable/api/struct.CassSession#cass-session-get-schema-meta diff --git a/driver_config.hpp.in b/driver_config.hpp.in index 9694e12c6..0b6375ba8 100644 --- a/driver_config.hpp.in +++ b/driver_config.hpp.in @@ -1,7 +1,6 @@ #ifndef DATASTAX_INTERNAL_DRIVER_CONFIG_HPP #define DATASTAX_INTERNAL_DRIVER_CONFIG_HPP -#cmakedefine HAVE_KERBEROS #cmakedefine HAVE_OPENSSL #cmakedefine HAVE_STD_ATOMIC #cmakedefine CASS_CPP_STANDARD @CASS_CPP_STANDARD@ @@ -14,6 +13,5 @@ #cmakedefine HAVE_ARC4RANDOM #cmakedefine HAVE_GETRANDOM #cmakedefine HAVE_TIMERFD -#cmakedefine HAVE_ZLIB #endif diff --git a/examples/schema_meta/schema_meta.c b/examples/schema_meta/schema_meta.c index 7f0927833..8e956d276 100644 --- a/examples/schema_meta/schema_meta.c +++ b/examples/schema_meta/schema_meta.c @@ -184,14 +184,8 @@ int main(int argc, char* argv[]) { INITCOND(0, 0)"); print_table(session, "examples", "schema_meta"); - if (version.major_version >= 3) { - /* Collection types are marked as frozen in Cassandra 3.x and later. */ - print_function(session, "examples", "avg_state", "frozen>,int"); - print_function(session, "examples", "avg_final", "frozen>"); - } else { - print_function(session, "examples", "avg_state", "tuple,int"); - print_function(session, "examples", "avg_final", "tuple"); - } + print_function(session, "examples", "avg_state", "frozen>,int"); + print_function(session, "examples", "avg_final", "frozen>"); print_aggregate(session, "examples", "average", "int"); cass_schema_meta_free(schema_meta); diff --git a/include/cassandra.h b/include/cassandra.h index faa0e390b..09da7f06f 100644 --- a/include/cassandra.h +++ b/include/cassandra.h @@ -182,8 +182,6 @@ typedef struct CassStatement_ CassStatement; * * Note: Batches are not supported by the binary protocol version 1. * - * @cassandra{2.0+} - * * @struct CassBatch */ typedef struct CassBatch_ CassBatch; @@ -266,15 +264,11 @@ typedef struct CassDataType_ CassDataType; /** * @struct CassFunctionMeta - * - * @cassandra{2.2+} */ typedef struct CassFunctionMeta_ CassFunctionMeta; /** * @struct CassAggregateMeta - * - * @cassandra{2.2+} */ typedef struct CassAggregateMeta_ CassAggregateMeta; @@ -289,8 +283,6 @@ typedef struct CassCollection_ CassCollection; * A tuple of values. * * @struct CassTuple - * - * @cassandra{2.1+} */ typedef struct CassTuple_ CassTuple; @@ -298,8 +290,6 @@ typedef struct CassTuple_ CassTuple; * A user defined type. * * @struct CassUserType - * - * @cassandra{2.1+} */ typedef struct CassUserType_ CassUserType; @@ -347,8 +337,6 @@ typedef struct CassTableMeta_ CassTableMeta; * MaterializedView metadata * * @struct CassMaterializedViewMeta - * - * @cassandra{3.0+} */ typedef struct CassMaterializedViewMeta_ CassMaterializedViewMeta; @@ -382,8 +370,6 @@ typedef struct CassUuidGen_ CassUuidGen; * Generators of client-side, microsecond-precision timestamps. * * @struct CassTimestampGen - * - * @cassandra{2.1+} */ typedef struct CassTimestampGen_ CassTimestampGen; @@ -394,8 +380,6 @@ typedef struct CassRetryPolicy_ CassRetryPolicy; /** * @struct CassCustomPayload - * - * @cassandra{2.2+} */ typedef struct CassCustomPayload_ CassCustomPayload; @@ -2655,8 +2639,6 @@ cass_cluster_set_tcp_keepalive(CassCluster* cluster, * * Default: Monotonically increasing, client-side timestamp generator. 
* - * @cassandra{2.1+} - * * @public @memberof CassCluster * * @param[in] cluster @@ -3315,8 +3297,6 @@ cass_session_execute(CassSession* session, /** * Execute a batch statement. * - * @cassandra{2.0+} - * * @public @memberof CassSession * * @param[in] session @@ -3513,8 +3493,6 @@ cass_keyspace_meta_table_by_name_n(const CassKeyspaceMeta* keyspace_meta, /** * Gets the materialized view metadata for the provided view name. * - * @cassandra{3.0+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3530,8 +3508,6 @@ cass_keyspace_meta_materialized_view_by_name(const CassKeyspaceMeta* keyspace_me * Same as cass_keyspace_meta_materialized_view_by_name(), but with lengths for string * parameters. * - * @cassandra{3.0+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3549,8 +3525,6 @@ cass_keyspace_meta_materialized_view_by_name_n(const CassKeyspaceMeta* keyspace_ /** * Gets the data type for the provided type name. * - * @cassandra{2.1+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3566,8 +3540,6 @@ cass_keyspace_meta_user_type_by_name(const CassKeyspaceMeta* keyspace_meta, * Same as cass_keyspace_meta_type_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3585,8 +3557,6 @@ cass_keyspace_meta_user_type_by_name_n(const CassKeyspaceMeta* keyspace_meta, /** * Gets the function metadata for the provided function name. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3605,8 +3575,6 @@ cass_keyspace_meta_function_by_name(const CassKeyspaceMeta* keyspace_meta, * Same as cass_keyspace_meta_function_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3628,8 +3596,6 @@ cass_keyspace_meta_function_by_name_n(const CassKeyspaceMeta* keyspace_meta, /** * Gets the aggregate metadata for the provided aggregate name. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3648,8 +3614,6 @@ cass_keyspace_meta_aggregate_by_name(const CassKeyspaceMeta* keyspace_meta, * Same as cass_keyspace_meta_aggregate_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -3840,8 +3804,6 @@ cass_table_meta_index(const CassTableMeta* table_meta, /** * Gets the materialized view metadata for the provided view name. * - * @cassandra{3.0+} - * * @public @memberof CassTableMeta * * @param[in] table_meta @@ -3857,8 +3819,6 @@ cass_table_meta_materialized_view_by_name(const CassTableMeta* table_meta, * Same as cass_table_meta_materialized_view_by_name(), but with lengths for string * parameters. * - * @cassandra{3.0+} - * * @public @memberof CassTableMeta * * @param[in] table_meta @@ -3876,8 +3836,6 @@ cass_table_meta_materialized_view_by_name_n(const CassTableMeta* table_meta, /** * Gets the total number of views for the table. * - * @cassandra{3.0+} - * * @public @memberof CassTableMeta * * @param[in] table_meta @@ -3889,8 +3847,6 @@ cass_table_meta_materialized_view_count(const CassTableMeta* table_meta); /** * Gets the materialized view metadata for the provided index. 
* - * @cassandra{3.0+} - * * @public @memberof CassTableMeta * * @param[in] table_meta @@ -4004,8 +3960,6 @@ cass_table_meta_field_by_name_n(const CassTableMeta* table_meta, /** * Gets the column metadata for the provided column name. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4021,8 +3975,6 @@ cass_materialized_view_meta_column_by_name(const CassMaterializedViewMeta* view_ * Same as cass_materialized_view_meta_column_by_name(), but with lengths for string * parameters. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4040,8 +3992,6 @@ cass_materialized_view_meta_column_by_name_n(const CassMaterializedViewMeta* vie /** * Gets the name of the view. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4056,8 +4006,6 @@ cass_materialized_view_meta_name(const CassMaterializedViewMeta* view_meta, /** * Gets the base table of the view. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4070,8 +4018,6 @@ cass_materialized_view_meta_base_table(const CassMaterializedViewMeta* view_meta /** * Gets the total number of columns for the view. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4083,8 +4029,6 @@ cass_materialized_view_meta_column_count(const CassMaterializedViewMeta* view_me /** * Gets the column metadata for the provided index. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4098,8 +4042,6 @@ cass_materialized_view_meta_column(const CassMaterializedViewMeta* view_meta, /** * Gets the number of columns for the view's partition key. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4111,8 +4053,6 @@ cass_materialized_view_meta_partition_key_count(const CassMaterializedViewMeta* /** * Gets the partition key column metadata for the provided index. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4126,8 +4066,6 @@ cass_materialized_view_meta_partition_key(const CassMaterializedViewMeta* view_m /** * Gets the number of columns for the view's clustering key. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4139,8 +4077,6 @@ cass_materialized_view_meta_clustering_key_count(const CassMaterializedViewMeta* /** * Gets the clustering key column metadata for the provided index. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4171,8 +4107,6 @@ cass_materialized_view_meta_clustering_key_order(const CassMaterializedViewMeta* * Gets a metadata field for the provided name. Metadata fields allow direct * access to the column data found in the underlying "views" metadata view. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4187,8 +4121,6 @@ cass_materialized_view_meta_field_by_name(const CassMaterializedViewMeta* view_m * Same as cass_materialized_view_meta_field_by_name(), but with lengths for string * parameters. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -4356,8 +4288,6 @@ cass_index_meta_field_by_name_n(const CassIndexMeta* index_meta, /** * Gets the name of the function. 
* - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4374,8 +4304,6 @@ cass_function_meta_name(const CassFunctionMeta* function_meta, * function's name and the function's signature: * "name(type1 type2.. typeN)". * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4390,8 +4318,6 @@ cass_function_meta_full_name(const CassFunctionMeta* function_meta, /** * Gets the body of the function. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4406,8 +4332,6 @@ cass_function_meta_body(const CassFunctionMeta* function_meta, /** * Gets the language of the function. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4422,8 +4346,6 @@ cass_function_meta_language(const CassFunctionMeta* function_meta, /** * Gets whether a function is called on "null". * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4435,8 +4357,6 @@ cass_function_meta_called_on_null_input(const CassFunctionMeta* function_meta); /** * Gets the number of arguments this function takes. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4448,8 +4368,6 @@ cass_function_meta_argument_count(const CassFunctionMeta* function_meta); /** * Gets the function's argument name and type for the provided index. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4469,8 +4387,6 @@ cass_function_meta_argument(const CassFunctionMeta* function_meta, /** * Gets the function's argument and type for the provided name. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4485,8 +4401,6 @@ cass_function_meta_argument_type_by_name(const CassFunctionMeta* function_meta, * Same as cass_function_meta_argument_type_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4504,8 +4418,6 @@ cass_function_meta_argument_type_by_name_n(const CassFunctionMeta* function_meta /** * Gets the return type of the function. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4518,8 +4430,6 @@ cass_function_meta_return_type(const CassFunctionMeta* function_meta); * Gets a metadata field for the provided name. Metadata fields allow direct * access to the column data found in the underlying "functions" metadata table. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4534,8 +4444,6 @@ cass_function_meta_field_by_name(const CassFunctionMeta* function_meta, * Same as cass_function_meta_field_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -4553,8 +4461,6 @@ cass_function_meta_field_by_name_n(const CassFunctionMeta* function_meta, /** * Gets the name of the aggregate. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4571,8 +4477,6 @@ cass_aggregate_meta_name(const CassAggregateMeta* aggregate_meta, * aggregate's name and the aggregate's signature: * "name(type1 type2.. typeN)". 
* - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4587,8 +4491,6 @@ cass_aggregate_meta_full_name(const CassAggregateMeta* aggregate_meta, /** * Gets the number of arguments this aggregate takes. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4600,8 +4502,6 @@ cass_aggregate_meta_argument_count(const CassAggregateMeta* aggregate_meta); /** * Gets the aggregate's argument type for the provided index. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4615,8 +4515,6 @@ cass_aggregate_meta_argument_type(const CassAggregateMeta* aggregate_meta, /** * Gets the return type of the aggregate. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4628,8 +4526,6 @@ cass_aggregate_meta_return_type(const CassAggregateMeta* aggregate_meta); /** * Gets the state type of the aggregate. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4641,8 +4537,6 @@ cass_aggregate_meta_state_type(const CassAggregateMeta* aggregate_meta); /** * Gets the function metadata for the aggregate's state function. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4654,8 +4548,6 @@ cass_aggregate_meta_state_func(const CassAggregateMeta* aggregate_meta); /** * Gets the function metadata for the aggregates's final function. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4667,8 +4559,6 @@ cass_aggregate_meta_final_func(const CassAggregateMeta* aggregate_meta); /** * Gets the initial condition value for the aggregate. * - * @cassandra{2.2+} - * * Note: The value of the initial condition will always be * a "varchar" type for Cassandra 3.0+. * @@ -4684,8 +4574,6 @@ cass_aggregate_meta_init_cond(const CassAggregateMeta* aggregate_meta); * Gets a metadata field for the provided name. Metadata fields allow direct * access to the column data found in the underlying "aggregates" metadata table. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -4700,8 +4588,6 @@ cass_aggregate_meta_field_by_name(const CassAggregateMeta* aggregate_meta, * Same as cass_aggregate_meta_field_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -5193,8 +5079,6 @@ cass_future_tracing_id(CassFuture* future, * Gets a the number of custom payload items from a response future. If the future is not * ready this method will wait for the future to be set. * - * @cassandra{2.2+} - * * @public @memberof CassFuture * * @param[in] future @@ -5207,8 +5091,6 @@ cass_future_custom_payload_item_count(CassFuture* future); * Gets a custom payload item from a response future at the specified index. If the future is not * ready this method will wait for the future to be set. * - * @cassandra{2.2+} - * * @public @memberof CassFuture * * @param[in] future @@ -5383,8 +5265,6 @@ cass_statement_set_consistency(CassStatement* statement, /** * Sets the statement's serial consistency level. * - * @cassandra{2.0+} - * * Default: Not set * * @public @memberof CassStatement @@ -5400,8 +5280,6 @@ cass_statement_set_serial_consistency(CassStatement* statement, /** * Sets the statement's page size. 
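A matching sketch for the CassAggregateMeta accessors above (again assuming a keyspace_meta from a schema snapshot; the names printed are illustrative only):

#include <cassandra.h>
#include <stdio.h>

static void list_aggregates(const CassKeyspaceMeta* keyspace_meta) {
  CassIterator* it = cass_iterator_aggregates_from_keyspace_meta(keyspace_meta);
  while (cass_iterator_next(it)) {
    const CassAggregateMeta* agg = cass_iterator_get_aggregate_meta(it);

    const char* name;
    size_t name_length;
    cass_aggregate_meta_name(agg, &name, &name_length);

    const CassFunctionMeta* state_fn = cass_aggregate_meta_state_func(agg);
    const char* state_name;
    size_t state_name_length;
    cass_function_meta_name(state_fn, &state_name, &state_name_length);

    printf("aggregate %.*s uses state function %.*s\n",
           (int)name_length, name, (int)state_name_length, state_name);
  }
  cass_iterator_free(it);
}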
* - * @cassandra{2.0+} - * * Default: -1 (Disabled) * * @public @memberof CassStatement @@ -5418,8 +5296,6 @@ cass_statement_set_paging_size(CassStatement* statement, * Sets the statement's paging state. This can be used to get the next page of * data in a multi-page query. * - * @cassandra{2.0+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5434,8 +5310,6 @@ cass_statement_set_paging_state(CassStatement* statement, * Sets the statement's paging state. This can be used to get the next page of * data in a multi-page query. * - * @cassandra{2.0+} - * * Warning: The paging state should not be exposed to or come from * untrusted environments. The paging state could be spoofed and potentially * used to gain access to other data. @@ -5457,8 +5331,6 @@ cass_statement_set_paging_state_token(CassStatement* statement, /** * Sets the statement's timestamp. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5520,8 +5392,6 @@ cass_statement_set_retry_policy(CassStatement* statement, /** * Sets the statement's custom payload. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5570,8 +5440,6 @@ cass_statement_set_execution_profile_n(CassStatement* statement, /** * Sets whether the statement should use tracing. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5669,9 +5537,7 @@ cass_statement_bind_null(CassStatement* statement, /** * Binds a null to all the values with the specified name. - * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. + * * @public @memberof CassStatement * @@ -5704,8 +5570,6 @@ cass_statement_bind_null_by_name_n(CassStatement* statement, /** * Binds a "tinyint" to a query or bound statement at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5721,8 +5585,6 @@ cass_statement_bind_int8(CassStatement* statement, /** * Binds a "tinyint" to all the values with the specified name. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5739,8 +5601,6 @@ cass_statement_bind_int8_by_name(CassStatement* statement, * Same as cass_statement_bind_int8_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5760,8 +5620,6 @@ cass_statement_bind_int8_by_name_n(CassStatement* statement, /** * Binds an "smallint" to a query or bound statement at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5777,8 +5635,6 @@ cass_statement_bind_int16(CassStatement* statement, /** * Binds an "smallint" to all the values with the specified name. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5795,8 +5651,6 @@ cass_statement_bind_int16_by_name(CassStatement* statement, * Same as cass_statement_bind_int16_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5831,9 +5685,6 @@ cass_statement_bind_int32(CassStatement* statement, /** * Binds an "int" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. 
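The paging-related setters above keep their semantics; only the version annotations disappear. A minimal paging loop, assuming a connected session and an existing table ks.tbl (error handling on the future is omitted in this sketch):

#include <cassandra.h>

static void page_through(CassSession* session) {
  CassStatement* statement = cass_statement_new("SELECT v FROM ks.tbl", 0);
  cass_statement_set_paging_size(statement, 100); /* 100 rows per page */

  cass_bool_t has_more = cass_true;
  while (has_more) {
    CassFuture* future = cass_session_execute(session, statement);
    const CassResult* result = cass_future_get_result(future);
    cass_future_free(future);
    if (result == NULL) break; /* request failed */

    /* ... walk the rows with cass_iterator_from_result() ... */

    has_more = cass_result_has_more_pages(result);
    if (has_more) {
      /* Carry the server-side cursor into the next execution. */
      cass_statement_set_paging_state(statement, result);
    }
    cass_result_free(result);
  }
  cass_statement_free(statement);
}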
- * * @public @memberof CassStatement * * @param[in] statement @@ -5869,8 +5720,6 @@ cass_statement_bind_int32_by_name_n(CassStatement* statement, /** * Binds a "date" to a query or bound statement at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5886,8 +5735,6 @@ cass_statement_bind_uint32(CassStatement* statement, /** * Binds a "date" to all the values with the specified name. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5904,8 +5751,6 @@ cass_statement_bind_uint32_by_name(CassStatement* statement, * Same as cass_statement_bind_uint32_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] statement @@ -5942,9 +5787,6 @@ cass_statement_bind_int64(CassStatement* statement, * Binds a "bigint", "counter", "timestamp" or "time" to all values * with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -5995,9 +5837,6 @@ cass_statement_bind_float(CassStatement* statement, /** * Binds a "float" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6048,9 +5887,6 @@ cass_statement_bind_double(CassStatement* statement, /** * Binds a "double" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6101,9 +5937,6 @@ cass_statement_bind_bool(CassStatement* statement, /** * Binds a "boolean" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6177,9 +6010,6 @@ cass_statement_bind_string_n(CassStatement* statement, * Binds an "ascii", "text" or "varchar" to all the values * with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6237,9 +6067,6 @@ cass_statement_bind_bytes(CassStatement* statement, * Binds a "blob", "varint" or "custom" to all the values with the * specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6322,9 +6149,6 @@ cass_statement_bind_custom_n(CassStatement* statement, /** * Binds a "custom" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6387,9 +6211,6 @@ cass_statement_bind_uuid(CassStatement* statement, * Binds a "uuid" or "timeuuid" to all the values * with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. 
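These bind_* hunks are tag-only as well. A binding sketch, assuming prepared came from cass_session_prepare() for "INSERT INTO ks.users (id, name, age) VALUES (?, ?, ?)" where age is a tinyint, and uuid_gen is a CassUuidGen created elsewhere:

#include <cassandra.h>

static void insert_user(CassSession* session, const CassPrepared* prepared,
                        CassUuidGen* uuid_gen) {
  CassStatement* statement = cass_prepared_bind(prepared);

  CassUuid id;
  cass_uuid_gen_random(uuid_gen, &id);
  cass_statement_bind_uuid_by_name(statement, "id", id);
  cass_statement_bind_string_by_name(statement, "name", "alice");
  cass_statement_bind_int8_by_name(statement, "age", (cass_int8_t)30);

  CassFuture* future = cass_session_execute(session, statement);
  cass_future_wait(future); /* check cass_future_error_code() in real code */
  cass_future_free(future);
  cass_statement_free(statement);
}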
- * * @public @memberof CassStatement * * @param[in] statement @@ -6440,9 +6261,6 @@ cass_statement_bind_inet(CassStatement* statement, /** * Binds an "inet" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6498,9 +6316,6 @@ cass_statement_bind_decimal(CassStatement* statement, /** * Binds a "decimal" to all the values with the specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6630,9 +6445,6 @@ cass_statement_bind_collection(CassStatement* statement, * Bind a "list", "map" or "set" to all the values with the * specified name. * - * This can only be used with statements created by - * cass_prepared_bind() when using Cassandra 2.0 or earlier. - * * @public @memberof CassStatement * * @param[in] statement @@ -6668,8 +6480,6 @@ cass_statement_bind_collection_by_name_n(CassStatement* statement, /** * Bind a "tuple" to a query or bound statement at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6685,8 +6495,6 @@ cass_statement_bind_tuple(CassStatement* statement, /** * Bind a "tuple" to all the values with the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6703,8 +6511,6 @@ cass_statement_bind_tuple_by_name(CassStatement* statement, * Same as cass_statement_bind_tuple_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6725,8 +6531,6 @@ cass_statement_bind_tuple_by_name_n(CassStatement* statement, * Bind a user defined type to a query or bound statement at the * specified index. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6742,8 +6546,6 @@ cass_statement_bind_user_type(CassStatement* statement, * Bind a user defined type to a query or bound statement with the * specified name. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6760,8 +6562,6 @@ cass_statement_bind_user_type_by_name(CassStatement* statement, * Same as cass_statement_bind_user_type_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassStatement * * @param[in] statement @@ -6880,8 +6680,6 @@ cass_prepared_parameter_data_type_by_name_n(const CassPrepared* prepared, /** * Creates a new batch statement with batch type. * - * @cassandra{2.0+} - * * @public @memberof CassBatch * * @param[in] type @@ -6896,8 +6694,6 @@ cass_batch_new(CassBatchType type); * Frees a batch instance. Batches can be immediately freed after being * executed. * - * @cassandra{2.0+} - * * @public @memberof CassBatch * * @param[in] batch @@ -6943,7 +6739,6 @@ cass_batch_set_keyspace_n(CassBatch* batch, /** * Sets the batch's consistency level * - * @cassandra{2.0+} * * @public @memberof CassBatch * @@ -6958,7 +6753,6 @@ cass_batch_set_consistency(CassBatch* batch, /** * Sets the batch's serial consistency level. * - * @cassandra{2.0+} * * Default: Not set * @@ -6975,8 +6769,6 @@ cass_batch_set_serial_consistency(CassBatch* batch, /** * Sets the batch's timestamp. 
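A short sketch of the batch API whose annotations are being dropped above: a logged batch of two simple statements, assuming a connected session:

#include <cassandra.h>

static void insert_two(CassSession* session) {
  CassBatch* batch = cass_batch_new(CASS_BATCH_TYPE_LOGGED);

  CassStatement* s1 = cass_statement_new("INSERT INTO ks.t (k, v) VALUES (1, 'a')", 0);
  CassStatement* s2 = cass_statement_new("INSERT INTO ks.t (k, v) VALUES (2, 'b')", 0);
  cass_batch_add_statement(batch, s1);
  cass_batch_add_statement(batch, s2);
  cass_statement_free(s1); /* the batch keeps its own copy */
  cass_statement_free(s2);

  CassFuture* future = cass_session_execute_batch(session, batch);
  cass_future_wait(future);
  cass_future_free(future);
  cass_batch_free(batch);
}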
* - * @cassandra{2.1+} - * * @public @memberof CassBatch * * @param[in] batch @@ -7026,8 +6818,6 @@ cass_batch_set_is_idempotent(CassBatch* batch, /** * Sets the batch's retry policy. * - * @cassandra{2.0+} - * * @public @memberof CassBatch * * @param[in] batch @@ -7041,8 +6831,6 @@ cass_batch_set_retry_policy(CassBatch* batch, /** * Sets the batch's custom payload. * - * @cassandra{2.2+} - * * @public @memberof CassBatch * * @param[in] batch @@ -7056,8 +6844,6 @@ cass_batch_set_custom_payload(CassBatch* batch, /** * Sets whether the batch should use tracing. * - * @cassandra{2.2+} - * * @public @memberof CassStatement * * @param[in] batch @@ -7071,8 +6857,6 @@ cass_batch_set_tracing(CassBatch* batch, /** * Adds a statement to a batch. * - * @cassandra{2.0+} - * * @public @memberof CassBatch * * @param[in] batch @@ -7153,8 +6937,6 @@ cass_data_type_new_from_existing(const CassDataType* data_type); /** * Creates a new tuple data type. * - * @cassandra{2.1+} - * * @public @memberof CassDataType * * @param[in] item_count The number of items in the tuple @@ -7168,8 +6950,6 @@ cass_data_type_new_tuple(size_t item_count); /** * Creates a new UDT (user defined type) data type. * - * @cassandra{2.1+} - * * @public @memberof CassDataType * * @param[in] field_count The number of fields in the UDT @@ -7202,8 +6982,6 @@ cass_data_type_type(const CassDataType* data_type); /** * Gets whether a data type is frozen. * - * @cassandra{2.1+} - * * @param[in] data_type * @return cass_true if the data type is frozen, otherwise cass_false. */ @@ -7259,8 +7037,6 @@ cass_data_type_set_type_name_n(CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[out] keyspace * @param[out] keyspace_length @@ -7276,8 +7052,6 @@ cass_data_type_keyspace(const CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] keyspace * @return CASS_OK if successful, otherwise an error occurred. @@ -7290,8 +7064,6 @@ cass_data_type_set_keyspace(CassDataType* data_type, * Same as cass_data_type_set_keyspace(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassDataType * * @param[in] data_type @@ -7397,8 +7169,6 @@ cass_data_type_sub_data_type(const CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] name * @return Returns a reference to a child data type. Do not free this @@ -7413,8 +7183,6 @@ cass_data_type_sub_data_type_by_name(const CassDataType* data_type, * Same as cass_data_type_sub_data_type_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassDataType * * @param[in] data_type @@ -7432,8 +7200,6 @@ cass_data_type_sub_data_type_by_name_n(const CassDataType* data_type, /** * Gets the sub-type name of a UDT (user defined type) at the specified index. * - * @cassandra{2.1+} - * * Note: Only valid for UDT data types. * * @param[in] data_type @@ -7466,8 +7232,6 @@ cass_data_type_add_sub_type(CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] name * @param[in] sub_data_type @@ -7484,8 +7248,6 @@ cass_data_type_add_sub_type_by_name(CassDataType* data_type, * * Note: Only valid for UDT data types. 
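The CassDataType constructors above likewise lose only their tags. Building a tuple and a UDT data type by hand might look like this (the keyspace, type, and field names are made up for the sketch):

#include <cassandra.h>

static void build_data_types(void) {
  /* tuple<text, int> */
  CassDataType* tuple_type = cass_data_type_new_tuple(2);
  cass_data_type_add_sub_value_type(tuple_type, CASS_VALUE_TYPE_TEXT);
  cass_data_type_add_sub_value_type(tuple_type, CASS_VALUE_TYPE_INT);

  /* UDT ks.address(street text, zip int) */
  CassDataType* udt_type = cass_data_type_new_udt(2);
  cass_data_type_set_keyspace(udt_type, "ks");
  cass_data_type_set_type_name(udt_type, "address");
  cass_data_type_add_sub_value_type_by_name(udt_type, "street", CASS_VALUE_TYPE_TEXT);
  cass_data_type_add_sub_value_type_by_name(udt_type, "zip", CASS_VALUE_TYPE_INT);

  /* ... use with cass_tuple_new_from_data_type() / cass_user_type_new_from_data_type() ... */
  cass_data_type_free(udt_type);
  cass_data_type_free(tuple_type);
}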
* - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] name * @param[in] name_length @@ -7517,8 +7279,6 @@ cass_data_type_add_sub_value_type(CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] name * @param[in] sub_value_type @@ -7535,8 +7295,6 @@ cass_data_type_add_sub_value_type_by_name(CassDataType* data_type, * * Note: Only valid for UDT data types. * - * @cassandra{2.1+} - * * @param[in] data_type * @param[in] name * @param[in] name_length @@ -7608,8 +7366,6 @@ cass_collection_data_type(const CassCollection* collection); /** * Appends a "tinyint" to the collection. * - * @cassandra{2.2+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7623,8 +7379,6 @@ cass_collection_append_int8(CassCollection* collection, /** * Appends an "smallint" to the collection. * - * @cassandra{2.2+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7651,8 +7405,6 @@ cass_collection_append_int32(CassCollection* collection, /** * Appends a "date" to the collection. * - * @cassandra{2.2+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7872,8 +7624,6 @@ cass_collection_append_duration(CassCollection* collection, /** * Appends a "list", "map" or "set" to the collection. * - * @cassandra{2.1+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7887,8 +7637,6 @@ cass_collection_append_collection(CassCollection* collection, /** * Appends a "tuple" to the collection. * - * @cassandra{2.1+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7902,8 +7650,6 @@ cass_collection_append_tuple(CassCollection* collection, /** * Appends a "udt" to the collection. * - * @cassandra{2.1+} - * * @public @memberof CassCollection * * @param[in] collection @@ -7923,8 +7669,6 @@ cass_collection_append_user_type(CassCollection* collection, /** * Creates a new tuple. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] item_count The number of items in the tuple. @@ -7938,8 +7682,6 @@ cass_tuple_new(size_t item_count); /** * Creates a new tuple from an existing data type. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] data_type @@ -7953,8 +7695,6 @@ cass_tuple_new_from_data_type(const CassDataType* data_type); /** * Frees a tuple instance. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -7965,8 +7705,6 @@ cass_tuple_free(CassTuple* tuple); /** * Gets the data type of a tuple. * - * @cassandra{2.1+} - * * @param[in] tuple * @return Returns a reference to the data type of the tuple. Do not free * this reference as it is bound to the lifetime of the tuple. @@ -7977,8 +7715,6 @@ cass_tuple_data_type(const CassTuple* tuple); /** * Sets an null in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -7991,8 +7727,6 @@ cass_tuple_set_null(CassTuple* tuple, size_t index); /** * Sets a "tinyint" in a tuple at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8008,8 +7742,6 @@ cass_tuple_set_int8(CassTuple* tuple, /** * Sets an "smallint" in a tuple at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8025,8 +7757,6 @@ cass_tuple_set_int16(CassTuple* tuple, /** * Sets an "int" in a tuple at the specified index. 
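For the collection appenders above, a minimal "list<int>" sketch (assumes statement has a list bind marker at index 0):

#include <cassandra.h>

static void bind_int_list(CassStatement* statement) {
  CassCollection* list = cass_collection_new(CASS_COLLECTION_TYPE_LIST, 3);
  cass_collection_append_int32(list, 1);
  cass_collection_append_int32(list, 2);
  cass_collection_append_int32(list, 3);
  cass_statement_bind_collection(statement, 0, list);
  cass_collection_free(list); /* the statement keeps its own copy */
}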
* - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8042,8 +7772,6 @@ cass_tuple_set_int32(CassTuple* tuple, /** * Sets a "date" in a tuple at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8060,8 +7788,6 @@ cass_tuple_set_uint32(CassTuple* tuple, * Sets a "bigint", "counter", "timestamp" or "time" in a tuple at the * specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8077,8 +7803,6 @@ cass_tuple_set_int64(CassTuple* tuple, /** * Sets a "float" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8094,8 +7818,6 @@ cass_tuple_set_float(CassTuple* tuple, /** * Sets a "double" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8111,8 +7833,6 @@ cass_tuple_set_double(CassTuple* tuple, /** * Sets a "boolean" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8128,8 +7848,6 @@ cass_tuple_set_bool(CassTuple* tuple, /** * Sets an "ascii", "text" or "varchar" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8147,8 +7865,6 @@ cass_tuple_set_string(CassTuple* tuple, * Same as cass_tuple_set_string(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8168,8 +7884,6 @@ cass_tuple_set_string_n(CassTuple* tuple, /** * Sets a "blob", "varint" or "custom" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8232,8 +7946,6 @@ cass_tuple_set_custom_n(CassTuple* tuple, /** * Sets a "uuid" or "timeuuid" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8249,8 +7961,6 @@ cass_tuple_set_uuid(CassTuple* tuple, /** * Sets an "inet" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8266,8 +7976,6 @@ cass_tuple_set_inet(CassTuple* tuple, /** * Sets a "decimal" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8309,8 +8017,6 @@ cass_tuple_set_duration(CassTuple* tuple, /** * Sets a "list", "map" or "set" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8326,8 +8032,6 @@ cass_tuple_set_collection(CassTuple* tuple, /** * Sets a "tuple" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8343,8 +8047,6 @@ cass_tuple_set_tuple(CassTuple* tuple, /** * Sets a "udt" in a tuple at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassTuple * * @param[in] tuple @@ -8366,8 +8068,6 @@ cass_tuple_set_user_type(CassTuple* tuple, /** * Creates a new user defined type from existing data type; * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] data_type @@ -8382,8 +8082,6 @@ cass_user_type_new_from_data_type(const CassDataType* data_type); /** * Frees a user defined type instance. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8394,8 +8092,6 @@ cass_user_type_free(CassUserType* user_type); /** * Gets the data type of a user defined type. 
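And for the tuple setters, a "tuple<text, int>" sketch (again assuming statement has a matching bind marker at index 0):

#include <cassandra.h>

static void bind_pair(CassStatement* statement) {
  CassTuple* pair = cass_tuple_new(2);
  cass_tuple_set_string(pair, 0, "answer");
  cass_tuple_set_int32(pair, 1, 42);
  cass_statement_bind_tuple(statement, 0, pair);
  cass_tuple_free(pair);
}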
* - * @cassandra{2.1+} - * * @param[in] user_type * @return Returns a reference to the data type of the user defined type. * Do not free this reference as it is bound to the lifetime of the @@ -8407,8 +8103,6 @@ cass_user_type_data_type(const CassUserType* user_type); /** * Sets a null in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8422,8 +8116,6 @@ cass_user_type_set_null(CassUserType* user_type, /** * Sets a null in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8438,8 +8130,6 @@ cass_user_type_set_null_by_name(CassUserType* user_type, * Same as cass_user_type_set_null_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8457,8 +8147,6 @@ cass_user_type_set_null_by_name_n(CassUserType* user_type, /** * Sets a "tinyint" in a user defined type at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8474,8 +8162,6 @@ cass_user_type_set_int8(CassUserType* user_type, /** * Sets a "tinyint" in a user defined type at the specified name. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8492,8 +8178,6 @@ cass_user_type_set_int8_by_name(CassUserType* user_type, * Same as cass_user_type_set_int8_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8513,8 +8197,6 @@ cass_user_type_set_int8_by_name_n(CassUserType* user_type, /** * Sets an "smallint" in a user defined type at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8530,8 +8212,6 @@ cass_user_type_set_int16(CassUserType* user_type, /** * Sets an "smallint" in a user defined type at the specified name. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8548,8 +8228,6 @@ cass_user_type_set_int16_by_name(CassUserType* user_type, * Same as cass_user_type_set_int16_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8569,8 +8247,6 @@ cass_user_type_set_int16_by_name_n(CassUserType* user_type, /** * Sets an "int" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8586,8 +8262,6 @@ cass_user_type_set_int32(CassUserType* user_type, /** * Sets an "int" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8604,8 +8278,6 @@ cass_user_type_set_int32_by_name(CassUserType* user_type, * Same as cass_user_type_set_int32_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8625,8 +8297,6 @@ cass_user_type_set_int32_by_name_n(CassUserType* user_type, /** * Sets a "date" in a user defined type at the specified index. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8642,8 +8312,6 @@ cass_user_type_set_uint32(CassUserType* user_type, /** * Sets a "date" in a user defined type at the specified name. 
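The CassUserType setters follow the same pattern. A sketch that fills a UDT whose definition is taken from schema metadata, assuming keyspace ks defines TYPE address(street text, zip int), keyspace_meta comes from a schema snapshot, and cass_keyspace_meta_user_type_by_name() is available elsewhere in this header (it is not shown in this diff):

#include <cassandra.h>

static void bind_address(CassStatement* statement,
                         const CassKeyspaceMeta* keyspace_meta) {
  const CassDataType* addr_type =
      cass_keyspace_meta_user_type_by_name(keyspace_meta, "address");
  if (addr_type == NULL) return;

  CassUserType* addr = cass_user_type_new_from_data_type(addr_type);
  cass_user_type_set_string_by_name(addr, "street", "Main St 1");
  cass_user_type_set_int32_by_name(addr, "zip", 12345);
  cass_statement_bind_user_type_by_name(statement, "addr", addr);
  cass_user_type_free(addr);
}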
* - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8660,8 +8328,6 @@ cass_user_type_set_uint32_by_name(CassUserType* user_type, * Same as cass_user_type_set_uint32_by_name(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8683,8 +8349,6 @@ cass_user_type_set_uint32_by_name_n(CassUserType* user_type, * Sets an "bigint", "counter", "timestamp" or "time" in a * user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8701,8 +8365,6 @@ cass_user_type_set_int64(CassUserType* user_type, * Sets an "bigint", "counter", "timestamp" or "time" in a * user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8719,8 +8381,6 @@ cass_user_type_set_int64_by_name(CassUserType* user_type, * Same as cass_user_type_set_int64_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8740,8 +8400,6 @@ cass_user_type_set_int64_by_name_n(CassUserType* user_type, /** * Sets a "float" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8757,8 +8415,6 @@ cass_user_type_set_float(CassUserType* user_type, /** * Sets a "float" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8775,8 +8431,6 @@ cass_user_type_set_float_by_name(CassUserType* user_type, * Same as cass_user_type_set_float_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8796,8 +8450,6 @@ cass_user_type_set_float_by_name_n(CassUserType* user_type, /** * Sets an "double" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8813,8 +8465,6 @@ cass_user_type_set_double(CassUserType* user_type, /** * Sets an "double" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8831,8 +8481,6 @@ cass_user_type_set_double_by_name(CassUserType* user_type, * Same as cass_user_type_set_double_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8852,8 +8500,6 @@ cass_user_type_set_double_by_name_n(CassUserType* user_type, /** * Sets a "boolean" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8869,8 +8515,6 @@ cass_user_type_set_bool(CassUserType* user_type, /** * Sets a "boolean" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8887,8 +8531,6 @@ cass_user_type_set_bool_by_name(CassUserType* user_type, * Same as cass_user_type_set_double_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8910,8 +8552,6 @@ cass_user_type_set_bool_by_name_n(CassUserType* user_type, * Sets an "ascii", "text" or "varchar" in a user defined type at the * specified index. 
* - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8928,8 +8568,6 @@ cass_user_type_set_string(CassUserType* user_type, * Same as cass_user_type_set_string(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8950,8 +8588,6 @@ cass_user_type_set_string_n(CassUserType* user_type, * Sets an "ascii", "text" or "varchar" in a user defined type at the * specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8968,8 +8604,6 @@ cass_user_type_set_string_by_name(CassUserType* user_type, * Same as cass_user_type_set_string_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -8991,8 +8625,6 @@ cass_user_type_set_string_by_name_n(CassUserType* user_type, /** * Sets a "blob" "varint" or "custom" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9010,8 +8642,6 @@ cass_user_type_set_bytes(CassUserType* user_type, /** * Sets a "blob", "varint" or "custom" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9030,8 +8660,6 @@ cass_user_type_set_bytes_by_name(CassUserType* user_type, * Same as cass_user_type_set_bytes_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9141,8 +8769,6 @@ cass_user_type_set_custom_by_name_n(CassUserType* user_type, /** * Sets a "uuid" or "timeuuid" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9158,8 +8784,6 @@ cass_user_type_set_uuid(CassUserType* user_type, /** * Sets a "uuid" or "timeuuid" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9176,8 +8800,6 @@ cass_user_type_set_uuid_by_name(CassUserType* user_type, * Same as cass_user_type_set_uuid_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9197,8 +8819,6 @@ cass_user_type_set_uuid_by_name_n(CassUserType* user_type, /** * Sets a "inet" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9214,8 +8834,6 @@ cass_user_type_set_inet(CassUserType* user_type, /** * Sets a "inet" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9232,8 +8850,6 @@ cass_user_type_set_inet_by_name(CassUserType* user_type, * Same as cass_user_type_set_inet_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9253,8 +8869,6 @@ cass_user_type_set_inet_by_name_n(CassUserType* user_type, /** * Sets an "decimal" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9274,8 +8888,6 @@ cass_user_type_set_decimal(CassUserType* user_type, /** * Sets "decimal" in a user defined type at the specified name. 
* - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9296,8 +8908,6 @@ cass_user_type_set_decimal_by_name(CassUserType* user_type, * Same as cass_user_type_set_decimal_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9390,8 +9000,6 @@ cass_user_type_set_duration_by_name_n(CassUserType* user_type, * Sets a "list", "map" or "set" in a user defined type at the * specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9408,8 +9016,6 @@ cass_user_type_set_collection(CassUserType* user_type, * Sets a "list", "map" or "set" in a user defined type at the * specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9426,8 +9032,6 @@ cass_user_type_set_collection_by_name(CassUserType* user_type, * Same as cass_user_type_set_collection_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9447,8 +9051,6 @@ cass_user_type_set_collection_by_name_n(CassUserType* user_type, /** * Sets a "tuple" in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9464,8 +9066,6 @@ cass_user_type_set_tuple(CassUserType* user_type, /** * Sets a "tuple" in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9482,8 +9082,6 @@ cass_user_type_set_tuple_by_name(CassUserType* user_type, * Same as cass_user_type_set_tuple_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9503,8 +9101,6 @@ cass_user_type_set_tuple_by_name_n(CassUserType* user_type, /** * Sets a user defined type in a user defined type at the specified index. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9520,8 +9116,6 @@ cass_user_type_set_user_type(CassUserType* user_type, /** * Sets a user defined type in a user defined type at the specified name. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9538,8 +9132,6 @@ cass_user_type_set_user_type_by_name(CassUserType* user_type, * Same as cass_user_type_set_user_type_by_name(), but with lengths for string * parameters. * - * @cassandra{2.1+} - * * @public @memberof CassUserType * * @param[in] user_type @@ -9655,8 +9247,6 @@ cass_result_first_row(const CassResult* result); /** * Returns true if there are more pages. * - * @cassandra{2.0+} - * * @public @memberof CassResult * * @param[in] result @@ -9674,8 +9264,6 @@ cass_result_has_more_pages(const CassResult* result); * untrusted environments. The paging state could be spoofed and potentially * used to gain access to other data. * - * @cassandra{2.0+} - * * @public @memberof CassResult * * @param[in] result @@ -9876,8 +9464,6 @@ cass_error_result_table(const CassErrorResult* error_result, * Gets the affected function for the function failure error * (CASS_ERROR_SERVER_FUNCTION_FAILURE) result type. * - * @cassandra{2.2+} - * * @public @memberof CassErrorResult * * @param[in] error_result @@ -9894,8 +9480,6 @@ cass_error_result_function(const CassErrorResult* error_result, * Gets the number of argument types for the function failure error * (CASS_ERROR_SERVER_FUNCTION_FAILURE) result type. 
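On the reading side (cass_result_first_row() and friends above), a sketch that pulls a single int column out of a result, assuming the query selected a column named "v" of type int:

#include <cassandra.h>
#include <stdio.h>

static void print_first_v(const CassResult* result) {
  const CassRow* row = cass_result_first_row(result);
  if (row != NULL) {
    const CassValue* value = cass_row_get_column_by_name(row, "v");
    cass_int32_t v = 0;
    if (cass_value_get_int32(value, &v) == CASS_OK) {
      printf("v = %d\n", (int)v);
    }
  }
}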
* - * @cassandra{2.2+} - * * @public @memberof CassErrorResult * * @param[in] error_result @@ -9908,8 +9492,6 @@ cass_error_num_arg_types(const CassErrorResult* error_result); * Gets the argument type at the specified index for the function failure * error (CASS_ERROR_SERVER_FUNCTION_FAILURE) result type. * - * @cassandra{2.2+} - * * @public @memberof CassErrorResult * * @param[in] error_result @@ -10013,8 +9595,6 @@ cass_iterator_from_map(const CassValue* value); * Creates a new iterator for the specified tuple. This can be * used to iterate over values in a tuple. * - * @cassandra{2.1+} - * * @public @memberof CassValue * * @param[in] value @@ -10030,8 +9610,6 @@ cass_iterator_from_tuple(const CassValue* value); * Creates a new iterator for the specified user defined type. This can be * used to iterate over fields in a user defined type. * - * @cassandra{2.1+} - * * @public @memberof CassValue * * @param[in] value @@ -10077,8 +9655,6 @@ cass_iterator_tables_from_keyspace_meta(const CassKeyspaceMeta* keyspace_meta); * Creates a new iterator for the specified keyspace metadata. * This can be used to iterate over views. * - * @cassandra{3.0+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -10094,8 +9670,6 @@ cass_iterator_materialized_views_from_keyspace_meta(const CassKeyspaceMeta* keys * Creates a new iterator for the specified keyspace metadata. * This can be used to iterate over types. * - * @cassandra{2.1+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -10111,8 +9685,6 @@ cass_iterator_user_types_from_keyspace_meta(const CassKeyspaceMeta* keyspace_met * Creates a new iterator for the specified keyspace metadata. * This can be used to iterate over functions. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -10128,8 +9700,6 @@ cass_iterator_functions_from_keyspace_meta(const CassKeyspaceMeta* keyspace_meta * Creates a new iterator for the specified keyspace metadata. * This can be used to iterate over aggregates. * - * @cassandra{2.2+} - * * @public @memberof CassKeyspaceMeta * * @param[in] keyspace_meta @@ -10193,8 +9763,6 @@ cass_iterator_indexes_from_table_meta(const CassTableMeta* table_meta); * Creates a new iterator for the specified materialized view metadata. * This can be used to iterate over columns. * - * @cassandra{3.0+} - * * @public @memberof CassTableMeta * * @param[in] table_meta @@ -10228,8 +9796,6 @@ cass_iterator_fields_from_table_meta(const CassTableMeta* table_meta); * Creates a new iterator for the specified materialized view metadata. * This can be used to iterate over columns. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -10247,8 +9813,6 @@ cass_iterator_columns_from_materialized_view_meta(const CassMaterializedViewMeta * underlying "views" metadata view. This can be used to iterate those metadata * field entries. * - * @cassandra{3.0+} - * * @public @memberof CassMaterializedViewMeta * * @param[in] view_meta @@ -10303,8 +9867,6 @@ cass_iterator_fields_from_index_meta(const CassIndexMeta* index_meta); * "functions" metadata table. This can be used to iterate those metadata * field entries. * - * @cassandra{2.2+} - * * @public @memberof CassFunctionMeta * * @param[in] function_meta @@ -10322,8 +9884,6 @@ cass_iterator_fields_from_function_meta(const CassFunctionMeta* function_meta); * "aggregates" metadata table. This can be used to iterate those metadata * field entries. 
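For the value and metadata iterators above, a sketch that walks the fields of a UDT value returned in a row (assumes value is a CassValue holding a user defined type):

#include <cassandra.h>
#include <stdio.h>

static void dump_udt_fields(const CassValue* value) {
  CassIterator* fields = cass_iterator_fields_from_user_type(value);
  if (fields == NULL) return; /* not a UDT value */
  while (cass_iterator_next(fields)) {
    const char* field_name;
    size_t field_name_length;
    cass_iterator_get_user_type_field_name(fields, &field_name, &field_name_length);
    const CassValue* field_value = cass_iterator_get_user_type_field_value(fields);
    printf("field %.*s is %s\n", (int)field_name_length, field_name,
           cass_value_is_null(field_value) ? "null" : "set");
  }
  cass_iterator_free(fields);
}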
* - * @cassandra{2.2+} - * * @public @memberof CassAggregateMeta * * @param[in] aggregate_meta @@ -10423,8 +9983,6 @@ cass_iterator_get_map_value(const CassIterator* iterator); * Calling cass_iterator_next() will invalidate the previous * name returned by this method. * - * @cassandra{2.1+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10443,8 +10001,6 @@ cass_iterator_get_user_type_field_name(const CassIterator* iterator, * Calling cass_iterator_next() will invalidate the previous * value returned by this method. * - * @cassandra{2.1+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10487,8 +10043,6 @@ cass_iterator_get_table_meta(const CassIterator* iterator); * Calling cass_iterator_next() will invalidate the previous * value returned by this method. * - * @cassandra{3.0+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10503,8 +10057,6 @@ cass_iterator_get_materialized_view_meta(const CassIterator* iterator); * Calling cass_iterator_next() will invalidate the previous * value returned by this method. * - * @cassandra{2.1+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10519,8 +10071,6 @@ cass_iterator_get_user_type(const CassIterator* iterator); * Calling cass_iterator_next() will invalidate the previous * value returned by this method. * - * @cassandra{2.2+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10535,8 +10085,6 @@ cass_iterator_get_function_meta(const CassIterator* iterator); * Calling cass_iterator_next() will invalidate the previous * value returned by this method. * - * @cassandra{2.2+} - * * @public @memberof CassIterator * * @param[in] iterator @@ -10679,8 +10227,6 @@ cass_value_data_type(const CassValue* value); /** * Gets an int8 for the specified value. * - * @cassandra{2.2+} - * * @public @memberof CassValue * * @param[in] value @@ -10694,8 +10240,6 @@ cass_value_get_int8(const CassValue* value, /** * Gets an int16 for the specified value. * - * @cassandra{2.2+} - * * @public @memberof CassValue * * @param[in] value @@ -10722,8 +10266,6 @@ cass_value_get_int32(const CassValue* value, /** * Gets an uint32 for the specified value. * - * @cassandra{2.2+} - * * @public @memberof CassValue * * @param[in] value @@ -11156,8 +10698,6 @@ cass_uuid_from_string_n(const char* str, * Creates a new server-side timestamp generator. This generator allows Cassandra * to assign timestamps server-side. * - * @cassandra{2.1+} - * * @public @memberof CassTimestampGen * * @return Returns a timestamp generator that must be freed. @@ -11188,8 +10728,6 @@ cass_timestamp_gen_server_side_new(); * Note: This generator is thread-safe and can be shared by multiple * sessions. * - * @cassandra{2.1+} - * * @public @memberof CassTimestampGen * * @return Returns a timestamp generator that must be freed. @@ -11219,8 +10757,6 @@ cass_timestamp_gen_monotonic_new_with_settings(cass_int64_t warning_threshold_us /** * Frees a timestamp generator instance. * - * @cassandra{2.1+} - * * @public @memberof CassTimestampGen * * @param[in] timestamp_gen @@ -11350,8 +10886,6 @@ cass_retry_policy_free(CassRetryPolicy* policy); * * @public @memberof CassCustomPayload * - * @cassandra{2.2+} - * * @return Returns a custom payload that must be freed. * * @see cass_custom_payload_free() @@ -11362,8 +10896,6 @@ cass_custom_payload_new(); /** * Frees a custom payload instance. 
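The timestamp generator functions above keep working the same way; a typical setup on the cluster object might look like this (a sketch, not from the patch):

#include <cassandra.h>

static CassCluster* make_cluster(void) {
  CassCluster* cluster = cass_cluster_new();
  CassTimestampGen* ts_gen = cass_timestamp_gen_monotonic_new();
  cass_cluster_set_timestamp_gen(cluster, ts_gen);
  cass_timestamp_gen_free(ts_gen); /* the cluster keeps its own reference */
  return cluster;
}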
* - * @cassandra{2.2+} - * * @public @memberof CassCustomPayload * * @param[in] payload @@ -11374,8 +10906,6 @@ cass_custom_payload_free(CassCustomPayload* payload); /** * Sets an item to the custom payload. * - * @cassandra{2.2+} - * * @public @memberof CassCustomPayload * * @param[in] payload @@ -11393,8 +10923,6 @@ cass_custom_payload_set(CassCustomPayload* payload, * Same as cass_custom_payload_set(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassCustomPayload * * @param[in] payload @@ -11413,8 +10941,6 @@ cass_custom_payload_set_n(CassCustomPayload* payload, /** * Removes an item from the custom payload. * - * @cassandra{2.2+} - * * @public @memberof CassCustomPayload * * @param[in] payload @@ -11428,8 +10954,6 @@ cass_custom_payload_remove(CassCustomPayload* payload, * Same as cass_custom_payload_set(), but with lengths for string * parameters. * - * @cassandra{2.2+} - * * @public @memberof CassCustomPayload * * @param[in] payload @@ -11652,8 +11176,6 @@ cass_inet_from_string_n(const char* str, * represents the number of days since the Epoch (1970-01-01) with the Epoch centered at * the value 2^31. * - * @cassandra{2.2+} - * * @param[in] epoch_secs * @return the number of days since the date -5877641-06-23 */ @@ -11664,8 +11186,6 @@ cass_date_from_epoch(cass_int64_t epoch_secs); * Converts a unix timestamp (in seconds) to the Cassandra "time" type. The "time" type * represents the number of nanoseconds since midnight (range 0 to 86399999999999). * - * @cassandra{2.2+} - * * @param[in] epoch_secs * @return nanoseconds since midnight */ @@ -11675,8 +11195,6 @@ cass_time_from_epoch(cass_int64_t epoch_secs); /** * Combines the Cassandra "date" and "time" types to Epoch time in seconds. * - * @cassandra{2.2+} - * * @param[in] date * @param[in] time * @return Epoch time in seconds. Negative times are possible if the date diff --git a/scylla-rust-wrapper/src/exec_profile.rs b/scylla-rust-wrapper/src/exec_profile.rs index 13baeaa96..e19a13ffc 100644 --- a/scylla-rust-wrapper/src/exec_profile.rs +++ b/scylla-rust-wrapper/src/exec_profile.rs @@ -75,7 +75,7 @@ impl CassExecProfile { fn use_cluster_defaults_for_unset_settings(&mut self, default_profile: &ExecutionProfile) { // The reason for separate handling of consistency is that the default consistency // depends on the kind of the cluster that the CPP Driver connected to (DataStax DBAAS has - // a different default than the ordinary Cassandra/ScyllaDB). + // a different default than the ordinary ScyllaDB/Cassandra). // // It would be plausible to believe that we don't have to worry about that, because we treat // all DBs the same way wrt the default consistency. This is **wrong**. Consistency, unlike diff --git a/scylla-rust-wrapper/src/prepared.rs b/scylla-rust-wrapper/src/prepared.rs index 9954ed0a6..8a181c8ab 100644 --- a/scylla-rust-wrapper/src/prepared.rs +++ b/scylla-rust-wrapper/src/prepared.rs @@ -30,7 +30,7 @@ impl CassPrepared { // // NOTE: We are aware that it makes cached metadata immutable. It is expected, though - there // is an integration test for this for CQL protocol v4 (AlterDoesntUpdateColumnCount). - // This issue is addressed in CQL protocol v5, but Scylla doesn't support it yet, and probably + // This issue is addressed in CQL protocol v5, but ScyllaDB doesn't support it yet, and probably // won't support it in the near future. 
statement.set_use_cached_result_metadata(true); diff --git a/scylla-rust-wrapper/src/session.rs b/scylla-rust-wrapper/src/session.rs index acaec04a8..1f6b062e4 100644 --- a/scylla-rust-wrapper/src/session.rs +++ b/scylla-rust-wrapper/src/session.rs @@ -1126,7 +1126,7 @@ mod tests { .and(Condition::BodyContainsCaseInsensitive(Box::new( *b"INSERT INTO system.", ))), - // We simulate the write failure error that a Scylla node would respond with anyway. + // We simulate the write failure error that a ScyllaDB node would respond with anyway. RequestReaction::forge().write_failure(), )) .chain(generic_drop_queries_rules()), @@ -1814,7 +1814,7 @@ mod tests { unsafe { let mut cluster_raw = cass_cluster_new(); - // An IP with very little chance of having a Scylla node listening + // An IP with very little chance of having a ScyllaDB node listening let ip = "127.0.1.231"; let (c_ip, c_ip_len) = str_to_c_str_n(ip); @@ -1854,7 +1854,7 @@ mod tests { #![rusty_fork(timeout_ms = 1000)] #[test] fn cluster_is_not_referenced_by_session_connect_future() { - // An IP with very little chance of having a Scylla node listening + // An IP with very little chance of having a ScyllaDB node listening let ip = "127.0.1.231"; let (c_ip, c_ip_len) = str_to_c_str_n(ip); let profile_name = make_c_str!("latency_aware"); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 73a9fde5a..09c618467 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,11 +17,6 @@ else() list(APPEND SOURCES ssl/ssl_no_impl.cpp) endif() -if(CASS_USE_KERBEROS) - list(APPEND INCLUDE_DIRS gssapi) - list(APPEND SOURCES gssapi/dse_auth_gssapi.cpp gssapi/dse_auth_gssapi.hpp) -endif() - # Determine atomic library to include if(CASS_USE_BOOST_ATOMIC) list(APPEND SOURCES atomic/atomic_boost.hpp) @@ -122,9 +117,7 @@ endif() set(HAVE_BOOST_ATOMIC ${CASS_USE_BOOST_ATOMIC}) set(HAVE_STD_ATOMIC ${CASS_USE_STD_ATOMIC}) -set(HAVE_KERBEROS ${CASS_USE_KERBEROS}) set(HAVE_OPENSSL ${CASS_USE_OPENSSL}) -set(HAVE_ZLIB ${CASS_USE_ZLIB}) # Generate the driver_config.hpp file configure_file( diff --git a/src/driver_config.hpp b/src/driver_config.hpp index ef52d2da9..c39eeac06 100644 --- a/src/driver_config.hpp +++ b/src/driver_config.hpp @@ -1,7 +1,6 @@ #ifndef DATASTAX_INTERNAL_DRIVER_CONFIG_HPP #define DATASTAX_INTERNAL_DRIVER_CONFIG_HPP -#define HAVE_KERBEROS #define HAVE_OPENSSL #define HAVE_STD_ATOMIC #define CASS_CPP_STANDARD 11 @@ -14,6 +13,5 @@ /* #undef HAVE_ARC4RANDOM */ #define HAVE_GETRANDOM #define HAVE_TIMERFD -#define HAVE_ZLIB #endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9f5288588..15030aff5 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -167,15 +167,6 @@ set(COMMON_INTEGRATION_TEST_SOURCE_FILES ${INTEGRATION_TESTS_SOURCE_FILES} #------------------------ if(CASS_BUILD_INTEGRATION_TESTS) - configure_file(embedded-ads.jar ${CMAKE_BINARY_DIR} COPYONLY) - if(WIN32) - # Copy the embedded ADS to additional locations for use with IDE - if(NOT EXISTS ${CMAKE_BINARY_DIR}/tests) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/tests) - endif() - configure_file(embedded-ads.jar ${CMAKE_BINARY_DIR}/tests COPYONLY) - endif() - add_subdirectory(src/integration) endif() diff --git a/tests/src/integration/embedded_ads.cpp b/tests/src/integration/embedded_ads.cpp deleted file mode 100644 index f42aad91f..000000000 --- a/tests/src/integration/embedded_ads.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - Copyright (c) DataStax, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -#include "embedded_ads.hpp" - -namespace test { - -// Initialize static variables -uv_process_t EmbeddedADS::process_; -uv_mutex_t EmbeddedADS::mutex_; -std::string EmbeddedADS::configuration_directory_ = ""; -std::string EmbeddedADS::configuration_file_ = ""; -std::string EmbeddedADS::cassandra_keytab_file_ = ""; -std::string EmbeddedADS::dse_keytab_file_ = ""; -std::string EmbeddedADS::dseuser_keytab_file_ = ""; -std::string EmbeddedADS::unknown_keytab_file_ = ""; -std::string EmbeddedADS::bob_keytab_file_ = ""; -std::string EmbeddedADS::bill_keytab_file_ = ""; -std::string EmbeddedADS::charlie_keytab_file_ = ""; -std::string EmbeddedADS::steve_keytab_file_ = ""; -bool EmbeddedADS::is_initialized_ = false; - -} // namespace test diff --git a/tests/src/integration/embedded_ads.hpp b/tests/src/integration/embedded_ads.hpp deleted file mode 100644 index c7e12e102..000000000 --- a/tests/src/integration/embedded_ads.hpp +++ /dev/null @@ -1,645 +0,0 @@ -/* - Copyright (c) DataStax, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/
-
-#ifndef __TEST_EMBEDDED_ADS_HPP__
-#define __TEST_EMBEDDED_ADS_HPP__
-#include "exception.hpp"
-#include "options.hpp"
-#include "test_utils.hpp"
-#include "tlog.hpp"
-
-#include "scoped_lock.hpp"
-
-#include
-#ifdef _WIN32
-#define putenv _putenv
-#endif
-
-#include
-
-// TODO: This should be broken out in the future if required by more than one test (currently
-// Authentication tests)
-
-// Defines for ADS configuration
-#define EMBEDDED_ADS_JAR_FILENAME "embedded-ads.jar"
-#define EMBEDDED_ADS_CONFIGURATION_DIRECTORY "ads_config"
-#define EMBEDDED_ADS_CONFIGURATION_FILE "krb5.conf"
-#define CASSANDRA_KEYTAB_ADS_CONFIGURATION_FILE "cassandra.keytab"
-#define DSE_KEYTAB_ADS_CONFIGURATION_FILE "dse.keytab"
-#define DSE_USER_KEYTAB_ADS_CONFIGURATION_FILE "dseuser.keytab"
-#define UNKNOWN_KEYTAB_ADS_CONFIGURATION_FILE "unknown.keytab"
-#define BILL_KEYTAB_ADS_CONFIGURATION_FILE "bill.keytab"
-#define BOB_KEYTAB_ADS_CONFIGURATION_FILE "bob.keytab"
-#define CHARLIE_KEYTAB_ADS_CONFIGURATION_FILE "charlie.keytab"
-#define STEVE_KEYTAB_ADS_CONFIGURATION_FILE "steve.keytab"
-#define REALM "DATASTAX.COM"
-#define DSE_SERVICE_PRINCIPAL "dse/_HOST@DATASTAX.COM"
-#define CASSANDRA_USER "cassandra"
-#define CASSANDRA_PASSWORD "cassandra"
-#define CASSANDRA_USER_PRINCIPAL "cassandra@DATASTAX.COM"
-#define DSE_USER "dseuser"
-#define DSE_USER_PRINCIPAL "dseuser@DATASTAX.COM"
-#define UNKNOWN "unknown"
-#define UNKNOWN_PRINCIPAL "unknown@DATASTAX.COM"
-#define BILL_PRINCIPAL "bill@DATASTAX.COM"
-#define BOB_PRINCIPAL "bob@DATASTAX.COM"
-#define CHARLIE_PRINCIPAL "charlie@DATASTAX.COM"
-#define STEVE_PRINCIPAL "steve@DATASTAX.COM"
-
-// Output buffer size for spawn pipe(s)
-#define OUTPUT_BUFFER_SIZE 10240
-
-namespace test {
-
-/**
- * Embedded ADS for easily authenticating with DSE using Kerberos
- */
-class EmbeddedADS {
-  /**
-   * Result for command execution
-   */
-  typedef struct CommandResult_ {
-    /**
-     * Error code (e.g. exit status)
-     */
-    int error_code;
-    /**
-     * Standard output from executing command
-     */
-    std::string standard_output;
-    /**
-     * Standard error from executing command
-     */
-    std::string standard_error;
-
-    CommandResult_()
-        : error_code(-1) {}
-  } CommandResult;
-
-public:
-  /**
-   * @throws EmbeddedADS::Exception If applications are not available to operate the ADS
-   * properly
-   */
-  EmbeddedADS() {
-    // TODO: Update test to work with remote deployments
-#ifdef _WIN32
-    // Unable to execute ADS locally and use remote DSE cluster
-    throw Exception("ADS Server will not be Created: Must run locally with DSE cluster");
-#endif
-#ifdef CASS_USE_LIBSSH2
-    if (Options::deployment_type() == CCM::DeploymentType::REMOTE) {
-      throw Exception("ADS Server will not be Created: Must run locally with DSE cluster");
-    }
-#endif
-
-    // Initialize the mutex
-    uv_mutex_init(&mutex_);
-
-    // Check to see if all applications and files are available for ADS
-    bool is_useable = true;
-    std::string message;
-    if (!is_java_available()) {
-      is_useable = false;
-      message += "Java";
-    }
-    if (!is_kerberos_client_available()) {
-      is_useable = false;
-      if (!message.empty()) {
-        message += " and ";
-      }
-      message += "Kerberos clients (kinit/kdestroy)";
-    }
-    if (!Utils::file_exists(EMBEDDED_ADS_JAR_FILENAME)) {
-      is_useable = false;
-      if (!message.empty()) {
-        message += " and ";
-      }
-      message += "embedded ADS JAR file";
-    }
-
-    if (!is_useable) {
-      message = "Unable to Create ADS Server: Missing " + message;
-      throw Exception(message);
-    }
-  }
-
-  ~EmbeddedADS() {
-    terminate_process();
-    uv_mutex_destroy(&mutex_);
-  }
-
-  /**
-   * Start the ADS process
-   */
-  void start_process() { uv_thread_create(&thread_, EmbeddedADS::process_start, NULL); }
-
-  /**
-   * Terminate the ADS process
-   */
-  void terminate_process() {
-    uv_process_kill(&process_, SIGTERM);
-    uv_thread_join(&thread_);
-
-    // Reset the static variables
-    configuration_directory_ = "";
-    configuration_file_ = "";
-    cassandra_keytab_file_ = "";
-    dse_keytab_file_ = "";
-    dseuser_keytab_file_ = "";
-    unknown_keytab_file_ = "";
-    bill_keytab_file_ = "";
-    bob_keytab_file_ = "";
-    charlie_keytab_file_ = "";
-    steve_keytab_file_ = "";
-    is_initialized_ = false;
-  }
-
-  /**
-   * Flag to determine if the ADS process is fully initialized
-   *
-   * @return True is ADS is initialized; false otherwise
-   */
-  static bool is_initialized() {
-    datastax::internal::ScopedMutex lock(&mutex_);
-    return is_initialized_;
-  }
-
-  /**
-   * Get the configuration director being used by the ADS process
-   *
-   * @return Absolute path to the ADS configuration directory; empty string
-   * indicates ADS was not started properly
-   */
-  static std::string get_configuration_directory() { return configuration_directory_; }
-
-  /**
-   * Get the configuration file being used by the ADS process
-   *
-   * @return Absolute path to the ADS configuration file; empty string indicates
-   * ADS was not started properly
-   */
-  static std::string get_configuration_file() { return configuration_file_; }
-
-  /**
-   * Get the Cassandra keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the Cassandra keytab configuration file; empty
-   * string indicates ADS was not started properly
-   */
-  static std::string get_cassandra_keytab_file() { return cassandra_keytab_file_; }
-
-  /**
-   * Get the DSE keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the DSE keytab configuration file; empty
-   * string indicates ADS was not started properly
-   */
-  static std::string get_dse_keytab_file() { return dse_keytab_file_; }
-
-  /**
-   * Get the DSE user keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the DSE user keytab configuration file; empty
-   * string indicates ADS was not started properly
-   */
-  static std::string get_dseuser_keytab_file() { return dseuser_keytab_file_; }
-
-  /**
-   * Get the unknown keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the unknown keytab configuration file; empty
-   * string indicates ADS was not started properly
-   */
-  static std::string get_unknown_keytab_file() { return unknown_keytab_file_; }
-
-  /**
-   * Get the Bill keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the Bill keytab configuration file; empty string
-   * indicates ADS was not started properly
-   */
-  static std::string get_bill_keytab_file() { return bill_keytab_file_; }
-
-  /**
-   * Get the Bob keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the Bob keytab configuration file; empty string
-   * indicates ADS was not started properly
-   */
-  static std::string get_bob_keytab_file() { return bob_keytab_file_; }
-
-  /**
-   * Get the Charlie keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the Charlie keytab configuration file; empty
-   * string indicates ADS was not started properly
-   */
-  static std::string get_charlie_keytab_file() { return charlie_keytab_file_; }
-
-  /**
-   * Get the Steve keytab configuration file being used by the ADS process
-   *
-   * @return Absolute path to the Steve keytab configuration file; empty string
-   * string indicates ADS was not started properly
-   */
-  static std::string get_steve_keytab_file() { return steve_keytab_file_; }
-
-  /**
-   * Check to see if the Kerberos client binaries are Heimdal
-   *
-   * @return True if Kerberos implementation is Heimdal; false otherwise
-   */
-  static bool is_kerberos_client_heimdal() {
-    if (is_kerberos_client_available()) {
-      // kinit
-      char* kinit_args[3];
-      kinit_args[0] = const_cast<char*>("kinit");
-      kinit_args[1] = const_cast<char*>("--version");
-      kinit_args[2] = NULL;
-
-      // Check the output of the kinit command for Heimdal
-      CommandResult result = execute_command(kinit_args);
-      if (result.error_code == 0) {
-        // Check both outputs
-        bool is_in_standard_output = Utils::contains(result.standard_output, "Heimdal");
-        bool is_in_standard_error = Utils::contains(result.standard_error, "Heimdal");
-        return (is_in_standard_output || is_in_standard_error);
-      }
-    }
-
-    return false;
-  }
-
-  /**
-   * Acquire a ticket into the cache of the ADS for a given principal and keytab
-   * file
-   *
-   * @param principal Principal identity
-   * @param keytab_file Filename of keytab to use
-   */
-  void acquire_ticket(const std::string& principal, const std::string& keytab_file) {
-    char* args[6];
-    args[0] = const_cast<char*>("kinit");
-    args[1] = const_cast<char*>("-k");
-    args[2] = const_cast<char*>("-t");
-    args[3] = const_cast<char*>(keytab_file.c_str());
-    args[4] = const_cast<char*>(principal.c_str());
-    args[5] = NULL;
-    execute_command(args);
-  }
-
-  /**
-   * Destroy all tickets in the cache
-   */
-  void destroy_tickets() {
-    char* args[3];
-    args[0] = const_cast<char*>("kdestroy");
-    args[1] = const_cast<char*>("-A");
-    args[2] = NULL;
-    execute_command(args);
-  }
-
-  /**
-   * Assign the Kerberos environment for keytab use
-   *
-   * @param keytab_file Filename of keytab to use
-   */
-  void use_keytab(const std::string& keytab_file) {
-    // MIT Kerberos
-    setenv("KRB5_CLIENT_KTNAME", keytab_file);
-    // Heimdal
-    setenv("KRB5_KTNAME", keytab_file);
-  }
-
-  /**
-   * Clear/Unassign the Kerberos environment for keytab use
-   */
-  void clear_keytab() {
-    // MIT Kerberos
-    setenv("KRB5_CLIENT_KTNAME", "");
-    // Heimdal
-    setenv("KRB5_KTNAME", "");
-  }
-
-private:
-  /**
-   * Thread for the ADS process to execute in
-   */
-  uv_thread_t thread_;
-  /**
-   * Mutex for process piped buffer allocation and reads
-   */
-  static uv_mutex_t mutex_;
-  /**
-   * Information regarding spawned process
-   */
-  static uv_process_t process_;
-  /**
-   * ADS configuration directory
-   */
-  static std::string configuration_directory_;
-  /**
-   * KRB5_CONFIG configuration file
-   */
-  static std::string configuration_file_;
-  /**
-   * Cassandra keytab configuration file
-   */
-  static std::string cassandra_keytab_file_;
-  /**
-   * DSE keytab configuration file
-   */
-  static std::string dse_keytab_file_;
-  /**
-   * DSE user keytab configuration file
-   */
-  static std::string dseuser_keytab_file_;
-  /**
-   * Unknown keytab configuration file
-   */
-  static std::string unknown_keytab_file_;
-  /**
-   * Bill keytab configuration file
-   */
-  static std::string bill_keytab_file_;
-  /**
-   * Bob keytab configuration file
-   */
-  static std::string bob_keytab_file_;
-  /**
-   * Charlie keytab configuration file
-   */
-  static std::string charlie_keytab_file_;
-  /**
-   * Steve keytab configuration file
-   */
-  static std::string steve_keytab_file_;
-  /**
-   * Flag to determine if the ADS process is initialized
-   */
-  static bool is_initialized_;
-
-  /**
-   * Execute a command while supplying the KRB5_CONFIG to the ADS server
-   * configuration file
-   *
-   * @param Process and arguments to execute
-   * @return Error code returned from executing command
-   */
-  static CommandResult execute_command(char* args[]) {
-    // Create the loop
-    uv_loop_t loop;
-    uv_loop_init(&loop);
-    uv_process_options_t options;
-    memset(&options, 0, sizeof(uv_process_options_t));
-
-    // Create the options for reading information from the spawn pipes
-    uv_pipe_t standard_output;
-    uv_pipe_t error_output;
-    uv_pipe_init(&loop, &standard_output, 0);
-    uv_pipe_init(&loop, &error_output, 0);
-    uv_stdio_container_t stdio[3];
-    options.stdio_count = 3;
-    options.stdio = stdio;
-    options.stdio[0].flags = UV_IGNORE;
-    options.stdio[1].flags = static_cast<uv_stdio_flags>(UV_CREATE_PIPE | UV_WRITABLE_PIPE);
-    options.stdio[1].data.stream = (uv_stream_t*)&standard_output;
-    options.stdio[2].flags = static_cast<uv_stdio_flags>(UV_CREATE_PIPE | UV_WRITABLE_PIPE);
-    options.stdio[2].data.stream = (uv_stream_t*)&error_output;
-
-    // Create the options for the process
-    options.args = args;
-    options.exit_cb = EmbeddedADS::process_exit;
-    options.file = args[0];
-
-    // Start the process and process loop (if spawned)
-    CommandResult result;
-    uv_process_t process;
-    result.error_code = uv_spawn(&loop, &process, &options);
-    if (result.error_code == 0) {
-      TEST_LOG("Launched " << args[0] << " with ID " << process_.pid);
-
-      // Configure the storage for the output pipes
-      std::string stdout_message;
-      std::string stderr_message;
-      standard_output.data = &result.standard_output;
-      error_output.data = &result.standard_error;
-
-      // Start the output thread loops
-      uv_read_start(reinterpret_cast<uv_stream_t*>(&standard_output),
-                    EmbeddedADS::output_allocation, EmbeddedADS::process_read);
-      uv_read_start(reinterpret_cast<uv_stream_t*>(&error_output), EmbeddedADS::output_allocation,
-                    EmbeddedADS::process_read);
-
-      // Start the process loop
-      uv_run(&loop, UV_RUN_DEFAULT);
-      uv_loop_close(&loop);
-    }
-    return result;
-  }
-
-  /**
-   * Check to see if Java is available in order to execute the ADS process
-   *
-   * @return True if Java is available; false otherwise
-   */
-  static bool is_java_available() {
-    char* args[3];
-    args[0] = const_cast<char*>("java");
-    args[1] = const_cast<char*>("-help");
-    args[2] = NULL;
-    return (execute_command(args).error_code == 0);
-  }
-
-  /**
-   * Check to see if the Kerberos client binaries are available in order to
-   * properly execute request for the ADS
-   *
-   * @return True if kinit and kdestroy are available; false otherwise
-   */
-  static bool is_kerberos_client_available() {
-    // kinit
-    char* kinit_args[3];
-    kinit_args[0] = const_cast<char*>("kinit");
-    kinit_args[1] = const_cast<char*>("--help");
-    kinit_args[2] = NULL;
-    bool is_kinit_available = (execute_command(kinit_args).error_code == 0);
-
-    // kdestroy
-    char* kdestroy_args[3];
-    kdestroy_args[0] = const_cast<char*>("kdestroy");
-    kdestroy_args[1] = const_cast<char*>("--help");
-    kdestroy_args[2] = NULL;
-    bool is_kdestroy_available = (execute_command(kdestroy_args).error_code == 0);
-
-    return (is_kinit_available && is_kdestroy_available);
-  }
-
-  /**
-   * uv_thread_create callback for executing the ADS process
-   *
-   * @param arg UNUSED
-   */
-  static void process_start(void* arg) {
-    // Create the configuration directory for the ADS
-    Utils::mkdir(EMBEDDED_ADS_CONFIGURATION_DIRECTORY);
-
-    // Initialize the loop and process arguments
-    uv_loop_t loop;
-    uv_loop_init(&loop);
-    uv_process_options_t options;
-    memset(&options, 0, sizeof(uv_process_options_t));
-
-    char* args[7];
-    args[0] = const_cast<char*>("java");
-    args[1] = const_cast<char*>("-jar");
-    args[2] = const_cast<char*>(EMBEDDED_ADS_JAR_FILENAME);
-    args[3] = const_cast<char*>("-k");
-    args[4] = const_cast<char*>("--confdir");
-    args[5] = const_cast<char*>(EMBEDDED_ADS_CONFIGURATION_DIRECTORY);
-    args[6] = NULL;
-
-    // Create the options for reading information from the spawn pipes
-    uv_pipe_t standard_output;
-    uv_pipe_t error_output;
-    uv_pipe_init(&loop, &standard_output, 0);
-    uv_pipe_init(&loop, &error_output, 0);
-    uv_stdio_container_t stdio[3];
-    options.stdio_count = 3;
-    options.stdio = stdio;
-    options.stdio[0].flags = UV_IGNORE;
-    options.stdio[1].flags = static_cast<uv_stdio_flags>(UV_CREATE_PIPE | UV_WRITABLE_PIPE);
-    options.stdio[1].data.stream = (uv_stream_t*)&standard_output;
-    options.stdio[2].flags = static_cast<uv_stdio_flags>(UV_CREATE_PIPE | UV_WRITABLE_PIPE);
-    options.stdio[2].data.stream = (uv_stream_t*)&error_output;
-
-    // Create the options for the process
-    options.args = args;
-    options.exit_cb = EmbeddedADS::process_exit;
-    options.file = args[0];
-
-    // Start the process
-    int error_code = uv_spawn(&loop, &process_, &options);
-    if (error_code == 0) {
-      TEST_LOG("Launched " << args[0] << " with ID " << process_.pid);
-
-      // Configure the storage for the output pipes
-      std::string stdout_message;
-      std::string stderr_message;
-      standard_output.data = &stdout_message;
-      error_output.data = &stderr_message;
-
-      // Start the output thread loops
-      uv_read_start(reinterpret_cast<uv_stream_t*>(&standard_output),
-                    EmbeddedADS::output_allocation, EmbeddedADS::process_read);
-      uv_read_start(reinterpret_cast<uv_stream_t*>(&error_output), EmbeddedADS::output_allocation,
-                    EmbeddedADS::process_read);
-
-      // Indicate the ADS configurations
-      configuration_directory_ = Utils::cwd() + Utils::PATH_SEPARATOR +
-                                 EMBEDDED_ADS_CONFIGURATION_DIRECTORY + Utils::PATH_SEPARATOR;
-      configuration_file_ = configuration_directory_ + EMBEDDED_ADS_CONFIGURATION_FILE;
-      cassandra_keytab_file_ = configuration_directory_ + CASSANDRA_KEYTAB_ADS_CONFIGURATION_FILE;
-      dse_keytab_file_ = configuration_directory_ + DSE_KEYTAB_ADS_CONFIGURATION_FILE;
-      dseuser_keytab_file_ = configuration_directory_ + DSE_USER_KEYTAB_ADS_CONFIGURATION_FILE;
-      unknown_keytab_file_ = configuration_directory_ + UNKNOWN_KEYTAB_ADS_CONFIGURATION_FILE;
-      bill_keytab_file_ = configuration_directory_ + BILL_KEYTAB_ADS_CONFIGURATION_FILE;
-      bob_keytab_file_ = configuration_directory_ + BOB_KEYTAB_ADS_CONFIGURATION_FILE;
-      charlie_keytab_file_ = configuration_directory_ + CHARLIE_KEYTAB_ADS_CONFIGURATION_FILE;
-      steve_keytab_file_ = configuration_directory_ + STEVE_KEYTAB_ADS_CONFIGURATION_FILE;
-
-      // Inject the configuration environment variable
-      setenv("KRB5_CONFIG", configuration_file_);
-
-      // Start the process loop
-      uv_run(&loop, UV_RUN_DEFAULT);
-      uv_loop_close(&loop);
-    } else {
-      TEST_LOG_ERROR(uv_strerror(error_code));
-    }
-  }
-
-  /**
-   * uv_spawn callback for handling the completion of the process
-   *
-   * @param process Process
-   * @param error_code Error/Exit code
-   * @param term_signal Terminating signal
-   */
-  static void process_exit(uv_process_t* process, int64_t error_code, int term_signal) {
-    datastax::internal::ScopedMutex lock(&mutex_);
-    TEST_LOG("Process " << process->pid << " Terminated: " << error_code);
-    uv_close(reinterpret_cast<uv_handle_t*>(process), NULL);
-  }
-
-  /**
-   * uv_read_start callback for allocating memory for the buffer in the pipe
-   *
-   * @param handle Handle information for the pipe being read
-   * @param suggested_size Suggested size for the buffer
-   * @param buffer Buffer to allocate bytes for
-   */
-  static void output_allocation(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buffer) {
-    datastax::internal::ScopedMutex lock(&mutex_);
-    buffer->base = new char[OUTPUT_BUFFER_SIZE];
-    buffer->len = OUTPUT_BUFFER_SIZE;
-  }
-
-  /**
-   * uv_read_start callback for processing the buffer in the pipe
-   *
-   * @param stream Stream to process (stdout/stderr)
-   * @param buffer_length Length of the buffer
-   * @param buffer Buffer to process
-   */
-  static void process_read(uv_stream_t* stream, ssize_t buffer_length, const uv_buf_t* buffer) {
-    datastax::internal::ScopedMutex lock(&mutex_);
-
-    // Get the pipe message contents
-    std::string* message = reinterpret_cast<std::string*>(stream->data);
-
-    if (buffer_length > 0) {
-      // Process the buffer and determine if the ADS is finished initializing
-      std::string output(buffer->base, buffer_length);
-      message->append(output);
-
-      if (!is_initialized_ &&
-          message->find("Principal Initialization Complete") != std::string::npos) {
-        Utils::msleep(10000); // TODO: Not 100% ready; need to add a better check mechanism
-        is_initialized_ = true;
-      }
-      TEST_LOG(Utils::trim(output));
-    } else if (buffer_length < 0) {
-      uv_close(reinterpret_cast<uv_handle_t*>(stream), NULL);
-    }
-
-    // Clean up the memory allocated
-    delete[] buffer->base;
-  }
-
-  static void setenv(const std::string& name, const std::string& value) {
-#ifdef _WIN32
-    putenv(const_cast<char*>(std::string(name + "=" + value).c_str()));
-#else
-    ::setenv(name.c_str(), value.c_str(), 1);
-#endif
-  }
-};
-
-} // namespace test
-
-#endif // __TEST_EMBEDDED_ADS_HPP__
diff --git a/tests/src/integration/integration.hpp b/tests/src/integration/integration.hpp
index b5cae52c7..2129c7cdc 100644
--- a/tests/src/integration/integration.hpp
+++ b/tests/src/integration/integration.hpp
@@ -85,8 +85,8 @@

 // Currently, we only check for the version if tests are being run against
 // Cassandra cluster. It's because, some of the tests were unnecessarily
-// skipped for Scylla. In the future (once it's needed) we might
-// do some version restrictions for Scylla clusters as well.
+// skipped for ScyllaDB. In the future (once it's needed) we might
+// do some version restrictions for ScyllaDB clusters as well.
 #define SKIP_IF_CASSANDRA_VERSION_LT(version)                     \
   do {                                                            \
     CCM::CassVersion cass_version = this->server_version_;        \
@@ -309,7 +309,7 @@ class Integration : public testing::Test {
   * (DEFAULT: true)
   */
  bool is_beta_protocol_;
-  /** Flag to indicate if tablets should be disabled for Scylla keyspace.
+  /** Flag to indicate if tablets should be disabled for ScyllaDB keyspace.
   * There are some cases where the test logic will fail for tablets keyspace
   * (e.g. when test uses LWT statements).
   * (DEFAULT: false)
@@ -515,10 +515,10 @@ class Integration : public testing::Test {
   std::string generate_contact_points(const std::string& ip_prefix, size_t number_of_nodes);

   /**
-   * Check if Scylla supports a specific feature.
+   * Check if ScyllaDB supports a specific feature.
    *
-   * @param feature Feature to check if supported by Scylla
-   * @return True if Scylla supports the feature; false otherwise
+   * @param feature Feature to check if supported by ScyllaDB
+   * @return True if ScyllaDB supports the feature; false otherwise
    */
   bool scylla_supports_feature(const std::string& feature);

diff --git a/tests/src/integration/tests/test_consistency.cpp b/tests/src/integration/tests/test_consistency.cpp
index 2d159d9c2..667d3723a 100644
--- a/tests/src/integration/tests/test_consistency.cpp
+++ b/tests/src/integration/tests/test_consistency.cpp
@@ -234,7 +234,7 @@ CASSANDRA_INTEGRATION_TEST_F(ConsistencyTwoNodeClusterTests, SimpleEachQuorum) {
   // Handle `EACH_QUORUM` read support; added to C* v3.0.0
   // https://issues.apache.org/jira/browse/CASSANDRA-9602
   //
-  // Scylla supports `EACH_QUORUM` for writes only.
+  // ScyllaDB supports `EACH_QUORUM` for writes only.
   if (!Options::is_scylla() && server_version_ >= "3.0.0") {
     session_.execute(select_);
   } else {
diff --git a/tests/src/integration/tests/test_schema_metadata.cpp b/tests/src/integration/tests/test_schema_metadata.cpp
index 4eff8f3a6..9f6d3630b 100644
--- a/tests/src/integration/tests/test_schema_metadata.cpp
+++ b/tests/src/integration/tests/test_schema_metadata.cpp
@@ -42,7 +42,7 @@ class SchemaMetadataTest : public Integration {
  /*
   * Support for UDF should be manually enabled to successfully execute the below code.
-  * These tests are also disabled for C++ driver. Additionally, Scylla does no support Java language in UDFs.
+  * These tests are also disabled for C++ driver. Additionally, ScyllaDB does not support Java language in UDFs.
   * It seems that the created aggregate and functions are not checked in these tests, so currently it is commented out.
   * session_.execute("CREATE FUNCTION avg_state(state tuple, val int) "
diff --git a/tests/src/integration/tests/test_server_side_failure.cpp b/tests/src/integration/tests/test_server_side_failure.cpp
index d4ce81925..bb92fe291 100644
--- a/tests/src/integration/tests/test_server_side_failure.cpp
+++ b/tests/src/integration/tests/test_server_side_failure.cpp
@@ -80,7 +80,7 @@ CASSANDRA_INTEGRATION_TEST_F(ServerSideFailureTests, Warning) {
   CHECK_FAILURE;
   SKIP_IF_CASSANDRA_VERSION_LT(2.2);
   if (Options::is_scylla()) {
-    SKIP_TEST("Scylla does not emit 'Aggregation query used without partition key' warning");
+    SKIP_TEST("ScyllaDB does not emit 'Aggregation query used without partition key' warning");
   }

   logger_.add_critera("Response from the database contains a warning, "