diff --git a/.clang-format b/.clang-format index 83affd882..530bb3ede 100644 --- a/.clang-format +++ b/.clang-format @@ -1,5 +1,29 @@ -BasedOnStyle: Google -IndentWidth: 4 +--- +BasedOnStyle: LLVM +SortIncludes: false TabWidth: 4 -UseTab: Never -ColumnLimit: 80 +IndentWidth: 4 +ColumnLimit: 120 +AllowShortFunctionsOnASingleLine: false +--- +UseTab: ForIndentation +DerivePointerAlignment: false +PointerAlignment: Right +AlignConsecutiveMacros: true +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AlignAfterOpenBracket: Align +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpacesInAngles: false +SpacesInCStyleCastParentheses: false +SpacesInConditionalStatement: false +AllowShortLambdasOnASingleLine: Inline +AllowShortLoopsOnASingleLine: false +AlwaysBreakTemplateDeclarations: Yes +IncludeBlocks: Regroup +Language: Cpp +AccessModifierOffset: -4 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..7972a53ef --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,12 @@ +# This file contains a list of revisions that should be ignored by git blame. +# These are typically large formatting changes that don't provide meaningful +# information when looking at the history of a line. 
+# +# To use this file, run: +# git config blame.ignoreRevsFile .git-blame-ignore-revs +# +# Or for a one-time use: +# git blame --ignore-revs-file=.git-blame-ignore-revs + +# Large formatting change - add format check for code quality +95bbd61f6d5bf7927ec4b8123fc5b871616e4825 diff --git a/.github/workflows/codequality.yml b/.github/workflows/codequality.yml new file mode 100644 index 000000000..669a88724 --- /dev/null +++ b/.github/workflows/codequality.yml @@ -0,0 +1,63 @@ +name: CodeQuality Check + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + +jobs: + check-draft: + # We run all other jobs on PRs only if they are not draft PR + if: github.event_name != 'pull_request' || github.event.pull_request.draft == false + runs-on: ubuntu-22.04 + steps: + - name: Preliminary checks on CI + run: echo "Event name is ${{ github.event_name }}" + + format-check: + name: Format Check + runs-on: ubuntu-22.04 + needs: check-draft + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install formatting tools + shell: bash + run: | + sudo apt-get update -y -qq + sudo pip3 install 'clang_format==11.0.1' 'black>=24' cmake-format + + - name: List Installed Packages + shell: bash + run: pip3 freeze + + - name: Verify clang-format version + shell: bash + run: | + clang-format --version + clang-format --dump-config + + - name: Verify black version + shell: bash + run: black --version + + - name: Format Check + shell: bash + run: | + make format-check + + - name: Show formatting differences (if any) + if: failure() + shell: bash + run: | + echo "Formatting check failed. 
Here are the differences:" + python3 scripts/format.py --all --check || true \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..7008d7901 --- /dev/null +++ b/Makefile @@ -0,0 +1,4 @@ +format-check: + python3 scripts/format.py --all --check +format-fix: + python3 scripts/format.py --all --fix --noconfirm \ No newline at end of file diff --git a/benchmarks/xypd_benchmarks/proxy_demo.py b/benchmarks/xypd_benchmarks/proxy_demo.py index 76077930d..5e5290a93 100644 --- a/benchmarks/xypd_benchmarks/proxy_demo.py +++ b/benchmarks/xypd_benchmarks/proxy_demo.py @@ -23,8 +23,7 @@ import aiohttp import requests import uvicorn -from fastapi import (APIRouter, Depends, FastAPI, Header, HTTPException, - Request, status) +from fastapi import APIRouter, Depends, FastAPI, Header, HTTPException, Request, status from fastapi.responses import JSONResponse, StreamingResponse AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60) @@ -47,10 +46,8 @@ def __init__( decode_instances: list[str], model: str, scheduling_policy: SchedulingPolicy, - custom_create_completion: Optional[Callable[[Request], - StreamingResponse]] = None, - custom_create_chat_completion: Optional[Callable[ - [Request], StreamingResponse]] = None, + custom_create_completion: Optional[Callable[[Request], StreamingResponse]] = None, + custom_create_chat_completion: Optional[Callable[[Request], StreamingResponse]] = None, ): self.prefill_instances = prefill_instances self.decode_instances = decode_instances @@ -64,31 +61,23 @@ def __init__( self.setup_routes() def setup_routes(self): - self.router.post( - "/v1/completions", - dependencies=[ - Depends(self.validate_json_request) - ])(self.custom_create_completion if self. - custom_create_completion else self.create_completion) - self.router.post( - "/v1/chat/completions", - dependencies=[ - Depends(self.validate_json_request) - ])(self.custom_create_chat_completion if self. 
- custom_create_chat_completion else self.create_chat_completion) - self.router.get("/status", - response_class=JSONResponse)(self.get_status) - self.router.post("/instances/add", - dependencies=[Depends(self.api_key_authenticate) - ])(self.add_instance_endpoint) + self.router.post("/v1/completions", dependencies=[Depends(self.validate_json_request)])( + self.custom_create_completion if self.custom_create_completion else self.create_completion + ) + self.router.post("/v1/chat/completions", dependencies=[Depends(self.validate_json_request)])( + self.custom_create_chat_completion if self.custom_create_chat_completion else self.create_chat_completion + ) + self.router.get("/status", response_class=JSONResponse)(self.get_status) + self.router.post("/instances/add", dependencies=[Depends(self.api_key_authenticate)])( + self.add_instance_endpoint + ) async def validate_json_request(self, raw_request: Request): content_type = raw_request.headers.get("content-type", "").lower() if content_type != "application/json": raise HTTPException( status_code=415, - detail= - "Unsupported Media Type: Only 'application/json' is allowed", + detail="Unsupported Media Type: Only 'application/json' is allowed", ) def api_key_authenticate(self, x_api_key: str = Header(...)): @@ -100,8 +89,7 @@ def api_key_authenticate(self, x_api_key: str = Header(...)): detail="Server configuration error.", ) if x_api_key != expected_api_key: - logger.warning("Unauthorized access attempt with API Key: %s", - x_api_key) + logger.warning("Unauthorized access attempt with API Key: %s", x_api_key) raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="Forbidden: Invalid API Key.", @@ -110,8 +98,7 @@ def api_key_authenticate(self, x_api_key: str = Header(...)): async def validate_instance(self, instance: str) -> bool: url = f"http://{instance}/v1/models" try: - async with aiohttp.ClientSession( - timeout=AIOHTTP_TIMEOUT) as client: + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as 
client: logger.info("Verifying %s ...", instance) async with client.get(url) as response: if response.status == 200: @@ -119,12 +106,10 @@ async def validate_instance(self, instance: str) -> bool: if "data" in data and len(data["data"]) > 0: model_cur = data["data"][0].get("id", "") if model_cur == self.model: - logger.info("Instance: %s could be added.", - instance) + logger.info("Instance: %s could be added.", instance) return True else: - logger.warning("Mismatch model %s : %s != %s", - instance, model_cur, self.model) + logger.warning("Mismatch model %s : %s != %s", instance, model_cur, self.model) return False else: return False @@ -144,48 +129,37 @@ async def add_instance_endpoint(self, request: Request): instance_type = data.get("type") instance = data.get("instance") if instance_type not in ["prefill", "decode"]: - raise HTTPException(status_code=400, - detail="Invalid instance type.") + raise HTTPException(status_code=400, detail="Invalid instance type.") if not instance or ":" not in instance: - raise HTTPException(status_code=400, - detail="Invalid instance format.") + raise HTTPException(status_code=400, detail="Invalid instance format.") host, port_str = instance.split(":") try: if host != "localhost": ipaddress.ip_address(host) port = int(port_str) if not (0 < port < 65536): - raise HTTPException(status_code=400, - detail="Invalid port number.") + raise HTTPException(status_code=400, detail="Invalid port number.") except Exception as e: - raise HTTPException(status_code=400, - detail="Invalid instance address.") from e + raise HTTPException(status_code=400, detail="Invalid instance address.") from e is_valid = await self.validate_instance(instance) if not is_valid: - raise HTTPException(status_code=400, - detail="Instance validation failed.") + raise HTTPException(status_code=400, detail="Instance validation failed.") if instance_type == "prefill": if instance not in self.prefill_instances: self.prefill_instances.append(instance) - self.prefill_cycler 
= itertools.cycle( - self.prefill_instances) + self.prefill_cycler = itertools.cycle(self.prefill_instances) else: - raise HTTPException(status_code=400, - detail="Instance already exists.") + raise HTTPException(status_code=400, detail="Instance already exists.") else: if instance not in self.decode_instances: self.decode_instances.append(instance) self.decode_cycler = itertools.cycle(self.decode_instances) else: - raise HTTPException(status_code=400, - detail="Instance already exists.") + raise HTTPException(status_code=400, detail="Instance already exists.") - return JSONResponse(content={ - "message": - f"Added {instance} to {instance_type}_instances." - }) + return JSONResponse(content={"message": f"Added {instance} to {instance_type}_instances."}) except HTTPException as http_exc: raise http_exc except Exception as e: @@ -194,16 +168,12 @@ async def add_instance_endpoint(self, request: Request): async def forward_request(self, url, data, use_chunked=True): async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: - headers = { - "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}" - } + headers = {"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"} try: - async with session.post(url=url, json=data, - headers=headers) as response: + async with session.post(url=url, json=data, headers=headers) as response: if 200 <= response.status < 300 or 400 <= response.status < 500: # noqa: E501 if use_chunked: - async for chunk_bytes in response.content.iter_chunked( # noqa: E501 - 1024): + async for chunk_bytes in response.content.iter_chunked(1024): # noqa: E501 yield chunk_bytes else: content = await response.read() @@ -214,20 +184,16 @@ async def forward_request(self, url, data, use_chunked=True): error_content = json.loads(error_content) except json.JSONDecodeError: error_content = error_content - logger.error("Request failed with status %s: %s", - response.status, error_content) + logger.error("Request failed with status %s: %s", 
response.status, error_content) raise HTTPException( status_code=response.status, - detail= - f"Request failed with status {response.status}: " - f"{error_content}", + detail=f"Request failed with status {response.status}: " f"{error_content}", ) except aiohttp.ClientError as e: logger.error("ClientError occurred: %s", str(e)) raise HTTPException( status_code=502, - detail= - "Bad Gateway: Error communicating with upstream server.", + detail="Bad Gateway: Error communicating with upstream server.", ) from e except Exception as e: logger.error("Unexpected error: %s", str(e)) @@ -254,9 +220,7 @@ async def create_completion(self, raw_request: Request): prefill_instance = self.schedule(self.prefill_cycler) try: - async for _ in self.forward_request( - f"http://{prefill_instance}/v1/completions", - kv_prepare_request): + async for _ in self.forward_request(f"http://{prefill_instance}/v1/completions", kv_prepare_request): continue except HTTPException as http_exc: self.remove_instance_endpoint("prefill", prefill_instance) @@ -266,8 +230,7 @@ async def create_completion(self, raw_request: Request): decode_instance = self.schedule(self.decode_cycler) try: - generator = self.forward_request( - f"http://{decode_instance}/v1/completions", request) + generator = self.forward_request(f"http://{decode_instance}/v1/completions", request) except HTTPException as http_exc: self.remove_instance_endpoint("decode", decode_instance) raise http_exc @@ -292,8 +255,8 @@ async def create_chat_completion(self, raw_request: Request): prefill_instance = self.schedule(self.prefill_cycler) try: async for _ in self.forward_request( - f"http://{prefill_instance}/v1/chat/completions", - kv_prepare_request): + f"http://{prefill_instance}/v1/chat/completions", kv_prepare_request + ): continue except HTTPException as http_exc: self.remove_instance_endpoint("prefill", prefill_instance) @@ -302,9 +265,7 @@ async def create_chat_completion(self, raw_request: Request): decode_instance = 
self.schedule(self.decode_cycler) try: - generator = self.forward_request( - "http://" + decode_instance + "/v1/chat/completions", - request) + generator = self.forward_request("http://" + decode_instance + "/v1/chat/completions", request) except HTTPException as http_exc: self.remove_instance_endpoint("decode", decode_instance) raise http_exc @@ -315,14 +276,13 @@ async def create_chat_completion(self, raw_request: Request): error_messages = [str(e) for e in exc_info if e] print("Error occurred in disagg proxy server") print(error_messages) - return StreamingResponse(content=iter(error_messages), - media_type="text/event-stream") + return StreamingResponse(content=iter(error_messages), media_type="text/event-stream") def remove_instance_endpoint(self, instance_type, instance): - if (instance_type == "decode" and instance in self.decode_instances): + if instance_type == "decode" and instance in self.decode_instances: self.decode_instances.remove(instance) self.decode_cycler = itertools.cycle(self.decode_instances) - if (instance_type == "prefill" and instance in self.decode_instances): + if instance_type == "prefill" and instance in self.decode_instances: self.prefill_instances.remove(instance) self.prefill_cycler = itertools.cycle(self.decode_instances) @@ -342,10 +302,8 @@ def __init__( self, args: argparse.Namespace, scheduling_policy: Optional[SchedulingPolicy] = None, - create_completion: Optional[Callable[[Request], - StreamingResponse]] = None, - create_chat_completion: Optional[Callable[[Request], - StreamingResponse]] = None, + create_completion: Optional[Callable[[Request], StreamingResponse]] = None, + create_chat_completion: Optional[Callable[[Request], StreamingResponse]] = None, ): self.validate_parsed_serve_args(args) self.port = args.port @@ -353,8 +311,7 @@ def __init__( prefill_instances=[] if args.prefill is None else args.prefill, decode_instances=[] if args.decode is None else args.decode, model=args.model, - 
scheduling_policy=(scheduling_policy if scheduling_policy - is not None else RoundRobinSchedulingPolicy()), + scheduling_policy=(scheduling_policy if scheduling_policy is not None else RoundRobinSchedulingPolicy()), custom_create_completion=create_completion, custom_create_chat_completion=create_chat_completion, ) @@ -379,11 +336,9 @@ def validate_instances(self, instances: list): ipaddress.ip_address(host) port = int(port) if not (0 < port < 65536): - raise ValueError( - f"Invalid port number in instance: {instance}") + raise ValueError(f"Invalid port number in instance: {instance}") except Exception as e: - raise ValueError( - f"Invalid instance {instance}: {str(e)}") from e + raise ValueError(f"Invalid instance {instance}: {str(e)}") from e def verify_model_config(self, instances: list, model: str) -> None: model_suffix = model.split("/")[-1] @@ -394,14 +349,11 @@ def verify_model_config(self, instances: list, model: str) -> None: model_cur = response.json()["data"][0]["id"] model_cur_suffix = model_cur.split("/")[-1] if model_cur_suffix != model_suffix: - raise ValueError( - f"{instance} serves a different model: " - f"{model_cur} != {model}") + raise ValueError(f"{instance} serves a different model: " f"{model_cur} != {model}") else: raise ValueError(f"Cannot get model id from {instance}!") except requests.RequestException as e: - raise ValueError( - f"Error communicating with {instance}: {str(e)}") from e + raise ValueError(f"Error communicating with {instance}: {str(e)}") from e def run_server(self): app = FastAPI() @@ -414,11 +366,7 @@ def run_server(self): if __name__ == "__main__": # Todo: allow more config parser = argparse.ArgumentParser("vLLM disaggregated proxy server.") - parser.add_argument("--model", - "-m", - type=str, - required=True, - help="Model name") + parser.add_argument("--model", "-m", type=str, required=True, help="Model name") parser.add_argument( "--prefill", @@ -444,4 +392,4 @@ def run_server(self): ) args = parser.parse_args() 
proxy_server = ProxyServer(args=args) - proxy_server.run_server() \ No newline at end of file + proxy_server.run_server() diff --git a/benchmarks/xypd_benchmarks/vllm-benchmarks/parse_results.py b/benchmarks/xypd_benchmarks/vllm-benchmarks/parse_results.py index 5d2683939..5a1bb9cba 100644 --- a/benchmarks/xypd_benchmarks/vllm-benchmarks/parse_results.py +++ b/benchmarks/xypd_benchmarks/vllm-benchmarks/parse_results.py @@ -4,21 +4,38 @@ import openpyxl global metrics -metrics = ['request_throughput', 'output_throughput', 'total_token_throughput',\ - 'mean_ttft_ms', 'median_ttft_ms', 'std_ttft_ms', 'p99_ttft_ms',\ - 'mean_tpot_ms', 'median_tpot_ms', 'std_tpot_ms', 'p99_tpot_ms',\ - 'mean_itl_ms', 'median_itl_ms', 'std_itl_ms', 'p99_itl_ms', \ - 'mean_e2el_ms', 'median_e2el_ms', 'std_e2el_ms', 'p99_e2el_ms' ] +metrics = [ + 'request_throughput', + 'output_throughput', + 'total_token_throughput', + 'mean_ttft_ms', + 'median_ttft_ms', + 'std_ttft_ms', + 'p99_ttft_ms', + 'mean_tpot_ms', + 'median_tpot_ms', + 'std_tpot_ms', + 'p99_tpot_ms', + 'mean_itl_ms', + 'median_itl_ms', + 'std_itl_ms', + 'p99_itl_ms', + 'mean_e2el_ms', + 'median_e2el_ms', + 'std_e2el_ms', + 'p99_e2el_ms', +] + def parse_serving_throughput(path: str): - values=[] + values = [] with open(path, 'r') as f: result = json.load(f) for metric in metrics: value = result[metric] values.append(value) return values - + if __name__ == '__main__': if len(sys.argv) != 3: @@ -26,31 +43,31 @@ def parse_serving_throughput(path: str): sys.exit(1) result_path = sys.argv[1] parsed_result_path = sys.argv[2] - col=1 - row=1 + col = 1 + row = 1 workbook = openpyxl.Workbook() sheet = workbook.active - config_names=['num_pserver','num_dserver','input_len','output_len','max_concurrency'] + config_names = ['num_pserver', 'num_dserver', 'input_len', 'output_len', 'max_concurrency'] for con in config_names: - sheet.cell(row,col,con) + sheet.cell(row, col, con) row += 1 for metric in metrics: - sheet.cell(row,col,metric) + 
sheet.cell(row, col, metric) row += 1 files = os.listdir(result_path) files.sort() for file in files: if file.endswith("json"): - configs=file.split('-') + configs = file.split('-') col += 1 - sheet.cell(1,col,configs[1]) - sheet.cell(2,col,configs[2]) - sheet.cell(3,col,configs[4]) - sheet.cell(4,col,configs[6]) - sheet.cell(5,col,configs[8]) - results=parse_serving_throughput(os.path.join(result_path,file)) - row=5 + sheet.cell(1, col, configs[1]) + sheet.cell(2, col, configs[2]) + sheet.cell(3, col, configs[4]) + sheet.cell(4, col, configs[6]) + sheet.cell(5, col, configs[8]) + results = parse_serving_throughput(os.path.join(result_path, file)) + row = 5 for result in results: - row+=1 - sheet.cell(row,col,result) - workbook.save(parsed_result_path) \ No newline at end of file + row += 1 + sheet.cell(row, col, result) + workbook.save(parsed_result_path) diff --git a/mooncake-common/CMakeLists.txt b/mooncake-common/CMakeLists.txt index 6448456be..484d691af 100644 --- a/mooncake-common/CMakeLists.txt +++ b/mooncake-common/CMakeLists.txt @@ -1,3 +1,3 @@ -if ((USE_ETCD AND NOT USE_ETCD_LEGACY) OR STORE_USE_ETCD) - add_subdirectory(etcd) -endif() \ No newline at end of file +if((USE_ETCD AND NOT USE_ETCD_LEGACY) OR STORE_USE_ETCD) + add_subdirectory(etcd) +endif() diff --git a/mooncake-common/etcd/CMakeLists.txt b/mooncake-common/etcd/CMakeLists.txt index aa3c6de1c..3982cc39b 100644 --- a/mooncake-common/etcd/CMakeLists.txt +++ b/mooncake-common/etcd/CMakeLists.txt @@ -1,20 +1,17 @@ add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.so - COMMAND bash -c "go mod tidy" && bash -c "go build -buildmode=c-shared -o ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.so etcd_wrapper.go" && cp ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.h ${CMAKE_CURRENT_SOURCE_DIR} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Building Go shared library" - DEPENDS etcd_wrapper.go -) + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.so + COMMAND + bash -c "go 
mod tidy" && bash -c + "go build -buildmode=c-shared -o ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.so etcd_wrapper.go" + && cp ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.h + ${CMAKE_CURRENT_SOURCE_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Building Go shared library" + DEPENDS etcd_wrapper.go) set(ETCD_WRAPPER_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.h) set(ETCD_WRAPPER_LIB ${CMAKE_CURRENT_BINARY_DIR}/libetcd_wrapper.so) -add_custom_target( - build_etcd_wrapper - DEPENDS ${ETCD_WRAPPER_LIB} -) +add_custom_target(build_etcd_wrapper DEPENDS ${ETCD_WRAPPER_LIB}) -install( - FILES ${ETCD_WRAPPER_LIB} - DESTINATION lib -) \ No newline at end of file +install(FILES ${ETCD_WRAPPER_LIB} DESTINATION lib) diff --git a/mooncake-integration/CMakeLists.txt b/mooncake-integration/CMakeLists.txt index dbca66119..7a80e9011 100644 --- a/mooncake-integration/CMakeLists.txt +++ b/mooncake-integration/CMakeLists.txt @@ -1,26 +1,27 @@ file(GLOB SOURCES "*.cpp") set(PYTHON_EXECUTABLE "python3") execute_process( - COMMAND ${PYTHON_EXECUTABLE} -c "import sys; print([s for s in sys.path if 'packages' in s][0])" - OUTPUT_VARIABLE PYTHON_SYS_PATH -) + COMMAND ${PYTHON_EXECUTABLE} -c + "import sys; print([s for s in sys.path if 'packages' in s][0])" + OUTPUT_VARIABLE PYTHON_SYS_PATH) string(STRIP ${PYTHON_SYS_PATH} PYTHON_SYS_PATH) if("${PYTHON_SYS_PATH}" STREQUAL "") - message(FATAL_ERROR "Python path is empty! Please check the python env.") + message(FATAL_ERROR "Python path is empty! 
Please check the python env.") endif() -if (WITH_STORE) - include_directories("../mooncake-store/include") - include_directories("../mooncake-store/include/cachelib_memory_allocator") +if(WITH_STORE) + include_directories("../mooncake-store/include") + include_directories("../mooncake-store/include/cachelib_memory_allocator") - include_directories("../mooncake-store/include/cachelib_memory_allocator/include") - include_directories("../mooncake-store/include/cachelib_memory_allocator/fake_include") + include_directories( + "../mooncake-store/include/cachelib_memory_allocator/include") + include_directories( + "../mooncake-store/include/cachelib_memory_allocator/fake_include") endif() include_directories("/usr/include/jsoncpp") - set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) @@ -29,67 +30,54 @@ message("${PYTHON_SYS_PATH}") set(PYTHON_PACKAGE_NAME "mooncake") pybind11_add_module(engine ${SOURCES} ${CACHE_ALLOCATOR_SOURCES} - transfer_engine/transfer_engine_py.cpp -) + transfer_engine/transfer_engine_py.cpp) -target_link_libraries(engine PUBLIC - transfer_engine - glog::glog - gflags::gflags -) +target_link_libraries(engine PUBLIC transfer_engine glog::glog gflags::gflags) -set(ALLOCATOR_SO_PATH "${CMAKE_BINARY_DIR}/mooncake-transfer-engine/nvlink-allocator/nvlink_allocator.so") +set(ALLOCATOR_SO_PATH + "${CMAKE_BINARY_DIR}/mooncake-transfer-engine/nvlink-allocator/nvlink_allocator.so" +) if(USE_MNNVL) - message(STATUS "USE_MNNVL is enabled, nvlink_allocator.so will be installed in the Python package") - install(FILES - "${ALLOCATOR_SO_PATH}" - DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME} - ) + message( + STATUS + "USE_MNNVL is enabled, nvlink_allocator.so will be installed in the Python package" + ) + install(FILES "${ALLOCATOR_SO_PATH}" + DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) endif() -if (WITH_STORE) - pybind11_add_module(store ${SOURCES} ${CACHE_ALLOCATOR_SOURCES} - store/store_py.cpp - ) - 
target_link_libraries(store PUBLIC - transfer_engine - glog::glog - gflags::gflags - mooncake_store - cachelib_memory_allocator - ) +if(WITH_STORE) + pybind11_add_module(store ${SOURCES} ${CACHE_ALLOCATOR_SOURCES} + store/store_py.cpp) + target_link_libraries(store PUBLIC transfer_engine glog::glog gflags::gflags + mooncake_store cachelib_memory_allocator) endif() message("${PYTHON_SYS_PATH}") file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${PYTHON_PACKAGE_NAME}/__init__.py - "# Auto-generated by CMake\n" -) + "# Auto-generated by CMake\n") -if (USE_MNNVL) - message(STATUS "USE_MNNVL is enabled, allocator.py will be installed in the Python package") - install(FILES - "${CMAKE_CURRENT_SOURCE_DIR}/allocator.py" - DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME} - ) +if(USE_MNNVL) + message( + STATUS + "USE_MNNVL is enabled, allocator.py will be installed in the Python package" + ) + install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/allocator.py" + DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) endif() -install( - DIRECTORY - ${CMAKE_CURRENT_BINARY_DIR}/${PYTHON_PACKAGE_NAME}/ - DESTINATION - ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME} -) +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${PYTHON_PACKAGE_NAME}/ + DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) install( - CODE " + CODE " execute_process(COMMAND chmod 766 \"${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}\") execute_process(COMMAND chmod 766 \"${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}/__init__.py\") - " -) + ") -if (WITH_STORE) - install(TARGETS store DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) +if(WITH_STORE) + install(TARGETS store DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) endif() install(TARGETS engine DESTINATION ${PYTHON_SYS_PATH}/${PYTHON_PACKAGE_NAME}) diff --git a/mooncake-integration/allocator.py b/mooncake-integration/allocator.py index 3c17c3aef..72e9f711f 100644 --- a/mooncake-integration/allocator.py +++ b/mooncake-integration/allocator.py @@ -31,16 +31,12 @@ def 
_get_so_path(cls) -> str: if os.path.exists(so_path): return so_path except (ImportError, FileNotFoundError, TypeError): - raise ImportError( - "SGLANG_MOONCAKE_CUSTOM_MEM_POOL require mooncake-transfer-engine >= 0.3.3.post2." - ) + raise ImportError("SGLANG_MOONCAKE_CUSTOM_MEM_POOL require mooncake-transfer-engine >= 0.3.3.post2.") @classmethod def get_allocator(cls, device: torch_device) -> CUDAPluggableAllocator: with cls._lock: if device not in cls._instances: so_path = cls._get_so_path() - cls._instances[device] = CUDAPluggableAllocator( - so_path, "mc_nvlink_malloc", "mc_nvlink_free" - ) + cls._instances[device] = CUDAPluggableAllocator(so_path, "mc_nvlink_malloc", "mc_nvlink_free") return cls._instances[device] diff --git a/mooncake-integration/store/store_py.cpp b/mooncake-integration/store/store_py.cpp index ac3282bb3..83246ce55 100644 --- a/mooncake-integration/store/store_py.cpp +++ b/mooncake-integration/store/store_py.cpp @@ -1,12 +1,12 @@ #include "store_py.h" #include -#include // For GIL management +#include // For GIL management #include #include #include -#include // for atexit +#include // for atexit #include #include "types.h" @@ -18,1608 +18,1506 @@ namespace mooncake { // RAII container that automatically frees slices on destruction class SliceGuard { - public: - explicit SliceGuard(DistributedObjectStore &store) : store_(store) {} - - ~SliceGuard() { store_.freeSlices(slices_); } - - // Prevent copying - SliceGuard(const SliceGuard &) = delete; - SliceGuard &operator=(const SliceGuard &) = delete; - - // Access the underlying slices - std::vector &slices() { return slices_; } - const std::vector &slices() const { return slices_; } - - private: - DistributedObjectStore &store_; - std::vector slices_; +public: + explicit SliceGuard(DistributedObjectStore &store) : store_(store) { + } + + ~SliceGuard() { + store_.freeSlices(slices_); + } + + // Prevent copying + SliceGuard(const SliceGuard &) = delete; + SliceGuard &operator=(const SliceGuard &) 
= delete; + + // Access the underlying slices + std::vector &slices() { + return slices_; + } + const std::vector &slices() const { + return slices_; + } + +private: + DistributedObjectStore &store_; + std::vector slices_; }; // ResourceTracker implementation using singleton pattern ResourceTracker &ResourceTracker::getInstance() { - static ResourceTracker instance; - return instance; + static ResourceTracker instance; + return instance; } ResourceTracker::ResourceTracker() { - // Set up signal handlers - struct sigaction sa; - sa.sa_handler = signalHandler; - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; - - // Register for common termination signals - sigaction(SIGINT, &sa, nullptr); // Ctrl+C - sigaction(SIGTERM, &sa, nullptr); // kill command - sigaction(SIGHUP, &sa, nullptr); // Terminal closed - - // Register exit handler - std::atexit(exitHandler); + // Set up signal handlers + struct sigaction sa; + sa.sa_handler = signalHandler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; + + // Register for common termination signals + sigaction(SIGINT, &sa, nullptr); // Ctrl+C + sigaction(SIGTERM, &sa, nullptr); // kill command + sigaction(SIGHUP, &sa, nullptr); // Terminal closed + + // Register exit handler + std::atexit(exitHandler); } ResourceTracker::~ResourceTracker() { - // Cleanup is handled by exitHandler + // Cleanup is handled by exitHandler } void ResourceTracker::registerInstance(DistributedObjectStore *instance) { - std::lock_guard lock(mutex_); - instances_.insert(instance); + std::lock_guard lock(mutex_); + instances_.insert(instance); } void ResourceTracker::unregisterInstance(DistributedObjectStore *instance) { - std::lock_guard lock(mutex_); - instances_.erase(instance); + std::lock_guard lock(mutex_); + instances_.erase(instance); } void ResourceTracker::cleanupAllResources() { - std::lock_guard lock(mutex_); - - // Perform cleanup outside the lock to avoid potential deadlocks - for (void *instance : instances_) { - DistributedObjectStore *store = - 
static_cast(instance); - if (store) { - LOG(INFO) << "Cleaning up DistributedObjectStore instance"; - store->tearDownAll(); - } - } + std::lock_guard lock(mutex_); + + // Perform cleanup outside the lock to avoid potential deadlocks + for (void *instance : instances_) { + DistributedObjectStore *store = static_cast(instance); + if (store) { + LOG(INFO) << "Cleaning up DistributedObjectStore instance"; + store->tearDownAll(); + } + } } void ResourceTracker::signalHandler(int signal) { - LOG(INFO) << "Received signal " << signal << ", cleaning up resources"; - getInstance().cleanupAllResources(); - - // Re-raise the signal with default handler to allow normal termination - struct sigaction sa; - sa.sa_handler = SIG_DFL; - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; - sigaction(signal, &sa, nullptr); - raise(signal); + LOG(INFO) << "Received signal " << signal << ", cleaning up resources"; + getInstance().cleanupAllResources(); + + // Re-raise the signal with default handler to allow normal termination + struct sigaction sa; + sa.sa_handler = SIG_DFL; + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; + sigaction(signal, &sa, nullptr); + raise(signal); } -void ResourceTracker::exitHandler() { getInstance().cleanupAllResources(); } +void ResourceTracker::exitHandler() { + getInstance().cleanupAllResources(); +} static bool isPortAvailable(int port) { - int sock = socket(AF_INET, SOCK_STREAM, 0); - if (sock < 0) return false; - - int opt = 1; - setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); - - struct sockaddr_in addr; - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = INADDR_ANY; - addr.sin_port = htons(port); - - bool available = (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) == 0); - close(sock); - return available; + int sock = socket(AF_INET, SOCK_STREAM, 0); + if (sock < 0) + return false; + + int opt = 1; + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); + + struct sockaddr_in addr; + 
memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = INADDR_ANY; + addr.sin_port = htons(port); + + bool available = (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) == 0); + close(sock); + return available; } // Get a random available port between min_port and max_port static int getRandomAvailablePort(int min_port = 12300, int max_port = 14300) { - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution<> dis(min_port, max_port); - - for (int attempts = 0; attempts < 10; attempts++) { - int port = dis(gen); - if (isPortAvailable(port)) { - return port; - } - } - return -1; // Failed to find available port + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(min_port, max_port); + + for (int attempts = 0; attempts < 10; attempts++) { + int port = dis(gen); + if (isPortAvailable(port)) { + return port; + } + } + return -1; // Failed to find available port } DistributedObjectStore::DistributedObjectStore() { - // Register this instance with the global tracker - easylog::set_min_severity(easylog::Severity::WARN); - ResourceTracker::getInstance().registerInstance(this); + // Register this instance with the global tracker + easylog::set_min_severity(easylog::Severity::WARN); + ResourceTracker::getInstance().registerInstance(this); } DistributedObjectStore::~DistributedObjectStore() { - // Unregister from the tracker before cleanup - ResourceTracker::getInstance().unregisterInstance(this); + // Unregister from the tracker before cleanup + ResourceTracker::getInstance().unregisterInstance(this); } -int DistributedObjectStore::setup(const std::string &local_hostname, - const std::string &metadata_server, - size_t global_segment_size, - size_t local_buffer_size, - const std::string &protocol, - const std::string &rdma_devices, - const std::string &master_server_addr) { - this->protocol = protocol; - - // Remove port if hostname already contains one - std::string hostname = 
local_hostname; - size_t colon_pos = hostname.find(":"); - if (colon_pos == std::string::npos) { - // Get a random available port - int port = getRandomAvailablePort(); - if (port < 0) { - LOG(ERROR) << "Failed to find available port"; - return 1; - } - // Combine hostname with port - this->local_hostname = hostname + ":" + std::to_string(port); - } else { - this->local_hostname = local_hostname; - } - - void **args = (protocol == "rdma") ? rdma_args(rdma_devices) : nullptr; - auto client_opt = - mooncake::Client::Create(this->local_hostname, metadata_server, - protocol, args, master_server_addr); - if (!client_opt) { - LOG(ERROR) << "Failed to create client"; - return 1; - } - client_ = *client_opt; - - client_buffer_allocator_ = - std::make_unique(local_buffer_size); - auto result = client_->RegisterLocalMemory( - client_buffer_allocator_->getBase(), local_buffer_size, - kWildcardLocation, false, true); - if (!result.has_value()) { - LOG(ERROR) << "Failed to register local memory: " - << toString(result.error()); - return 1; - } - // Skip mount segment if global_segment_size is 0 - if (global_segment_size == 0) { - return 0; - } - void *ptr = allocate_buffer_allocator_memory(global_segment_size); - if (!ptr) { - LOG(ERROR) << "Failed to allocate segment memory"; - return 1; - } - segment_ptr_.reset(ptr); - auto mount_result = - client_->MountSegment(segment_ptr_.get(), global_segment_size); - if (!mount_result.has_value()) { - LOG(ERROR) << "Failed to mount segment: " - << toString(mount_result.error()); - return 1; - } - - return 0; +int DistributedObjectStore::setup(const std::string &local_hostname, const std::string &metadata_server, + size_t global_segment_size, size_t local_buffer_size, const std::string &protocol, + const std::string &rdma_devices, const std::string &master_server_addr) { + this->protocol = protocol; + + // Remove port if hostname already contains one + std::string hostname = local_hostname; + size_t colon_pos = hostname.find(":"); + if 
(colon_pos == std::string::npos) { + // Get a random available port + int port = getRandomAvailablePort(); + if (port < 0) { + LOG(ERROR) << "Failed to find available port"; + return 1; + } + // Combine hostname with port + this->local_hostname = hostname + ":" + std::to_string(port); + } else { + this->local_hostname = local_hostname; + } + + void **args = (protocol == "rdma") ? rdma_args(rdma_devices) : nullptr; + auto client_opt = + mooncake::Client::Create(this->local_hostname, metadata_server, protocol, args, master_server_addr); + if (!client_opt) { + LOG(ERROR) << "Failed to create client"; + return 1; + } + client_ = *client_opt; + + client_buffer_allocator_ = std::make_unique(local_buffer_size); + auto result = client_->RegisterLocalMemory(client_buffer_allocator_->getBase(), local_buffer_size, + kWildcardLocation, false, true); + if (!result.has_value()) { + LOG(ERROR) << "Failed to register local memory: " << toString(result.error()); + return 1; + } + // Skip mount segment if global_segment_size is 0 + if (global_segment_size == 0) { + return 0; + } + void *ptr = allocate_buffer_allocator_memory(global_segment_size); + if (!ptr) { + LOG(ERROR) << "Failed to allocate segment memory"; + return 1; + } + segment_ptr_.reset(ptr); + auto mount_result = client_->MountSegment(segment_ptr_.get(), global_segment_size); + if (!mount_result.has_value()) { + LOG(ERROR) << "Failed to mount segment: " << toString(mount_result.error()); + return 1; + } + + return 0; } -int DistributedObjectStore::initAll(const std::string &protocol_, - const std::string &device_name, +int DistributedObjectStore::initAll(const std::string &protocol_, const std::string &device_name, size_t mount_segment_size) { - if (client_) { - LOG(ERROR) << "Client is already initialized"; - return 1; - } - uint64_t buffer_allocator_size = 1024 * 1024 * 1024; - return setup("localhost:12345", "127.0.0.1:2379", mount_segment_size, - buffer_allocator_size, protocol_, device_name); + if (client_) { + 
LOG(ERROR) << "Client is already initialized"; + return 1; + } + uint64_t buffer_allocator_size = 1024 * 1024 * 1024; + return setup("localhost:12345", "127.0.0.1:2379", mount_segment_size, buffer_allocator_size, protocol_, + device_name); } -int DistributedObjectStore::allocateSlices(std::vector &slices, - size_t length) { - uint64_t offset = 0; - while (offset < length) { - auto chunk_size = std::min(length - offset, kMaxSliceSize); - auto ptr = client_buffer_allocator_->allocate(chunk_size); - if (!ptr) { - return 1; // SliceGuard will handle cleanup - } - slices.emplace_back(Slice{ptr, chunk_size}); - offset += chunk_size; - } - return 0; +int DistributedObjectStore::allocateSlices(std::vector &slices, size_t length) { + uint64_t offset = 0; + while (offset < length) { + auto chunk_size = std::min(length - offset, kMaxSliceSize); + auto ptr = client_buffer_allocator_->allocate(chunk_size); + if (!ptr) { + return 1; // SliceGuard will handle cleanup + } + slices.emplace_back(Slice {ptr, chunk_size}); + offset += chunk_size; + } + return 0; } -int DistributedObjectStore::allocateSlices(std::vector &slices, - const std::string &value) { - uint64_t offset = 0; - while (offset < value.size()) { - auto chunk_size = std::min(value.size() - offset, kMaxSliceSize); - auto ptr = client_buffer_allocator_->allocate(chunk_size); - if (!ptr) { - return 1; // SliceGuard will handle cleanup - } - memcpy(ptr, value.data() + offset, chunk_size); - slices.emplace_back(Slice{ptr, chunk_size}); - offset += chunk_size; - } - return 0; +int DistributedObjectStore::allocateSlices(std::vector &slices, const std::string &value) { + uint64_t offset = 0; + while (offset < value.size()) { + auto chunk_size = std::min(value.size() - offset, kMaxSliceSize); + auto ptr = client_buffer_allocator_->allocate(chunk_size); + if (!ptr) { + return 1; // SliceGuard will handle cleanup + } + memcpy(ptr, value.data() + offset, chunk_size); + slices.emplace_back(Slice {ptr, chunk_size}); + offset += 
chunk_size; + } + return 0; } -int DistributedObjectStore::allocateSlices(std::vector &slices, - std::span value) { - uint64_t offset = 0; - while (offset < value.size()) { - auto chunk_size = std::min(value.size() - offset, kMaxSliceSize); - auto ptr = client_buffer_allocator_->allocate(chunk_size); - if (!ptr) { - return 1; // SliceGuard will handle cleanup - } - memcpy(ptr, value.data() + offset, chunk_size); - slices.emplace_back(Slice{ptr, chunk_size}); - offset += chunk_size; - } - return 0; +int DistributedObjectStore::allocateSlices(std::vector &slices, std::span value) { + uint64_t offset = 0; + while (offset < value.size()) { + auto chunk_size = std::min(value.size() - offset, kMaxSliceSize); + auto ptr = client_buffer_allocator_->allocate(chunk_size); + if (!ptr) { + return 1; // SliceGuard will handle cleanup + } + memcpy(ptr, value.data() + offset, chunk_size); + slices.emplace_back(Slice {ptr, chunk_size}); + offset += chunk_size; + } + return 0; } -int DistributedObjectStore::allocateSlicesPacked( - std::vector &slices, - const std::vector> &parts) { - size_t total = 0; - for (auto p : parts) total += p.size(); - - if (total == 0) return 0; - - size_t n_slice = (total + kMaxSliceSize - 1) / kMaxSliceSize; - slices.reserve(n_slice); - - size_t remaining = total; - for (size_t i = 0; i < n_slice; ++i) { - size_t sz = std::min(remaining, (size_t)kMaxSliceSize); - void *ptr = client_buffer_allocator_->allocate(sz); - if (!ptr) { - return 1; // SliceGuard will handle cleanup - } - slices.emplace_back(mooncake::Slice{ptr, sz}); - remaining -= sz; - } - - size_t idx = 0; - char *dst = static_cast(slices[0].ptr); - size_t dst_left = slices[0].size; - - for (auto part : parts) { - const char *src = part.data(); - size_t n = part.size(); - - while (n > 0) { - if (dst_left == 0) { - dst = static_cast(slices[++idx].ptr); - dst_left = slices[idx].size; - } - size_t chunk = std::min(n, dst_left); - memcpy(dst, src, chunk); - dst += chunk; - dst_left -= chunk; - 
src += chunk; - n -= chunk; - } - } - return 0; +int DistributedObjectStore::allocateSlicesPacked(std::vector &slices, + const std::vector> &parts) { + size_t total = 0; + for (auto p : parts) + total += p.size(); + + if (total == 0) + return 0; + + size_t n_slice = (total + kMaxSliceSize - 1) / kMaxSliceSize; + slices.reserve(n_slice); + + size_t remaining = total; + for (size_t i = 0; i < n_slice; ++i) { + size_t sz = std::min(remaining, (size_t)kMaxSliceSize); + void *ptr = client_buffer_allocator_->allocate(sz); + if (!ptr) { + return 1; // SliceGuard will handle cleanup + } + slices.emplace_back(mooncake::Slice {ptr, sz}); + remaining -= sz; + } + + size_t idx = 0; + char *dst = static_cast(slices[0].ptr); + size_t dst_left = slices[0].size; + + for (auto part : parts) { + const char *src = part.data(); + size_t n = part.size(); + + while (n > 0) { + if (dst_left == 0) { + dst = static_cast(slices[++idx].ptr); + dst_left = slices[idx].size; + } + size_t chunk = std::min(n, dst_left); + memcpy(dst, src, chunk); + dst += chunk; + dst_left -= chunk; + src += chunk; + n -= chunk; + } + } + return 0; } -int DistributedObjectStore::allocateSlices( - std::vector &slices, - const std::vector &replica_list, uint64_t &length) { - length = 0; - if (replica_list.empty()) return -1; - auto &replica = replica_list[0]; - if (replica.is_memory_replica() == false) { - auto &disk_descriptor = replica.get_disk_descriptor(); - length = disk_descriptor.file_size; - return allocateSlices(slices, length); - } else { - auto &memory_descriptors = replica.get_memory_descriptor(); - for (auto &handle : memory_descriptors.buffer_descriptors) { - auto chunk_size = handle.size_; - assert(chunk_size <= kMaxSliceSize); - auto ptr = client_buffer_allocator_->allocate(chunk_size); - if (!ptr) { - return 1; // SliceGuard will handle cleanup - } - slices.emplace_back(Slice{ptr, chunk_size}); - length += chunk_size; - } - } - return 0; +int DistributedObjectStore::allocateSlices(std::vector 
&slices, + const std::vector &replica_list, uint64_t &length) { + length = 0; + if (replica_list.empty()) + return -1; + auto &replica = replica_list[0]; + if (replica.is_memory_replica() == false) { + auto &disk_descriptor = replica.get_disk_descriptor(); + length = disk_descriptor.file_size; + return allocateSlices(slices, length); + } else { + auto &memory_descriptors = replica.get_memory_descriptor(); + for (auto &handle : memory_descriptors.buffer_descriptors) { + auto chunk_size = handle.size_; + assert(chunk_size <= kMaxSliceSize); + auto ptr = client_buffer_allocator_->allocate(chunk_size); + if (!ptr) { + return 1; // SliceGuard will handle cleanup + } + slices.emplace_back(Slice {ptr, chunk_size}); + length += chunk_size; + } + } + return 0; } int DistributedObjectStore::allocateBatchedSlices( - const std::vector &keys, - std::unordered_map> - &batched_slices, - const std::vector> - &replica_lists, + const std::vector &keys, std::unordered_map> &batched_slices, + const std::vector> &replica_lists, std::unordered_map &str_length_map) { - if (replica_lists.empty()) return -1; - if (keys.size() != replica_lists.size()) { - LOG(ERROR) << "Keys size (" << keys.size() - << ") doesn't match replica lists size (" - << replica_lists.size() << ")"; - return 1; - } - - for (size_t i = 0; i < keys.size(); ++i) { - const auto &key = keys[i]; - const auto &replica_list = replica_lists[i]; - - if (replica_list.empty()) { - LOG(ERROR) << "Empty replica list for key: " << key; - return 1; - } - - // Get first replica - const auto &replica = replica_list[0]; - uint64_t length = 0; - - if (replica.is_memory_replica() == false) { - auto &disk_descriptor = replica.get_disk_descriptor(); - length = disk_descriptor.file_size; - auto result = allocateSlices(batched_slices[key], length); - if (result) { - return 1; - } - } else { - auto &memory_descriptors = replica.get_memory_descriptor(); - for (auto &handle : memory_descriptors.buffer_descriptors) { - auto chunk_size = 
handle.size_; - assert(chunk_size <= kMaxSliceSize); - auto ptr = client_buffer_allocator_->allocate(chunk_size); - if (!ptr) { - return 1; - } - batched_slices[key].emplace_back(Slice{ptr, chunk_size}); - length += chunk_size; - } - } - str_length_map.emplace(key, length); - } - return 0; + if (replica_lists.empty()) + return -1; + if (keys.size() != replica_lists.size()) { + LOG(ERROR) << "Keys size (" << keys.size() << ") doesn't match replica lists size (" << replica_lists.size() + << ")"; + return 1; + } + + for (size_t i = 0; i < keys.size(); ++i) { + const auto &key = keys[i]; + const auto &replica_list = replica_lists[i]; + + if (replica_list.empty()) { + LOG(ERROR) << "Empty replica list for key: " << key; + return 1; + } + + // Get first replica + const auto &replica = replica_list[0]; + uint64_t length = 0; + + if (replica.is_memory_replica() == false) { + auto &disk_descriptor = replica.get_disk_descriptor(); + length = disk_descriptor.file_size; + auto result = allocateSlices(batched_slices[key], length); + if (result) { + return 1; + } + } else { + auto &memory_descriptors = replica.get_memory_descriptor(); + for (auto &handle : memory_descriptors.buffer_descriptors) { + auto chunk_size = handle.size_; + assert(chunk_size <= kMaxSliceSize); + auto ptr = client_buffer_allocator_->allocate(chunk_size); + if (!ptr) { + return 1; + } + batched_slices[key].emplace_back(Slice {ptr, chunk_size}); + length += chunk_size; + } + } + str_length_map.emplace(key, length); + } + return 0; } -char *DistributedObjectStore::exportSlices( - const std::vector &slices, uint64_t length) { - char *buf = new char[length + 1]; - buf[length] = '\0'; - uint64_t offset = 0; - for (auto slice : slices) { - memcpy(buf + offset, slice.ptr, slice.size); - offset += slice.size; - } - return buf; +char *DistributedObjectStore::exportSlices(const std::vector &slices, uint64_t length) { + char *buf = new char[length + 1]; + buf[length] = '\0'; + uint64_t offset = 0; + for (auto slice : 
slices) { + memcpy(buf + offset, slice.ptr, slice.size); + offset += slice.size; + } + return buf; } -int DistributedObjectStore::freeSlices( - const std::vector &slices) { - for (auto slice : slices) { - client_buffer_allocator_->deallocate(slice.ptr, slice.size); - } - return 0; +int DistributedObjectStore::freeSlices(const std::vector &slices) { + for (auto slice : slices) { + client_buffer_allocator_->deallocate(slice.ptr, slice.size); + } + return 0; } int DistributedObjectStore::tearDownAll() { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - // Reset all resources - client_.reset(); - client_buffer_allocator_.reset(); - segment_ptr_.reset(); - local_hostname = ""; - device_name = ""; - protocol = ""; - return 0; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + // Reset all resources + client_.reset(); + client_buffer_allocator_.reset(); + segment_ptr_.reset(); + local_hostname = ""; + device_name = ""; + protocol = ""; + return 0; } -int DistributedObjectStore::put(const std::string &key, - std::span value, - const ReplicateConfig &config) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - SliceGuard slices(*this); - int ret = allocateSlices(slices.slices(), value); - if (ret) { - LOG(ERROR) << "Failed to allocate slices for put operation, key: " - << key << ", value size: " << value.size(); - return ret; - } - - auto put_result = client_->Put(key, slices.slices(), config); - if (!put_result) { - LOG(ERROR) << "Put operation failed with error: " - << toString(put_result.error()); - return toInt(put_result.error()); - } - - return 0; +int DistributedObjectStore::put(const std::string &key, std::span value, const ReplicateConfig &config) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + SliceGuard slices(*this); + int ret = allocateSlices(slices.slices(), value); + if (ret) { + LOG(ERROR) << "Failed to allocate slices for put 
operation, key: " << key << ", value size: " << value.size(); + return ret; + } + + auto put_result = client_->Put(key, slices.slices(), config); + if (!put_result) { + LOG(ERROR) << "Put operation failed with error: " << toString(put_result.error()); + return toInt(put_result.error()); + } + + return 0; } -int DistributedObjectStore::put_batch( - const std::vector &keys, - const std::vector> &values, - const ReplicateConfig &config) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - if (keys.size() != values.size()) { - LOG(ERROR) << "Key and value size mismatch"; - return 1; - } - std::vector> slices; - slices.reserve(keys.size()); - std::unordered_map> batched_slices; - batched_slices.reserve(keys.size()); - - for (size_t i = 0; i < keys.size(); ++i) { - auto &key = keys[i]; - auto &value = values[i]; - slices.emplace_back(std::make_unique(*this)); - int ret = allocateSlices(slices.back()->slices(), value); - if (ret) { - LOG(ERROR) - << "Failed to allocate slices for put_batch operation, key: " - << key << ", value size: " << value.size(); - return ret; - } - batched_slices.emplace(key, slices.back()->slices()); - } - - // Convert unordered_map to vector format expected by BatchPut - std::vector> ordered_batched_slices; - ordered_batched_slices.reserve(keys.size()); - for (const auto &key : keys) { - auto it = batched_slices.find(key); - if (it != batched_slices.end()) { - ordered_batched_slices.emplace_back(it->second); - } else { - LOG(ERROR) << "Missing slices for key: " << key; - return 1; - } - } - - auto results = client_->BatchPut(keys, ordered_batched_slices, config); - - // Check if any operations failed - for (size_t i = 0; i < results.size(); ++i) { - if (!results[i]) { - LOG(ERROR) << "BatchPut operation failed for key '" << keys[i] - << "' with error: " << toString(results[i].error()); - return toInt(results[i].error()); - } - } - return 0; +int DistributedObjectStore::put_batch(const std::vector &keys, + const 
std::vector> &values, const ReplicateConfig &config) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + if (keys.size() != values.size()) { + LOG(ERROR) << "Key and value size mismatch"; + return 1; + } + std::vector> slices; + slices.reserve(keys.size()); + std::unordered_map> batched_slices; + batched_slices.reserve(keys.size()); + + for (size_t i = 0; i < keys.size(); ++i) { + auto &key = keys[i]; + auto &value = values[i]; + slices.emplace_back(std::make_unique(*this)); + int ret = allocateSlices(slices.back()->slices(), value); + if (ret) { + LOG(ERROR) << "Failed to allocate slices for put_batch operation, key: " << key + << ", value size: " << value.size(); + return ret; + } + batched_slices.emplace(key, slices.back()->slices()); + } + + // Convert unordered_map to vector format expected by BatchPut + std::vector> ordered_batched_slices; + ordered_batched_slices.reserve(keys.size()); + for (const auto &key : keys) { + auto it = batched_slices.find(key); + if (it != batched_slices.end()) { + ordered_batched_slices.emplace_back(it->second); + } else { + LOG(ERROR) << "Missing slices for key: " << key; + return 1; + } + } + + auto results = client_->BatchPut(keys, ordered_batched_slices, config); + + // Check if any operations failed + for (size_t i = 0; i < results.size(); ++i) { + if (!results[i]) { + LOG(ERROR) << "BatchPut operation failed for key '" << keys[i] + << "' with error: " << toString(results[i].error()); + return toInt(results[i].error()); + } + } + return 0; } -int DistributedObjectStore::put_parts(const std::string &key, - std::vector> values, +int DistributedObjectStore::put_parts(const std::string &key, std::vector> values, const ReplicateConfig &config) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - SliceGuard slices(*this); - int ret = allocateSlicesPacked(slices.slices(), values); - if (ret) { - LOG(ERROR) << "Failed to allocate slices for put operation, key: " - << key << 
", values size: " << values.size(); - return ret; - } - - auto put_result = client_->Put(key, slices.slices(), config); - if (!put_result) { - LOG(ERROR) << "Put operation failed with error: " - << toString(put_result.error()); - return toInt(put_result.error()); - } - return 0; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + SliceGuard slices(*this); + int ret = allocateSlicesPacked(slices.slices(), values); + if (ret) { + LOG(ERROR) << "Failed to allocate slices for put operation, key: " << key << ", values size: " << values.size(); + return ret; + } + + auto put_result = client_->Put(key, slices.slices(), config); + if (!put_result) { + LOG(ERROR) << "Put operation failed with error: " << toString(put_result.error()); + return toInt(put_result.error()); + } + return 0; } pybind11::bytes DistributedObjectStore::get(const std::string &key) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return pybind11::bytes("\0", 0); - } - - SliceGuard guard(*this); // Use SliceGuard for RAII - uint64_t str_length = 0; - char *exported_str_ptr = nullptr; - bool use_exported_str = false; - - const auto kNullString = pybind11::bytes("\0", 0); - - { - py::gil_scoped_release release_gil; - - auto query_result = client_->Query(key); - if (!query_result) { - py::gil_scoped_acquire acquire_gil; - return kNullString; - } - // Extract replica list from the query result - auto replica_list = query_result.value(); - if (replica_list.empty()) { - py::gil_scoped_acquire acquire_gil; - return kNullString; - } - int ret = allocateSlices(guard.slices(), replica_list, str_length); - if (ret) { - py::gil_scoped_acquire acquire_gil; - return kNullString; - } - - auto get_result = client_->Get(key, replica_list, guard.slices()); - if (!get_result) { - py::gil_scoped_acquire acquire_gil; - return kNullString; - } - - if (guard.slices().size() == 1 && - guard.slices()[0].size == str_length) { - } else { - exported_str_ptr = 
exportSlices(guard.slices(), str_length); - if (!exported_str_ptr) { - py::gil_scoped_acquire acquire_gil; - return kNullString; - } - use_exported_str = true; - } - } - - py::gil_scoped_acquire acquire_gil; - - pybind11::bytes result; - if (use_exported_str) { - result = pybind11::bytes(exported_str_ptr, str_length); - delete[] exported_str_ptr; - } else if (!guard.slices().empty()) { - result = pybind11::bytes(static_cast(guard.slices()[0].ptr), - str_length); - } else { - result = kNullString; - } - - return result; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return pybind11::bytes("\0", 0); + } + + SliceGuard guard(*this); // Use SliceGuard for RAII + uint64_t str_length = 0; + char *exported_str_ptr = nullptr; + bool use_exported_str = false; + + const auto kNullString = pybind11::bytes("\0", 0); + + { + py::gil_scoped_release release_gil; + + auto query_result = client_->Query(key); + if (!query_result) { + py::gil_scoped_acquire acquire_gil; + return kNullString; + } + // Extract replica list from the query result + auto replica_list = query_result.value(); + if (replica_list.empty()) { + py::gil_scoped_acquire acquire_gil; + return kNullString; + } + int ret = allocateSlices(guard.slices(), replica_list, str_length); + if (ret) { + py::gil_scoped_acquire acquire_gil; + return kNullString; + } + + auto get_result = client_->Get(key, replica_list, guard.slices()); + if (!get_result) { + py::gil_scoped_acquire acquire_gil; + return kNullString; + } + + if (guard.slices().size() == 1 && guard.slices()[0].size == str_length) { + } else { + exported_str_ptr = exportSlices(guard.slices(), str_length); + if (!exported_str_ptr) { + py::gil_scoped_acquire acquire_gil; + return kNullString; + } + use_exported_str = true; + } + } + + py::gil_scoped_acquire acquire_gil; + + pybind11::bytes result; + if (use_exported_str) { + result = pybind11::bytes(exported_str_ptr, str_length); + delete[] exported_str_ptr; + } else if (!guard.slices().empty()) { + 
result = pybind11::bytes(static_cast(guard.slices()[0].ptr), str_length); + } else { + result = kNullString; + } + + return result; } -std::vector DistributedObjectStore::get_batch( - const std::vector &keys) { - const auto kNullString = pybind11::bytes("\0", 0); - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - py::gil_scoped_acquire acquire_gil; - return {kNullString}; - } - std::unordered_set seen; - for (const auto &key : keys) { - if (!seen.insert(key).second) { - LOG(ERROR) << "Duplicate key not supported for Batch API, key: " - << key; - py::gil_scoped_acquire acquire_gil; - return {kNullString}; - } - } - - std::vector results; - std::unordered_map> - batched_slices; - batched_slices.reserve(keys.size()); - std::unordered_map str_length_map; - { - py::gil_scoped_release release_gil; - auto query_results = client_->BatchQuery(keys); - - // Extract successful replica lists - std::vector> replica_lists; - replica_lists.reserve(keys.size()); - for (size_t i = 0; i < query_results.size(); ++i) { - if (!query_results[i]) { - py::gil_scoped_acquire acquire_gil; - LOG(ERROR) << "Query failed for key '" << keys[i] - << "': " << toString(query_results[i].error()); - return {kNullString}; - } - replica_lists.emplace_back(query_results[i].value()); - } - - int ret = allocateBatchedSlices(keys, batched_slices, replica_lists, - str_length_map); - if (ret) { - for (auto &slice : batched_slices) { - freeSlices(slice.second); - } - py::gil_scoped_acquire acquire_gil; - return {kNullString}; - } - - auto get_results = - client_->BatchGet(keys, replica_lists, batched_slices); - for (size_t i = 0; i < get_results.size(); ++i) { - if (!get_results[i]) { - for (auto &slice : batched_slices) { - freeSlices(slice.second); - } - py::gil_scoped_acquire acquire_gil; - LOG(ERROR) << "BatchGet failed for key '" << keys[i] - << "': " << toString(get_results[i].error()); - return {kNullString}; - } - } - - py::gil_scoped_acquire acquire_gil; - std::vector results; - for 
(const auto &key : keys) { - if (batched_slices[key].size() == 1 && - batched_slices[key][0].size == str_length_map[key]) { - results.push_back(pybind11::bytes( - static_cast(batched_slices[key][0].ptr), - str_length_map[key])); - } else { - char *exported_str_ptr = - exportSlices(batched_slices[key], str_length_map[key]); - if (!exported_str_ptr) { - for (auto &slice : batched_slices) { - freeSlices(slice.second); - } - return {kNullString}; - } else { - results.push_back( - pybind11::bytes(exported_str_ptr, str_length_map[key])); - delete[] exported_str_ptr; - } - } - } - if (results.size() != keys.size()) { - LOG(ERROR) << "Results size does not match keys size"; - for (auto &slice : batched_slices) { - freeSlices(slice.second); - } - return {kNullString}; - } - for (auto &slice : batched_slices) { - freeSlices(slice.second); - } - return results; - } +std::vector DistributedObjectStore::get_batch(const std::vector &keys) { + const auto kNullString = pybind11::bytes("\0", 0); + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + py::gil_scoped_acquire acquire_gil; + return {kNullString}; + } + std::unordered_set seen; + for (const auto &key : keys) { + if (!seen.insert(key).second) { + LOG(ERROR) << "Duplicate key not supported for Batch API, key: " << key; + py::gil_scoped_acquire acquire_gil; + return {kNullString}; + } + } + + std::vector results; + std::unordered_map> batched_slices; + batched_slices.reserve(keys.size()); + std::unordered_map str_length_map; + { + py::gil_scoped_release release_gil; + auto query_results = client_->BatchQuery(keys); + + // Extract successful replica lists + std::vector> replica_lists; + replica_lists.reserve(keys.size()); + for (size_t i = 0; i < query_results.size(); ++i) { + if (!query_results[i]) { + py::gil_scoped_acquire acquire_gil; + LOG(ERROR) << "Query failed for key '" << keys[i] << "': " << toString(query_results[i].error()); + return {kNullString}; + } + 
replica_lists.emplace_back(query_results[i].value()); + } + + int ret = allocateBatchedSlices(keys, batched_slices, replica_lists, str_length_map); + if (ret) { + for (auto &slice : batched_slices) { + freeSlices(slice.second); + } + py::gil_scoped_acquire acquire_gil; + return {kNullString}; + } + + auto get_results = client_->BatchGet(keys, replica_lists, batched_slices); + for (size_t i = 0; i < get_results.size(); ++i) { + if (!get_results[i]) { + for (auto &slice : batched_slices) { + freeSlices(slice.second); + } + py::gil_scoped_acquire acquire_gil; + LOG(ERROR) << "BatchGet failed for key '" << keys[i] << "': " << toString(get_results[i].error()); + return {kNullString}; + } + } + + py::gil_scoped_acquire acquire_gil; + std::vector results; + for (const auto &key : keys) { + if (batched_slices[key].size() == 1 && batched_slices[key][0].size == str_length_map[key]) { + results.push_back( + pybind11::bytes(static_cast(batched_slices[key][0].ptr), str_length_map[key])); + } else { + char *exported_str_ptr = exportSlices(batched_slices[key], str_length_map[key]); + if (!exported_str_ptr) { + for (auto &slice : batched_slices) { + freeSlices(slice.second); + } + return {kNullString}; + } else { + results.push_back(pybind11::bytes(exported_str_ptr, str_length_map[key])); + delete[] exported_str_ptr; + } + } + } + if (results.size() != keys.size()) { + LOG(ERROR) << "Results size does not match keys size"; + for (auto &slice : batched_slices) { + freeSlices(slice.second); + } + return {kNullString}; + } + for (auto &slice : batched_slices) { + freeSlices(slice.second); + } + return results; + } } int DistributedObjectStore::remove(const std::string &key) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - auto remove_result = client_->Remove(key); - if (!remove_result) return toInt(remove_result.error()); - return 0; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + auto remove_result = 
client_->Remove(key); + if (!remove_result) + return toInt(remove_result.error()); + return 0; } long DistributedObjectStore::removeAll() { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return -1; - } - auto result = client_->RemoveAll(); - if (!result) { - LOG(ERROR) << "RemoveAll failed: " << result.error(); - return -1; - } - return result.value(); + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + auto result = client_->RemoveAll(); + if (!result) { + LOG(ERROR) << "RemoveAll failed: " << result.error(); + return -1; + } + return result.value(); } int DistributedObjectStore::isExist(const std::string &key) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return -1; - } - auto exist_result = client_->IsExist(key); - if (!exist_result) { - if (exist_result.error() == ErrorCode::OBJECT_NOT_FOUND) - return 0; // No - return toInt(exist_result.error()); // Error - } - return exist_result.value() ? 1 : 0; // Yes/No + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + auto exist_result = client_->IsExist(key); + if (!exist_result) { + if (exist_result.error() == ErrorCode::OBJECT_NOT_FOUND) + return 0; // No + return toInt(exist_result.error()); // Error + } + return exist_result.value() ? 
1 : 0; // Yes/No } -std::vector DistributedObjectStore::batchIsExist( - const std::vector &keys) { - std::vector results; - - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - results.resize(keys.size(), -1); // Fill with error codes - return results; - } - - if (keys.empty()) { - LOG(WARNING) << "Empty keys vector provided to batchIsExist"; - return results; // Return empty vector - } - - auto batch_exist_results = client_->BatchIsExist(keys); - - results.resize(keys.size()); - - // Convert tl::expected results to int results - for (size_t i = 0; i < keys.size(); ++i) { - if (!batch_exist_results[i]) { - if (batch_exist_results[i].error() == ErrorCode::OBJECT_NOT_FOUND) { - results[i] = 0; // Does not exist - } else { - results[i] = toInt(batch_exist_results[i].error()); // Error - } - } else { - results[i] = - batch_exist_results[i].value() ? 1 : 0; // Exists/Not exists - } - } - - return results; +std::vector DistributedObjectStore::batchIsExist(const std::vector &keys) { + std::vector results; + + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + results.resize(keys.size(), -1); // Fill with error codes + return results; + } + + if (keys.empty()) { + LOG(WARNING) << "Empty keys vector provided to batchIsExist"; + return results; // Return empty vector + } + + auto batch_exist_results = client_->BatchIsExist(keys); + + results.resize(keys.size()); + + // Convert tl::expected results to int results + for (size_t i = 0; i < keys.size(); ++i) { + if (!batch_exist_results[i]) { + if (batch_exist_results[i].error() == ErrorCode::OBJECT_NOT_FOUND) { + results[i] = 0; // Does not exist + } else { + results[i] = toInt(batch_exist_results[i].error()); // Error + } + } else { + results[i] = batch_exist_results[i].value() ? 
1 : 0; // Exists/Not exists + } + } + + return results; } int64_t DistributedObjectStore::getSize(const std::string &key) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return -1; - } - - auto query_result = client_->Query(key); - - if (!query_result) { - return toInt(query_result.error()); - } - - auto replica_list = query_result.value(); - - // Calculate total size from all replicas' handles - int64_t total_size = 0; - if (!replica_list.empty()) { - auto &replica = replica_list[0]; - if (replica.is_memory_replica() == false) { - auto &disk_descriptor = replica.get_disk_descriptor(); - total_size = disk_descriptor.file_size; - } else { - auto &memory_descriptors = replica.get_memory_descriptor(); - for (auto &handle : memory_descriptors.buffer_descriptors) { - total_size += handle.size_; - } - } - } else { - LOG(ERROR) << "Internal error: replica_list is empty"; - return -1; // Internal error - } - - return total_size; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + + auto query_result = client_->Query(key); + + if (!query_result) { + return toInt(query_result.error()); + } + + auto replica_list = query_result.value(); + + // Calculate total size from all replicas' handles + int64_t total_size = 0; + if (!replica_list.empty()) { + auto &replica = replica_list[0]; + if (replica.is_memory_replica() == false) { + auto &disk_descriptor = replica.get_disk_descriptor(); + total_size = disk_descriptor.file_size; + } else { + auto &memory_descriptors = replica.get_memory_descriptor(); + for (auto &handle : memory_descriptors.buffer_descriptors) { + total_size += handle.size_; + } + } + } else { + LOG(ERROR) << "Internal error: replica_list is empty"; + return -1; // Internal error + } + + return total_size; } // SliceBuffer implementation -SliceBuffer::SliceBuffer(DistributedObjectStore &store, void *buffer, - uint64_t size, bool use_allocator_free) - : store_(store), - buffer_(buffer), - size_(size), - 
use_allocator_free_(use_allocator_free) {} +SliceBuffer::SliceBuffer(DistributedObjectStore &store, void *buffer, uint64_t size, bool use_allocator_free) + : store_(store), buffer_(buffer), size_(size), use_allocator_free_(use_allocator_free) { +} SliceBuffer::~SliceBuffer() { - if (buffer_) { - if (use_allocator_free_) { - // Use SimpleAllocator to deallocate memory - store_.client_buffer_allocator_->deallocate(buffer_, size_); - } else { - // Use delete[] for memory allocated with new[] - delete[] static_cast(buffer_); - } - buffer_ = nullptr; - } + if (buffer_) { + if (use_allocator_free_) { + // Use SimpleAllocator to deallocate memory + store_.client_buffer_allocator_->deallocate(buffer_, size_); + } else { + // Use delete[] for memory allocated with new[] + delete[] static_cast(buffer_); + } + buffer_ = nullptr; + } } -void *SliceBuffer::ptr() const { return buffer_; } +void *SliceBuffer::ptr() const { + return buffer_; +} -uint64_t SliceBuffer::size() const { return size_; } +uint64_t SliceBuffer::size() const { + return size_; +} // Implementation of get_buffer method -std::shared_ptr DistributedObjectStore::get_buffer( - const std::string &key) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return nullptr; - } - - SliceGuard guard(*this); // Use SliceGuard for RAII - uint64_t total_length = 0; - std::shared_ptr result = nullptr; - - // Query the object info - auto query_result = client_->Query(key); - if (!query_result) { - if (query_result.error() == ErrorCode::OBJECT_NOT_FOUND) { - return nullptr; - } - LOG(ERROR) << "Query failed for key: " << key - << " with error: " << toString(query_result.error()); - return nullptr; - } - - auto replica_list = query_result.value(); - - // Allocate slices for the object using the guard - int ret = allocateSlices(guard.slices(), replica_list, total_length); - if (ret) { - LOG(ERROR) << "Failed to allocate slices for key: " << key; - return nullptr; - } - - // Get the object data - auto get_result = 
client_->Get(key, replica_list, guard.slices()); - if (!get_result) { - LOG(ERROR) << "Get failed for key: " << key - << " with error: " << toString(get_result.error()); - return nullptr; - } - - if (guard.slices().size() == 1) { - auto ptr = guard.slices()[0].ptr; - guard.slices().clear(); - // Use SimpleAllocator for deallocation (default behavior) - result = std::make_shared(*this, ptr, total_length, true); - } else { - auto contiguous_buffer = exportSlices(guard.slices(), total_length); - // Use delete[] for deallocation since exportSlices uses new char[] - result = std::make_shared(*this, contiguous_buffer, - total_length, false); - } - - return result; +std::shared_ptr DistributedObjectStore::get_buffer(const std::string &key) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return nullptr; + } + + SliceGuard guard(*this); // Use SliceGuard for RAII + uint64_t total_length = 0; + std::shared_ptr result = nullptr; + + // Query the object info + auto query_result = client_->Query(key); + if (!query_result) { + if (query_result.error() == ErrorCode::OBJECT_NOT_FOUND) { + return nullptr; + } + LOG(ERROR) << "Query failed for key: " << key << " with error: " << toString(query_result.error()); + return nullptr; + } + + auto replica_list = query_result.value(); + + // Allocate slices for the object using the guard + int ret = allocateSlices(guard.slices(), replica_list, total_length); + if (ret) { + LOG(ERROR) << "Failed to allocate slices for key: " << key; + return nullptr; + } + + // Get the object data + auto get_result = client_->Get(key, replica_list, guard.slices()); + if (!get_result) { + LOG(ERROR) << "Get failed for key: " << key << " with error: " << toString(get_result.error()); + return nullptr; + } + + if (guard.slices().size() == 1) { + auto ptr = guard.slices()[0].ptr; + guard.slices().clear(); + // Use SimpleAllocator for deallocation (default behavior) + result = std::make_shared(*this, ptr, total_length, true); + } else { + auto 
contiguous_buffer = exportSlices(guard.slices(), total_length); + // Use delete[] for deallocation since exportSlices uses new char[] + result = std::make_shared(*this, contiguous_buffer, total_length, false); + } + + return result; } int DistributedObjectStore::register_buffer(void *buffer, size_t size) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - auto register_result = client_->RegisterLocalMemory( - buffer, size, kWildcardLocation, false, true); - if (!register_result) { - LOG(ERROR) << "Register buffer failed with error: " - << toString(register_result.error()); - return toInt(register_result.error()); - } - return 0; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + auto register_result = client_->RegisterLocalMemory(buffer, size, kWildcardLocation, false, true); + if (!register_result) { + LOG(ERROR) << "Register buffer failed with error: " << toString(register_result.error()); + return toInt(register_result.error()); + } + return 0; } int DistributedObjectStore::unregister_buffer(void *buffer) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return 1; - } - auto unregister_result = client_->unregisterLocalMemory(buffer, true); - if (!unregister_result) { - LOG(ERROR) << "Unregister buffer failed with error: " - << toString(unregister_result.error()); - return toInt(unregister_result.error()); - } - return 0; + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return 1; + } + auto unregister_result = client_->unregisterLocalMemory(buffer, true); + if (!unregister_result) { + LOG(ERROR) << "Unregister buffer failed with error: " << toString(unregister_result.error()); + return toInt(unregister_result.error()); + } + return 0; } -int DistributedObjectStore::get_into(const std::string &key, void *buffer, - size_t size) { - // NOTE: The buffer address must be previously registered with - // register_buffer() for zero-copy RDMA operations to work correctly - if 
(!client_) { - LOG(ERROR) << "Client is not initialized"; - return -1; - } - - // Step 1: Get object info - auto query_result = client_->Query(key); - if (!query_result) { - if (query_result.error() == ErrorCode::OBJECT_NOT_FOUND) { - VLOG(1) << "Object not found for key: " << key; - return -toInt(query_result.error()); - } - LOG(ERROR) << "Query failed for key: " << key - << " with error: " << toString(query_result.error()); - return -toInt(query_result.error()); - } - - auto replica_list = query_result.value(); - - // Calculate total size from replica list - uint64_t total_size = 0; - if (replica_list.empty()) { - LOG(ERROR) << "Internal error: replica_list is empty"; - return -1; - } - - auto &replica = replica_list[0]; - if (replica.is_memory_replica() == false) { - auto &disk_descriptor = replica.get_disk_descriptor(); - total_size = disk_descriptor.file_size; - } else { - for (auto &handle : - replica.get_memory_descriptor().buffer_descriptors) { - total_size += handle.size_; - } - } - - // Check if user buffer is large enough - if (size < total_size) { - LOG(ERROR) << "User buffer too small. 
Required: " << total_size - << ", provided: " << size; - return -1; - } - - // Step 2: Split user buffer according to object info and create slices - std::vector slices; - uint64_t offset = 0; - - if (replica.is_memory_replica() == false) { - while (offset < total_size) { - auto chunk_size = std::min(total_size - offset, kMaxSliceSize); - void *chunk_ptr = static_cast(buffer) + offset; - slices.emplace_back(Slice{chunk_ptr, chunk_size}); - offset += chunk_size; - } - } else { - for (auto &handle : - replica.get_memory_descriptor().buffer_descriptors) { - void *chunk_ptr = static_cast(buffer) + offset; - slices.emplace_back(Slice{chunk_ptr, handle.size_}); - offset += handle.size_; - } - } - - // Step 3: Read data directly into user buffer - auto get_result = client_->Get(key, replica_list, slices); - if (!get_result) { - LOG(ERROR) << "Get failed for key: " << key - << " with error: " << toString(get_result.error()); - return -toInt(get_result.error()); - } - - return static_cast(total_size); +int DistributedObjectStore::get_into(const std::string &key, void *buffer, size_t size) { + // NOTE: The buffer address must be previously registered with + // register_buffer() for zero-copy RDMA operations to work correctly + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + + // Step 1: Get object info + auto query_result = client_->Query(key); + if (!query_result) { + if (query_result.error() == ErrorCode::OBJECT_NOT_FOUND) { + VLOG(1) << "Object not found for key: " << key; + return -toInt(query_result.error()); + } + LOG(ERROR) << "Query failed for key: " << key << " with error: " << toString(query_result.error()); + return -toInt(query_result.error()); + } + + auto replica_list = query_result.value(); + + // Calculate total size from replica list + uint64_t total_size = 0; + if (replica_list.empty()) { + LOG(ERROR) << "Internal error: replica_list is empty"; + return -1; + } + + auto &replica = replica_list[0]; + if 
(replica.is_memory_replica() == false) { + auto &disk_descriptor = replica.get_disk_descriptor(); + total_size = disk_descriptor.file_size; + } else { + for (auto &handle : replica.get_memory_descriptor().buffer_descriptors) { + total_size += handle.size_; + } + } + + // Check if user buffer is large enough + if (size < total_size) { + LOG(ERROR) << "User buffer too small. Required: " << total_size << ", provided: " << size; + return -1; + } + + // Step 2: Split user buffer according to object info and create slices + std::vector slices; + uint64_t offset = 0; + + if (replica.is_memory_replica() == false) { + while (offset < total_size) { + auto chunk_size = std::min(total_size - offset, kMaxSliceSize); + void *chunk_ptr = static_cast(buffer) + offset; + slices.emplace_back(Slice {chunk_ptr, chunk_size}); + offset += chunk_size; + } + } else { + for (auto &handle : replica.get_memory_descriptor().buffer_descriptors) { + void *chunk_ptr = static_cast(buffer) + offset; + slices.emplace_back(Slice {chunk_ptr, handle.size_}); + offset += handle.size_; + } + } + + // Step 3: Read data directly into user buffer + auto get_result = client_->Get(key, replica_list, slices); + if (!get_result) { + LOG(ERROR) << "Get failed for key: " << key << " with error: " << toString(get_result.error()); + return -toInt(get_result.error()); + } + + return static_cast(total_size); } std::string DistributedObjectStore::get_hostname() const { - return local_hostname; + return local_hostname; } -std::vector DistributedObjectStore::batch_put_from( - const std::vector &keys, const std::vector &buffers, - const std::vector &sizes, const ReplicateConfig &config) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return std::vector(keys.size(), -1); - } - - if (keys.size() != buffers.size() || keys.size() != sizes.size()) { - LOG(ERROR) << "Mismatched sizes for keys, buffers, and sizes"; - return std::vector(keys.size(), -1); - } - - std::unordered_map> all_slices; - - // Create 
slices from user buffers - for (size_t i = 0; i < keys.size(); ++i) { - const std::string &key = keys[i]; - void *buffer = buffers[i]; - size_t size = sizes[i]; - - std::vector slices; - uint64_t offset = 0; - - while (offset < size) { - auto chunk_size = std::min(size - offset, kMaxSliceSize); - void *chunk_ptr = static_cast(buffer) + offset; - slices.emplace_back(Slice{chunk_ptr, chunk_size}); - offset += chunk_size; - } - - all_slices[key] = std::move(slices); - } - - std::vector> ordered_batched_slices; - ordered_batched_slices.reserve(keys.size()); - for (const auto &key : keys) { - auto it = all_slices.find(key); - if (it != all_slices.end()) { - ordered_batched_slices.emplace_back(it->second); - } else { - LOG(ERROR) << "Missing slices for key: " << key; - return std::vector(keys.size(), -1); - } - } - - auto batch_put_results = - client_->BatchPut(keys, ordered_batched_slices, config); - - std::vector results(keys.size()); - - // Check if any operations failed - for (size_t i = 0; i < batch_put_results.size(); ++i) { - if (!batch_put_results[i]) { - LOG(ERROR) << "BatchPut operation failed for key '" << keys[i] - << "' with error: " - << toString(batch_put_results[i].error()); - results[i] = -toInt(batch_put_results[i].error()); - } else { - results[i] = 0; - } - } - - return results; +std::vector DistributedObjectStore::batch_put_from(const std::vector &keys, + const std::vector &buffers, + const std::vector &sizes, + const ReplicateConfig &config) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return std::vector(keys.size(), -1); + } + + if (keys.size() != buffers.size() || keys.size() != sizes.size()) { + LOG(ERROR) << "Mismatched sizes for keys, buffers, and sizes"; + return std::vector(keys.size(), -1); + } + + std::unordered_map> all_slices; + + // Create slices from user buffers + for (size_t i = 0; i < keys.size(); ++i) { + const std::string &key = keys[i]; + void *buffer = buffers[i]; + size_t size = sizes[i]; + + std::vector 
slices; + uint64_t offset = 0; + + while (offset < size) { + auto chunk_size = std::min(size - offset, kMaxSliceSize); + void *chunk_ptr = static_cast(buffer) + offset; + slices.emplace_back(Slice {chunk_ptr, chunk_size}); + offset += chunk_size; + } + + all_slices[key] = std::move(slices); + } + + std::vector> ordered_batched_slices; + ordered_batched_slices.reserve(keys.size()); + for (const auto &key : keys) { + auto it = all_slices.find(key); + if (it != all_slices.end()) { + ordered_batched_slices.emplace_back(it->second); + } else { + LOG(ERROR) << "Missing slices for key: " << key; + return std::vector(keys.size(), -1); + } + } + + auto batch_put_results = client_->BatchPut(keys, ordered_batched_slices, config); + + std::vector results(keys.size()); + + // Check if any operations failed + for (size_t i = 0; i < batch_put_results.size(); ++i) { + if (!batch_put_results[i]) { + LOG(ERROR) << "BatchPut operation failed for key '" << keys[i] + << "' with error: " << toString(batch_put_results[i].error()); + results[i] = -toInt(batch_put_results[i].error()); + } else { + results[i] = 0; + } + } + + return results; } -std::vector DistributedObjectStore::batch_get_into( - const std::vector &keys, const std::vector &buffers, - const std::vector &sizes) { - // Validate preconditions - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return std::vector(keys.size(), -1); - } - - if (keys.size() != buffers.size() || keys.size() != sizes.size()) { - LOG(ERROR) << "Input vector sizes mismatch: keys=" << keys.size() - << ", buffers=" << buffers.size() - << ", sizes=" << sizes.size(); - return std::vector(keys.size(), -1); - } - - const size_t num_keys = keys.size(); - std::vector results(num_keys, -1); - - if (num_keys == 0) { - return results; - } - - // Query metadata for all keys - const auto query_results = client_->BatchQuery(keys); - - // Process each key individually and prepare for batch transfer - struct ValidKeyInfo { - std::string key; - size_t 
original_index; - std::vector replica_list; - std::vector slices; - uint64_t total_size; - }; - - std::vector valid_operations; - valid_operations.reserve(num_keys); - - for (size_t i = 0; i < num_keys; ++i) { - const auto &key = keys[i]; - - // Handle query failures - if (!query_results[i]) { - const auto error = query_results[i].error(); - results[i] = (error == ErrorCode::OBJECT_NOT_FOUND) - ? -toInt(ErrorCode::OBJECT_NOT_FOUND) - : -toInt(error); - - if (error != ErrorCode::OBJECT_NOT_FOUND) { - LOG(ERROR) << "Query failed for key '" << key - << "': " << toString(error); - } - continue; - } - - // Validate replica list - auto replica_list = query_results[i].value(); - if (replica_list.empty()) { - LOG(ERROR) << "Empty replica list for key: " << key; - results[i] = -1; - // TODO: We could early return here for prefix match case - continue; - } - - // Calculate required buffer size - const auto &replica = replica_list[0]; - uint64_t total_size = 0; - if (replica.is_memory_replica() == false) { - auto &disk_descriptor = replica.get_disk_descriptor(); - total_size = disk_descriptor.file_size; - } else { - for (auto &handle : - replica.get_memory_descriptor().buffer_descriptors) { - total_size += handle.size_; - } - } - - // Validate buffer capacity - if (sizes[i] < total_size) { - LOG(ERROR) << "Buffer too small for key '" << key - << "': required=" << total_size - << ", available=" << sizes[i]; - results[i] = -1; - continue; - } - - // Create slices for this key's buffer - std::vector key_slices; - uint64_t offset = 0; - if (replica.is_memory_replica() == false) { - while (offset < total_size) { - auto chunk_size = std::min(total_size - offset, kMaxSliceSize); - void *chunk_ptr = static_cast(buffers[i]) + offset; - key_slices.emplace_back(Slice{chunk_ptr, chunk_size}); - offset += chunk_size; - } - } else { - for (auto &handle : - replica.get_memory_descriptor().buffer_descriptors) { - void *chunk_ptr = static_cast(buffers[i]) + offset; - 
key_slices.emplace_back(Slice{chunk_ptr, handle.size_}); - offset += handle.size_; - } - } - - // Store operation info for batch processing - valid_operations.push_back({.key = key, - .original_index = i, - .replica_list = std::move(replica_list), - .slices = std::move(key_slices), - .total_size = total_size}); - - // Set success result (actual bytes transferred) - results[i] = static_cast(total_size); - } - - // Early return if no valid operations - if (valid_operations.empty()) { - return results; - } - - // Prepare batch transfer data structures - std::vector batch_keys; - std::vector> batch_replica_lists; - std::unordered_map> batch_slices; - - batch_keys.reserve(valid_operations.size()); - batch_replica_lists.reserve(valid_operations.size()); - - for (const auto &op : valid_operations) { - batch_keys.push_back(op.key); - batch_replica_lists.push_back(op.replica_list); - batch_slices[op.key] = op.slices; - } - - // Execute batch transfer - const auto batch_get_results = - client_->BatchGet(batch_keys, batch_replica_lists, batch_slices); - - // Process transfer results - for (size_t j = 0; j < batch_get_results.size(); ++j) { - const auto &op = valid_operations[j]; - - if (!batch_get_results[j]) { - const auto error = batch_get_results[j].error(); - LOG(ERROR) << "BatchGet failed for key '" << op.key - << "': " << toString(error); - results[op.original_index] = -toInt(error); - } - } - - return results; +std::vector DistributedObjectStore::batch_get_into(const std::vector &keys, + const std::vector &buffers, + const std::vector &sizes) { + // Validate preconditions + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return std::vector(keys.size(), -1); + } + + if (keys.size() != buffers.size() || keys.size() != sizes.size()) { + LOG(ERROR) << "Input vector sizes mismatch: keys=" << keys.size() << ", buffers=" << buffers.size() + << ", sizes=" << sizes.size(); + return std::vector(keys.size(), -1); + } + + const size_t num_keys = keys.size(); + 
std::vector results(num_keys, -1); + + if (num_keys == 0) { + return results; + } + + // Query metadata for all keys + const auto query_results = client_->BatchQuery(keys); + + // Process each key individually and prepare for batch transfer + struct ValidKeyInfo { + std::string key; + size_t original_index; + std::vector replica_list; + std::vector slices; + uint64_t total_size; + }; + + std::vector valid_operations; + valid_operations.reserve(num_keys); + + for (size_t i = 0; i < num_keys; ++i) { + const auto &key = keys[i]; + + // Handle query failures + if (!query_results[i]) { + const auto error = query_results[i].error(); + results[i] = (error == ErrorCode::OBJECT_NOT_FOUND) ? -toInt(ErrorCode::OBJECT_NOT_FOUND) : -toInt(error); + + if (error != ErrorCode::OBJECT_NOT_FOUND) { + LOG(ERROR) << "Query failed for key '" << key << "': " << toString(error); + } + continue; + } + + // Validate replica list + auto replica_list = query_results[i].value(); + if (replica_list.empty()) { + LOG(ERROR) << "Empty replica list for key: " << key; + results[i] = -1; + // TODO: We could early return here for prefix match case + continue; + } + + // Calculate required buffer size + const auto &replica = replica_list[0]; + uint64_t total_size = 0; + if (replica.is_memory_replica() == false) { + auto &disk_descriptor = replica.get_disk_descriptor(); + total_size = disk_descriptor.file_size; + } else { + for (auto &handle : replica.get_memory_descriptor().buffer_descriptors) { + total_size += handle.size_; + } + } + + // Validate buffer capacity + if (sizes[i] < total_size) { + LOG(ERROR) << "Buffer too small for key '" << key << "': required=" << total_size + << ", available=" << sizes[i]; + results[i] = -1; + continue; + } + + // Create slices for this key's buffer + std::vector key_slices; + uint64_t offset = 0; + if (replica.is_memory_replica() == false) { + while (offset < total_size) { + auto chunk_size = std::min(total_size - offset, kMaxSliceSize); + void *chunk_ptr = 
static_cast(buffers[i]) + offset; + key_slices.emplace_back(Slice {chunk_ptr, chunk_size}); + offset += chunk_size; + } + } else { + for (auto &handle : replica.get_memory_descriptor().buffer_descriptors) { + void *chunk_ptr = static_cast(buffers[i]) + offset; + key_slices.emplace_back(Slice {chunk_ptr, handle.size_}); + offset += handle.size_; + } + } + + // Store operation info for batch processing + valid_operations.push_back({.key = key, + .original_index = i, + .replica_list = std::move(replica_list), + .slices = std::move(key_slices), + .total_size = total_size}); + + // Set success result (actual bytes transferred) + results[i] = static_cast(total_size); + } + + // Early return if no valid operations + if (valid_operations.empty()) { + return results; + } + + // Prepare batch transfer data structures + std::vector batch_keys; + std::vector> batch_replica_lists; + std::unordered_map> batch_slices; + + batch_keys.reserve(valid_operations.size()); + batch_replica_lists.reserve(valid_operations.size()); + + for (const auto &op : valid_operations) { + batch_keys.push_back(op.key); + batch_replica_lists.push_back(op.replica_list); + batch_slices[op.key] = op.slices; + } + + // Execute batch transfer + const auto batch_get_results = client_->BatchGet(batch_keys, batch_replica_lists, batch_slices); + + // Process transfer results + for (size_t j = 0; j < batch_get_results.size(); ++j) { + const auto &op = valid_operations[j]; + + if (!batch_get_results[j]) { + const auto error = batch_get_results[j].error(); + LOG(ERROR) << "BatchGet failed for key '" << op.key << "': " << toString(error); + results[op.original_index] = -toInt(error); + } + } + + return results; } -int DistributedObjectStore::put_from(const std::string &key, void *buffer, - size_t size, - const ReplicateConfig &config) { - // NOTE: The buffer address must be previously registered with - // register_buffer() for zero-copy RDMA operations to work correctly - if (!client_) { - LOG(ERROR) << "Client is 
not initialized"; - return -1; - } - - if (size == 0) { - LOG(WARNING) << "Attempting to put empty data for key: " << key; - return 0; - } - - // Create slices directly from the user buffer - std::vector slices; - uint64_t offset = 0; - - while (offset < size) { - auto chunk_size = std::min(size - offset, kMaxSliceSize); - void *chunk_ptr = static_cast(buffer) + offset; - slices.emplace_back(Slice{chunk_ptr, chunk_size}); - offset += chunk_size; - } - - auto put_result = client_->Put(key, slices, config); - if (!put_result) { - LOG(ERROR) << "Put operation failed with error: " - << toString(put_result.error()); - return -toInt(put_result.error()); - } - - return 0; +int DistributedObjectStore::put_from(const std::string &key, void *buffer, size_t size, const ReplicateConfig &config) { + // NOTE: The buffer address must be previously registered with + // register_buffer() for zero-copy RDMA operations to work correctly + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + + if (size == 0) { + LOG(WARNING) << "Attempting to put empty data for key: " << key; + return 0; + } + + // Create slices directly from the user buffer + std::vector slices; + uint64_t offset = 0; + + while (offset < size) { + auto chunk_size = std::min(size - offset, kMaxSliceSize); + void *chunk_ptr = static_cast(buffer) + offset; + slices.emplace_back(Slice {chunk_ptr, chunk_size}); + offset += chunk_size; + } + + auto put_result = client_->Put(key, slices, config); + if (!put_result) { + LOG(ERROR) << "Put operation failed with error: " << toString(put_result.error()); + return -toInt(put_result.error()); + } + + return 0; } template py::array create_typed_array(char *exported_data, size_t total_length) { - py::capsule free_when_done(exported_data, - [](void *p) { delete[] static_cast(p); }); + py::capsule free_when_done(exported_data, [](void *p) { delete[] static_cast(p); }); - return py::array_t({static_cast(total_length / sizeof(T))}, - (T *)exported_data, 
free_when_done); + return py::array_t({static_cast(total_length / sizeof(T))}, (T *)exported_data, free_when_done); } -pybind11::object DistributedObjectStore::get_tensor(const std::string &key, - const std::string dtype) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return pybind11::none(); - } - - try { - // Import torch module - - // Query object info first - // Step 1: Get object info - auto query_result = client_->Query(key); - if (!query_result) { - py::gil_scoped_acquire acquire_gil; - return pybind11::none(); - } - // Extract replica list from the query result - auto replica_list = query_result.value(); - if (replica_list.empty()) { - py::gil_scoped_acquire acquire_gil; - return pybind11::none(); - } - - // Allocate slices for the object - SliceGuard guard(*this); - uint64_t total_length = 0; - int ret = allocateSlices(guard.slices(), replica_list, total_length); - if (ret) { - py::gil_scoped_acquire acquire_gil; - return pybind11::none(); - } - - // Get the object data - auto get_result = client_->Get(key, guard.slices()); - if (!get_result) { - py::gil_scoped_acquire acquire_gil; - LOG(ERROR) << "Get failed for key: " << key - << " with error: " << toString(get_result.error()); - return pybind11::none(); - } - - // Convert slices to contiguous bytes - char *exported_data = exportSlices(guard.slices(), total_length); - if (!exported_data) { - py::gil_scoped_acquire acquire_gil; - return pybind11::none(); - } - - // Convert bytes to tensor using torch.from_numpy - - py::object py_buffer = - py::memoryview::from_memory(exported_data, total_length); - pybind11::object np_array; - if (dtype == "float32") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "float64") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "int8") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "uint8") { - np_array = create_typed_array(exported_data, 
total_length); - } else if (dtype == "int16") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "uint16") { - np_array = - create_typed_array(exported_data, total_length); - } else if (dtype == "int32") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "uint32") { - np_array = - create_typed_array(exported_data, total_length); - } else if (dtype == "int64") { - np_array = create_typed_array(exported_data, total_length); - } else if (dtype == "uint64") { - np_array = - create_typed_array(exported_data, total_length); - } else if (dtype == "bool") { - np_array = create_typed_array(exported_data, total_length); - } - - // Create tensor from numpy array - pybind11::object tensor = torch.attr("from_numpy")(np_array); - return tensor; - } catch (const pybind11::error_already_set &e) { - LOG(ERROR) << "Failed to convert bytes to tensor: " << e.what(); - return pybind11::none(); - } +pybind11::object DistributedObjectStore::get_tensor(const std::string &key, const std::string dtype) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return pybind11::none(); + } + + try { + // Import torch module + + // Query object info first + // Step 1: Get object info + auto query_result = client_->Query(key); + if (!query_result) { + py::gil_scoped_acquire acquire_gil; + return pybind11::none(); + } + // Extract replica list from the query result + auto replica_list = query_result.value(); + if (replica_list.empty()) { + py::gil_scoped_acquire acquire_gil; + return pybind11::none(); + } + + // Allocate slices for the object + SliceGuard guard(*this); + uint64_t total_length = 0; + int ret = allocateSlices(guard.slices(), replica_list, total_length); + if (ret) { + py::gil_scoped_acquire acquire_gil; + return pybind11::none(); + } + + // Get the object data + auto get_result = client_->Get(key, guard.slices()); + if (!get_result) { + py::gil_scoped_acquire acquire_gil; + LOG(ERROR) << "Get failed for 
key: " << key << " with error: " << toString(get_result.error()); + return pybind11::none(); + } + + // Convert slices to contiguous bytes + char *exported_data = exportSlices(guard.slices(), total_length); + if (!exported_data) { + py::gil_scoped_acquire acquire_gil; + return pybind11::none(); + } + + // Convert bytes to tensor using torch.from_numpy + + py::object py_buffer = py::memoryview::from_memory(exported_data, total_length); + pybind11::object np_array; + if (dtype == "float32") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "float64") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "int8") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "uint8") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "int16") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "uint16") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "int32") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "uint32") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "int64") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "uint64") { + np_array = create_typed_array(exported_data, total_length); + } else if (dtype == "bool") { + np_array = create_typed_array(exported_data, total_length); + } + + // Create tensor from numpy array + pybind11::object tensor = torch.attr("from_numpy")(np_array); + return tensor; + } catch (const pybind11::error_already_set &e) { + LOG(ERROR) << "Failed to convert bytes to tensor: " << e.what(); + return pybind11::none(); + } } -int DistributedObjectStore::put_tensor(const std::string &key, - pybind11::object tensor) { - if (!client_) { - LOG(ERROR) << "Client is not initialized"; - return -1; - } - - try { - // Import torch module - // 
Check if the object is a tensor - if (!(tensor.attr("__class__") - .attr("__name__") - .cast() - .find("Tensor") != std::string::npos)) { - LOG(ERROR) << "Input is not a PyTorch tensor"; - return -1; - } - // Get the data pointer and size directly from the tensor - uintptr_t data_ptr = tensor.attr("data_ptr")().cast(); - size_t numel = tensor.attr("numel")().cast(); - size_t element_size = tensor.attr("element_size")().cast(); - size_t buffer_size = numel * element_size; - - this->register_buffer(reinterpret_cast(data_ptr), buffer_size); - // Use put_from for direct memory access (zero-copy) - int result = this->put_from(key, reinterpret_cast(data_ptr), - buffer_size); - this->unregister_buffer(reinterpret_cast(data_ptr)); - return result; - } catch (const pybind11::error_already_set &e) { - LOG(ERROR) << "Failed to access tensor data: " << e.what(); - return -1; - } +int DistributedObjectStore::put_tensor(const std::string &key, pybind11::object tensor) { + if (!client_) { + LOG(ERROR) << "Client is not initialized"; + return -1; + } + + try { + // Import torch module + // Check if the object is a tensor + if (!(tensor.attr("__class__").attr("__name__").cast().find("Tensor") != std::string::npos)) { + LOG(ERROR) << "Input is not a PyTorch tensor"; + return -1; + } + // Get the data pointer and size directly from the tensor + uintptr_t data_ptr = tensor.attr("data_ptr")().cast(); + size_t numel = tensor.attr("numel")().cast(); + size_t element_size = tensor.attr("element_size")().cast(); + size_t buffer_size = numel * element_size; + + this->register_buffer(reinterpret_cast(data_ptr), buffer_size); + // Use put_from for direct memory access (zero-copy) + int result = this->put_from(key, reinterpret_cast(data_ptr), buffer_size); + this->unregister_buffer(reinterpret_cast(data_ptr)); + return result; + } catch (const pybind11::error_already_set &e) { + LOG(ERROR) << "Failed to access tensor data: " << e.what(); + return -1; + } } PYBIND11_MODULE(store, m) { - // 
Define the ReplicateConfig class - py::class_(m, "ReplicateConfig") - .def(py::init<>()) - .def_readwrite("replica_num", &ReplicateConfig::replica_num) - .def_readwrite("with_soft_pin", &ReplicateConfig::with_soft_pin) - .def_readwrite("preferred_segment", &ReplicateConfig::preferred_segment) - .def("__str__", [](const ReplicateConfig &config) { - std::ostringstream oss; - oss << config; - return oss.str(); - }); - - // Define the SliceBuffer class - py::class_>(m, "SliceBuffer", - py::buffer_protocol()) - .def("ptr", - [](const SliceBuffer &self) { - // Return the pointer as an integer for Python - return reinterpret_cast(self.ptr()); - }) - .def("size", &SliceBuffer::size) - .def("__len__", &SliceBuffer::size) - .def_buffer([](SliceBuffer &self) -> py::buffer_info { - // SliceBuffer now always contains contiguous memory - if (self.size() > 0) { - return py::buffer_info( - self.ptr(), /* Pointer to buffer */ - sizeof(char), /* Size of one scalar */ - py::format_descriptor< - char>::format(), /* Python struct-style - format descriptor */ - 1, /* Number of dimensions */ - {(size_t)self.size()}, /* Buffer dimensions */ - {sizeof(char)} /* Strides (in bytes) for each index */ - ); - } else { - // Empty buffer - return py::buffer_info( - nullptr, /* Pointer to buffer */ - sizeof(char), /* Size of one scalar */ - py::format_descriptor< - char>::format(), /* Python struct-style - format descriptor */ - 1, /* Number of dimensions */ - {0}, /* Buffer dimensions */ - {sizeof(char)} /* Strides (in bytes) for each index */ - ); - } - }); - - // Define the DistributedObjectStore class - py::class_(m, "MooncakeDistributedStore") - .def(py::init<>()) - .def("setup", &DistributedObjectStore::setup) - .def("init_all", &DistributedObjectStore::initAll) - .def("get", &DistributedObjectStore::get) - .def("get_batch", &DistributedObjectStore::get_batch) - .def("get_buffer", &DistributedObjectStore::get_buffer, - py::call_guard(), - py::return_value_policy::take_ownership) - 
.def("remove", &DistributedObjectStore::remove, - py::call_guard()) - .def("remove_all", &DistributedObjectStore::removeAll, - py::call_guard()) - .def("is_exist", &DistributedObjectStore::isExist, - py::call_guard()) - .def("batch_is_exist", &DistributedObjectStore::batchIsExist, - py::call_guard(), py::arg("keys"), - "Check if multiple objects exist. Returns list of results: 1 if " - "exists, 0 if not exists, -1 if error") - .def("close", &DistributedObjectStore::tearDownAll) - .def("get_size", &DistributedObjectStore::getSize, - py::call_guard()) - .def("get_tensor", &DistributedObjectStore::get_tensor, py::arg("key"), - py::arg("dtype"), "Get a PyTorch tensor from the store") - .def("put_tensor", &DistributedObjectStore::put_tensor, py::arg("key"), - py::arg("tensor"), "Put a PyTorch tensor into the store") - .def( - "register_buffer", - [](DistributedObjectStore &self, uintptr_t buffer_ptr, - size_t size) { - // Register memory buffer for RDMA operations - void *buffer = reinterpret_cast(buffer_ptr); - py::gil_scoped_release release; - return self.register_buffer(buffer, size); - }, - py::arg("buffer_ptr"), py::arg("size"), - "Register a memory buffer for direct access operations") - .def( - "unregister_buffer", - [](DistributedObjectStore &self, uintptr_t buffer_ptr) { - // Unregister memory buffer - void *buffer = reinterpret_cast(buffer_ptr); - py::gil_scoped_release release; - return self.unregister_buffer(buffer); - }, - py::arg("buffer_ptr"), - "Unregister a previously registered memory " - "buffer for direct access operations") - .def( - "get_into", - [](DistributedObjectStore &self, const std::string &key, - uintptr_t buffer_ptr, size_t size) { - // Get data directly into user-provided buffer - void *buffer = reinterpret_cast(buffer_ptr); - py::gil_scoped_release release; - return self.get_into(key, buffer, size); - }, - py::arg("key"), py::arg("buffer_ptr"), py::arg("size"), - "Get object data directly into a pre-allocated buffer") - .def( - 
"batch_get_into", - [](DistributedObjectStore &self, - const std::vector &keys, - const std::vector &buffer_ptrs, - const std::vector &sizes) { - std::vector buffers; - buffers.reserve(buffer_ptrs.size()); - for (uintptr_t ptr : buffer_ptrs) { - buffers.push_back(reinterpret_cast(ptr)); - } - py::gil_scoped_release release; - return self.batch_get_into(keys, buffers, sizes); - }, - py::arg("keys"), py::arg("buffer_ptrs"), py::arg("sizes"), - "Get object data directly into pre-allocated buffers for multiple " - "keys") - .def( - "put_from", - [](DistributedObjectStore &self, const std::string &key, - uintptr_t buffer_ptr, size_t size, - const ReplicateConfig &config = ReplicateConfig{}) { - // Put data directly from user-provided buffer - void *buffer = reinterpret_cast(buffer_ptr); - py::gil_scoped_release release; - return self.put_from(key, buffer, size, config); - }, - py::arg("key"), py::arg("buffer_ptr"), py::arg("size"), - py::arg("config") = ReplicateConfig{}, - "Put object data directly from a pre-allocated buffer") - .def( - "batch_put_from", - [](DistributedObjectStore &self, - const std::vector &keys, - const std::vector &buffer_ptrs, - const std::vector &sizes, - const ReplicateConfig &config = ReplicateConfig{}) { - std::vector buffers; - buffers.reserve(buffer_ptrs.size()); - for (uintptr_t ptr : buffer_ptrs) { - buffers.push_back(reinterpret_cast(ptr)); - } - py::gil_scoped_release release; - return self.batch_put_from(keys, buffers, sizes, config); - }, - py::arg("keys"), py::arg("buffer_ptrs"), py::arg("sizes"), - py::arg("config") = ReplicateConfig{}, - "Put object data directly from pre-allocated buffers for multiple " - "keys") - .def( - "put", - [](DistributedObjectStore &self, const std::string &key, - py::buffer buf, - const ReplicateConfig &config = ReplicateConfig{}) { - py::buffer_info info = buf.request(/*writable=*/false); - py::gil_scoped_release release; - return self.put( - key, - std::span(static_cast(info.ptr), - 
static_cast(info.size)), - config); - }, - py::arg("key"), py::arg("value"), - py::arg("config") = ReplicateConfig{}) - .def( - "put_parts", - [](DistributedObjectStore &self, const std::string &key, - py::args parts, - const ReplicateConfig &config = ReplicateConfig{}) { - // 1) Python buffer → span - std::vector infos; - std::vector> spans; - infos.reserve(parts.size()); - spans.reserve(parts.size()); - - for (auto &obj : parts) { - py::buffer buf = py::reinterpret_borrow(obj); - infos.emplace_back(buf.request(false)); - const auto &info = infos.back(); - if (info.ndim != 1 || info.itemsize != 1) - throw std::runtime_error( - "parts must be 1-D bytes-like"); - - spans.emplace_back(static_cast(info.ptr), - static_cast(info.size)); - } - - // 2) Call C++ function - py::gil_scoped_release unlock; - return self.put_parts(key, spans, config); - }, - py::arg("key"), py::arg("config") = ReplicateConfig{}) - .def( - "put_batch", - [](DistributedObjectStore &self, - const std::vector &keys, - const std::vector &py_values, - const ReplicateConfig &config = ReplicateConfig{}) { - std::vector temp_values; - temp_values.reserve(py_values.size()); - for (const auto &value : py_values) { - temp_values.emplace_back(value.cast()); - } - - std::vector> spans; - spans.reserve(temp_values.size()); - for (const auto &s : temp_values) { - spans.emplace_back(s.data(), s.size()); - } - - return self.put_batch(keys, spans, config); - }, - py::arg("keys"), py::arg("values"), - py::arg("config") = ReplicateConfig{}) - .def("get_hostname", &DistributedObjectStore::get_hostname); + // Define the ReplicateConfig class + py::class_(m, "ReplicateConfig") + .def(py::init<>()) + .def_readwrite("replica_num", &ReplicateConfig::replica_num) + .def_readwrite("with_soft_pin", &ReplicateConfig::with_soft_pin) + .def_readwrite("preferred_segment", &ReplicateConfig::preferred_segment) + .def("__str__", [](const ReplicateConfig &config) { + std::ostringstream oss; + oss << config; + return oss.str(); + 
}); + + // Define the SliceBuffer class + py::class_>(m, "SliceBuffer", py::buffer_protocol()) + .def("ptr", + [](const SliceBuffer &self) { + // Return the pointer as an integer for Python + return reinterpret_cast(self.ptr()); + }) + .def("size", &SliceBuffer::size) + .def("__len__", &SliceBuffer::size) + .def_buffer([](SliceBuffer &self) -> py::buffer_info { + // SliceBuffer now always contains contiguous memory + if (self.size() > 0) { + return py::buffer_info(self.ptr(), /* Pointer to buffer */ + sizeof(char), /* Size of one scalar */ + py::format_descriptor::format(), /* Python struct-style + format descriptor */ + 1, /* Number of dimensions */ + {(size_t)self.size()}, /* Buffer dimensions */ + {sizeof(char)} /* Strides (in bytes) for each index */ + ); + } else { + // Empty buffer + return py::buffer_info(nullptr, /* Pointer to buffer */ + sizeof(char), /* Size of one scalar */ + py::format_descriptor::format(), /* Python struct-style + format descriptor */ + 1, /* Number of dimensions */ + {0}, /* Buffer dimensions */ + {sizeof(char)} /* Strides (in bytes) for each index */ + ); + } + }); + + // Define the DistributedObjectStore class + py::class_(m, "MooncakeDistributedStore") + .def(py::init<>()) + .def("setup", &DistributedObjectStore::setup) + .def("init_all", &DistributedObjectStore::initAll) + .def("get", &DistributedObjectStore::get) + .def("get_batch", &DistributedObjectStore::get_batch) + .def("get_buffer", &DistributedObjectStore::get_buffer, py::call_guard(), + py::return_value_policy::take_ownership) + .def("remove", &DistributedObjectStore::remove, py::call_guard()) + .def("remove_all", &DistributedObjectStore::removeAll, py::call_guard()) + .def("is_exist", &DistributedObjectStore::isExist, py::call_guard()) + .def("batch_is_exist", &DistributedObjectStore::batchIsExist, py::call_guard(), + py::arg("keys"), + "Check if multiple objects exist. 
Returns list of results: 1 if " + "exists, 0 if not exists, -1 if error") + .def("close", &DistributedObjectStore::tearDownAll) + .def("get_size", &DistributedObjectStore::getSize, py::call_guard()) + .def("get_tensor", &DistributedObjectStore::get_tensor, py::arg("key"), py::arg("dtype"), + "Get a PyTorch tensor from the store") + .def("put_tensor", &DistributedObjectStore::put_tensor, py::arg("key"), py::arg("tensor"), + "Put a PyTorch tensor into the store") + .def( + "register_buffer", + [](DistributedObjectStore &self, uintptr_t buffer_ptr, size_t size) { + // Register memory buffer for RDMA operations + void *buffer = reinterpret_cast(buffer_ptr); + py::gil_scoped_release release; + return self.register_buffer(buffer, size); + }, + py::arg("buffer_ptr"), py::arg("size"), "Register a memory buffer for direct access operations") + .def( + "unregister_buffer", + [](DistributedObjectStore &self, uintptr_t buffer_ptr) { + // Unregister memory buffer + void *buffer = reinterpret_cast(buffer_ptr); + py::gil_scoped_release release; + return self.unregister_buffer(buffer); + }, + py::arg("buffer_ptr"), + "Unregister a previously registered memory " + "buffer for direct access operations") + .def( + "get_into", + [](DistributedObjectStore &self, const std::string &key, uintptr_t buffer_ptr, size_t size) { + // Get data directly into user-provided buffer + void *buffer = reinterpret_cast(buffer_ptr); + py::gil_scoped_release release; + return self.get_into(key, buffer, size); + }, + py::arg("key"), py::arg("buffer_ptr"), py::arg("size"), + "Get object data directly into a pre-allocated buffer") + .def( + "batch_get_into", + [](DistributedObjectStore &self, const std::vector &keys, + const std::vector &buffer_ptrs, const std::vector &sizes) { + std::vector buffers; + buffers.reserve(buffer_ptrs.size()); + for (uintptr_t ptr : buffer_ptrs) { + buffers.push_back(reinterpret_cast(ptr)); + } + py::gil_scoped_release release; + return self.batch_get_into(keys, buffers, 
sizes); + }, + py::arg("keys"), py::arg("buffer_ptrs"), py::arg("sizes"), + "Get object data directly into pre-allocated buffers for multiple " + "keys") + .def( + "put_from", + [](DistributedObjectStore &self, const std::string &key, uintptr_t buffer_ptr, size_t size, + const ReplicateConfig &config = ReplicateConfig {}) { + // Put data directly from user-provided buffer + void *buffer = reinterpret_cast(buffer_ptr); + py::gil_scoped_release release; + return self.put_from(key, buffer, size, config); + }, + py::arg("key"), py::arg("buffer_ptr"), py::arg("size"), py::arg("config") = ReplicateConfig {}, + "Put object data directly from a pre-allocated buffer") + .def( + "batch_put_from", + [](DistributedObjectStore &self, const std::vector &keys, + const std::vector &buffer_ptrs, const std::vector &sizes, + const ReplicateConfig &config = ReplicateConfig {}) { + std::vector buffers; + buffers.reserve(buffer_ptrs.size()); + for (uintptr_t ptr : buffer_ptrs) { + buffers.push_back(reinterpret_cast(ptr)); + } + py::gil_scoped_release release; + return self.batch_put_from(keys, buffers, sizes, config); + }, + py::arg("keys"), py::arg("buffer_ptrs"), py::arg("sizes"), py::arg("config") = ReplicateConfig {}, + "Put object data directly from pre-allocated buffers for multiple " + "keys") + .def( + "put", + [](DistributedObjectStore &self, const std::string &key, py::buffer buf, + const ReplicateConfig &config = ReplicateConfig {}) { + py::buffer_info info = buf.request(/*writable=*/false); + py::gil_scoped_release release; + return self.put( + key, std::span(static_cast(info.ptr), static_cast(info.size)), config); + }, + py::arg("key"), py::arg("value"), py::arg("config") = ReplicateConfig {}) + .def( + "put_parts", + [](DistributedObjectStore &self, const std::string &key, py::args parts, + const ReplicateConfig &config = ReplicateConfig {}) { + // 1) Python buffer → span + std::vector infos; + std::vector> spans; + infos.reserve(parts.size()); + 
spans.reserve(parts.size()); + + for (auto &obj : parts) { + py::buffer buf = py::reinterpret_borrow(obj); + infos.emplace_back(buf.request(false)); + const auto &info = infos.back(); + if (info.ndim != 1 || info.itemsize != 1) + throw std::runtime_error("parts must be 1-D bytes-like"); + + spans.emplace_back(static_cast(info.ptr), static_cast(info.size)); + } + + // 2) Call C++ function + py::gil_scoped_release unlock; + return self.put_parts(key, spans, config); + }, + py::arg("key"), py::arg("config") = ReplicateConfig {}) + .def( + "put_batch", + [](DistributedObjectStore &self, const std::vector &keys, + const std::vector &py_values, const ReplicateConfig &config = ReplicateConfig {}) { + std::vector temp_values; + temp_values.reserve(py_values.size()); + for (const auto &value : py_values) { + temp_values.emplace_back(value.cast()); + } + + std::vector> spans; + spans.reserve(temp_values.size()); + for (const auto &s : temp_values) { + spans.emplace_back(s.data(), s.size()); + } + + return self.put_batch(keys, spans, config); + }, + py::arg("keys"), py::arg("values"), py::arg("config") = ReplicateConfig {}) + .def("get_hostname", &DistributedObjectStore::get_hostname); } -} // namespace mooncake +} // namespace mooncake diff --git a/mooncake-integration/store/store_py.h b/mooncake-integration/store/store_py.h index f5f98a63a..7cdbe16d5 100644 --- a/mooncake-integration/store/store_py.h +++ b/mooncake-integration/store/store_py.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include @@ -21,35 +21,35 @@ class SliceBuffer; // Global resource tracker to handle cleanup on abnormal termination class ResourceTracker { - public: - // Get the singleton instance - static ResourceTracker &getInstance(); +public: + // Get the singleton instance + static ResourceTracker &getInstance(); - // Register a DistributedObjectStore instance for cleanup - void registerInstance(DistributedObjectStore *instance); + // Register a DistributedObjectStore instance 
for cleanup + void registerInstance(DistributedObjectStore *instance); - // Unregister a DistributedObjectStore instance - void unregisterInstance(DistributedObjectStore *instance); + // Unregister a DistributedObjectStore instance + void unregisterInstance(DistributedObjectStore *instance); - private: - ResourceTracker(); - ~ResourceTracker(); +private: + ResourceTracker(); + ~ResourceTracker(); - // Prevent copying - ResourceTracker(const ResourceTracker &) = delete; - ResourceTracker &operator=(const ResourceTracker &) = delete; + // Prevent copying + ResourceTracker(const ResourceTracker &) = delete; + ResourceTracker &operator=(const ResourceTracker &) = delete; - // Cleanup all registered resources - void cleanupAllResources(); + // Cleanup all registered resources + void cleanupAllResources(); - // Signal handler function - static void signalHandler(int signal); + // Signal handler function + static void signalHandler(int signal); - // Exit handler function - static void exitHandler(); + // Exit handler function + static void exitHandler(); - std::mutex mutex_; - std::unordered_set instances_; + std::mutex mutex_; + std::unordered_set instances_; }; /** @@ -57,242 +57,221 @@ class ResourceTracker { * This class is responsible for freeing the buffer when it's destroyed (RAII) */ class SliceBuffer { - public: - /** - * @brief Construct a new SliceBuffer object with contiguous memory - * @param store Reference to the DistributedObjectStore that owns the - * allocator - * @param buffer Pointer to the contiguous buffer - * @param size Size of the buffer in bytes - * @param use_allocator_free If true, use SimpleAllocator to free the - * buffer, otherwise use delete[] - */ - SliceBuffer(DistributedObjectStore &store, void *buffer, uint64_t size, - bool use_allocator_free = true); - - /** - * @brief Destructor that frees the buffer - */ - ~SliceBuffer(); - - /** - * @brief Get a pointer to the data - * @return void* Pointer to the dat - */ - void *ptr() const; - - 
/** - * @brief Get the size of the data - * @return uint64_t Size of the data in bytes - */ - uint64_t size() const; - - private: - DistributedObjectStore &store_; - void *buffer_; - uint64_t size_; - bool use_allocator_free_; // Flag to control deallocation method +public: + /** + * @brief Construct a new SliceBuffer object with contiguous memory + * @param store Reference to the DistributedObjectStore that owns the + * allocator + * @param buffer Pointer to the contiguous buffer + * @param size Size of the buffer in bytes + * @param use_allocator_free If true, use SimpleAllocator to free the + * buffer, otherwise use delete[] + */ + SliceBuffer(DistributedObjectStore &store, void *buffer, uint64_t size, bool use_allocator_free = true); + + /** + * @brief Destructor that frees the buffer + */ + ~SliceBuffer(); + + /** + * @brief Get a pointer to the data + * @return void* Pointer to the dat + */ + void *ptr() const; + + /** + * @brief Get the size of the data + * @return uint64_t Size of the data in bytes + */ + uint64_t size() const; + +private: + DistributedObjectStore &store_; + void *buffer_; + uint64_t size_; + bool use_allocator_free_; // Flag to control deallocation method }; class DistributedObjectStore { - public: - friend class SliceGuard; // Allow SliceGuard to access private members - friend class SliceBuffer; // Allow SliceBuffer to access private members - DistributedObjectStore(); - ~DistributedObjectStore(); - - int setup(const std::string &local_hostname, - const std::string &metadata_server, - size_t global_segment_size = 1024 * 1024 * 16, - size_t local_buffer_size = 1024 * 1024 * 16, - const std::string &protocol = "tcp", - const std::string &rdma_devices = "", - const std::string &master_server_addr = "127.0.0.1:50051"); - - int initAll(const std::string &protocol, const std::string &device_name, - size_t mount_segment_size = 1024 * 1024 * 16); // Default 16MB - - int put(const std::string &key, std::span value, - const ReplicateConfig &config 
= ReplicateConfig{}); - - int register_buffer(void *buffer, size_t size); - - int unregister_buffer(void *buffer); - - /** - * @brief Get object data directly into a pre-allocated buffer - * @param key Key of the object to get - * @param buffer Pointer to the pre-allocated buffer (must be registered - * with register_buffer) - * @param size Size of the buffer - * @return Number of bytes read on success, negative value on error - * @note The buffer address must be previously registered with - * register_buffer() for zero-copy operations - */ - int get_into(const std::string &key, void *buffer, size_t size); - - /** - * @brief Get object data directly into pre-allocated buffers for multiple - * keys (batch version) - * @param keys Vector of keys of the objects to get - * @param buffers Vector of pointers to the pre-allocated buffers - * @param sizes Vector of sizes of the buffers - * @return Vector of integers, where each element is the number of bytes - * read on success, or a negative value on error - * @note The buffer addresses must be previously registered with - * register_buffer() for zero-copy operations - */ - std::vector batch_get_into(const std::vector &keys, - const std::vector &buffers, - const std::vector &sizes); - - /** - * @brief Put object data directly from a pre-allocated buffer - * @param key Key of the object to put - * @param buffer Pointer to the buffer containing data (must be registered - * with register_buffer) - * @param size Size of the data to put - * @return 0 on success, negative value on error - * @note The buffer address must be previously registered with - * register_buffer() for zero-copy operations - */ - int put_from(const std::string &key, void *buffer, size_t size, - const ReplicateConfig &config = ReplicateConfig{}); - - /** - * @brief Put object data directly from pre-allocated buffers for multiple - * keys (batch version) - * @param keys Vector of keys of the objects to put - * @param buffers Vector of pointers to the 
pre-allocated buffers - * @param sizes Vector of sizes of the buffers - * @param config Replication configuration - * @return Vector of integers, where each element is 0 on success, or a - * negative value on error - * @note The buffer addresses must be previously registered with - * register_buffer() for zero-copy operations - */ - std::vector batch_put_from( - const std::vector &keys, - const std::vector &buffers, const std::vector &sizes, - const ReplicateConfig &config = ReplicateConfig{}); - - int put_parts(const std::string &key, - std::vector> values, - const ReplicateConfig &config = ReplicateConfig{}); - - int put_batch(const std::vector &keys, - const std::vector> &values, - const ReplicateConfig &config = ReplicateConfig{}); - - [[nodiscard]] std::string get_hostname() const; - - pybind11::bytes get(const std::string &key); - - std::vector get_batch( - const std::vector &keys); - - /** - * @brief Get a buffer containing the data for a key - * @param key Key to get data for - * @return std::shared_ptr Buffer containing the data, or - * nullptr if error - */ - std::shared_ptr get_buffer(const std::string &key); - - int remove(const std::string &key); - - long removeAll(); - - int tearDownAll(); - - /** - * @brief Check if an object exists - * @param key Key to check - * @return 1 if exists, 0 if not exists, -1 if error - */ - int isExist(const std::string &key); - - /** - * @brief Check if multiple objects exist - * @param keys Vector of keys to check - * @return Vector of existence results: 1 if exists, 0 if not exists, -1 if - * error - */ - std::vector batchIsExist(const std::vector &keys); - - /** - * @brief Get the size of an object - * @param key Key of the object - * @return Size of the object in bytes, or -1 if error or object doesn't - * exist - */ - int64_t getSize(const std::string &key); - - /** - * @brief Get a PyTorch tensor from the store - * @param key Key of the tensor to get - * @param dtype Data type of the tensor - * @return PyTorch 
tensor, or nullptr if error or tensor doesn't exist - */ - pybind11::object get_tensor(const std::string &key, const std::string dtype); - - /** - * @brief Put a PyTorch tensor into the store - * @param key Key for the tensor - * @param tensor PyTorch tensor to store - * @return 0 on success, negative value on error - */ - int put_tensor(const std::string &key, pybind11::object tensor); - - private: - pybind11::module numpy = pybind11::module::import("numpy"); - pybind11::module torch = pybind11::module::import("torch"); - - int allocateSlices(std::vector &slices, - size_t length); - - int allocateSlices(std::vector &slices, - const std::string &value); - - int allocateSlices(std::vector &slices, - const std::vector &handles, - uint64_t &length); - - int allocateSlices(std::vector &slices, - std::span value); - - int allocateSlicesPacked(std::vector &slices, - const std::vector> &parts); - - int allocateBatchedSlices( - const std::vector &keys, - std::unordered_map> - &batched_slices, - const std::vector> - &replica_lists, - std::unordered_map &str_length_map); - - char *exportSlices(const std::vector &slices, - uint64_t length); - - int freeSlices(const std::vector &slices); - - public: - std::shared_ptr client_ = nullptr; - std::unique_ptr client_buffer_allocator_ = - nullptr; - struct SegmentDeleter { - void operator()(void *ptr) { - if (ptr) { - free(ptr); - } - } - }; - - std::unique_ptr segment_ptr_; - std::string protocol; - std::string device_name; - std::string local_hostname; +public: + friend class SliceGuard; // Allow SliceGuard to access private members + friend class SliceBuffer; // Allow SliceBuffer to access private members + DistributedObjectStore(); + ~DistributedObjectStore(); + + int setup(const std::string &local_hostname, const std::string &metadata_server, + size_t global_segment_size = 1024 * 1024 * 16, size_t local_buffer_size = 1024 * 1024 * 16, + const std::string &protocol = "tcp", const std::string &rdma_devices = "", + const 
std::string &master_server_addr = "127.0.0.1:50051"); + + int initAll(const std::string &protocol, const std::string &device_name, + size_t mount_segment_size = 1024 * 1024 * 16); // Default 16MB + + int put(const std::string &key, std::span value, const ReplicateConfig &config = ReplicateConfig {}); + + int register_buffer(void *buffer, size_t size); + + int unregister_buffer(void *buffer); + + /** + * @brief Get object data directly into a pre-allocated buffer + * @param key Key of the object to get + * @param buffer Pointer to the pre-allocated buffer (must be registered + * with register_buffer) + * @param size Size of the buffer + * @return Number of bytes read on success, negative value on error + * @note The buffer address must be previously registered with + * register_buffer() for zero-copy operations + */ + int get_into(const std::string &key, void *buffer, size_t size); + + /** + * @brief Get object data directly into pre-allocated buffers for multiple + * keys (batch version) + * @param keys Vector of keys of the objects to get + * @param buffers Vector of pointers to the pre-allocated buffers + * @param sizes Vector of sizes of the buffers + * @return Vector of integers, where each element is the number of bytes + * read on success, or a negative value on error + * @note The buffer addresses must be previously registered with + * register_buffer() for zero-copy operations + */ + std::vector batch_get_into(const std::vector &keys, const std::vector &buffers, + const std::vector &sizes); + + /** + * @brief Put object data directly from a pre-allocated buffer + * @param key Key of the object to put + * @param buffer Pointer to the buffer containing data (must be registered + * with register_buffer) + * @param size Size of the data to put + * @return 0 on success, negative value on error + * @note The buffer address must be previously registered with + * register_buffer() for zero-copy operations + */ + int put_from(const std::string &key, void *buffer, 
size_t size, const ReplicateConfig &config = ReplicateConfig {}); + + /** + * @brief Put object data directly from pre-allocated buffers for multiple + * keys (batch version) + * @param keys Vector of keys of the objects to put + * @param buffers Vector of pointers to the pre-allocated buffers + * @param sizes Vector of sizes of the buffers + * @param config Replication configuration + * @return Vector of integers, where each element is 0 on success, or a + * negative value on error + * @note The buffer addresses must be previously registered with + * register_buffer() for zero-copy operations + */ + std::vector batch_put_from(const std::vector &keys, const std::vector &buffers, + const std::vector &sizes, + const ReplicateConfig &config = ReplicateConfig {}); + + int put_parts(const std::string &key, std::vector> values, + const ReplicateConfig &config = ReplicateConfig {}); + + int put_batch(const std::vector &keys, const std::vector> &values, + const ReplicateConfig &config = ReplicateConfig {}); + + [[nodiscard]] std::string get_hostname() const; + + pybind11::bytes get(const std::string &key); + + std::vector get_batch(const std::vector &keys); + + /** + * @brief Get a buffer containing the data for a key + * @param key Key to get data for + * @return std::shared_ptr Buffer containing the data, or + * nullptr if error + */ + std::shared_ptr get_buffer(const std::string &key); + + int remove(const std::string &key); + + long removeAll(); + + int tearDownAll(); + + /** + * @brief Check if an object exists + * @param key Key to check + * @return 1 if exists, 0 if not exists, -1 if error + */ + int isExist(const std::string &key); + + /** + * @brief Check if multiple objects exist + * @param keys Vector of keys to check + * @return Vector of existence results: 1 if exists, 0 if not exists, -1 if + * error + */ + std::vector batchIsExist(const std::vector &keys); + + /** + * @brief Get the size of an object + * @param key Key of the object + * @return Size of the 
object in bytes, or -1 if error or object doesn't + * exist + */ + int64_t getSize(const std::string &key); + + /** + * @brief Get a PyTorch tensor from the store + * @param key Key of the tensor to get + * @param dtype Data type of the tensor + * @return PyTorch tensor, or nullptr if error or tensor doesn't exist + */ + pybind11::object get_tensor(const std::string &key, const std::string dtype); + + /** + * @brief Put a PyTorch tensor into the store + * @param key Key for the tensor + * @param tensor PyTorch tensor to store + * @return 0 on success, negative value on error + */ + int put_tensor(const std::string &key, pybind11::object tensor); + +private: + pybind11::module numpy = pybind11::module::import("numpy"); + pybind11::module torch = pybind11::module::import("torch"); + + int allocateSlices(std::vector &slices, size_t length); + + int allocateSlices(std::vector &slices, const std::string &value); + + int allocateSlices(std::vector &slices, const std::vector &handles, + uint64_t &length); + + int allocateSlices(std::vector &slices, std::span value); + + int allocateSlicesPacked(std::vector &slices, const std::vector> &parts); + + int allocateBatchedSlices(const std::vector &keys, + std::unordered_map> &batched_slices, + const std::vector> &replica_lists, + std::unordered_map &str_length_map); + + char *exportSlices(const std::vector &slices, uint64_t length); + + int freeSlices(const std::vector &slices); + +public: + std::shared_ptr client_ = nullptr; + std::unique_ptr client_buffer_allocator_ = nullptr; + struct SegmentDeleter { + void operator()(void *ptr) { + if (ptr) { + free(ptr); + } + } + }; + + std::unique_ptr segment_ptr_; + std::string protocol; + std::string device_name; + std::string local_hostname; }; -} // namespace mooncake +} // namespace mooncake diff --git a/mooncake-integration/transfer_engine/transfer_engine_py.cpp b/mooncake-integration/transfer_engine/transfer_engine_py.cpp index 589cd9c38..a2f37807c 100644 --- 
a/mooncake-integration/transfer_engine/transfer_engine_py.cpp +++ b/mooncake-integration/transfer_engine/transfer_engine_py.cpp @@ -23,645 +23,619 @@ #ifdef USE_MNNVL #include "transport/nvlink_transport/nvlink_transport.h" static void *allocateMemory(size_t size) { - return mooncake::NvlinkTransport::allocatePinnedLocalMemory(size); + return mooncake::NvlinkTransport::allocatePinnedLocalMemory(size); } static void freeMemory(void *ptr) { - mooncake::NvlinkTransport::freePinnedLocalMemory(ptr); + mooncake::NvlinkTransport::freePinnedLocalMemory(ptr); } #else -static void *allocateMemory(size_t size) { return malloc(size); } -static void freeMemory(void *ptr) { free(ptr); } +static void *allocateMemory(size_t size) { + return malloc(size); +} +static void freeMemory(void *ptr) { + free(ptr); +} #endif TransferEnginePy::TransferEnginePy() { - const int64_t kNanosPerSecond = 1000 * 1000 * 1000; - if (getenv("MC_TRANSFER_TIMEOUT")) { - int timeout_sec = std::max(5, atoi(getenv("MC_TRANSFER_TIMEOUT"))); - transfer_timeout_nsec_ = timeout_sec * kNanosPerSecond; - } else { - transfer_timeout_nsec_ = 30 * kNanosPerSecond; - } + const int64_t kNanosPerSecond = 1000 * 1000 * 1000; + if (getenv("MC_TRANSFER_TIMEOUT")) { + int timeout_sec = std::max(5, atoi(getenv("MC_TRANSFER_TIMEOUT"))); + transfer_timeout_nsec_ = timeout_sec * kNanosPerSecond; + } else { + transfer_timeout_nsec_ = 30 * kNanosPerSecond; + } } TransferEnginePy::~TransferEnginePy() { - for (auto &handle : handle_map_) engine_->closeSegment(handle.second); - handle_map_.clear(); - engine_.reset(); - for (auto &buffer : buffer_list_) freeMemory(buffer); - buffer_list_.clear(); - for (auto &buffer : large_buffer_list_) freeMemory(buffer); - large_buffer_list_.clear(); + for (auto &handle : handle_map_) + engine_->closeSegment(handle.second); + handle_map_.clear(); + engine_.reset(); + for (auto &buffer : buffer_list_) + freeMemory(buffer); + buffer_list_.clear(); + for (auto &buffer : large_buffer_list_) + 
freeMemory(buffer); + large_buffer_list_.clear(); } std::vector buildDeviceFilter(const std::string &device_names) { - std::stringstream ss(device_names); - std::string item; - std::vector tokens; - while (getline(ss, item, ',')) { - tokens.push_back(item); - } - return tokens; -} - -std::pair parseConnectionString( - const std::string &conn_string) { - std::pair result; - std::string proto = "etcd"; - std::string domain; - std::size_t pos = conn_string.find("://"); - - if (pos != std::string::npos) { - proto = conn_string.substr(0, pos); - domain = conn_string.substr(pos + 3); - } else if (conn_string == P2PHANDSHAKE) { - proto = ""; - domain = P2PHANDSHAKE; - } else { - domain = conn_string; - } - - result.first = proto; - result.second = domain; - return result; -} - -std::string buildConnString(const std::string &metadata_type, - const std::string &metadata_server) { - if (metadata_server == P2PHANDSHAKE) { - return P2PHANDSHAKE; - } - - std::string conn_string = metadata_server; - if (conn_string.find("://") == std::string::npos) - conn_string = metadata_type + "://" + metadata_server; - return conn_string; -} - -int TransferEnginePy::initialize(const char *local_hostname, - const char *metadata_server, - const char *protocol, + std::stringstream ss(device_names); + std::string item; + std::vector tokens; + while (getline(ss, item, ',')) { + tokens.push_back(item); + } + return tokens; +} + +std::pair parseConnectionString(const std::string &conn_string) { + std::pair result; + std::string proto = "etcd"; + std::string domain; + std::size_t pos = conn_string.find("://"); + + if (pos != std::string::npos) { + proto = conn_string.substr(0, pos); + domain = conn_string.substr(pos + 3); + } else if (conn_string == P2PHANDSHAKE) { + proto = ""; + domain = P2PHANDSHAKE; + } else { + domain = conn_string; + } + + result.first = proto; + result.second = domain; + return result; +} + +std::string buildConnString(const std::string &metadata_type, const std::string 
&metadata_server) { + if (metadata_server == P2PHANDSHAKE) { + return P2PHANDSHAKE; + } + + std::string conn_string = metadata_server; + if (conn_string.find("://") == std::string::npos) + conn_string = metadata_type + "://" + metadata_server; + return conn_string; +} + +int TransferEnginePy::initialize(const char *local_hostname, const char *metadata_server, const char *protocol, const char *device_name) { - auto conn_string = parseConnectionString(metadata_server); - return initializeExt(local_hostname, conn_string.second.c_str(), protocol, - device_name, conn_string.first.c_str()); -} - -int TransferEnginePy::initializeExt(const char *local_hostname, - const char *metadata_server, - const char *protocol, - const char *device_name, - const char *metadata_type) { - (void)(protocol); - std::string conn_string = buildConnString(metadata_type, metadata_server); - - auto device_name_safe = device_name ? std::string(device_name) : ""; - auto device_filter = buildDeviceFilter(device_name_safe); - engine_ = std::make_unique(true, device_filter); - if (getenv("MC_LEGACY_RPC_PORT_BINDING")) { - auto hostname_port = parseHostNameWithPort(local_hostname); - int ret = - engine_->init(conn_string, local_hostname, - hostname_port.first.c_str(), hostname_port.second); - if (ret) return -1; - } else { - // the last two params are unused - int ret = engine_->init(conn_string, local_hostname, "", 0); - if (ret) return -1; - } - - free_list_.resize(kSlabSizeKBTabLen); + auto conn_string = parseConnectionString(metadata_server); + return initializeExt(local_hostname, conn_string.second.c_str(), protocol, device_name, conn_string.first.c_str()); +} + +int TransferEnginePy::initializeExt(const char *local_hostname, const char *metadata_server, const char *protocol, + const char *device_name, const char *metadata_type) { + (void)(protocol); + std::string conn_string = buildConnString(metadata_type, metadata_server); + + auto device_name_safe = device_name ? 
std::string(device_name) : ""; + auto device_filter = buildDeviceFilter(device_name_safe); + engine_ = std::make_unique(true, device_filter); + if (getenv("MC_LEGACY_RPC_PORT_BINDING")) { + auto hostname_port = parseHostNameWithPort(local_hostname); + int ret = engine_->init(conn_string, local_hostname, hostname_port.first.c_str(), hostname_port.second); + if (ret) + return -1; + } else { + // the last two params are unused + int ret = engine_->init(conn_string, local_hostname, "", 0); + if (ret) + return -1; + } + + free_list_.resize(kSlabSizeKBTabLen); #ifndef USE_ASCEND - doBuddyAllocate(kMaxClassId); + doBuddyAllocate(kMaxClassId); #endif - return 0; + return 0; } -int TransferEnginePy::getRpcPort() { return engine_->getRpcPort(); } +int TransferEnginePy::getRpcPort() { + return engine_->getRpcPort(); +} char *TransferEnginePy::allocateRawBuffer(size_t capacity) { - auto buffer = allocateMemory(capacity); - if (!buffer) return nullptr; - int ret = engine_->registerLocalMemory(buffer, capacity, kWildcardLocation); - if (ret) { - freeMemory(buffer); - return nullptr; - } - return (char *)buffer; + auto buffer = allocateMemory(capacity); + if (!buffer) + return nullptr; + int ret = engine_->registerLocalMemory(buffer, capacity, kWildcardLocation); + if (ret) { + freeMemory(buffer); + return nullptr; + } + return (char *)buffer; } int TransferEnginePy::findClassId(size_t size) { - if (size > 1024ull * kSlabSizeKB[kMaxClassId]) return -1; - for (int i = kMaxClassId - 1; i >= 0; --i) - if (size > 1024ull * kSlabSizeKB[i]) return i + 1; - return 0; + if (size > 1024ull * kSlabSizeKB[kMaxClassId]) + return -1; + for (int i = kMaxClassId - 1; i >= 0; --i) + if (size > 1024ull * kSlabSizeKB[i]) + return i + 1; + return 0; } int TransferEnginePy::doBuddyAllocate(int class_id) { - if (class_id == kMaxClassId) { - auto buffer = allocateRawBuffer(kDefaultBufferCapacity); - buffer_list_.push_back(buffer); - for (size_t offset = 0; offset < kDefaultBufferCapacity; - offset += 
1024ull * kSlabSizeKB[kMaxClassId]) - free_list_[kMaxClassId].push(buffer + offset); - return 0; - } - if (free_list_[class_id + 1].empty()) { - int ret = doBuddyAllocate(class_id + 1); - if (ret) return ret; - } - assert(!free_list_[class_id + 1].empty()); - char *buffer = free_list_[class_id + 1].top(); - free_list_[class_id + 1].pop(); - free_list_[class_id].push(buffer); - free_list_[class_id].push(buffer + kSlabSizeKB[class_id] * 1024); - return 0; + if (class_id == kMaxClassId) { + auto buffer = allocateRawBuffer(kDefaultBufferCapacity); + buffer_list_.push_back(buffer); + for (size_t offset = 0; offset < kDefaultBufferCapacity; offset += 1024ull * kSlabSizeKB[kMaxClassId]) + free_list_[kMaxClassId].push(buffer + offset); + return 0; + } + if (free_list_[class_id + 1].empty()) { + int ret = doBuddyAllocate(class_id + 1); + if (ret) + return ret; + } + assert(!free_list_[class_id + 1].empty()); + char *buffer = free_list_[class_id + 1].top(); + free_list_[class_id + 1].pop(); + free_list_[class_id].push(buffer); + free_list_[class_id].push(buffer + kSlabSizeKB[class_id] * 1024); + return 0; } uintptr_t TransferEnginePy::allocateManagedBuffer(size_t length) { - std::lock_guard guard(mutex_); - int class_id = findClassId(length); - if (class_id < 0) { - char *buffer = allocateRawBuffer(length); - if (buffer) large_buffer_list_.insert(buffer); - return (uintptr_t)buffer; - } - if (free_list_[class_id].empty()) - if (doBuddyAllocate(class_id)) return 0; - assert(!free_list_[class_id].empty()); - char *buffer = free_list_[class_id].top(); - free_list_[class_id].pop(); - return (uintptr_t)buffer; + std::lock_guard guard(mutex_); + int class_id = findClassId(length); + if (class_id < 0) { + char *buffer = allocateRawBuffer(length); + if (buffer) + large_buffer_list_.insert(buffer); + return (uintptr_t)buffer; + } + if (free_list_[class_id].empty()) + if (doBuddyAllocate(class_id)) + return 0; + assert(!free_list_[class_id].empty()); + char *buffer = 
free_list_[class_id].top(); + free_list_[class_id].pop(); + return (uintptr_t)buffer; } int TransferEnginePy::freeManagedBuffer(uintptr_t buffer_addr, size_t length) { - std::lock_guard guard(mutex_); - auto buffer = (char *)buffer_addr; - int class_id = findClassId(length); - if (class_id < 0) { - large_buffer_list_.erase(buffer); - engine_->unregisterLocalMemory(buffer); - freeMemory(buffer); - return 0; - } - free_list_[class_id].push(buffer); - return 0; -} - -int TransferEnginePy::transferSyncWrite(const char *target_hostname, - uintptr_t buffer, - uintptr_t peer_buffer_address, + std::lock_guard guard(mutex_); + auto buffer = (char *)buffer_addr; + int class_id = findClassId(length); + if (class_id < 0) { + large_buffer_list_.erase(buffer); + engine_->unregisterLocalMemory(buffer); + freeMemory(buffer); + return 0; + } + free_list_[class_id].push(buffer); + return 0; +} + +int TransferEnginePy::transferSyncWrite(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, size_t length) { - return transferSync(target_hostname, buffer, peer_buffer_address, length, - TransferOpcode::WRITE); + return transferSync(target_hostname, buffer, peer_buffer_address, length, TransferOpcode::WRITE); } -int TransferEnginePy::transferSyncRead(const char *target_hostname, - uintptr_t buffer, - uintptr_t peer_buffer_address, +int TransferEnginePy::transferSyncRead(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, size_t length) { - return transferSync(target_hostname, buffer, peer_buffer_address, length, - TransferOpcode::READ); + return transferSync(target_hostname, buffer, peer_buffer_address, length, TransferOpcode::READ); } -int TransferEnginePy::batchTransferSyncWrite(const char *target_hostname, - std::vector buffers, +int TransferEnginePy::batchTransferSyncWrite(const char *target_hostname, std::vector buffers, std::vector peer_buffer_addresses, std::vector lengths) { - return batchTransferSync(target_hostname, buffers, 
peer_buffer_addresses, lengths, - TransferOpcode::WRITE); -} - -int TransferEnginePy::batchTransferSyncRead(const char *target_hostname, - std::vector buffers, - std::vector peer_buffer_addresses, - std::vector lengths) { - return batchTransferSync(target_hostname, buffers, peer_buffer_addresses, lengths, - TransferOpcode::READ); -} - -batch_id_t TransferEnginePy::batchTransferAsyncWrite(const char *target_hostname, - const std::vector &buffers, - const std::vector &peer_buffer_addresses, - const std::vector &lengths) { - return batchTransferAsync(target_hostname, buffers, peer_buffer_addresses, lengths, - TransferOpcode::WRITE); -} - -batch_id_t TransferEnginePy::batchTransferAsyncRead(const char *target_hostname, - const std::vector &buffers, - const std::vector &peer_buffer_addresses, - const std::vector &lengths) { - return batchTransferAsync(target_hostname, buffers, peer_buffer_addresses, lengths, - TransferOpcode::READ); -} - -int TransferEnginePy::transferSync(const char *target_hostname, - uintptr_t buffer, - uintptr_t peer_buffer_address, size_t length, - TransferOpcode opcode) { - pybind11::gil_scoped_release release; - Transport::SegmentHandle handle; - { - std::lock_guard guard(mutex_); - if (handle_map_.count(target_hostname)) { - handle = handle_map_[target_hostname]; - } else { - handle = engine_->openSegment(target_hostname); - if (handle == (Transport::SegmentHandle)-1) return -1; - handle_map_[target_hostname] = handle; - } - } - - // TODO this is just a workaround - // When transfer engine submits one task, it will be dispatch to a worker - // associated with one local RNIC. If the local RNIC fails to connect to any - // remote RNIC, it will eventually fail. This allows selecting multiple - // local RNIC in one transferSync call. Will be fixed in the next revision. 
- const int max_retry = - engine_->numContexts() + 1; // Iter all possible local contexts - auto start_ts = getCurrentTimeInNano(); - for (int retry = 0; retry < max_retry; ++retry) { - auto batch_id = engine_->allocateBatchID(1); - TransferRequest entry; - if (opcode == TransferOpcode::WRITE) { - entry.opcode = TransferRequest::WRITE; - } else { - entry.opcode = TransferRequest::READ; - } - entry.length = length; - entry.source = (void *)buffer; - entry.target_id = handle; - entry.target_offset = peer_buffer_address; - entry.advise_retry_cnt = retry; - - Status s = engine_->submitTransfer(batch_id, {entry}); - if (!s.ok()) return -1; - - TransferStatus status; - bool completed = false; - while (!completed) { - Status s = engine_->getTransferStatus(batch_id, 0, status); - LOG_ASSERT(s.ok()); - if (status.s == TransferStatusEnum::COMPLETED) { - engine_->freeBatchID(batch_id); - return 0; - } else if (status.s == TransferStatusEnum::FAILED) { - engine_->freeBatchID(batch_id); - completed = true; - } else if (status.s == TransferStatusEnum::TIMEOUT) { - LOG(INFO) << "Sync data transfer timeout"; - completed = true; - } - auto current_ts = getCurrentTimeInNano(); - const int64_t timeout = - transfer_timeout_nsec_ + length; // 1GiB per second - if (current_ts - start_ts > timeout) { - LOG(INFO) << "Sync data transfer timeout after " - << current_ts - start_ts << "ns, local buffer " - << (void *)buffer << " remote buffer " - << (void *)peer_buffer_address << " length " - << length; - return -1; - } - } - } - return -1; -} - -int TransferEnginePy::batchTransferSync(const char *target_hostname, - std::vector buffers, - std::vector peer_buffer_addresses, - std::vector lengths, - TransferOpcode opcode) { - pybind11::gil_scoped_release release; - Transport::SegmentHandle handle; - { - std::lock_guard guard(mutex_); - if (handle_map_.count(target_hostname)) { - handle = handle_map_[target_hostname]; - } else { - handle = engine_->openSegment(target_hostname); - if (handle == 
(Transport::SegmentHandle)-1) return -1; - handle_map_[target_hostname] = handle; - } - } - - if (buffers.size() != peer_buffer_addresses.size() || buffers.size() != lengths.size()) { - LOG(ERROR) << "buffers, peer_buffer_addresses and lengths have different size"; - return -1; - } - - const int max_retry = engine_->numContexts() + 1; - auto start_ts = getCurrentTimeInNano(); - auto total_length = std::accumulate(lengths.begin(), lengths.end(), 0ull); - auto batch_size = buffers.size(); - std::vector entries; - for (size_t i = 0; i < batch_size; ++i) { - TransferRequest entry; - if (opcode == TransferOpcode::WRITE) { - entry.opcode = TransferRequest::WRITE; - } else { - entry.opcode = TransferRequest::READ; - } - entry.length = lengths[i]; - entry.source = (void *)buffers[i]; - entry.target_id = handle; - entry.target_offset = peer_buffer_addresses[i]; - entry.advise_retry_cnt = 0; - entries.push_back(entry); - } - - for (int retry = 0; retry < max_retry; ++retry) { - auto batch_id = engine_->allocateBatchID(batch_size); - Status s = engine_->submitTransfer(batch_id, entries); - if (!s.ok()) { - engine_->freeBatchID(batch_id); - return -1; - } - - TransferStatus status; - bool completed = false; - bool already_freed = false; - while (!completed) { - Status s = engine_->getBatchTransferStatus(batch_id, status); - LOG_ASSERT(s.ok()); - if (status.s == TransferStatusEnum::COMPLETED) { - engine_->freeBatchID(batch_id); - return 0; - } else if (status.s == TransferStatusEnum::FAILED) { - engine_->freeBatchID(batch_id); - already_freed = true; - completed = true; - } else if (status.s == TransferStatusEnum::TIMEOUT) { - LOG(INFO) << "Sync data transfer timeout"; - completed = true; - } - auto current_ts = getCurrentTimeInNano(); - const int64_t timeout = transfer_timeout_nsec_ + total_length; // 1GiB per second - if (current_ts - start_ts > timeout) { - LOG(INFO) << "Sync batch data transfer timeout after " - << current_ts - start_ts << "ns"; - // TODO: as @doujiang24 
mentioned, early free(while there are still waiting tasks) - // the batch_id may fail and cause memory leak(a known issue). - if (!already_freed) { - engine_->freeBatchID(batch_id); - } - return -1; - } - } - } - return -1; -} - -batch_id_t TransferEnginePy::batchTransferAsync(const char *target_hostname, - const std::vector& buffers, - const std::vector& peer_buffer_addresses, - const std::vector& lengths, - TransferOpcode opcode) { - pybind11::gil_scoped_release release; - Transport::SegmentHandle handle; - { - std::lock_guard guard(mutex_); - if (handle_map_.count(target_hostname)) { - handle = handle_map_[target_hostname]; - } else { - handle = engine_->openSegment(target_hostname); - if (handle == (Transport::SegmentHandle)-1) return -1; - handle_map_[target_hostname] = handle; - } - } - - if (buffers.size() != peer_buffer_addresses.size() || buffers.size() != lengths.size()) { - LOG(ERROR) << "buffers, peer_buffer_addresses and lengths have different size"; - return 0; - } - - const int max_retry = engine_->numContexts() + 1; - auto batch_size = buffers.size(); - std::vector entries; - batch_id_t batch_id = 0; - for (size_t i = 0; i < batch_size; ++i) { - TransferRequest entry; - if (opcode == TransferOpcode::WRITE) { - entry.opcode = TransferRequest::WRITE; - } else { - entry.opcode = TransferRequest::READ; - } - entry.length = lengths[i]; - entry.source = (void *)buffers[i]; - entry.target_id = handle; - entry.target_offset = peer_buffer_addresses[i]; - entry.advise_retry_cnt = 0; - entries.push_back(entry); - } - - for (int retry = 0; retry < max_retry; ++retry) { - batch_id = engine_->allocateBatchID(batch_size); - auto batch_desc = reinterpret_cast(batch_id); - - auto start_ts = getCurrentTimeInNano(); - batch_desc->start_timestamp = start_ts; - - Status s = engine_->submitTransfer(batch_id, entries); - if (!s.ok()) { - engine_->freeBatchID(batch_id); - return 0; - } else { - break; - } - } - - return batch_id; -} - -int 
TransferEnginePy::getBatchTransferStatus(const std::vector& batch_ids) { - pybind11::gil_scoped_release release; - TransferStatus status; - std::unordered_map timeout_table{}; - for (auto &batch_id : batch_ids) { - int64_t total_length = 0; - auto batch_desc = reinterpret_cast(batch_id); - const size_t task_count = batch_desc->task_list.size(); - - for (size_t task_id = 0; task_id < task_count; task_id++) { - auto &task = batch_desc->task_list[task_id]; - for (auto &slice : task.slice_list) { - total_length += slice->length; - } - } - - timeout_table[batch_id] = total_length + transfer_timeout_nsec_; - } - - bool failed_or_timeout = false; - std::unordered_set remove_ids {}; - while (!timeout_table.empty() && !failed_or_timeout) { - for (auto &entry : timeout_table) { - auto batch_desc = reinterpret_cast(entry.first); - Status s = engine_->getBatchTransferStatus(entry.first, status); - LOG_ASSERT(s.ok()); - if (status.s == TransferStatusEnum::COMPLETED) { - engine_->freeBatchID(entry.first); - LOG(INFO) << "Batch Transfer completed!"; - remove_ids.insert(entry.first); - } else if (status.s == TransferStatusEnum::FAILED) { - failed_or_timeout = true; - } else if (status.s == TransferStatusEnum::TIMEOUT) { - LOG(INFO) << "Sync data transfer timeout"; - } - auto current_ts = getCurrentTimeInNano(); - if (current_ts - batch_desc->start_timestamp > entry.second) { - LOG(INFO) << "Sync batch data transfer timeout after " - << current_ts - batch_desc->start_timestamp << "ns"; - failed_or_timeout = true; - } - } - - for (auto &remove_id : remove_ids) { - timeout_table.erase(remove_id); - } - - remove_ids.clear(); - } - - if (failed_or_timeout) { - for (auto &entry : timeout_table) { - engine_->freeBatchID(entry.first); - } - } - - return failed_or_timeout ? 
-1 : 0; -} - -batch_id_t TransferEnginePy::transferSubmitWrite(const char *target_hostname, - uintptr_t buffer, - uintptr_t peer_buffer_address, - size_t length) { - pybind11::gil_scoped_release release; - Transport::SegmentHandle handle; - { - std::lock_guard guard(mutex_); - if (handle_map_.count(target_hostname)) { - handle = handle_map_[target_hostname]; - } else { - handle = engine_->openSegment(target_hostname); - if (handle == (Transport::SegmentHandle)-1) return -1; - handle_map_[target_hostname] = handle; - } - } - - auto batch_id = engine_->allocateBatchID(1); - TransferRequest entry; - entry.opcode = TransferRequest::WRITE; - entry.length = length; - entry.source = (void *)buffer; - entry.target_id = handle; - entry.target_offset = peer_buffer_address; - - Status s = engine_->submitTransfer(batch_id, {entry}); - if (!s.ok()) return -1; - - return batch_id; + return batchTransferSync(target_hostname, buffers, peer_buffer_addresses, lengths, TransferOpcode::WRITE); +} + +int TransferEnginePy::batchTransferSyncRead(const char *target_hostname, std::vector buffers, + std::vector peer_buffer_addresses, std::vector lengths) { + return batchTransferSync(target_hostname, buffers, peer_buffer_addresses, lengths, TransferOpcode::READ); +} + +batch_id_t TransferEnginePy::batchTransferAsyncWrite(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths) { + return batchTransferAsync(target_hostname, buffers, peer_buffer_addresses, lengths, TransferOpcode::WRITE); +} + +batch_id_t TransferEnginePy::batchTransferAsyncRead(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths) { + return batchTransferAsync(target_hostname, buffers, peer_buffer_addresses, lengths, TransferOpcode::READ); +} + +int TransferEnginePy::transferSync(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, + size_t length, 
TransferOpcode opcode) { + pybind11::gil_scoped_release release; + Transport::SegmentHandle handle; + { + std::lock_guard guard(mutex_); + if (handle_map_.count(target_hostname)) { + handle = handle_map_[target_hostname]; + } else { + handle = engine_->openSegment(target_hostname); + if (handle == (Transport::SegmentHandle)-1) + return -1; + handle_map_[target_hostname] = handle; + } + } + + // TODO this is just a workaround + // When transfer engine submits one task, it will be dispatch to a worker + // associated with one local RNIC. If the local RNIC fails to connect to any + // remote RNIC, it will eventually fail. This allows selecting multiple + // local RNIC in one transferSync call. Will be fixed in the next revision. + const int max_retry = engine_->numContexts() + 1; // Iter all possible local contexts + auto start_ts = getCurrentTimeInNano(); + for (int retry = 0; retry < max_retry; ++retry) { + auto batch_id = engine_->allocateBatchID(1); + TransferRequest entry; + if (opcode == TransferOpcode::WRITE) { + entry.opcode = TransferRequest::WRITE; + } else { + entry.opcode = TransferRequest::READ; + } + entry.length = length; + entry.source = (void *)buffer; + entry.target_id = handle; + entry.target_offset = peer_buffer_address; + entry.advise_retry_cnt = retry; + + Status s = engine_->submitTransfer(batch_id, {entry}); + if (!s.ok()) + return -1; + + TransferStatus status; + bool completed = false; + while (!completed) { + Status s = engine_->getTransferStatus(batch_id, 0, status); + LOG_ASSERT(s.ok()); + if (status.s == TransferStatusEnum::COMPLETED) { + engine_->freeBatchID(batch_id); + return 0; + } else if (status.s == TransferStatusEnum::FAILED) { + engine_->freeBatchID(batch_id); + completed = true; + } else if (status.s == TransferStatusEnum::TIMEOUT) { + LOG(INFO) << "Sync data transfer timeout"; + completed = true; + } + auto current_ts = getCurrentTimeInNano(); + const int64_t timeout = transfer_timeout_nsec_ + length; // 1GiB per second + if 
(current_ts - start_ts > timeout) { + LOG(INFO) << "Sync data transfer timeout after " << current_ts - start_ts << "ns, local buffer " + << (void *)buffer << " remote buffer " << (void *)peer_buffer_address << " length " << length; + return -1; + } + } + } + return -1; +} + +int TransferEnginePy::batchTransferSync(const char *target_hostname, std::vector buffers, + std::vector peer_buffer_addresses, std::vector lengths, + TransferOpcode opcode) { + pybind11::gil_scoped_release release; + Transport::SegmentHandle handle; + { + std::lock_guard guard(mutex_); + if (handle_map_.count(target_hostname)) { + handle = handle_map_[target_hostname]; + } else { + handle = engine_->openSegment(target_hostname); + if (handle == (Transport::SegmentHandle)-1) + return -1; + handle_map_[target_hostname] = handle; + } + } + + if (buffers.size() != peer_buffer_addresses.size() || buffers.size() != lengths.size()) { + LOG(ERROR) << "buffers, peer_buffer_addresses and lengths have different size"; + return -1; + } + + const int max_retry = engine_->numContexts() + 1; + auto start_ts = getCurrentTimeInNano(); + auto total_length = std::accumulate(lengths.begin(), lengths.end(), 0ull); + auto batch_size = buffers.size(); + std::vector entries; + for (size_t i = 0; i < batch_size; ++i) { + TransferRequest entry; + if (opcode == TransferOpcode::WRITE) { + entry.opcode = TransferRequest::WRITE; + } else { + entry.opcode = TransferRequest::READ; + } + entry.length = lengths[i]; + entry.source = (void *)buffers[i]; + entry.target_id = handle; + entry.target_offset = peer_buffer_addresses[i]; + entry.advise_retry_cnt = 0; + entries.push_back(entry); + } + + for (int retry = 0; retry < max_retry; ++retry) { + auto batch_id = engine_->allocateBatchID(batch_size); + Status s = engine_->submitTransfer(batch_id, entries); + if (!s.ok()) { + engine_->freeBatchID(batch_id); + return -1; + } + + TransferStatus status; + bool completed = false; + bool already_freed = false; + while (!completed) { + 
Status s = engine_->getBatchTransferStatus(batch_id, status); + LOG_ASSERT(s.ok()); + if (status.s == TransferStatusEnum::COMPLETED) { + engine_->freeBatchID(batch_id); + return 0; + } else if (status.s == TransferStatusEnum::FAILED) { + engine_->freeBatchID(batch_id); + already_freed = true; + completed = true; + } else if (status.s == TransferStatusEnum::TIMEOUT) { + LOG(INFO) << "Sync data transfer timeout"; + completed = true; + } + auto current_ts = getCurrentTimeInNano(); + const int64_t timeout = transfer_timeout_nsec_ + total_length; // 1GiB per second + if (current_ts - start_ts > timeout) { + LOG(INFO) << "Sync batch data transfer timeout after " << current_ts - start_ts << "ns"; + // TODO: as @doujiang24 mentioned, early free(while there are + // still waiting tasks) the batch_id may fail and cause memory + // leak(a known issue). + if (!already_freed) { + engine_->freeBatchID(batch_id); + } + return -1; + } + } + } + return -1; +} + +batch_id_t TransferEnginePy::batchTransferAsync(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths, TransferOpcode opcode) { + pybind11::gil_scoped_release release; + Transport::SegmentHandle handle; + { + std::lock_guard guard(mutex_); + if (handle_map_.count(target_hostname)) { + handle = handle_map_[target_hostname]; + } else { + handle = engine_->openSegment(target_hostname); + if (handle == (Transport::SegmentHandle)-1) + return -1; + handle_map_[target_hostname] = handle; + } + } + + if (buffers.size() != peer_buffer_addresses.size() || buffers.size() != lengths.size()) { + LOG(ERROR) << "buffers, peer_buffer_addresses and lengths have different size"; + return 0; + } + + const int max_retry = engine_->numContexts() + 1; + auto batch_size = buffers.size(); + std::vector entries; + batch_id_t batch_id = 0; + for (size_t i = 0; i < batch_size; ++i) { + TransferRequest entry; + if (opcode == TransferOpcode::WRITE) { + entry.opcode = 
TransferRequest::WRITE; + } else { + entry.opcode = TransferRequest::READ; + } + entry.length = lengths[i]; + entry.source = (void *)buffers[i]; + entry.target_id = handle; + entry.target_offset = peer_buffer_addresses[i]; + entry.advise_retry_cnt = 0; + entries.push_back(entry); + } + + for (int retry = 0; retry < max_retry; ++retry) { + batch_id = engine_->allocateBatchID(batch_size); + auto batch_desc = reinterpret_cast(batch_id); + + auto start_ts = getCurrentTimeInNano(); + batch_desc->start_timestamp = start_ts; + + Status s = engine_->submitTransfer(batch_id, entries); + if (!s.ok()) { + engine_->freeBatchID(batch_id); + return 0; + } else { + break; + } + } + + return batch_id; +} + +int TransferEnginePy::getBatchTransferStatus(const std::vector &batch_ids) { + pybind11::gil_scoped_release release; + TransferStatus status; + std::unordered_map timeout_table {}; + for (auto &batch_id : batch_ids) { + int64_t total_length = 0; + auto batch_desc = reinterpret_cast(batch_id); + const size_t task_count = batch_desc->task_list.size(); + + for (size_t task_id = 0; task_id < task_count; task_id++) { + auto &task = batch_desc->task_list[task_id]; + for (auto &slice : task.slice_list) { + total_length += slice->length; + } + } + + timeout_table[batch_id] = total_length + transfer_timeout_nsec_; + } + + bool failed_or_timeout = false; + std::unordered_set remove_ids {}; + while (!timeout_table.empty() && !failed_or_timeout) { + for (auto &entry : timeout_table) { + auto batch_desc = reinterpret_cast(entry.first); + Status s = engine_->getBatchTransferStatus(entry.first, status); + LOG_ASSERT(s.ok()); + if (status.s == TransferStatusEnum::COMPLETED) { + engine_->freeBatchID(entry.first); + LOG(INFO) << "Batch Transfer completed!"; + remove_ids.insert(entry.first); + } else if (status.s == TransferStatusEnum::FAILED) { + failed_or_timeout = true; + } else if (status.s == TransferStatusEnum::TIMEOUT) { + LOG(INFO) << "Sync data transfer timeout"; + } + auto current_ts = 
getCurrentTimeInNano(); + if (current_ts - batch_desc->start_timestamp > entry.second) { + LOG(INFO) << "Sync batch data transfer timeout after " << current_ts - batch_desc->start_timestamp + << "ns"; + failed_or_timeout = true; + } + } + + for (auto &remove_id : remove_ids) { + timeout_table.erase(remove_id); + } + + remove_ids.clear(); + } + + if (failed_or_timeout) { + for (auto &entry : timeout_table) { + engine_->freeBatchID(entry.first); + } + } + + return failed_or_timeout ? -1 : 0; +} + +batch_id_t TransferEnginePy::transferSubmitWrite(const char *target_hostname, uintptr_t buffer, + uintptr_t peer_buffer_address, size_t length) { + pybind11::gil_scoped_release release; + Transport::SegmentHandle handle; + { + std::lock_guard guard(mutex_); + if (handle_map_.count(target_hostname)) { + handle = handle_map_[target_hostname]; + } else { + handle = engine_->openSegment(target_hostname); + if (handle == (Transport::SegmentHandle)-1) + return -1; + handle_map_[target_hostname] = handle; + } + } + + auto batch_id = engine_->allocateBatchID(1); + TransferRequest entry; + entry.opcode = TransferRequest::WRITE; + entry.length = length; + entry.source = (void *)buffer; + entry.target_id = handle; + entry.target_offset = peer_buffer_address; + + Status s = engine_->submitTransfer(batch_id, {entry}); + if (!s.ok()) + return -1; + + return batch_id; } int TransferEnginePy::transferCheckStatus(batch_id_t batch_id) { - pybind11::gil_scoped_release release; - TransferStatus status; - Status s = engine_->getTransferStatus(batch_id, 0, status); - LOG_ASSERT(s.ok()); - if (status.s == TransferStatusEnum::COMPLETED) { - engine_->freeBatchID(batch_id); - return 1; - } else if (status.s == TransferStatusEnum::FAILED) { - engine_->freeBatchID(batch_id); - return -1; - } else if (status.s == TransferStatusEnum::TIMEOUT) { - return -2; - } else { - return 0; - } -} - -int TransferEnginePy::batchRegisterMemory(std::vector buffer_addresses, - std::vector capacities) { - 
pybind11::gil_scoped_release release; - auto batch_size = buffer_addresses.size(); - std::vector buffers; - for (int i = 0; i < batch_size; i ++ ) { - buffers.push_back(BufferEntry{(void *)buffer_addresses[i], capacities[i]}); - } - return engine_->registerLocalMemoryBatch(buffers, kWildcardLocation); + pybind11::gil_scoped_release release; + TransferStatus status; + Status s = engine_->getTransferStatus(batch_id, 0, status); + LOG_ASSERT(s.ok()); + if (status.s == TransferStatusEnum::COMPLETED) { + engine_->freeBatchID(batch_id); + return 1; + } else if (status.s == TransferStatusEnum::FAILED) { + engine_->freeBatchID(batch_id); + return -1; + } else if (status.s == TransferStatusEnum::TIMEOUT) { + return -2; + } else { + return 0; + } +} + +int TransferEnginePy::batchRegisterMemory(std::vector buffer_addresses, std::vector capacities) { + pybind11::gil_scoped_release release; + auto batch_size = buffer_addresses.size(); + std::vector buffers; + for (int i = 0; i < batch_size; i++) { + buffers.push_back(BufferEntry {(void *)buffer_addresses[i], capacities[i]}); + } + return engine_->registerLocalMemoryBatch(buffers, kWildcardLocation); } int TransferEnginePy::batchUnregisterMemory(std::vector buffer_addresses) { - pybind11::gil_scoped_release release; - auto batch_size = buffer_addresses.size(); - std::vector buffers; - for (int i = 0; i < batch_size; i ++ ) { - buffers.push_back(reinterpret_cast(buffer_addresses[i])); - } - return engine_->unregisterLocalMemoryBatch(buffers); + pybind11::gil_scoped_release release; + auto batch_size = buffer_addresses.size(); + std::vector buffers; + for (int i = 0; i < batch_size; i++) { + buffers.push_back(reinterpret_cast(buffer_addresses[i])); + } + return engine_->unregisterLocalMemoryBatch(buffers); } int TransferEnginePy::registerMemory(uintptr_t buffer_addr, size_t capacity) { - char *buffer = reinterpret_cast(buffer_addr); - return engine_->registerLocalMemory(buffer, capacity); + char *buffer = 
reinterpret_cast(buffer_addr); + return engine_->registerLocalMemory(buffer, capacity); } int TransferEnginePy::unregisterMemory(uintptr_t buffer_addr) { - char *buffer = reinterpret_cast(buffer_addr); - return engine_->unregisterLocalMemory(buffer); + char *buffer = reinterpret_cast(buffer_addr); + return engine_->unregisterLocalMemory(buffer); } -uintptr_t TransferEnginePy::getFirstBufferAddress( - const std::string &segment_name) { - Transport::SegmentHandle segment_id = - engine_->openSegment(segment_name.c_str()); - auto segment_desc = engine_->getMetadata()->getSegmentDescByID(segment_id); - return segment_desc->buffers[0].addr; +uintptr_t TransferEnginePy::getFirstBufferAddress(const std::string &segment_name) { + Transport::SegmentHandle segment_id = engine_->openSegment(segment_name.c_str()); + auto segment_desc = engine_->getMetadata()->getSegmentDescByID(segment_id); + return segment_desc->buffers[0].addr; } namespace py = pybind11; PYBIND11_MODULE(engine, m) { - py::enum_ transfer_opcode( - m, "TransferOpcode", py::arithmetic()); - transfer_opcode.value("Read", TransferEnginePy::TransferOpcode::READ) - .value("Write", TransferEnginePy::TransferOpcode::WRITE) - .export_values(); - - auto adaptor_cls = - py::class_(m, "TransferEngine") - .def(py::init<>()) - .def("initialize", &TransferEnginePy::initialize) - .def("initialize_ext", &TransferEnginePy::initializeExt) - .def("get_rpc_port", &TransferEnginePy::getRpcPort) - .def("allocate_managed_buffer", - &TransferEnginePy::allocateManagedBuffer) - .def("free_managed_buffer", &TransferEnginePy::freeManagedBuffer) - .def("transfer_sync_write", &TransferEnginePy::transferSyncWrite) - .def("transfer_sync_read", &TransferEnginePy::transferSyncRead) - .def("batch_transfer_sync_write", &TransferEnginePy::batchTransferSyncWrite) - .def("batch_transfer_sync_read", &TransferEnginePy::batchTransferSyncRead) - .def("batch_transfer_async_write", &TransferEnginePy::batchTransferAsyncWrite) - 
.def("batch_transfer_async_read", &TransferEnginePy::batchTransferAsyncRead) - .def("transfer_sync", &TransferEnginePy::transferSync) - .def("batch_transfer_sync", &TransferEnginePy::batchTransferSync) - .def("batch_transfer_async", &TransferEnginePy::batchTransferAsync) - .def("get_batch_transfer_status", &TransferEnginePy::getBatchTransferStatus) - .def("transfer_submit_write", - &TransferEnginePy::transferSubmitWrite) - .def("transfer_check_status", - &TransferEnginePy::transferCheckStatus) - .def("write_bytes_to_buffer", &TransferEnginePy::writeBytesToBuffer) - .def("read_bytes_from_buffer", - &TransferEnginePy::readBytesFromBuffer) - .def("register_memory", &TransferEnginePy::registerMemory) - .def("unregister_memory", &TransferEnginePy::unregisterMemory) - .def("batch_register_memory", &TransferEnginePy::batchRegisterMemory) - .def("batch_unregister_memory", &TransferEnginePy::batchUnregisterMemory) - .def("get_first_buffer_address", - &TransferEnginePy::getFirstBufferAddress); - - adaptor_cls.attr("TransferOpcode") = transfer_opcode; + py::enum_ transfer_opcode(m, "TransferOpcode", py::arithmetic()); + transfer_opcode.value("Read", TransferEnginePy::TransferOpcode::READ) + .value("Write", TransferEnginePy::TransferOpcode::WRITE) + .export_values(); + + auto adaptor_cls = py::class_(m, "TransferEngine") + .def(py::init<>()) + .def("initialize", &TransferEnginePy::initialize) + .def("initialize_ext", &TransferEnginePy::initializeExt) + .def("get_rpc_port", &TransferEnginePy::getRpcPort) + .def("allocate_managed_buffer", &TransferEnginePy::allocateManagedBuffer) + .def("free_managed_buffer", &TransferEnginePy::freeManagedBuffer) + .def("transfer_sync_write", &TransferEnginePy::transferSyncWrite) + .def("transfer_sync_read", &TransferEnginePy::transferSyncRead) + .def("batch_transfer_sync_write", &TransferEnginePy::batchTransferSyncWrite) + .def("batch_transfer_sync_read", &TransferEnginePy::batchTransferSyncRead) + .def("batch_transfer_async_write", 
&TransferEnginePy::batchTransferAsyncWrite) + .def("batch_transfer_async_read", &TransferEnginePy::batchTransferAsyncRead) + .def("transfer_sync", &TransferEnginePy::transferSync) + .def("batch_transfer_sync", &TransferEnginePy::batchTransferSync) + .def("batch_transfer_async", &TransferEnginePy::batchTransferAsync) + .def("get_batch_transfer_status", &TransferEnginePy::getBatchTransferStatus) + .def("transfer_submit_write", &TransferEnginePy::transferSubmitWrite) + .def("transfer_check_status", &TransferEnginePy::transferCheckStatus) + .def("write_bytes_to_buffer", &TransferEnginePy::writeBytesToBuffer) + .def("read_bytes_from_buffer", &TransferEnginePy::readBytesFromBuffer) + .def("register_memory", &TransferEnginePy::registerMemory) + .def("unregister_memory", &TransferEnginePy::unregisterMemory) + .def("batch_register_memory", &TransferEnginePy::batchRegisterMemory) + .def("batch_unregister_memory", &TransferEnginePy::batchUnregisterMemory) + .def("get_first_buffer_address", &TransferEnginePy::getFirstBufferAddress); + + adaptor_cls.attr("TransferOpcode") = transfer_opcode; } diff --git a/mooncake-integration/transfer_engine/transfer_engine_py.h b/mooncake-integration/transfer_engine/transfer_engine_py.h index ed04696be..8740d2a2e 100644 --- a/mooncake-integration/transfer_engine/transfer_engine_py.h +++ b/mooncake-integration/transfer_engine/transfer_engine_py.h @@ -35,127 +35,108 @@ using namespace mooncake; const static size_t kDefaultBufferCapacity = 2ull * 1024 * 1024 * 1024; const static size_t kSlabSizeKBTabLen = 16; const static size_t kMaxClassId = kSlabSizeKBTabLen - 1; -const static size_t kSlabSizeKB[] = { - 8, 16, 32, 64, 128, 256, - 512, 1024, 2 * 1024, 4 * 1024, 8 * 1024, 16 * 1024, - 32 * 1024, 64 * 1024, 128 * 1024, 256 * 1024}; +const static size_t kSlabSizeKB[] = {8, 16, 32, 64, 128, 256, + 512, 1024, 2 * 1024, 4 * 1024, 8 * 1024, 16 * 1024, + 32 * 1024, 64 * 1024, 128 * 1024, 256 * 1024}; class TransferEnginePy { - public: - enum class 
TransferOpcode { READ = 0, WRITE = 1 }; +public: + enum class TransferOpcode { READ = 0, WRITE = 1 }; - public: - using BatchDesc = Transport::BatchDesc; +public: + using BatchDesc = Transport::BatchDesc; - public: - TransferEnginePy(); +public: + TransferEnginePy(); - ~TransferEnginePy(); + ~TransferEnginePy(); - int initialize(const char *local_hostname, const char *metadata_server, - const char *protocol, const char *device_name); + int initialize(const char *local_hostname, const char *metadata_server, const char *protocol, + const char *device_name); - int initializeExt(const char *local_hostname, const char *metadata_server, - const char *protocol, const char *device_name, - const char *metadata_type); + int initializeExt(const char *local_hostname, const char *metadata_server, const char *protocol, + const char *device_name, const char *metadata_type); - int getRpcPort(); + int getRpcPort(); - uintptr_t allocateManagedBuffer(size_t length); + uintptr_t allocateManagedBuffer(size_t length); - int freeManagedBuffer(uintptr_t user_tensor, size_t length); + int freeManagedBuffer(uintptr_t user_tensor, size_t length); - int transferSyncWrite(const char *target_hostname, uintptr_t buffer, - uintptr_t peer_buffer_address, size_t length); + int transferSyncWrite(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, size_t length); - batch_id_t transferSubmitWrite(const char *target_hostname, uintptr_t buffer, - uintptr_t peer_buffer_address, size_t length); + batch_id_t transferSubmitWrite(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, + size_t length); - int transferCheckStatus(batch_id_t batch_id); + int transferCheckStatus(batch_id_t batch_id); - int transferSyncRead(const char *target_hostname, uintptr_t buffer, - uintptr_t peer_buffer_address, size_t length); - - int batchTransferSyncWrite(const char *target_hostname, - std::vector buffers, - std::vector peer_buffer_addresses, - std::vector lengths); + int 
transferSyncRead(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, size_t length); - int batchTransferSyncRead(const char *target_hostname, - std::vector buffers, - std::vector peer_buffer_addresses, - std::vector lengths); + int batchTransferSyncWrite(const char *target_hostname, std::vector buffers, + std::vector peer_buffer_addresses, std::vector lengths); - batch_id_t batchTransferAsyncWrite(const char *target_hostname, - const std::vector &buffers, - const std::vector &peer_buffer_addresses, - const std::vector &lengths); + int batchTransferSyncRead(const char *target_hostname, std::vector buffers, + std::vector peer_buffer_addresses, std::vector lengths); - batch_id_t batchTransferAsyncRead(const char *target_hostname, - const std::vector &buffers, - const std::vector &peer_buffer_addresses, - const std::vector &lengths); + batch_id_t batchTransferAsyncWrite(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths); - int transferSync(const char *target_hostname, uintptr_t buffer, - uintptr_t peer_buffer_address, size_t length, - TransferOpcode opcode); + batch_id_t batchTransferAsyncRead(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths); - int batchTransferSync(const char *target_hostname, - std::vector buffers, - std::vector peer_buffer_addresses, - std::vector lengths, - TransferOpcode opcode); + int transferSync(const char *target_hostname, uintptr_t buffer, uintptr_t peer_buffer_address, size_t length, + TransferOpcode opcode); - batch_id_t batchTransferAsync(const char *target_hostname, - const std::vector &buffers, - const std::vector &peer_buffer_addresses, - const std::vector &lengths, - TransferOpcode opcode); - - int getBatchTransferStatus(const std::vector &batch_ids); + int batchTransferSync(const char *target_hostname, std::vector buffers, + std::vector 
peer_buffer_addresses, std::vector lengths, + TransferOpcode opcode); - uintptr_t getFirstBufferAddress(const std::string &segment_name); + batch_id_t batchTransferAsync(const char *target_hostname, const std::vector &buffers, + const std::vector &peer_buffer_addresses, + const std::vector &lengths, TransferOpcode opcode); - int writeBytesToBuffer(uintptr_t dest_address, char *src_ptr, - size_t length) { - memcpy((void *)dest_address, (void *)src_ptr, length); - return 0; - } + int getBatchTransferStatus(const std::vector &batch_ids); - pybind11::bytes readBytesFromBuffer(uintptr_t source_address, - size_t length) { - return pybind11::bytes( - static_cast(reinterpret_cast(source_address)), - length); - } + uintptr_t getFirstBufferAddress(const std::string &segment_name); - // FOR EXPERIMENT ONLY - int registerMemory(uintptr_t buffer_addr, size_t capacity); + int writeBytesToBuffer(uintptr_t dest_address, char *src_ptr, size_t length) { + memcpy((void *)dest_address, (void *)src_ptr, length); + return 0; + } - // must be called before TransferEnginePy::~TransferEnginePy() - int unregisterMemory(uintptr_t buffer_addr); + pybind11::bytes readBytesFromBuffer(uintptr_t source_address, size_t length) { + return pybind11::bytes(static_cast(reinterpret_cast(source_address)), length); + } - int batchRegisterMemory(std::vector buffer_addresses, std::vector capacities); + // FOR EXPERIMENT ONLY + int registerMemory(uintptr_t buffer_addr, size_t capacity); - int batchUnregisterMemory(std::vector buffer_addresses); + // must be called before TransferEnginePy::~TransferEnginePy() + int unregisterMemory(uintptr_t buffer_addr); - private: - char *allocateRawBuffer(size_t capacity); + int batchRegisterMemory(std::vector buffer_addresses, std::vector capacities); - int findClassId(size_t size); + int batchUnregisterMemory(std::vector buffer_addresses); - int doBuddyAllocate(int class_id); +private: + char *allocateRawBuffer(size_t capacity); - private: - std::shared_ptr engine_; - 
Transport *xport_; + int findClassId(size_t size); - std::mutex mutex_; - std::vector> free_list_; - std::vector buffer_list_; - std::unordered_set large_buffer_list_; - std::unordered_map handle_map_; - bool auto_discovery_; + int doBuddyAllocate(int class_id); - uint64_t transfer_timeout_nsec_; +private: + std::shared_ptr engine_; + Transport *xport_; + + std::mutex mutex_; + std::vector> free_list_; + std::vector buffer_list_; + std::unordered_set large_buffer_list_; + std::unordered_map handle_map_; + bool auto_discovery_; + + uint64_t transfer_timeout_nsec_; }; diff --git a/mooncake-store/CMakeLists.txt b/mooncake-store/CMakeLists.txt index f5241faaf..d375bf0cb 100644 --- a/mooncake-store/CMakeLists.txt +++ b/mooncake-store/CMakeLists.txt @@ -1,23 +1,23 @@ project(MooncakeStore) -if (STORE_USE_ETCD) - set(ETCD_WRAPPER_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/../mooncake-common/etcd/) - set(ETCD_WRAPPER_LIB ${CMAKE_CURRENT_BINARY_DIR}/../mooncake-common/etcd/libetcd_wrapper.so) +if(STORE_USE_ETCD) + set(ETCD_WRAPPER_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/../mooncake-common/etcd/) + set(ETCD_WRAPPER_LIB + ${CMAKE_CURRENT_BINARY_DIR}/../mooncake-common/etcd/libetcd_wrapper.so) else() - message(STATUS "STORE_USE_ETCD=OFF, high availability of Store is disabled") + message(STATUS "STORE_USE_ETCD=OFF, high availability of Store is disabled") endif() # Add include directories include_directories( - ${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/include - ${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/fake_include - ${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/ - ${CMAKE_CURRENT_SOURCE_DIR}/include/mooncake-store/proto/ - ${CMAKE_CURRENT_SOURCE_DIR}/include/ - ${CMAKE_CURRENT_SOURCE_DIR}/../mooncake-transfer-engine/include - ${ETCD_WRAPPER_INCLUDE} -) + ${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/include + ${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/fake_include + 
${CMAKE_CURRENT_SOURCE_DIR}/include/cachelib_memory_allocator/ + ${CMAKE_CURRENT_SOURCE_DIR}/include/mooncake-store/proto/ + ${CMAKE_CURRENT_SOURCE_DIR}/include/ + ${CMAKE_CURRENT_SOURCE_DIR}/../mooncake-transfer-engine/include + ${ETCD_WRAPPER_INCLUDE}) # Add subdirectories add_subdirectory(src) -add_subdirectory(tests) \ No newline at end of file +add_subdirectory(tests) diff --git a/mooncake-store/include/allocation_strategy.h b/mooncake-store/include/allocation_strategy.h index 83b57c7a6..b6e5fb8d5 100644 --- a/mooncake-store/include/allocation_strategy.h +++ b/mooncake-store/include/allocation_strategy.h @@ -5,7 +5,7 @@ #include #include -#include "allocator.h" // Contains BufferAllocator declaration +#include "allocator.h" // Contains BufferAllocator declaration #include "types.h" namespace mooncake { @@ -15,25 +15,24 @@ namespace mooncake { * among multiple BufferAllocators. */ class AllocationStrategy { - public: - virtual ~AllocationStrategy() = default; - - /** - * @brief Given all mounted BufferAllocators and required object size, - * the strategy can freely choose a suitable BufferAllocator. - * @param allocators Container of mounted allocators - * @param allocators_by_name Container of mounted allocators, key is segment_name, - * value is the corresponding allocator - * @param objectSize Size of object to be allocated - * @param config Replica configuration - * @return Selected allocator; returns nullptr if allocation is not possible - * or no suitable allocator is found - */ - virtual std::unique_ptr Allocate( - const std::vector>& allocators, - const std::unordered_map>>& - allocators_by_name, - size_t objectSize, const ReplicateConfig& config) = 0; +public: + virtual ~AllocationStrategy() = default; + + /** + * @brief Given all mounted BufferAllocators and required object size, + * the strategy can freely choose a suitable BufferAllocator. 
+ * @param allocators Container of mounted allocators + * @param allocators_by_name Container of mounted allocators, key is + * segment_name, value is the corresponding allocator + * @param objectSize Size of object to be allocated + * @param config Replica configuration + * @return Selected allocator; returns nullptr if allocation is not possible + * or no suitable allocator is found + */ + virtual std::unique_ptr + Allocate(const std::vector> &allocators, + const std::unordered_map>> &allocators_by_name, + size_t objectSize, const ReplicateConfig &config) = 0; }; /** @@ -44,93 +43,88 @@ class AllocationStrategy { * allocators. */ class RandomAllocationStrategy : public AllocationStrategy { - public: - RandomAllocationStrategy() : rng_(std::random_device{}()) {} - - std::unique_ptr Allocate( - const std::vector>& allocators, - const std::unordered_map>>& - allocators_by_name, - size_t objectSize, const ReplicateConfig& config) override { - // Fast path: single allocator case - if (allocators.size() == 1) { - return allocators[0]->allocate(objectSize); - } - - // Try preferred segment first if specified - if (auto preferred_buffer = - TryPreferredAllocate(allocators_by_name, objectSize, config)) { - return preferred_buffer; - } - - // Fall back to random allocation among all eligible allocators - return TryRandomAllocate(allocators, objectSize); - } - - private: - static constexpr size_t kMaxRetryLimit = 10; - - std::mt19937 rng_; // Mersenne Twister random number generator - - /** - * @brief Attempts allocation from preferred segment if available and - * eligible - */ - std::unique_ptr TryPreferredAllocate( - const std::unordered_map>>& - allocators, - size_t objectSize, const ReplicateConfig& config) { - if (config.preferred_segment.empty()) { - return nullptr; - } - - auto preferred_it = allocators.find(config.preferred_segment); - if (preferred_it == allocators.end()) { - return nullptr; - } - - auto& preferred_allocators = preferred_it->second; - for (auto& 
allocator : preferred_allocators) { - auto buffer = allocator->allocate(objectSize); - if (buffer != nullptr) { - return buffer; - } - } - - return nullptr; - } - - /** - * @brief Attempts allocation with random selection and retry logic - */ - std::unique_ptr TryRandomAllocate( - const std::vector>& allocators, - size_t objectSize) { - const size_t max_tries = std::min(kMaxRetryLimit, allocators.size()); - - std::vector allocator_indices(allocators.size()); - std::iota(allocator_indices.begin(), allocator_indices.end(), 0); - - for (size_t try_count = 0; try_count < max_tries; ++try_count) { - // Randomly select an allocator - std::uniform_int_distribution dist( - 0, allocator_indices.size() - 1); - const size_t random_index = allocator_indices[dist(rng_)]; - - auto& allocator = allocators[random_index]; - if (auto buffer = allocator->allocate(objectSize)) { - return buffer; - } - - // Remove failed allocator and continue with remaining ones - if (random_index + 1 != allocator_indices.size()) { - std::swap(allocator_indices[random_index], - allocator_indices[allocator_indices.size() - 1]); - } - allocator_indices.pop_back(); - } - return nullptr; - } +public: + RandomAllocationStrategy() : rng_(std::random_device {}()) { + } + + std::unique_ptr + Allocate(const std::vector> &allocators, + const std::unordered_map>> &allocators_by_name, + size_t objectSize, const ReplicateConfig &config) override { + // Fast path: single allocator case + if (allocators.size() == 1) { + return allocators[0]->allocate(objectSize); + } + + // Try preferred segment first if specified + if (auto preferred_buffer = TryPreferredAllocate(allocators_by_name, objectSize, config)) { + return preferred_buffer; + } + + // Fall back to random allocation among all eligible allocators + return TryRandomAllocate(allocators, objectSize); + } + +private: + static constexpr size_t kMaxRetryLimit = 10; + + std::mt19937 rng_; // Mersenne Twister random number generator + + /** + * @brief Attempts 
allocation from preferred segment if available and + * eligible + */ + std::unique_ptr TryPreferredAllocate( + const std::unordered_map>> &allocators, + size_t objectSize, const ReplicateConfig &config) { + if (config.preferred_segment.empty()) { + return nullptr; + } + + auto preferred_it = allocators.find(config.preferred_segment); + if (preferred_it == allocators.end()) { + return nullptr; + } + + auto &preferred_allocators = preferred_it->second; + for (auto &allocator : preferred_allocators) { + auto buffer = allocator->allocate(objectSize); + if (buffer != nullptr) { + return buffer; + } + } + + return nullptr; + } + + /** + * @brief Attempts allocation with random selection and retry logic + */ + std::unique_ptr TryRandomAllocate(const std::vector> &allocators, + size_t objectSize) { + const size_t max_tries = std::min(kMaxRetryLimit, allocators.size()); + + std::vector allocator_indices(allocators.size()); + std::iota(allocator_indices.begin(), allocator_indices.end(), 0); + + for (size_t try_count = 0; try_count < max_tries; ++try_count) { + // Randomly select an allocator + std::uniform_int_distribution dist(0, allocator_indices.size() - 1); + const size_t random_index = allocator_indices[dist(rng_)]; + + auto &allocator = allocators[random_index]; + if (auto buffer = allocator->allocate(objectSize)) { + return buffer; + } + + // Remove failed allocator and continue with remaining ones + if (random_index + 1 != allocator_indices.size()) { + std::swap(allocator_indices[random_index], allocator_indices[allocator_indices.size() - 1]); + } + allocator_indices.pop_back(); + } + return nullptr; + } }; -} // namespace mooncake +} // namespace mooncake diff --git a/mooncake-store/include/allocator.h b/mooncake-store/include/allocator.h index 48176f59f..4fb6ffcee 100644 --- a/mooncake-store/include/allocator.h +++ b/mooncake-store/include/allocator.h @@ -37,55 +37,63 @@ namespace mooncake { * ``` */ class BufferAllocator : public std::enable_shared_from_this { - 
public: - BufferAllocator(std::string segment_name, size_t base, size_t size); - - ~BufferAllocator(); - - std::unique_ptr allocate(size_t size); - - void deallocate(AllocatedBuffer* handle); - - size_t capacity() const { return total_size_; } - size_t size() const { return cur_size_.load(); } - std::string getSegmentName() const { return segment_name_; } - - private: - // metadata - const std::string segment_name_; - const size_t base_; - const size_t total_size_; - std::atomic_size_t cur_size_; - - // metrics - removed allocated_bytes_ member - // ylt::metric::gauge_t* allocated_bytes_{nullptr}; - // cachelib - std::unique_ptr header_region_start_; - size_t header_region_size_; - std::unique_ptr memory_allocator_; - facebook::cachelib::PoolId pool_id_; +public: + BufferAllocator(std::string segment_name, size_t base, size_t size); + + ~BufferAllocator(); + + std::unique_ptr allocate(size_t size); + + void deallocate(AllocatedBuffer *handle); + + size_t capacity() const { + return total_size_; + } + size_t size() const { + return cur_size_.load(); + } + std::string getSegmentName() const { + return segment_name_; + } + +private: + // metadata + const std::string segment_name_; + const size_t base_; + const size_t total_size_; + std::atomic_size_t cur_size_; + + // metrics - removed allocated_bytes_ member + // ylt::metric::gauge_t* allocated_bytes_{nullptr}; + // cachelib + std::unique_ptr header_region_start_; + size_t header_region_size_; + std::unique_ptr memory_allocator_; + facebook::cachelib::PoolId pool_id_; }; // The main difference is that it allocates real memory and returns it, while // BufferAllocator allocates an address class SimpleAllocator { - public: - SimpleAllocator(size_t size); - ~SimpleAllocator(); - void* allocate(size_t size); - void deallocate(void* ptr, size_t size); - void* getBase() const { return base_; } - - private: - void* base_{nullptr}; - - std::unique_ptr header_region_start_; - size_t header_region_size_; - - std::unique_ptr 
memory_allocator_; - facebook::cachelib::PoolId pool_id_; +public: + SimpleAllocator(size_t size); + ~SimpleAllocator(); + void *allocate(size_t size); + void deallocate(void *ptr, size_t size); + void *getBase() const { + return base_; + } + +private: + void *base_ {nullptr}; + + std::unique_ptr header_region_start_; + size_t header_region_size_; + + std::unique_ptr memory_allocator_; + facebook::cachelib::PoolId pool_id_; }; -} // namespace mooncake +} // namespace mooncake -#endif // BUFFER_ALLOCATOR_H +#endif // BUFFER_ALLOCATOR_H diff --git a/mooncake-store/include/cachelib_memory_allocator/AllocationClass.h b/mooncake-store/include/cachelib_memory_allocator/AllocationClass.h index 70ec9b568..e6cf24a2a 100644 --- a/mooncake-store/include/cachelib_memory_allocator/AllocationClass.h +++ b/mooncake-store/include/cachelib_memory_allocator/AllocationClass.h @@ -29,421 +29,412 @@ namespace facebook { namespace cachelib { -enum class SlabIterationStatus { - kFinishedCurrentSlabAndContinue, - kSkippedCurrentSlabAndContinue, - kAbortIteration -}; +enum class SlabIterationStatus { kFinishedCurrentSlabAndContinue, kSkippedCurrentSlabAndContinue, kAbortIteration }; // An AllocationClass is used to allocate memory for a given allocation size // from Slabs class AllocationClass { - public: - // @param classId the id corresponding to this allocation class - // @param poolId the poolId corresponding to this allocation class - // @param allocSize the size of allocations that this allocation class - // handles. - // @param s the slab allocator for fetching the header info. - // - // @throw std::invalid_argument if the classId is invalid or the allocSize - // is invalid. - AllocationClass(ClassId classId, PoolId poolId, uint32_t allocSize, - const SlabAllocator& s); - - AllocationClass(const AllocationClass&) = delete; - AllocationClass& operator=(const AllocationClass&) = delete; - - // returns the id corresponding to the allocation class. 
- ClassId getId() const noexcept { return classId_; } - - // returns the poolId corresponding to the allocation class. - PoolId getPoolId() const noexcept { return poolId_; } - - // returns the allocation size handled by this allocation class. - uint32_t getAllocSize() const noexcept { return allocationSize_; } - - // returns the number of allocations that can be made out of a Slab. - unsigned int getAllocsPerSlab() const noexcept { - return static_cast(Slab::kSize / allocationSize_); - } - - // Whether the pool is full or free to allocate more in the current state. - // This is only a hint and not a guarantee that subsequent allocate will - // fail/succeed. - bool isFull() const noexcept { return !canAllocate_; } - - // allocate memory corresponding to the allocation size of this - // AllocationClass. - // - // @return ptr to the memory of allocationSize_ chunk or nullptr if we - // don't have any free memory. The caller will have to add a slab - // to this slab class to make further allocations out of it. - void* allocate(); - - // @param ctx release context for the slab owning this alloc - // @param memory memory to check - // - // @return true if the memory corresponds to an alloc that has been freed - // - // @throws std::invalid_argument if the memory does not belong to a slab of - // this slab class, or if the slab is not actively being released, - // or if the context belongs to a different slab. - // @throws std::runtime_error if the slab cannot be found inside - // slabReleaseAllocMap_ - bool isAllocFreed(const SlabReleaseContext& ctx, void* memory) const; - - // The callback is executed under the lock, immediately after checking if - // the alloc has been freed. - // - // @param ctx release context for the slab owning this alloc - // @param memory memory to check - // @param callback callback to execute if the alloc has not been freed. - // This - // takes a single argument - the alloc being processed. 
- // - // @throws std::invalid_argument if the memory does not belong to a slab of - // this slab class, or if the slab is not actively being released, - // or if the context belongs to a different slab. - // @throws std::runtime_error if the slab cannot be found inside - // slabReleaseAllocMap_ - void processAllocForRelease( - const SlabReleaseContext& ctx, void* memory, - const std::function& callback) const; - - // Function takes the startSlabReleaseLock_, gets the slab header and if - // the slab is in a valid state invokes a user defined callback for each - // allocation in the slab. - // - // @param slab Slab to visit. - // @param callback Callback function to invoke on each allocation. - // - // @return true to continue with the iteration, false to abort. - // - // AllocTraversalFn Allocator traversal function - // @param ptr pointer to allocation - // @param allocInfo AllocInfo of the allocation - // @return SlabIterationStatus - template - SlabIterationStatus forEachAllocation(Slab* slab, - AllocTraversalFn&& callback) { - // Take a try_lock on this allocation class beginning any new slab - // release. - std::unique_lock startSlabReleaseLockHolder( - startSlabReleaseLock_, std::defer_lock); - - // If the try_lock fails, skip this slab - if (!startSlabReleaseLockHolder.try_lock()) { - return SlabIterationStatus::kSkippedCurrentSlabAndContinue; - } - - // check for the header to be valid. 
- using Return = std::optional; - Return allocInfo; - { - std::unique_lock l(lock_); - allocInfo = ([this, slab]() -> Return { - auto slabHdr = slabAlloc_.getSlabHeader(slab); - - if (!slabHdr || slabHdr->classId != classId_ || - slabHdr->poolId != poolId_ || slabHdr->isAdvised() || - slabHdr->isMarkedForRelease()) { - return std::nullopt; - } - - return Return{ - {slabHdr->poolId, slabHdr->classId, slabHdr->allocSize}}; - })(); - } - if (!allocInfo) { - return SlabIterationStatus::kSkippedCurrentSlabAndContinue; - } - - // Prefetch the first kForEachAllocPrefetchPffset items in the slab. - // Note that the prefetch is for read with no temporal locality. - void* prefetchOffsetPtr = reinterpret_cast(slab); - for (unsigned int i = 0; i < kForEachAllocPrefetchOffset; i++) { - prefetchOffsetPtr = reinterpret_cast( - reinterpret_cast(prefetchOffsetPtr) + - allocationSize_); - __builtin_prefetch(prefetchOffsetPtr, 0, 0); - } - void* ptr = reinterpret_cast(slab); - unsigned int allocsPerSlab = getAllocsPerSlab(); - for (unsigned int i = 0; i < allocsPerSlab; ++i) { - prefetchOffsetPtr = reinterpret_cast( - reinterpret_cast(prefetchOffsetPtr) + - allocationSize_); - // Prefetch ahead the kForEachAllocPrefetchOffset item. - __builtin_prefetch(prefetchOffsetPtr, 0, 0); - if (!callback(ptr, allocInfo.value())) { - return SlabIterationStatus::kAbortIteration; - } - ptr = reinterpret_cast(reinterpret_cast(ptr) + - allocationSize_); - } - return SlabIterationStatus::kFinishedCurrentSlabAndContinue; - } - - // release the memory back to the slab class. - // - // @param memory memory to be released. - // @throws std::invalid_argument if the memory does not belong to a slab of - // this slab class. - void free(void* memory); - - // acquires a new slab for this allocation class. - // @param slab a new slab to be added. This can NOT be nullptr. - void addSlab(Slab* slab); - - // acquires a new slab and return an allocation right away. - // @param slab a new slab to be added. 
This can NOT be nullptr. - // @return new allocation. This cannot fail. - void* addSlabAndAllocate(Slab* slab); - - // Releasing a slab is a two step process. - // 1. Mark a slab for release, by calling `startSlabRelease`. - // 2. Free all the activeAllocations - // 3. Actually release the slab, by calling `completeSlabRelease`. - // In some scenario (i.e. when the slab is already released in step 1), - // there is no need to do step 2. - // - // In between the two steps, the user must ensure any active allocation - // from the slab is freed by calling ac->free(alloc). completeSlabRelease - // will block until all the active allocations for the slab are freed back. - // - // These allocations will not be moved to the free allocation list. Instead - // the free simply becomes an no-op. This is fine since the slab will be - // released eventually, and we do not want the freed allocations to be used - // again in the meanwhile. - // - // @param mode slab release mode - // - // @param hint hint of an allocation belong to the slab that we want - // released. If this is nullptr, a random slab will be - // selected for releasing. - // - // @param shouldAbortFn invoked in the code to see if this release slab - // process should be aborted - // - // @return SlabReleaseContext - // isReleased == true means the slab is already released - // as there was no active allocation to be freed. If not, the - // caller is responsible for ensuring that all active allocations - // returned by getActiveAllocs are freed back and - // - // @throw std::invalid_argument if the hint is invalid. - // - // @throw exception::SlabReleaseAborted if slab release is aborted due to - // shouldAbortFn returning true. - SlabReleaseContext startSlabRelease( - SlabReleaseMode mode, const void* hint, - SlabReleaseAbortFn shouldAbortFn = []() { return false; }); - - // Aborting a previously started SlabRelease will not restore already - // freed allocations. 
So the end state may not be exactly same as - // pre-startSlabRelease. - // - // precondition: startSlabRelease must be called before this, and the - // context must be valid and the slab has not yet been - // released. - // - // @param context the slab release context returned by startSlabRelease - // @throw std::invalid_argument - // a invalid_argument is thrown when the context is invalid or - // the context is already released or all allocs are freed. - void abortSlabRelease(const SlabReleaseContext& context); - - // precondition: startSlabRelease must be called before this, and the - // context must be valid and the slab has not yet been - // released. If context.isReleased() == true there is no - // need to call completeSlabRelease. - // - // @param context the slab release context returned by startSlabRelease - // @throw std::runtime_error - // a runtime_error is thrown when the context is invalid or - // the slab associated with the context is not in a valid state. - void completeSlabRelease(const SlabReleaseContext& context); - - // check if the slab has all its allocations freed back to the - // AllocationClass. This must be called only for a slab that has an active - // slab release. - // - // @param slab the slab that we are interested in. - // @return True if all the allocations are freed back to the allocator. - // False if not. - // @throw std::runtime_error if the slab does not have the allocStateMap - // entry. - bool allFreed(const Slab* slab) const; - - private: - // check if the state of the AllocationClass is valid and if not, throws an - // std::invalid_argument exception. This is intended for use in - // constructors. - void checkState() const; - - // grabs a slab from the free slabs and makes it the currentSlab_ - // precondition: freeSlabs_ must not be empty. - void setupCurrentSlabLocked(); - - // returns true if the allocation can be satisfied from the current slab. 
- bool canAllocateFromCurrentSlabLocked() const noexcept; - - // returns a new allocation from the current slab. Caller needs to ensure - // that precondition canAllocateFromCurrentSlabLocked is satisfied - void* allocateFromCurrentSlabLocked() noexcept; - - // get a suitable slab for being released from either the set of free slabs - // or the allocated slabs. - const Slab* getSlabForReleaseLocked() const noexcept; - - // prune the freeAllocs_ to eliminate any allocs belonging to this slab and - // also return a list of active allocations. If there are any active - // allocations, it maintains the freeState for the slab release. - // - // @param slab Eliminate allocs belonging to this slab - // - // @param shouldAbortFn invoked in the code to see if this release slab - // process should be aborted - // - // @return a pair with - // a bool indicating if slab release should be aborted or not and - // a list of active allocations if should abort is false. - // - // @throw exception::SlabReleaseAborted if slab release is aborted due to - // shouldAbortFn returning true. - std::pair> pruneFreeAllocs( - const Slab* slab, - SlabReleaseAbortFn shouldAbortFn = []() { return false; }); - - // wraps around allFreed and blocks until all the allocations belonging to - // the slab are freed back. - void waitUntilAllFreed(const Slab* slab); - - // return the allocation's index into the slab. It is the caller's - // responsibility to ensure that the alloc belongs to the slab and is valid. - size_t getAllocIdx(const Slab* slab, void* alloc) const noexcept; - - // return the allocation pointer into the slab for a given index. - void* getAllocForIdx(const Slab* slab, size_t idx) const; - - uintptr_t getSlabPtrValue(const Slab* slab) const noexcept { - return reinterpret_cast(slab); - } - - // Internal logic for checking if an allocation has been freed. This should - // be called under a lock. 
- // - // @param ctx release context for the slab owning the alloc - // @param memory memory to check - // - // @throws std::runtime_error if the slab cannot be found inside - // slabReleaseAllocMap_ - bool isAllocFreedLocked(const SlabReleaseContext& ctx, void* memory) const; - - // Checks if the memory belongs to a slab being released, and if that slab - // matches with the provided release context. - // - // @param ctx release context for the slab owning this alloc - // @param memory memory to check - // - // @throws std::invalid_argument if the memory does not belong to a slab of - // this slab class, or if the slab is not actively being released, - // or if the context belongs to a different slab. - void checkSlabInRelease(const SlabReleaseContext& ctx, - const void* memory) const; - - // @param slab the slab to create a new release alloc map - // - // throw std::runtime_error if fail to create a new release alloc map - void createSlabReleaseAllocMapLocked(const Slab* slab); - - // @param slab the slab associated with a release alloc map - // - // @return std::vector& this is the alloc state map - // @throws std::out_of_range if alloc map does not exist - std::vector& getSlabReleaseAllocMapLocked(const Slab* slab); - - // acquires a new slab for this allocation class. - void addSlabLocked(Slab* slab); - - // allocate memory corresponding to the allocation size of this - // AllocationClass. - // - // @return ptr to the memory of allocationSize_ chunk or nullptr if we - // don't have any free memory. The caller will have to add a slab - // to this slab class to make further allocations out of it. - void* allocateLocked(); - - // lock for serializing access to currSlab_, currOffset, allocatedSlabs_, - // freeSlabs_, freedAllocations_. - mutable std::mutex lock_; - - // the allocation class id. - const ClassId classId_{-1}; - - // the allocation pool id. - const PoolId poolId_{-1}; - - // the chunk size for the allocations of this allocation class. 
- const uint32_t allocationSize_{0}; - - // the offset of the next available allocation. - uint32_t currOffset_{0}; - - // the next available chunk that can be allocated from the current active - // slab. If nullptr, then there are no active slabs that are being chunked - // out. - Slab* currSlab_{nullptr}; - - const SlabAllocator& slabAlloc_; - - // slabs that belong to this allocation class and are not entirely free. The - // un-used allocations in this are present in freedAllocations_. - // TODO store the index of the slab instead of the actual pointer. Pointer - // is 8byte vs index which can be half of it. - std::vector allocatedSlabs_; - - // slabs which are empty and can be used for allocations. - // TODO use an intrusive container on the freed slabs. - std::vector freeSlabs_; - - // list of freed allocations for this allocation class. - using FreeList = std::list; - FreeList freedAllocations_{}; - - // Partition the 'freeAllocs' into two different SList depending on whether - // they are in slab memory or outside. Does not take a lock. If access to - // 'freeAllocs' requires a lock, it should be taken by the caller. - void partitionFreeAllocs(const Slab* slab, FreeList& freeAllocs, - FreeList& inSlab, FreeList& notInSlab); - - // if this is false, then we have run out of memory to do any more - // allocations. Reading this outside the lock_ will be racy. - std::atomic canAllocate_{true}; - - std::atomic activeReleases_{0}; - - // stores the list of outstanding allocations for a given slab. This is - // created when we start a slab release process and if there are any active - // allocations need to be marked as free. - std::unordered_map> slabReleaseAllocMap_; - - // Starting releasing a slab is serialized across threads. 
- // Afterwards, the multiple threads can proceed in parallel to - // complete the slab release - std::mutex startSlabReleaseLock_; - - // maximum number of free allocs to walk through during pruning - // before dropping the lock - static constexpr unsigned int kFreeAllocsPruneLimit = 4 * 1024; - - // Number of micro seconds to sleep between the batches during pruning. - // This is needed to avoid other threads from starving for lock. - static constexpr unsigned int kFreeAllocsPruneSleepMicroSecs = 1000; - - // Number of allocations ahead to prefetch when iterating over each - // allocation in a slab. - static constexpr unsigned int kForEachAllocPrefetchOffset = 16; +public: + // @param classId the id corresponding to this allocation class + // @param poolId the poolId corresponding to this allocation class + // @param allocSize the size of allocations that this allocation class + // handles. + // @param s the slab allocator for fetching the header info. + // + // @throw std::invalid_argument if the classId is invalid or the allocSize + // is invalid. + AllocationClass(ClassId classId, PoolId poolId, uint32_t allocSize, const SlabAllocator &s); + + AllocationClass(const AllocationClass &) = delete; + AllocationClass &operator=(const AllocationClass &) = delete; + + // returns the id corresponding to the allocation class. + ClassId getId() const noexcept { + return classId_; + } + + // returns the poolId corresponding to the allocation class. + PoolId getPoolId() const noexcept { + return poolId_; + } + + // returns the allocation size handled by this allocation class. + uint32_t getAllocSize() const noexcept { + return allocationSize_; + } + + // returns the number of allocations that can be made out of a Slab. + unsigned int getAllocsPerSlab() const noexcept { + return static_cast(Slab::kSize / allocationSize_); + } + + // Whether the pool is full or free to allocate more in the current state. 
+ // This is only a hint and not a guarantee that subsequent allocate will + // fail/succeed. + bool isFull() const noexcept { + return !canAllocate_; + } + + // allocate memory corresponding to the allocation size of this + // AllocationClass. + // + // @return ptr to the memory of allocationSize_ chunk or nullptr if we + // don't have any free memory. The caller will have to add a slab + // to this slab class to make further allocations out of it. + void *allocate(); + + // @param ctx release context for the slab owning this alloc + // @param memory memory to check + // + // @return true if the memory corresponds to an alloc that has been freed + // + // @throws std::invalid_argument if the memory does not belong to a slab of + // this slab class, or if the slab is not actively being released, + // or if the context belongs to a different slab. + // @throws std::runtime_error if the slab cannot be found inside + // slabReleaseAllocMap_ + bool isAllocFreed(const SlabReleaseContext &ctx, void *memory) const; + + // The callback is executed under the lock, immediately after checking if + // the alloc has been freed. + // + // @param ctx release context for the slab owning this alloc + // @param memory memory to check + // @param callback callback to execute if the alloc has not been freed. + // This + // takes a single argument - the alloc being processed. + // + // @throws std::invalid_argument if the memory does not belong to a slab of + // this slab class, or if the slab is not actively being released, + // or if the context belongs to a different slab. + // @throws std::runtime_error if the slab cannot be found inside + // slabReleaseAllocMap_ + void processAllocForRelease(const SlabReleaseContext &ctx, void *memory, + const std::function &callback) const; + + // Function takes the startSlabReleaseLock_, gets the slab header and if + // the slab is in a valid state invokes a user defined callback for each + // allocation in the slab. 
+ // + // @param slab Slab to visit. + // @param callback Callback function to invoke on each allocation. + // + // @return true to continue with the iteration, false to abort. + // + // AllocTraversalFn Allocator traversal function + // @param ptr pointer to allocation + // @param allocInfo AllocInfo of the allocation + // @return SlabIterationStatus + template + SlabIterationStatus forEachAllocation(Slab *slab, AllocTraversalFn &&callback) { + // Take a try_lock on this allocation class beginning any new slab + // release. + std::unique_lock startSlabReleaseLockHolder(startSlabReleaseLock_, std::defer_lock); + + // If the try_lock fails, skip this slab + if (!startSlabReleaseLockHolder.try_lock()) { + return SlabIterationStatus::kSkippedCurrentSlabAndContinue; + } + + // check for the header to be valid. + using Return = std::optional; + Return allocInfo; + { + std::unique_lock l(lock_); + allocInfo = ([this, slab]() -> Return { + auto slabHdr = slabAlloc_.getSlabHeader(slab); + + if (!slabHdr || slabHdr->classId != classId_ || slabHdr->poolId != poolId_ || slabHdr->isAdvised() || + slabHdr->isMarkedForRelease()) { + return std::nullopt; + } + + return Return {{slabHdr->poolId, slabHdr->classId, slabHdr->allocSize}}; + })(); + } + if (!allocInfo) { + return SlabIterationStatus::kSkippedCurrentSlabAndContinue; + } + + // Prefetch the first kForEachAllocPrefetchOffset items in the slab. + // Note that the prefetch is for read with no temporal locality. 
+ void *prefetchOffsetPtr = reinterpret_cast(slab); + for (unsigned int i = 0; i < kForEachAllocPrefetchOffset; i++) { + prefetchOffsetPtr = + reinterpret_cast(reinterpret_cast(prefetchOffsetPtr) + allocationSize_); + __builtin_prefetch(prefetchOffsetPtr, 0, 0); + } + void *ptr = reinterpret_cast(slab); + unsigned int allocsPerSlab = getAllocsPerSlab(); + for (unsigned int i = 0; i < allocsPerSlab; ++i) { + prefetchOffsetPtr = + reinterpret_cast(reinterpret_cast(prefetchOffsetPtr) + allocationSize_); + // Prefetch ahead the kForEachAllocPrefetchOffset item. + __builtin_prefetch(prefetchOffsetPtr, 0, 0); + if (!callback(ptr, allocInfo.value())) { + return SlabIterationStatus::kAbortIteration; + } + ptr = reinterpret_cast(reinterpret_cast(ptr) + allocationSize_); + } + return SlabIterationStatus::kFinishedCurrentSlabAndContinue; + } + + // release the memory back to the slab class. + // + // @param memory memory to be released. + // @throws std::invalid_argument if the memory does not belong to a slab of + // this slab class. + void free(void *memory); + + // acquires a new slab for this allocation class. + // @param slab a new slab to be added. This can NOT be nullptr. + void addSlab(Slab *slab); + + // acquires a new slab and return an allocation right away. + // @param slab a new slab to be added. This can NOT be nullptr. + // @return new allocation. This cannot fail. + void *addSlabAndAllocate(Slab *slab); + + // Releasing a slab is a two step process. + // 1. Mark a slab for release, by calling `startSlabRelease`. + // 2. Free all the activeAllocations + // 3. Actually release the slab, by calling `completeSlabRelease`. + // In some scenario (i.e. when the slab is already released in step 1), + // there is no need to do step 2. + // + // In between the two steps, the user must ensure any active allocation + // from the slab is freed by calling ac->free(alloc). completeSlabRelease + // will block until all the active allocations for the slab are freed back. 
+ // + // These allocations will not be moved to the free allocation list. Instead + // the free simply becomes a no-op. This is fine since the slab will be + // released eventually, and we do not want the freed allocations to be used + // again in the meanwhile. + // + // @param mode slab release mode + // + // @param hint hint of an allocation belong to the slab that we want + // released. If this is nullptr, a random slab will be + // selected for releasing. + // + // @param shouldAbortFn invoked in the code to see if this release slab + // process should be aborted + // + // @return SlabReleaseContext + // isReleased == true means the slab is already released + // as there was no active allocation to be freed. If not, the + // caller is responsible for ensuring that all active allocations + // returned by getActiveAllocs are freed back and + // + // @throw std::invalid_argument if the hint is invalid. + // + // @throw exception::SlabReleaseAborted if slab release is aborted due to + // shouldAbortFn returning true. + SlabReleaseContext startSlabRelease( + SlabReleaseMode mode, const void *hint, SlabReleaseAbortFn shouldAbortFn = []() { return false; }); + + // Aborting a previously started SlabRelease will not restore already + // freed allocations. So the end state may not be exactly same as + // pre-startSlabRelease. + // + // precondition: startSlabRelease must be called before this, and the + // context must be valid and the slab has not yet been + // released. + // + // @param context the slab release context returned by startSlabRelease + // @throw std::invalid_argument + // an invalid_argument is thrown when the context is invalid or + // the context is already released or all allocs are freed. + void abortSlabRelease(const SlabReleaseContext &context); + + // precondition: startSlabRelease must be called before this, and the + // context must be valid and the slab has not yet been + // released. 
If context.isReleased() == true there is no + // need to call completeSlabRelease. + // + // @param context the slab release context returned by startSlabRelease + // @throw std::runtime_error + // a runtime_error is thrown when the context is invalid or + // the slab associated with the context is not in a valid state. + void completeSlabRelease(const SlabReleaseContext &context); + + // check if the slab has all its allocations freed back to the + // AllocationClass. This must be called only for a slab that has an active + // slab release. + // + // @param slab the slab that we are interested in. + // @return True if all the allocations are freed back to the allocator. + // False if not. + // @throw std::runtime_error if the slab does not have the allocStateMap + // entry. + bool allFreed(const Slab *slab) const; + +private: + // check if the state of the AllocationClass is valid and if not, throws an + // std::invalid_argument exception. This is intended for use in + // constructors. + void checkState() const; + + // grabs a slab from the free slabs and makes it the currentSlab_ + // precondition: freeSlabs_ must not be empty. + void setupCurrentSlabLocked(); + + // returns true if the allocation can be satisfied from the current slab. + bool canAllocateFromCurrentSlabLocked() const noexcept; + + // returns a new allocation from the current slab. Caller needs to ensure + // that precondition canAllocateFromCurrentSlabLocked is satisfied + void *allocateFromCurrentSlabLocked() noexcept; + + // get a suitable slab for being released from either the set of free slabs + // or the allocated slabs. + const Slab *getSlabForReleaseLocked() const noexcept; + + // prune the freeAllocs_ to eliminate any allocs belonging to this slab and + // also return a list of active allocations. If there are any active + // allocations, it maintains the freeState for the slab release. 
+ // + // @param slab Eliminate allocs belonging to this slab + // + // @param shouldAbortFn invoked in the code to see if this release slab + // process should be aborted + // + // @return a pair with + // a bool indicating if slab release should be aborted or not and + // a list of active allocations if should abort is false. + // + // @throw exception::SlabReleaseAborted if slab release is aborted due to + // shouldAbortFn returning true. + std::pair> pruneFreeAllocs( + const Slab *slab, SlabReleaseAbortFn shouldAbortFn = []() { return false; }); + + // wraps around allFreed and blocks until all the allocations belonging to + // the slab are freed back. + void waitUntilAllFreed(const Slab *slab); + + // return the allocation's index into the slab. It is the caller's + // responsibility to ensure that the alloc belongs to the slab and is valid. + size_t getAllocIdx(const Slab *slab, void *alloc) const noexcept; + + // return the allocation pointer into the slab for a given index. + void *getAllocForIdx(const Slab *slab, size_t idx) const; + + uintptr_t getSlabPtrValue(const Slab *slab) const noexcept { + return reinterpret_cast(slab); + } + + // Internal logic for checking if an allocation has been freed. This should + // be called under a lock. + // + // @param ctx release context for the slab owning the alloc + // @param memory memory to check + // + // @throws std::runtime_error if the slab cannot be found inside + // slabReleaseAllocMap_ + bool isAllocFreedLocked(const SlabReleaseContext &ctx, void *memory) const; + + // Checks if the memory belongs to a slab being released, and if that slab + // matches with the provided release context. + // + // @param ctx release context for the slab owning this alloc + // @param memory memory to check + // + // @throws std::invalid_argument if the memory does not belong to a slab of + // this slab class, or if the slab is not actively being released, + // or if the context belongs to a different slab. 
+ void checkSlabInRelease(const SlabReleaseContext &ctx, const void *memory) const; + + // @param slab the slab to create a new release alloc map + // + // throw std::runtime_error if fail to create a new release alloc map + void createSlabReleaseAllocMapLocked(const Slab *slab); + + // @param slab the slab associated with a release alloc map + // + // @return std::vector& this is the alloc state map + // @throws std::out_of_range if alloc map does not exist + std::vector &getSlabReleaseAllocMapLocked(const Slab *slab); + + // acquires a new slab for this allocation class. + void addSlabLocked(Slab *slab); + + // allocate memory corresponding to the allocation size of this + // AllocationClass. + // + // @return ptr to the memory of allocationSize_ chunk or nullptr if we + // don't have any free memory. The caller will have to add a slab + // to this slab class to make further allocations out of it. + void *allocateLocked(); + + // lock for serializing access to currSlab_, currOffset, allocatedSlabs_, + // freeSlabs_, freedAllocations_. + mutable std::mutex lock_; + + // the allocation class id. + const ClassId classId_ {-1}; + + // the allocation pool id. + const PoolId poolId_ {-1}; + + // the chunk size for the allocations of this allocation class. + const uint32_t allocationSize_ {0}; + + // the offset of the next available allocation. + uint32_t currOffset_ {0}; + + // the next available chunk that can be allocated from the current active + // slab. If nullptr, then there are no active slabs that are being chunked + // out. + Slab *currSlab_ {nullptr}; + + const SlabAllocator &slabAlloc_; + + // slabs that belong to this allocation class and are not entirely free. The + // un-used allocations in this are present in freedAllocations_. + // TODO store the index of the slab instead of the actual pointer. Pointer + // is 8byte vs index which can be half of it. + std::vector allocatedSlabs_; + + // slabs which are empty and can be used for allocations. 
+ // TODO use an intrusive container on the freed slabs. + std::vector freeSlabs_; + + // list of freed allocations for this allocation class. + using FreeList = std::list; + FreeList freedAllocations_ {}; + + // Partition the 'freeAllocs' into two different SList depending on whether + // they are in slab memory or outside. Does not take a lock. If access to + // 'freeAllocs' requires a lock, it should be taken by the caller. + void partitionFreeAllocs(const Slab *slab, FreeList &freeAllocs, FreeList &inSlab, FreeList ¬InSlab); + + // if this is false, then we have run out of memory to do any more + // allocations. Reading this outside the lock_ will be racy. + std::atomic canAllocate_ {true}; + + std::atomic activeReleases_ {0}; + + // stores the list of outstanding allocations for a given slab. This is + // created when we start a slab release process and if there are any active + // allocations need to be marked as free. + std::unordered_map> slabReleaseAllocMap_; + + // Starting releasing a slab is serialized across threads. + // Afterwards, the multiple threads can proceed in parallel to + // complete the slab release + std::mutex startSlabReleaseLock_; + + // maximum number of free allocs to walk through during pruning + // before dropping the lock + static constexpr unsigned int kFreeAllocsPruneLimit = 4 * 1024; + + // Number of micro seconds to sleep between the batches during pruning. + // This is needed to avoid other threads from starving for lock. + static constexpr unsigned int kFreeAllocsPruneSleepMicroSecs = 1000; + + // Number of allocations ahead to prefetch when iterating over each + // allocation in a slab. 
+ static constexpr unsigned int kForEachAllocPrefetchOffset = 16; }; -} // namespace cachelib -} // namespace facebook +} // namespace cachelib +} // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/MemoryAllocator.h b/mooncake-store/include/cachelib_memory_allocator/MemoryAllocator.h index a2b5f2882..eeae029f5 100644 --- a/mooncake-store/include/cachelib_memory_allocator/MemoryAllocator.h +++ b/mooncake-store/include/cachelib_memory_allocator/MemoryAllocator.h @@ -56,391 +56,382 @@ namespace cachelib { // uses the slab allocator and slab memory pool to actually allocate the memory. // Read the description at the beginning of the file for more info class MemoryAllocator { - public: - // maximum number of allocation classes that we support. - static constexpr unsigned int kMaxClasses = 1 << 7; - static constexpr ClassId kMaxClassId = kMaxClasses - 1; - - // maximum number of memory pools that we support. - static constexpr unsigned int kMaxPools = MemoryPoolManager::kMaxPools; - static constexpr PoolId kMaxPoolId = kMaxPools - 1; - // default of 8 byte aligned. - static constexpr uint32_t kAlignment = sizeof(void*); - - // config for the slab memory allocator. - struct Config { - Config() {} - Config(std::set sizes) : allocSizes(std::move(sizes)) {} - - // Hint to determine the allocation class sizes - std::set allocSizes; - }; - - // See Feishu document. - MemoryAllocator(Config config, void* headerMemoryStart, - size_t headerMemorySize, void* slabMemoryStart, - size_t slabMemorySize); - - MemoryAllocator(const MemoryAllocator&) = delete; - MemoryAllocator& operator=(const MemoryAllocator&) = delete; - - // allocate memory of corresponding size. - // - // @param id the pool id to be used for this allocation. - // @param size the size for the allocation. - // @return pointer to the memory corresponding to the allocation. nullptr if - // memory is not available. 
- // - // @throw std::invalid_argument if the poolId is invalid or the size is - // invalid. - void* allocate(PoolId id, uint32_t size); - - // free the memory back to the allocator. - // - // @throw std::invalid_argument if the memory does not belong to any active - // allocation handed out by this allocator. - void free(void* memory); - - // Memory pool interface. The memory pools must be established before the - // first allocation happens. Currently we dont support adding / removing - // pools dynamically. - // - // @param name the name of the pool - // @param size the size of the pool - // @param allocSize the set of allocation sizes for this memory pool, - // if empty, a default one will be used - // @param ensureProvisionable ensures that the size of the pool is enough - // to provision one slab to each allocation - // class - // - // @return a valid pool id that the caller can use on successful return. - // - // @throws std::invalid_argument if the name or size is inappropriate or - // if there is not enough space left for this pool. - // std::logic_error if we have run out the allowed number of pools. - PoolId addPool(std::string name, size_t size, - const std::set& allocSizes = {}, - bool ensureProvisionable = false); - - // shrink the existing pool by _bytes_ . - // @param id the id for the pool - // @param bytes the number of bytes to be taken away from the pool - // @return true if the operation succeeded. false if the size of the pool - // is - // smaller than _bytes_ - // @throw std::invalid_argument if the poolId is invalid. - bool shrinkPool(PoolId pid, size_t bytes) { - return memoryPoolManager_.shrinkPool(pid, bytes); - } - - // grow an existing pool by _bytes_. This will fail if there is no - // available memory across all the pools to provide for this pool - // @param id the pool id to be grown. - // @param bytes the number of bytes to be added to the pool. - // @return true if the pool was grown. 
false if the necessary number of - // bytes were not available. - // @throw std::invalid_argument if the poolId is invalid. - bool growPool(PoolId pid, size_t bytes) { - return memoryPoolManager_.growPool(pid, bytes); - } - - // move bytes from one pool to another. The source pool should be at least - // _bytes_ in size. - // - // @param src the pool to be sized down and giving the memory. - // @param dest the pool receiving the memory. - // @param bytes the number of bytes to move from src to dest. - // @param true if the resize succeeded. false if src does does not have - // correct size to do the transfer. - // @throw std::invalid_argument if src or dest is invalid pool - bool resizePools(PoolId src, PoolId dest, size_t bytes) { - return memoryPoolManager_.resizePools(src, dest, bytes); - } - - // Start the process of releasing a slab from this allocation class id and - // pool id. The release could be for a pool resizing or allocation class - // rebalancing. If a valid context is returned, the caller needs to free the - // active allocations in the valid context and call completeSlabRelease. A - // null context indicates that a slab was successfully released. throws on - // any other error. - // - // @param pid the pool id - // @param victim the allocation class id in the pool. if invalid, we try - // to pick any free slab that is available from the pool. - // @param receiver the allocation class that will get a slab - // @param mode the mode for slab release (rebalance/resize) - // @param hint hint referring to the slab. this can be an allocation that - // the user knows to exist in the slab. If this is nullptr, a - // random slab is selected from the pool and allocation class. - // @param shouldAbortFn invoked in the code to see if this release slab - // process should be aborted - // - // @return a valid context. If the slab is already released, then the - // caller needs to do nothing. 
If it is not released, then the - // caller needs to free the allocations and call - // completeSlabRelease with the same context. - // - // @throw std::invalid_argument if the hint is invalid or if the pid or cid - // is invalid. Or if the mode is set to kResize but the receiver is - // also specified. Receiver class id can only be specified if the - // mode is set to kRebalance. - // @throw exception::SlabReleaseAborted if slab release is aborted due to - // shouldAbortFn returning true. - SlabReleaseContext startSlabRelease( - PoolId pid, ClassId victim, ClassId receiver, SlabReleaseMode mode, - const void* hint = nullptr, - SlabReleaseAbortFn shouldAbortFn = []() { return false; }); - - // Check if an alloc is free (during slab release) - // - // @param ctx SlabReleaseContext to enforce that this is only called - // during slab release. - // @param memory alloc being checked. - // - // @return true if the alloc is free. - // - // @throws std::invalid_argument if the memory does not belong to a slab of - // this slab class, or if the slab is not actively being released, - // or if the context belongs to a different slab. - bool isAllocFreed(const SlabReleaseContext& ctx, void* memory) const; - - // Check if the slab has all its active allocations freed. - // - // @param ctx context returned by startSlabRelease. - // @return true if all allocs have been freed back to the allocator - // false otherwise - // - // @throw std::invalid_argument if the pool id or allocation class id - // associated with the context is invalid. - // - // std::runtime_error if the slab associatec with the context - // does not have the allocStateMap entry. - bool allAllocsFreed(const SlabReleaseContext& ctx) const; - - // See AllocationClass::processAllocForRelease - void processAllocForRelease( - const SlabReleaseContext& ctx, void* memory, - const std::function& callback) const; - - // Aborts the slab release process when there were active allocations in - // the slab. 
This should be called with the same non-null context that was - // created using startSlabRelease and after the user FAILS to free all the - // active allocations in the context. The state of the allocation class may - // not exactly same as pre-startSlabRelease state because freed allocations - // while trying to release the slab are not restored. - // - // @param context the context returned by startSlabRelease - // - // @throw std::invalid_argument if the context is invalid or - // context is already released or all allocs in the context are - // free - void abortSlabRelease(const SlabReleaseContext& context); - - // completes the slab release process when there were active allocations in - // the slab. This should be called with the same non-null context that was - // created using startSlabRelease and after the user frees all the active - // allocations in the context. After this, the slab is released - // appropriately. Calling this with a context that has the slab already - // released is a no-op. This will block until all the active allocations are - // completely returned to the allocator. - // - // @param context a valid context - // @throw std::invalid_argument if the context is invalid. - // Or if the mode is set to kResize but the receiver is - // also specified. Receiver class id can only be specified if the - // mode is set to kRebalance. - void completeSlabRelease(const SlabReleaseContext& context); - - // get the PoolId corresponding to the pool name. - // - // @param name the name of the pool - // @return poold id corresponding to the name if it exists or - // kInvalidPoolId if name is not a recognized pool. - PoolId getPoolId(const std::string& name) const noexcept; - - // get the pool name corresponding to its PoolId - // - // @param id the id of the pool - // @return pool name of this pool - // @throw std::logic_error if the pool id is invalid. 
- std::string getPoolName(PoolId id) const { - return memoryPoolManager_.getPoolNameById(id); - } - - // return the usable size in bytes for this allocator. - size_t getMemorySize() const noexcept { - return slabAllocator_.getNumUsableSlabs() * Slab::kSize; - } - - size_t getUnreservedMemorySize() const noexcept { - return memoryPoolManager_.getBytesUnReserved(); - } - - // return the total memory advised away - size_t getAdvisedMemorySize() const noexcept { - return memoryPoolManager_.getAdvisedMemorySize(); - } - - // return the list of pool ids for this allocator. - std::set getPoolIds() const { - return memoryPoolManager_.getPoolIds(); - } - - // fetches the memory pool for the id if one exists. This is purely to get - // information out of the pool. - // - // @return const reference to memory pool for the id if one exists. - // @throw std::invalid_argument if the pool id is invalid. - const MemoryPool& getPool(PoolId id) const { - return memoryPoolManager_.getPoolById(id); - } - - // obtain list of pools that are currently occupying more memory than their - // current limit. - std::set getPoolsOverLimit() const { - return memoryPoolManager_.getPoolsOverLimit(); - } - - // return true if all the memory for the allocator is allocated to some - // pool. - // this is leveraged by pool rebalancers to determine if the rebalancing has - // to start. - bool allSlabsAllocated() const noexcept { - return slabAllocator_.allSlabsAllocated(); - } - - // returns true if all the slab memory for the pool is accounted for in some - // allocation class belonging to the pool. - // - // @throw std::invalid_argument if the pool id does not belong to a valid - // pool. - bool allSlabsAllocated(PoolId pid) const { - return getPool(pid).allSlabsAllocated(); - } - - // fetch the pool and allocation class information for the memory - // corresponding to a memory allocation from the allocator. 
Caller is - // expected to supply a memory that is valid and allocated from this - // allocator. - // - // @param memory the memory belonging to the slab allocator - // @return pair of poolId and classId of the memory - // @throw std::invalid_argument if the memory doesn't belong to allocator - FOLLY_ALWAYS_INLINE AllocInfo getAllocInfo(const void* memory) const { - const auto* header = slabAllocator_.getSlabHeader(memory); - if (!header) { - throw std::invalid_argument( - fmt::format("invalid header for slab memory addr: {}", memory)); - } - return AllocInfo{header->poolId, header->classId, header->allocSize}; - } - - // fetch the allocation size for the pool id and class id. - // - // @param pid the pool id - // @param cid the allocation class id - // - // @return the allocation size corresponding to this pair. - // @throw std::invalid_argument if the ids are invalid. - uint32_t getAllocSize(PoolId pid, ClassId cid) const { - const auto& pool = getPool(pid); - const auto& allocClass = pool.getAllocationClass(cid); - return allocClass.getAllocSize(); - } - - // return the default allocation sizes for this allocator. - const std::set& getAllocSizes() const noexcept { - return config_.allocSizes; - } - - // fetch the allocation class info corresponding to a given size in a pool. - // - // @param poolId the pool to be allocated from - // @param nBytes the allocation size - // @return a valid class id on success - // @throw std::invalid_argument if the poolId is invalid or the size is - // outside of the allocation sizes for the memory pool. - ClassId getAllocationClassId(PoolId poolId, uint32_t nBytes) const; - - // Traverse each slab and call user defined callback on each allocation - // within the slab. Callback will be invoked if the slab is not advised, - // marked for release or currently being moved. Callbacks will be invoked - // irrespective of whether the slab is allocated for free. 
- // - // @param callback Callback to be executed on each allocation - // @return The number of slabs skipped - // Slab can be skipped because it is being released or - // already released but not yet assigned to another pool - // or allocation class. - template - uint64_t forEachAllocation(AllocTraversalFn&& callback) { - uint64_t slabSkipped = 0; - for (unsigned int idx = 0; idx < slabAllocator_.getNumUsableSlabs(); - ++idx) { - Slab* slab = slabAllocator_.getSlabForIdx(idx); - const auto slabHdr = slabAllocator_.getSlabHeader(slab); - if (!slabHdr) { - continue; - } - auto classId = slabHdr->classId; - auto poolId = slabHdr->poolId; - if (poolId == Slab::kInvalidPoolId || - classId == Slab::kInvalidClassId || slabHdr->isAdvised() || - slabHdr->isMarkedForRelease()) { - ++slabSkipped; - continue; - } - auto& pool = memoryPoolManager_.getPoolById(poolId); - auto slabIterationStatus = pool.forEachAllocation( - classId, slab, std::forward(callback)); - if (slabIterationStatus == - SlabIterationStatus::kSkippedCurrentSlabAndContinue) { - ++slabSkipped; - } else if (slabIterationStatus == - SlabIterationStatus::kAbortIteration) { - return slabSkipped; - } - } - return slabSkipped; - } - - // returns a default set of allocation sizes with given size range and - // factor. - // - // @param factor the factor by which the alloc sizes grow. - // @param maxSize the maximum allowed allocation size - // @param minSize the minimum allowed allocation size - // @param reduceFragmentation if true chunk sizes will be increased to the - // maximum size that maintains the number of - // chunks per slab as determined using factor. - // - // @return std::set of allocation sizes that all fit within maxSize. - // - // @throw std::invalid_argument if the maxSize is more than the slab size. 
- // @throw std::invalid_argument if the factor is <= 1.0 - // @throw std::invalid_argument if the factor is not incrementing large - // enough when reduceFragmentation is enabled - - static std::set generateAllocSizes( - double factor = 1.25, uint32_t maxSize = Slab::kSize, - uint32_t minSize = 72, bool reduceFragmentation = false); - - private: - // @param memory pointer to the memory. - // @return the MemoryPool corresponding to the memory. - // @throw std::invalid_argument if the memory does not belong to any active - // allocation handed out by this allocator. - MemoryPool& getMemoryPool(const void* memory) const; - - // the config for the allocator. - const Config config_; - - // the instance of slab allocator we will use to allocate slabs. - SlabAllocator slabAllocator_; - - // the instance used for book keeping information about the memory pools - // configuration. - MemoryPoolManager memoryPoolManager_; +public: + // maximum number of allocation classes that we support. + static constexpr unsigned int kMaxClasses = 1 << 7; + static constexpr ClassId kMaxClassId = kMaxClasses - 1; + + // maximum number of memory pools that we support. + static constexpr unsigned int kMaxPools = MemoryPoolManager::kMaxPools; + static constexpr PoolId kMaxPoolId = kMaxPools - 1; + // default of 8 byte aligned. + static constexpr uint32_t kAlignment = sizeof(void *); + + // config for the slab memory allocator. + struct Config { + Config() { + } + Config(std::set sizes) : allocSizes(std::move(sizes)) { + } + + // Hint to determine the allocation class sizes + std::set allocSizes; + }; + + // See Feishu document. + MemoryAllocator(Config config, void *headerMemoryStart, size_t headerMemorySize, void *slabMemoryStart, + size_t slabMemorySize); + + MemoryAllocator(const MemoryAllocator &) = delete; + MemoryAllocator &operator=(const MemoryAllocator &) = delete; + + // allocate memory of corresponding size. + // + // @param id the pool id to be used for this allocation. 
+ // @param size the size for the allocation. + // @return pointer to the memory corresponding to the allocation. nullptr if + // memory is not available. + // + // @throw std::invalid_argument if the poolId is invalid or the size is + // invalid. + void *allocate(PoolId id, uint32_t size); + + // free the memory back to the allocator. + // + // @throw std::invalid_argument if the memory does not belong to any active + // allocation handed out by this allocator. + void free(void *memory); + + // Memory pool interface. The memory pools must be established before the + // first allocation happens. Currently we dont support adding / removing + // pools dynamically. + // + // @param name the name of the pool + // @param size the size of the pool + // @param allocSize the set of allocation sizes for this memory pool, + // if empty, a default one will be used + // @param ensureProvisionable ensures that the size of the pool is enough + // to provision one slab to each allocation + // class + // + // @return a valid pool id that the caller can use on successful return. + // + // @throws std::invalid_argument if the name or size is inappropriate or + // if there is not enough space left for this pool. + // std::logic_error if we have run out the allowed number of pools. + PoolId addPool(std::string name, size_t size, const std::set &allocSizes = {}, + bool ensureProvisionable = false); + + // shrink the existing pool by _bytes_ . + // @param id the id for the pool + // @param bytes the number of bytes to be taken away from the pool + // @return true if the operation succeeded. false if the size of the pool + // is + // smaller than _bytes_ + // @throw std::invalid_argument if the poolId is invalid. + bool shrinkPool(PoolId pid, size_t bytes) { + return memoryPoolManager_.shrinkPool(pid, bytes); + } + + // grow an existing pool by _bytes_. 
This will fail if there is no + // available memory across all the pools to provide for this pool + // @param id the pool id to be grown. + // @param bytes the number of bytes to be added to the pool. + // @return true if the pool was grown. false if the necessary number of + // bytes were not available. + // @throw std::invalid_argument if the poolId is invalid. + bool growPool(PoolId pid, size_t bytes) { + return memoryPoolManager_.growPool(pid, bytes); + } + + // move bytes from one pool to another. The source pool should be at least + // _bytes_ in size. + // + // @param src the pool to be sized down and giving the memory. + // @param dest the pool receiving the memory. + // @param bytes the number of bytes to move from src to dest. + // @param true if the resize succeeded. false if src does does not have + // correct size to do the transfer. + // @throw std::invalid_argument if src or dest is invalid pool + bool resizePools(PoolId src, PoolId dest, size_t bytes) { + return memoryPoolManager_.resizePools(src, dest, bytes); + } + + // Start the process of releasing a slab from this allocation class id and + // pool id. The release could be for a pool resizing or allocation class + // rebalancing. If a valid context is returned, the caller needs to free the + // active allocations in the valid context and call completeSlabRelease. A + // null context indicates that a slab was successfully released. throws on + // any other error. + // + // @param pid the pool id + // @param victim the allocation class id in the pool. if invalid, we try + // to pick any free slab that is available from the pool. + // @param receiver the allocation class that will get a slab + // @param mode the mode for slab release (rebalance/resize) + // @param hint hint referring to the slab. this can be an allocation that + // the user knows to exist in the slab. If this is nullptr, a + // random slab is selected from the pool and allocation class. 
+ // @param shouldAbortFn invoked in the code to see if this release slab + // process should be aborted + // + // @return a valid context. If the slab is already released, then the + // caller needs to do nothing. If it is not released, then the + // caller needs to free the allocations and call + // completeSlabRelease with the same context. + // + // @throw std::invalid_argument if the hint is invalid or if the pid or cid + // is invalid. Or if the mode is set to kResize but the receiver is + // also specified. Receiver class id can only be specified if the + // mode is set to kRebalance. + // @throw exception::SlabReleaseAborted if slab release is aborted due to + // shouldAbortFn returning true. + SlabReleaseContext startSlabRelease( + PoolId pid, ClassId victim, ClassId receiver, SlabReleaseMode mode, const void *hint = nullptr, + SlabReleaseAbortFn shouldAbortFn = []() { return false; }); + + // Check if an alloc is free (during slab release) + // + // @param ctx SlabReleaseContext to enforce that this is only called + // during slab release. + // @param memory alloc being checked. + // + // @return true if the alloc is free. + // + // @throws std::invalid_argument if the memory does not belong to a slab of + // this slab class, or if the slab is not actively being released, + // or if the context belongs to a different slab. + bool isAllocFreed(const SlabReleaseContext &ctx, void *memory) const; + + // Check if the slab has all its active allocations freed. + // + // @param ctx context returned by startSlabRelease. + // @return true if all allocs have been freed back to the allocator + // false otherwise + // + // @throw std::invalid_argument if the pool id or allocation class id + // associated with the context is invalid. + // + // std::runtime_error if the slab associatec with the context + // does not have the allocStateMap entry. 
+ bool allAllocsFreed(const SlabReleaseContext &ctx) const; + + // See AllocationClass::processAllocForRelease + void processAllocForRelease(const SlabReleaseContext &ctx, void *memory, + const std::function &callback) const; + + // Aborts the slab release process when there were active allocations in + // the slab. This should be called with the same non-null context that was + // created using startSlabRelease and after the user FAILS to free all the + // active allocations in the context. The state of the allocation class may + // not exactly same as pre-startSlabRelease state because freed allocations + // while trying to release the slab are not restored. + // + // @param context the context returned by startSlabRelease + // + // @throw std::invalid_argument if the context is invalid or + // context is already released or all allocs in the context are + // free + void abortSlabRelease(const SlabReleaseContext &context); + + // completes the slab release process when there were active allocations in + // the slab. This should be called with the same non-null context that was + // created using startSlabRelease and after the user frees all the active + // allocations in the context. After this, the slab is released + // appropriately. Calling this with a context that has the slab already + // released is a no-op. This will block until all the active allocations are + // completely returned to the allocator. + // + // @param context a valid context + // @throw std::invalid_argument if the context is invalid. + // Or if the mode is set to kResize but the receiver is + // also specified. Receiver class id can only be specified if the + // mode is set to kRebalance. + void completeSlabRelease(const SlabReleaseContext &context); + + // get the PoolId corresponding to the pool name. + // + // @param name the name of the pool + // @return poold id corresponding to the name if it exists or + // kInvalidPoolId if name is not a recognized pool. 
+ PoolId getPoolId(const std::string &name) const noexcept; + + // get the pool name corresponding to its PoolId + // + // @param id the id of the pool + // @return pool name of this pool + // @throw std::logic_error if the pool id is invalid. + std::string getPoolName(PoolId id) const { + return memoryPoolManager_.getPoolNameById(id); + } + + // return the usable size in bytes for this allocator. + size_t getMemorySize() const noexcept { + return slabAllocator_.getNumUsableSlabs() * Slab::kSize; + } + + size_t getUnreservedMemorySize() const noexcept { + return memoryPoolManager_.getBytesUnReserved(); + } + + // return the total memory advised away + size_t getAdvisedMemorySize() const noexcept { + return memoryPoolManager_.getAdvisedMemorySize(); + } + + // return the list of pool ids for this allocator. + std::set getPoolIds() const { + return memoryPoolManager_.getPoolIds(); + } + + // fetches the memory pool for the id if one exists. This is purely to get + // information out of the pool. + // + // @return const reference to memory pool for the id if one exists. + // @throw std::invalid_argument if the pool id is invalid. + const MemoryPool &getPool(PoolId id) const { + return memoryPoolManager_.getPoolById(id); + } + + // obtain list of pools that are currently occupying more memory than their + // current limit. + std::set getPoolsOverLimit() const { + return memoryPoolManager_.getPoolsOverLimit(); + } + + // return true if all the memory for the allocator is allocated to some + // pool. + // this is leveraged by pool rebalancers to determine if the rebalancing has + // to start. + bool allSlabsAllocated() const noexcept { + return slabAllocator_.allSlabsAllocated(); + } + + // returns true if all the slab memory for the pool is accounted for in some + // allocation class belonging to the pool. + // + // @throw std::invalid_argument if the pool id does not belong to a valid + // pool. 
+ bool allSlabsAllocated(PoolId pid) const { + return getPool(pid).allSlabsAllocated(); + } + + // fetch the pool and allocation class information for the memory + // corresponding to a memory allocation from the allocator. Caller is + // expected to supply a memory that is valid and allocated from this + // allocator. + // + // @param memory the memory belonging to the slab allocator + // @return pair of poolId and classId of the memory + // @throw std::invalid_argument if the memory doesn't belong to allocator + FOLLY_ALWAYS_INLINE AllocInfo getAllocInfo(const void *memory) const { + const auto *header = slabAllocator_.getSlabHeader(memory); + if (!header) { + throw std::invalid_argument(fmt::format("invalid header for slab memory addr: {}", memory)); + } + return AllocInfo {header->poolId, header->classId, header->allocSize}; + } + + // fetch the allocation size for the pool id and class id. + // + // @param pid the pool id + // @param cid the allocation class id + // + // @return the allocation size corresponding to this pair. + // @throw std::invalid_argument if the ids are invalid. + uint32_t getAllocSize(PoolId pid, ClassId cid) const { + const auto &pool = getPool(pid); + const auto &allocClass = pool.getAllocationClass(cid); + return allocClass.getAllocSize(); + } + + // return the default allocation sizes for this allocator. + const std::set &getAllocSizes() const noexcept { + return config_.allocSizes; + } + + // fetch the allocation class info corresponding to a given size in a pool. + // + // @param poolId the pool to be allocated from + // @param nBytes the allocation size + // @return a valid class id on success + // @throw std::invalid_argument if the poolId is invalid or the size is + // outside of the allocation sizes for the memory pool. + ClassId getAllocationClassId(PoolId poolId, uint32_t nBytes) const; + + // Traverse each slab and call user defined callback on each allocation + // within the slab. 
Callback will be invoked if the slab is not advised, + // marked for release or currently being moved. Callbacks will be invoked + // irrespective of whether the slab is allocated for free. + // + // @param callback Callback to be executed on each allocation + // @return The number of slabs skipped + // Slab can be skipped because it is being released or + // already released but not yet assigned to another pool + // or allocation class. + template + uint64_t forEachAllocation(AllocTraversalFn &&callback) { + uint64_t slabSkipped = 0; + for (unsigned int idx = 0; idx < slabAllocator_.getNumUsableSlabs(); ++idx) { + Slab *slab = slabAllocator_.getSlabForIdx(idx); + const auto slabHdr = slabAllocator_.getSlabHeader(slab); + if (!slabHdr) { + continue; + } + auto classId = slabHdr->classId; + auto poolId = slabHdr->poolId; + if (poolId == Slab::kInvalidPoolId || classId == Slab::kInvalidClassId || slabHdr->isAdvised() || + slabHdr->isMarkedForRelease()) { + ++slabSkipped; + continue; + } + auto &pool = memoryPoolManager_.getPoolById(poolId); + auto slabIterationStatus = pool.forEachAllocation(classId, slab, std::forward(callback)); + if (slabIterationStatus == SlabIterationStatus::kSkippedCurrentSlabAndContinue) { + ++slabSkipped; + } else if (slabIterationStatus == SlabIterationStatus::kAbortIteration) { + return slabSkipped; + } + } + return slabSkipped; + } + + // returns a default set of allocation sizes with given size range and + // factor. + // + // @param factor the factor by which the alloc sizes grow. + // @param maxSize the maximum allowed allocation size + // @param minSize the minimum allowed allocation size + // @param reduceFragmentation if true chunk sizes will be increased to the + // maximum size that maintains the number of + // chunks per slab as determined using factor. + // + // @return std::set of allocation sizes that all fit within maxSize. + // + // @throw std::invalid_argument if the maxSize is more than the slab size. 
+ // @throw std::invalid_argument if the factor is <= 1.0 + // @throw std::invalid_argument if the factor is not incrementing large + // enough when reduceFragmentation is enabled + + static std::set generateAllocSizes(double factor = 1.25, uint32_t maxSize = Slab::kSize, + uint32_t minSize = 72, bool reduceFragmentation = false); + +private: + // @param memory pointer to the memory. + // @return the MemoryPool corresponding to the memory. + // @throw std::invalid_argument if the memory does not belong to any active + // allocation handed out by this allocator. + MemoryPool &getMemoryPool(const void *memory) const; + + // the config for the allocator. + const Config config_; + + // the instance of slab allocator we will use to allocate slabs. + SlabAllocator slabAllocator_; + + // the instance used for book keeping information about the memory pools + // configuration. + MemoryPoolManager memoryPoolManager_; }; -} // namespace cachelib -} // namespace facebook +} // namespace cachelib +} // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/MemoryPool.h b/mooncake-store/include/cachelib_memory_allocator/MemoryPool.h index d34197e1f..b2c26f2f9 100644 --- a/mooncake-store/include/cachelib_memory_allocator/MemoryPool.h +++ b/mooncake-store/include/cachelib_memory_allocator/MemoryPool.h @@ -38,317 +38,313 @@ class SlabAllocator; // allocations per allocation class should amount for the total memory footprint // of this memory pool from the slab allocator's perspective. class MemoryPool { - public: - // creates a pool with the id and size. - // - // @param id the unique pool id. - // @param poolSize max size of the pool. - // @param alloc the slab allocator for requesting the slabs. - // @param allocSizes the set of allocation class sizes for this pool, - // sorted in increasing order. The largest size should be - // less than Slab::kSize. 
- // @throw std::invalid_argument if allocSizes is invalid - MemoryPool(PoolId id, - size_t poolSize, - SlabAllocator& alloc, - const std::set& allocSizes); - - MemoryPool(const MemoryPool&) = delete; - MemoryPool& operator=(const MemoryPool&) = delete; - - // returns the poolId of this memory pool. - PoolId getId() const noexcept { return id_; } - - // the configured size of the pool. - size_t getPoolSize() const noexcept { return maxSize_; } - - // return the current size of the pool that has been already advised. Note - // include the configured advised size if advising has not - // caught up. - size_t getPoolAdvisedSize() const noexcept { - return curSlabsAdvised_ * Slab::kSize; - } - - // return the usable size of the pool. - // Usable pool size is configured pool size minus advised away size - size_t getPoolUsableSize() const noexcept { - auto advisedSize = getPoolAdvisedSize(); - return maxSize_ <= advisedSize ? 0 : maxSize_ - advisedSize; - } - - // returns the allocation class sizes as configured in this pool - const std::vector& getAllocSizes() const noexcept { - return acSizes_; - } - - // returns true if the memory pools has more memory allocated than the - // current size. This is possible because we allow resizing the pool - // dynamically. - bool overLimit() const noexcept { - auto getCurrentUsedAndAdvisedSize = - getCurrentUsedSize() + getPoolAdvisedSize(); - return getCurrentUsedAndAdvisedSize > maxSize_; - } - - // returns the size of memory currently unallocated in this pool - size_t getUnAllocatedSlabMemory() const noexcept { - auto totalAllocSize = currSlabAllocSize_ + getPoolAdvisedSize(); - return totalAllocSize > maxSize_ ? 0 : maxSize_ - totalAllocSize; - } - - // returns the current memory that is allocated in this memory pool. - size_t getCurrentAllocSize() const noexcept { return currAllocSize_; } - - // returns current memory used by this memory pool, including slabs - // in the free list. 
- size_t getCurrentUsedSize() const noexcept; - - // returns true if the pool has allocated all the slabs it can for some - // allocation class. This does not mean that the pool is full since the - // allocation class corresponding to some allocation class can still have - // free memory available. - bool allSlabsAllocated() const noexcept { - auto currAdvisedSize = getPoolAdvisedSize(); - return (currSlabAllocSize_ + currAdvisedSize + Slab::kSize) > maxSize_; - } - - // allocates memory of at least _size_ bytes. - // - // @param size size of the allocation. - // @return pointer to allocation or nullptr on failure to allocate. - // @throw std::invalid_argument if size is invalid. - void* allocate(uint32_t size); - - // Allocate a slab with zeroed memory - // - // @return pointer to allocation or nullptr on failure to allocate. - // @throw std::invalid_argument if requestedSize is invalid. - void* allocateZeroedSlab(); - - // frees the memory back to the pool. throws an exception if the memory does - // not belong to this pool. - // - // @param memory pointer to the memory to be freed - // - // throws the following exceptions in cases where either the caller freed - // the wrong allocations to this pool or if there is an internal corruption - // of data structures - // @throw std::invalid_argument if the memory does not belong to this pool. - // @throw std::run_time_error if the slab class information is corrupted. - void free(void* memory); - - // resize the memory pool. This only adjusts the Pool size. It does not - // release the slabs back to the SlabAllocator if the new size is less than - // the current size. The caller is responsible for doing that through - // startSlabRelease() calls. - // - // @param size_t size the new size for this pool. - void resize(size_t size) noexcept { maxSize_ = size; } - - // Start the process of releasing a slab from this allocation class id and - // pool id. 
The release could be for a pool resizing or allocation class - // rebalancing. If a valid context is returned, the caller needs to free the - // active allocations in the valid context and call completeSlabRelease. A - // null context indicates that a slab was successfully released. throws on - // any other error. - // - // @param victim the allocation class id in the pool. if invalid, we try - // to pick from the free slabs if available - // @param receiver the allocation class that will get a slab - // @param mode the mode for slab release (rebalance/resize) - // @param hint hint referring to the slab. this can be an allocation that - // the user knows to exist in the slab. If this is nullptr, a - // random slab is selected from the pool and allocation class. - // @param shouldAbortFn invoked in the code to see if this release slab - // process should be aborted - // - // @return a valid context. If the slab is already released, then the - // caller needs to do nothing. If it is not released, then the caller - // needs to free the allocations and call completeSlabRelease with - // the same context. - // - // @throw std::invalid_argument if the hint is invalid or if the pid or cid - // is invalid. Or if the mode is set to kResize but the receiver is - // also specified. Receiver class id can only be specified if the mode - // is set to kRebalance. - // @throw exception::SlabReleaseAborted if slab release is aborted due to - // shouldAbortFn returning true. - SlabReleaseContext startSlabRelease( - ClassId victim, - ClassId receiver, - SlabReleaseMode mode, - const void* hint, - SlabReleaseAbortFn shouldAbortFn = []() { return false; }); - - // Aborts the slab release process when there were active allocations in - // the slab. This should be called with the same non-null context that was - // created using startSlabRelease and after the user FAILS to free all the - // active allocations in the context. 
The state of the allocation class may - // not exactly same as pre-startSlabRelease state because freed allocations - // while trying to release the slab are not restored. - // - // @param context context returned by startSlabRelease - // - // @throw std::invalid_argument if the context is invalid or - // context is already released or all allocs in the context are - // free - void abortSlabRelease(const SlabReleaseContext& context); - - // completes the slab release process when there were active allocations in - // the slab. This should be called with the same non-null context that was - // created using startSlabRelease and after the user frees all the active - // allocations in the context. After this, the slab is released - // appropriately. This will block until all the allocations are returned to - // the allocator. - // - // @param context context returned by startSlabRelease - // - // @throw std::invalid_argument if the context is invalid. - // Or if the mode is set to kResize but the receiver is - // also specified. Receiver class id can only be specified if the mode - // is set to kRebalance. - void completeSlabRelease(const SlabReleaseContext& context); - - // fetch the ClassId corresponding to the allocation class from this memory - // pool - // - // @param size the allocation size - // @return the allocations class id corresponding to the alloc size - // @throw std::invalid_argument if the size does not correspond to any - // allocation class. - ClassId getAllocationClassId(uint32_t size) const; - - // fetch the ClassId for the memory. - // - // @param memory pointer to allocated memory from this pool. - // @return the allocation class id for the memory - // @throw std::invalid_argument if the memory does not belong to this pool - // or a valid allocation class. - ClassId getAllocationClassId(const void* memory) const; - - // fetch the allocation class for inspection. This is merely to read the - // info about the allocation class. 
- // - // @param cid the allocation class id that we are looking for. - // @return pointer to the AllocationClass. guaranteed to be valid - // allocation class. - // @throw std::invalid_argument if the ClassId is invalid. - const AllocationClass& getAllocationClass(ClassId cid) const; - - // return the number of allocation ClassIds for this pool based on the - // allocation sizes that it was configured with. All allocations from this - // pool will have ClassId from [0 .. numClassId - 1] (inclusive). - unsigned int getNumClassId() const noexcept { - return static_cast(acSizes_.size()); - } - - // Gets allocation class for a given class id and calls forEachAllocation on - // that allocation class. - // - // @param callback Callback to be executed on each allocation - // - // @return SlabIterationStatus - template - SlabIterationStatus forEachAllocation(ClassId classId, - Slab* slab, - AllocTraversalFn&& callback) { - auto& allocClass = getAllocationClassFor(classId); - return allocClass.forEachAllocation( - slab, std::forward(callback)); - } - - private: - // container for storing a vector of AllocationClass. - using ACVector = std::vector>; - - // intended to be used by the constructor to verify the state of the memory - // pool, specifically when we deserialize from a serialized state - // @throw std::invalid_argument if any of the state is invalid. - void checkState() const; - - // get a slab for use based on the activeSize and maxSize. returns nullptr - // if out of slab memory. - Slab* getSlabLocked() noexcept; - - // create allocation classes corresponding to the pool's configuration. - ACVector createAllocationClasses() const; - - // @return AllocationClass corresponding to the memory, if it - // belongs to an AllocationClass - // - // @throw std::invalid_argument if the memory does not belong to this pool - // or is invalid. - AllocationClass& getAllocationClassFor(void* memory) const; - - // fetch the allocation class corresponding to the allocation size. 
returns - // - // @param size the allocation size requested. - // @return allocation class. - // @throw std::invalid_argument if the allocation size is out of range. - AllocationClass& getAllocationClassFor(uint32_t size) const; - - // fetch the allocation class corresponding to the class id. - // - // @param cid the allocation class id - // @return the allocation class - // @throw std::invalid_argument if the class id is invalid. - AllocationClass& getAllocationClassFor(ClassId cid) const; - - // helper function to release a slab back to either the slab allocator or to - // our free pool. - // @param mode the mode of the release operation - // @param slab the slab to be released. - // @param receiverClassId optional AC to receive this slab - void releaseSlab(SlabReleaseMode mode, - const Slab* slab, - ClassId receiverClassId); - - // create a slab release context from the free slabs if possible. - // - // @throw std::invalid_argument if there are no free slabs available. - SlabReleaseContext releaseFromFreeSlabs(); - - // mutex for serializing access to freeSlabs_ and the currSlabAllocSize_. - mutable std::mutex lock_; - - // the id for this memory pool - const PoolId id_{-1}; - - // the current max size of the memory pool. - std::atomic maxSize_{0}; - - // the current size of all the slab memory we have allocated for this pool - // that actively belong to one of its AllocationClasses. This does not - // include the memory under freeSlabs_. - std::atomic currSlabAllocSize_{0}; - - // the current size of all allocations from this memory pool. - // currAllocSize_ <= currSlabSize_ <= maxSize_ - std::atomic currAllocSize_{0}; - - // the allocator for slabs. - SlabAllocator& slabAllocator_; - - // slabs allocated from the slab allocator for this memory pool, that are - // not currently in use. 
- std::vector freeSlabs_; - - // sorted vector of allocation class sizes - const std::vector acSizes_; - - // vector of allocation classes for this pool, sorted by their allocation - // sizes and indexed by their class id. This vector does not change once it - // is initialized inside the constructor. so this can be accessed without - // grabbing the mutex. - const ACVector ac_; - - // Current configuration of advised away Slabs in the pool - std::atomic curSlabsAdvised_{0}; - - // number of slabs we released for resizes and rebalances - std::atomic nSlabResize_{0}; - std::atomic nSlabRebalance_{0}; - std::atomic nSlabReleaseAborted_{0}; +public: + // creates a pool with the id and size. + // + // @param id the unique pool id. + // @param poolSize max size of the pool. + // @param alloc the slab allocator for requesting the slabs. + // @param allocSizes the set of allocation class sizes for this pool, + // sorted in increasing order. The largest size should be + // less than Slab::kSize. + // @throw std::invalid_argument if allocSizes is invalid + MemoryPool(PoolId id, size_t poolSize, SlabAllocator &alloc, const std::set &allocSizes); + + MemoryPool(const MemoryPool &) = delete; + MemoryPool &operator=(const MemoryPool &) = delete; + + // returns the poolId of this memory pool. + PoolId getId() const noexcept { + return id_; + } + + // the configured size of the pool. + size_t getPoolSize() const noexcept { + return maxSize_; + } + + // return the current size of the pool that has been already advised. Note + // include the configured advised size if advising has not + // caught up. + size_t getPoolAdvisedSize() const noexcept { + return curSlabsAdvised_ * Slab::kSize; + } + + // return the usable size of the pool. + // Usable pool size is configured pool size minus advised away size + size_t getPoolUsableSize() const noexcept { + auto advisedSize = getPoolAdvisedSize(); + return maxSize_ <= advisedSize ? 
0 : maxSize_ - advisedSize; + } + + // returns the allocation class sizes as configured in this pool + const std::vector &getAllocSizes() const noexcept { + return acSizes_; + } + + // returns true if the memory pools has more memory allocated than the + // current size. This is possible because we allow resizing the pool + // dynamically. + bool overLimit() const noexcept { + auto getCurrentUsedAndAdvisedSize = getCurrentUsedSize() + getPoolAdvisedSize(); + return getCurrentUsedAndAdvisedSize > maxSize_; + } + + // returns the size of memory currently unallocated in this pool + size_t getUnAllocatedSlabMemory() const noexcept { + auto totalAllocSize = currSlabAllocSize_ + getPoolAdvisedSize(); + return totalAllocSize > maxSize_ ? 0 : maxSize_ - totalAllocSize; + } + + // returns the current memory that is allocated in this memory pool. + size_t getCurrentAllocSize() const noexcept { + return currAllocSize_; + } + + // returns current memory used by this memory pool, including slabs + // in the free list. + size_t getCurrentUsedSize() const noexcept; + + // returns true if the pool has allocated all the slabs it can for some + // allocation class. This does not mean that the pool is full since the + // allocation class corresponding to some allocation class can still have + // free memory available. + bool allSlabsAllocated() const noexcept { + auto currAdvisedSize = getPoolAdvisedSize(); + return (currSlabAllocSize_ + currAdvisedSize + Slab::kSize) > maxSize_; + } + + // allocates memory of at least _size_ bytes. + // + // @param size size of the allocation. + // @return pointer to allocation or nullptr on failure to allocate. + // @throw std::invalid_argument if size is invalid. + void *allocate(uint32_t size); + + // Allocate a slab with zeroed memory + // + // @return pointer to allocation or nullptr on failure to allocate. + // @throw std::invalid_argument if requestedSize is invalid. + void *allocateZeroedSlab(); + + // frees the memory back to the pool. 
throws an exception if the memory does + // not belong to this pool. + // + // @param memory pointer to the memory to be freed + // + // throws the following exceptions in cases where either the caller freed + // the wrong allocations to this pool or if there is an internal corruption + // of data structures + // @throw std::invalid_argument if the memory does not belong to this pool. + // @throw std::runtime_error if the slab class information is corrupted. + void free(void *memory); + + // resize the memory pool. This only adjusts the Pool size. It does not + // release the slabs back to the SlabAllocator if the new size is less than + // the current size. The caller is responsible for doing that through + // startSlabRelease() calls. + // + // @param size_t size the new size for this pool. + void resize(size_t size) noexcept { + maxSize_ = size; + } + + // Start the process of releasing a slab from this allocation class id and + // pool id. The release could be for a pool resizing or allocation class + // rebalancing. If a valid context is returned, the caller needs to free the + // active allocations in the valid context and call completeSlabRelease. A + // null context indicates that a slab was successfully released. throws on + // any other error. + // + // @param victim the allocation class id in the pool. if invalid, we try + // to pick from the free slabs if available + // @param receiver the allocation class that will get a slab + // @param mode the mode for slab release (rebalance/resize) + // @param hint hint referring to the slab. this can be an allocation that + // the user knows to exist in the slab. If this is nullptr, a + // random slab is selected from the pool and allocation class. + // @param shouldAbortFn invoked in the code to see if this release slab + // process should be aborted + // + // @return a valid context. If the slab is already released, then the + // caller needs to do nothing. 
If it is not released, then the + // caller needs to free the allocations and call + // completeSlabRelease with the same context. + // + // @throw std::invalid_argument if the hint is invalid or if the pid or cid + // is invalid. Or if the mode is set to kResize but the receiver is + // also specified. Receiver class id can only be specified if the + // mode is set to kRebalance. + // @throw exception::SlabReleaseAborted if slab release is aborted due to + // shouldAbortFn returning true. + SlabReleaseContext startSlabRelease( + ClassId victim, ClassId receiver, SlabReleaseMode mode, const void *hint, + SlabReleaseAbortFn shouldAbortFn = []() { return false; }); + + // Aborts the slab release process when there were active allocations in + // the slab. This should be called with the same non-null context that was + // created using startSlabRelease and after the user FAILS to free all the + // active allocations in the context. The state of the allocation class may + // not exactly same as pre-startSlabRelease state because freed allocations + // while trying to release the slab are not restored. + // + // @param context context returned by startSlabRelease + // + // @throw std::invalid_argument if the context is invalid or + // context is already released or all allocs in the context are + // free + void abortSlabRelease(const SlabReleaseContext &context); + + // completes the slab release process when there were active allocations in + // the slab. This should be called with the same non-null context that was + // created using startSlabRelease and after the user frees all the active + // allocations in the context. After this, the slab is released + // appropriately. This will block until all the allocations are returned to + // the allocator. + // + // @param context context returned by startSlabRelease + // + // @throw std::invalid_argument if the context is invalid. + // Or if the mode is set to kResize but the receiver is + // also specified. 
Receiver class id can only be specified if the + // mode is set to kRebalance. + void completeSlabRelease(const SlabReleaseContext &context); + + // fetch the ClassId corresponding to the allocation class from this memory + // pool + // + // @param size the allocation size + // @return the allocations class id corresponding to the alloc size + // @throw std::invalid_argument if the size does not correspond to any + // allocation class. + ClassId getAllocationClassId(uint32_t size) const; + + // fetch the ClassId for the memory. + // + // @param memory pointer to allocated memory from this pool. + // @return the allocation class id for the memory + // @throw std::invalid_argument if the memory does not belong to this pool + // or a valid allocation class. + ClassId getAllocationClassId(const void *memory) const; + + // fetch the allocation class for inspection. This is merely to read the + // info about the allocation class. + // + // @param cid the allocation class id that we are looking for. + // @return pointer to the AllocationClass. guaranteed to be valid + // allocation class. + // @throw std::invalid_argument if the ClassId is invalid. + const AllocationClass &getAllocationClass(ClassId cid) const; + + // return the number of allocation ClassIds for this pool based on the + // allocation sizes that it was configured with. All allocations from this + // pool will have ClassId from [0 .. numClassId - 1] (inclusive). + unsigned int getNumClassId() const noexcept { + return static_cast(acSizes_.size()); + } + + // Gets allocation class for a given class id and calls forEachAllocation on + // that allocation class. 
+ // + // @param callback Callback to be executed on each allocation + // + // @return SlabIterationStatus + template + SlabIterationStatus forEachAllocation(ClassId classId, Slab *slab, AllocTraversalFn &&callback) { + auto &allocClass = getAllocationClassFor(classId); + return allocClass.forEachAllocation(slab, std::forward(callback)); + } + +private: + // container for storing a vector of AllocationClass. + using ACVector = std::vector>; + + // intended to be used by the constructor to verify the state of the memory + // pool, specifically when we deserialize from a serialized state + // @throw std::invalid_argument if any of the state is invalid. + void checkState() const; + + // get a slab for use based on the activeSize and maxSize. returns nullptr + // if out of slab memory. + Slab *getSlabLocked() noexcept; + + // create allocation classes corresponding to the pool's configuration. + ACVector createAllocationClasses() const; + + // @return AllocationClass corresponding to the memory, if it + // belongs to an AllocationClass + // + // @throw std::invalid_argument if the memory does not belong to this pool + // or is invalid. + AllocationClass &getAllocationClassFor(void *memory) const; + + // fetch the allocation class corresponding to the allocation size. returns + // + // @param size the allocation size requested. + // @return allocation class. + // @throw std::invalid_argument if the allocation size is out of range. + AllocationClass &getAllocationClassFor(uint32_t size) const; + + // fetch the allocation class corresponding to the class id. + // + // @param cid the allocation class id + // @return the allocation class + // @throw std::invalid_argument if the class id is invalid. + AllocationClass &getAllocationClassFor(ClassId cid) const; + + // helper function to release a slab back to either the slab allocator or to + // our free pool. + // @param mode the mode of the release operation + // @param slab the slab to be released. 
+ // @param receiverClassId optional AC to receive this slab + void releaseSlab(SlabReleaseMode mode, const Slab *slab, ClassId receiverClassId); + + // create a slab release context from the free slabs if possible. + // + // @throw std::invalid_argument if there are no free slabs available. + SlabReleaseContext releaseFromFreeSlabs(); + + // mutex for serializing access to freeSlabs_ and the currSlabAllocSize_. + mutable std::mutex lock_; + + // the id for this memory pool + const PoolId id_ {-1}; + + // the current max size of the memory pool. + std::atomic maxSize_ {0}; + + // the current size of all the slab memory we have allocated for this pool + // that actively belong to one of its AllocationClasses. This does not + // include the memory under freeSlabs_. + std::atomic currSlabAllocSize_ {0}; + + // the current size of all allocations from this memory pool. + // currAllocSize_ <= currSlabSize_ <= maxSize_ + std::atomic currAllocSize_ {0}; + + // the allocator for slabs. + SlabAllocator &slabAllocator_; + + // slabs allocated from the slab allocator for this memory pool, that are + // not currently in use. + std::vector freeSlabs_; + + // sorted vector of allocation class sizes + const std::vector acSizes_; + + // vector of allocation classes for this pool, sorted by their allocation + // sizes and indexed by their class id. This vector does not change once it + // is initialized inside the constructor. so this can be accessed without + // grabbing the mutex. 
+ const ACVector ac_; + + // Current configuration of advised away Slabs in the pool + std::atomic curSlabsAdvised_ {0}; + + // number of slabs we released for resizes and rebalances + std::atomic nSlabResize_ {0}; + std::atomic nSlabRebalance_ {0}; + std::atomic nSlabReleaseAborted_ {0}; }; } // namespace cachelib } // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/MemoryPoolManager.h b/mooncake-store/include/cachelib_memory_allocator/MemoryPoolManager.h index 5ef51b4ae..860bdeb7f 100644 --- a/mooncake-store/include/cachelib_memory_allocator/MemoryPoolManager.h +++ b/mooncake-store/include/cachelib_memory_allocator/MemoryPoolManager.h @@ -31,115 +31,115 @@ namespace cachelib { // used to organize the available memory into pools and identify them by a // string name or pool id. class MemoryPoolManager { - public: - // maximum number of pools that we support. - static constexpr unsigned int kMaxPools = 64; - - // creates a memory pool manager for this slabAllocator. - // @param slabAlloc the slab allocator to be used for the memory pools. - explicit MemoryPoolManager(SlabAllocator& slabAlloc); - - MemoryPoolManager(const MemoryPoolManager&) = delete; - MemoryPoolManager& operator=(const MemoryPoolManager&) = delete; - - // adding a pool - // @param name the string name representing the pool. - // @param size the size of the memory pool. - // @param allocSizes set of allocation sizes sorted in increasing - // order. This will be used to create the corresponding - // AllocationClasses. - // - // @return on success, returns id of the new memory pool. - // @throw std::invalid_argument if the name/size/allcoSizes are invalid or - // std::logic_error if we have run out the allowed number of pools. - PoolId createNewPool(std::string name, - size_t size, - const std::set& allocSizes); - - // shrink the existing pool by _bytes_ . 
- // @param bytes the number of bytes to be taken away from the pool - // @return true if the operation succeeded. false if the size of the pool is - // smaller than _bytes_ - // @throw std::invalid_argument if the poolId is invalid. - bool shrinkPool(PoolId pid, size_t bytes); - - // grow an existing pool by _bytes_. This will fail if there is no - // available memory across all the pools to provide for this pool - // @param bytes the number of bytes to be added to the pool. - // @return true if the pool was grown. false if the necessary number of - // bytes were not available. - // @throw std::invalid_argument if the poolId is invalid. - bool growPool(PoolId pid, size_t bytes); - - // move bytes from one pool to another. The source pool should be at least - // _bytes_ in size. - // - // @param src the pool to be sized down and giving the memory. - // @param dest the pool receiving the memory. - // @param bytes the number of bytes to move from src to dest. - // - // @return true if the resize succeeded. False if the src pool does not have - // enough memory to make the resize. - // @throw std::invalid_argument if src or dest is invalid pool - bool resizePools(PoolId src, PoolId dest, size_t bytes); - - // Fetch the list of pools that are above their current limit due to a - // recent resize. - // - // @return list of pools that are over limit. - std::set getPoolsOverLimit() const; - - // access the memory pool by its name and id. - // @returns returns a valid MemoryPool. - // @throw std::invalid_argument if the name or id is invalid. - MemoryPool& getPoolByName(const std::string& name) const; - MemoryPool& getPoolById(PoolId id) const; - - // returns the pool's name by its pool ID - // @throw std::logic_error if the pool ID not existed. - const std::string& getPoolNameById(PoolId id) const; - - // returns the current pool ids that are being used. - std::set getPoolIds() const; - - // size in bytes of the remaining size that is not reserved for any pools. 
- size_t getBytesUnReserved() const { - std::shared_lock l(lock_); - return getRemainingSizeLocked(); - } - - // return total memory currently advised away - size_t getAdvisedMemorySize() const noexcept { - size_t sum = 0; - std::unique_lock l(lock_); - for (PoolId id = 0; id < nextPoolId_; id++) { - sum += pools_[id]->getPoolAdvisedSize(); - } - return sum; - } - - private: - // obtain the remaining size in bytes that is not reserved by taking into - // account the total available memory in the slab allocator and the size of - // all the pools we manage. - size_t getRemainingSizeLocked() const noexcept; - - // rw lock serializing the access to poolsByName_ and pool creation. - mutable std::shared_mutex lock_; - - // array of pools by Id. The valid pools are up to (nextPoolId_ - 1). This - // is to ensure that we can fetch pools by Id without holding any locks as - // long as the pool Id is valid. - std::array, kMaxPools> pools_; - - // pool name -> pool Id mapping. - std::map poolsByName_; - - // the next available pool id. - std::atomic nextPoolId_{0}; - - // slab allocator for the pools - SlabAllocator& slabAlloc_; +public: + // maximum number of pools that we support. + static constexpr unsigned int kMaxPools = 64; + + // creates a memory pool manager for this slabAllocator. + // @param slabAlloc the slab allocator to be used for the memory pools. + explicit MemoryPoolManager(SlabAllocator &slabAlloc); + + MemoryPoolManager(const MemoryPoolManager &) = delete; + MemoryPoolManager &operator=(const MemoryPoolManager &) = delete; + + // adding a pool + // @param name the string name representing the pool. + // @param size the size of the memory pool. + // @param allocSizes set of allocation sizes sorted in increasing + // order. This will be used to create the corresponding + // AllocationClasses. + // + // @return on success, returns id of the new memory pool. 
+ // @throw std::invalid_argument if the name/size/allocSizes are invalid or + // std::logic_error if we have run out the allowed number of pools. + PoolId createNewPool(std::string name, size_t size, const std::set &allocSizes); + + // shrink the existing pool by _bytes_ . + // @param bytes the number of bytes to be taken away from the pool + // @return true if the operation succeeded. false if the size of the pool + // is + // smaller than _bytes_ + // @throw std::invalid_argument if the poolId is invalid. + bool shrinkPool(PoolId pid, size_t bytes); + + // grow an existing pool by _bytes_. This will fail if there is no + // available memory across all the pools to provide for this pool + // @param bytes the number of bytes to be added to the pool. + // @return true if the pool was grown. false if the necessary number of + // bytes were not available. + // @throw std::invalid_argument if the poolId is invalid. + bool growPool(PoolId pid, size_t bytes); + + // move bytes from one pool to another. The source pool should be at least + // _bytes_ in size. + // + // @param src the pool to be sized down and giving the memory. + // @param dest the pool receiving the memory. + // @param bytes the number of bytes to move from src to dest. + // + // @return true if the resize succeeded. False if the src pool does not + // have + // enough memory to make the resize. + // @throw std::invalid_argument if src or dest is invalid pool + bool resizePools(PoolId src, PoolId dest, size_t bytes); + + // Fetch the list of pools that are above their current limit due to a + // recent resize. + // + // @return list of pools that are over limit. + std::set getPoolsOverLimit() const; + + // access the memory pool by its name and id. + // @returns returns a valid MemoryPool. + // @throw std::invalid_argument if the name or id is invalid. 
+ MemoryPool &getPoolByName(const std::string &name) const; + MemoryPool &getPoolById(PoolId id) const; + + // returns the pool's name by its pool ID + // @throw std::logic_error if the pool ID not existed. + const std::string &getPoolNameById(PoolId id) const; + + // returns the current pool ids that are being used. + std::set getPoolIds() const; + + // size in bytes of the remaining size that is not reserved for any pools. + size_t getBytesUnReserved() const { + std::shared_lock l(lock_); + return getRemainingSizeLocked(); + } + + // return total memory currently advised away + size_t getAdvisedMemorySize() const noexcept { + size_t sum = 0; + std::unique_lock l(lock_); + for (PoolId id = 0; id < nextPoolId_; id++) { + sum += pools_[id]->getPoolAdvisedSize(); + } + return sum; + } + +private: + // obtain the remaining size in bytes that is not reserved by taking into + // account the total available memory in the slab allocator and the size of + // all the pools we manage. + size_t getRemainingSizeLocked() const noexcept; + + // rw lock serializing the access to poolsByName_ and pool creation. + mutable std::shared_mutex lock_; + + // array of pools by Id. The valid pools are up to (nextPoolId_ - 1). This + // is to ensure that we can fetch pools by Id without holding any locks as + // long as the pool Id is valid. + std::array, kMaxPools> pools_; + + // pool name -> pool Id mapping. + std::map poolsByName_; + + // the next available pool id. 
+ std::atomic nextPoolId_ {0}; + + // slab allocator for the pools + SlabAllocator &slabAlloc_; }; } // namespace cachelib } // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/Slab.h b/mooncake-store/include/cachelib_memory_allocator/Slab.h index c24f79fcc..8f0e24261 100644 --- a/mooncake-store/include/cachelib_memory_allocator/Slab.h +++ b/mooncake-store/include/cachelib_memory_allocator/Slab.h @@ -61,138 +61,136 @@ using ClassId = int8_t; using SlabReleaseAbortFn = std::function; struct AllocInfo { - const PoolId poolId; - const ClassId classId; - // the allocation size configured for this PoolId, ClassId pair. - const size_t allocSize; + const PoolId poolId; + const ClassId classId; + // the allocation size configured for this PoolId, ClassId pair. + const size_t allocSize; }; // slabs that are aligned by kSize. class CACHELIB_PACKED_ATTR Slab { - public: - // used to represent the fact that the slab does not belong to any - // AllocationClass - static constexpr ClassId kInvalidClassId = -1; - - // used to represent the fact that the slab does not belong to any - // MemoryPool - static constexpr PoolId kInvalidPoolId = -1; - - // size of the slab in bytes. - static constexpr unsigned int kNumSlabBits = 24; - - // minimum of 64 byte allocations. - static constexpr unsigned int kMinAllocPower = 6; - static constexpr size_t kMinAllocSize = 1 << kMinAllocPower; - - static constexpr size_t kSize = 1 << kNumSlabBits; - - // returns pointer to the memory at the offset inside the slab memory. - char* memoryAtOffset(size_t offset) const noexcept { - XDCHECK_LT(offset, Slab::kSize); - return dataStart() + offset; - } - - private: - // returns the pointer to the start of the slab memory. - char* dataStart() const noexcept { return &data_[0]; } - - // available memory in this slab. 
- mutable char data_[kSize]; +public: + // used to represent the fact that the slab does not belong to any + // AllocationClass + static constexpr ClassId kInvalidClassId = -1; + + // used to represent the fact that the slab does not belong to any + // MemoryPool + static constexpr PoolId kInvalidPoolId = -1; + + // size of the slab in bytes. + static constexpr unsigned int kNumSlabBits = 24; + + // minimum of 64 byte allocations. + static constexpr unsigned int kMinAllocPower = 6; + static constexpr size_t kMinAllocSize = 1 << kMinAllocPower; + + static constexpr size_t kSize = 1 << kNumSlabBits; + + // returns pointer to the memory at the offset inside the slab memory. + char *memoryAtOffset(size_t offset) const noexcept { + XDCHECK_LT(offset, Slab::kSize); + return dataStart() + offset; + } + +private: + // returns the pointer to the start of the slab memory. + char *dataStart() const noexcept { + return &data_[0]; + } + + // available memory in this slab. + mutable char data_[kSize]; }; -static_assert(std::is_standard_layout::value, - "Slab is not standard layout"); +static_assert(std::is_standard_layout::value, "Slab is not standard layout"); enum class SlabHeaderFlag : uint8_t { - IS_MARKED_FOR_RELEASE = 0, - IS_ADVISED = 1, - SH_FLAG_2 = 2, - SH_FLAG_3 = 3, - SH_FLAG_4 = 4, - SH_FLAG_5 = 5, - SH_FLAG_6 = 6, - SH_FLAG_7 = 7 + IS_MARKED_FOR_RELEASE = 0, + IS_ADVISED = 1, + SH_FLAG_2 = 2, + SH_FLAG_3 = 3, + SH_FLAG_4 = 4, + SH_FLAG_5 = 5, + SH_FLAG_6 = 6, + SH_FLAG_7 = 7 }; // one per slab. This is not colocated with the slab. But could be if there is // trailing space based on the slab's allocation size. struct CACHELIB_PACKED_ATTR SlabHeader { - constexpr SlabHeader() noexcept = default; - explicit SlabHeader(PoolId pid) : poolId(pid) {} - SlabHeader(PoolId pid, ClassId cid, uint32_t size) - : poolId(pid), classId(cid), allocSize(size) {} - - // This doesn't reset the flags. That's done explicitly by calling - // setFlag/unsetFlag above. 
- void resetAllocInfo() { - poolId = Slab::kInvalidPoolId; - classId = Slab::kInvalidClassId; - allocSize = 0; - } - - bool isAdvised() const noexcept { - return isFlagSet(SlabHeaderFlag::IS_ADVISED); - } - - void setAdvised(bool value) { - value ? setFlag(SlabHeaderFlag::IS_ADVISED) - : unSetFlag(SlabHeaderFlag::IS_ADVISED); - } - - bool isMarkedForRelease() const noexcept { - return isFlagSet(SlabHeaderFlag::IS_MARKED_FOR_RELEASE); - } - - void setMarkedForRelease(bool value) { - value ? setFlag(SlabHeaderFlag::IS_MARKED_FOR_RELEASE) - : unSetFlag(SlabHeaderFlag::IS_MARKED_FOR_RELEASE); - } - - // id of the pool that this slab currently belongs to. - PoolId poolId{Slab::kInvalidPoolId}; - - // the allocation class id that this slab currently belongs to - ClassId classId{Slab::kInvalidClassId}; - - // whether the slab is currently being released or not. - uint8_t flags{0}; - - // the allocation size of the allocation class. Useful for pointer - // compression. the current size of this struct is 1 + 1 + 1 + 4 = 7 bytes. - // This allocSize is accessed on every decompression of the - // compressed pointer. If the offset of this changes, use the benchmark to - // figure out if it moves the needle by a big margin. 
- uint32_t allocSize{0}; - - private: - void setFlag(SlabHeaderFlag flag) noexcept { - const uint8_t bitmask = - static_cast(1u << static_cast(flag)); - // FIXME: - // https://fb.workplace.com/groups/cachelibusers/posts/2345418462311949/ - // #pragma clang diagnostic push - // #pragma clang diagnostic ignored "-Watomic-implicit-seq-cst" - __sync_or_and_fetch(&flags, bitmask); - // #pragma clang diagnostic pop - } - - void unSetFlag(SlabHeaderFlag flag) noexcept { - const uint8_t bitmask = - static_cast(std::numeric_limits::max() - - (1u << static_cast(flag))); - __sync_fetch_and_and(&flags, bitmask); - } - - bool isFlagSet(SlabHeaderFlag flag) const noexcept { - return flags & (1u << static_cast(flag)); - } + constexpr SlabHeader() noexcept = default; + explicit SlabHeader(PoolId pid) : poolId(pid) { + } + SlabHeader(PoolId pid, ClassId cid, uint32_t size) : poolId(pid), classId(cid), allocSize(size) { + } + + // This doesn't reset the flags. That's done explicitly by calling + // setFlag/unsetFlag above. + void resetAllocInfo() { + poolId = Slab::kInvalidPoolId; + classId = Slab::kInvalidClassId; + allocSize = 0; + } + + bool isAdvised() const noexcept { + return isFlagSet(SlabHeaderFlag::IS_ADVISED); + } + + void setAdvised(bool value) { + value ? setFlag(SlabHeaderFlag::IS_ADVISED) : unSetFlag(SlabHeaderFlag::IS_ADVISED); + } + + bool isMarkedForRelease() const noexcept { + return isFlagSet(SlabHeaderFlag::IS_MARKED_FOR_RELEASE); + } + + void setMarkedForRelease(bool value) { + value ? setFlag(SlabHeaderFlag::IS_MARKED_FOR_RELEASE) : unSetFlag(SlabHeaderFlag::IS_MARKED_FOR_RELEASE); + } + + // id of the pool that this slab currently belongs to. + PoolId poolId {Slab::kInvalidPoolId}; + + // the allocation class id that this slab currently belongs to + ClassId classId {Slab::kInvalidClassId}; + + // whether the slab is currently being released or not. + uint8_t flags {0}; + + // the allocation size of the allocation class. Useful for pointer + // compression. 
the current size of this struct is 1 + 1 + 1 + 4 = 7 bytes. + // This allocSize is accessed on every decompression of the + // compressed pointer. If the offset of this changes, use the benchmark to + // figure out if it moves the needle by a big margin. + uint32_t allocSize {0}; + +private: + void setFlag(SlabHeaderFlag flag) noexcept { + const uint8_t bitmask = static_cast(1u << static_cast(flag)); + // FIXME: + // https://fb.workplace.com/groups/cachelibusers/posts/2345418462311949/ + // #pragma clang diagnostic push + // #pragma clang diagnostic ignored "-Watomic-implicit-seq-cst" + __sync_or_and_fetch(&flags, bitmask); + // #pragma clang diagnostic pop + } + + void unSetFlag(SlabHeaderFlag flag) noexcept { + const uint8_t bitmask = + static_cast(std::numeric_limits::max() - (1u << static_cast(flag))); + __sync_fetch_and_and(&flags, bitmask); + } + + bool isFlagSet(SlabHeaderFlag flag) const noexcept { + return flags & (1u << static_cast(flag)); + } }; // Definition for slab based resizing and rebalancing. enum class SlabReleaseMode { - kResize, // Resize the pool - kRebalance, // Rebalance away a slab from one pool to another + kResize, // Resize the pool + kRebalance, // Rebalance away a slab from one pool to another }; // Used to denote store the context for releasing a slab. This is created @@ -200,99 +198,105 @@ enum class SlabReleaseMode { // completeSlabRelease call to finalize the slab release process if the // context is in a state where the slab is not released(isReleased()) class SlabReleaseContext { - public: - // non copyable - SlabReleaseContext(const SlabReleaseContext&) = delete; - SlabReleaseContext& operator=(const SlabReleaseContext&) = delete; - - // movable - SlabReleaseContext(SlabReleaseContext&&) = default; - SlabReleaseContext& operator=(SlabReleaseContext&&) = delete; - - // create a context where the slab is already released. 
- SlabReleaseContext(const Slab* slab, PoolId pid, ClassId cid, - SlabReleaseMode m) - : SlabReleaseContext(slab, pid, cid, {}, m) {} - - // create a context where the slab needs the user to free up some active - // allocations for slab release. - SlabReleaseContext(const Slab* slab, PoolId pid, ClassId cid, - std::vector allocations, SlabReleaseMode m) - : slab_(slab), - pid_(pid), - victim_(cid), - activeAllocations_(std::move(allocations)), - mode_(m) {} - - // create a context where the slab is already released. - // - // also specify the receiver to receive the slab - SlabReleaseContext(const Slab* slab, PoolId pid, ClassId victim, - ClassId receiver) - : SlabReleaseContext(slab, pid, victim, {}, receiver) {} - - // create a context where the slab needs the user to free up some active - // allocations for slab release. - // - // also specify the receiver to receive the slab. - SlabReleaseContext(const Slab* slab, PoolId pid, ClassId victim, - std::vector allocations, ClassId receiver) - : slab_(slab), - pid_(pid), - victim_(victim), - activeAllocations_(std::move(allocations)), - receiver_(receiver), - mode_(SlabReleaseMode::kRebalance) {} - - // @return true if the slab has already been released and there are no - // active allocations to be freed. - bool isReleased() const noexcept { return activeAllocations_.empty(); } - - // @return true if the slab release context specifies a receiver to receive - // the released slab - bool hasValidReceiver() const noexcept { - return receiver_ != Slab::kInvalidClassId; - } - - PoolId getPoolId() const noexcept { return pid_; } - ClassId getClassId() const noexcept { return victim_; } - - ClassId getReceiverClassId() const noexcept { return receiver_; } - - // @return returns a list of active allocations. If the vector is empty - // it means no active allocations are associated with this slab. 
- const std::vector& getActiveAllocations() const noexcept { - return activeAllocations_; - } - - // @return pointer to slab marked for release - const Slab* getSlab() const noexcept { return slab_; } - - // @return the mode for this slab release context. - SlabReleaseMode getMode() const noexcept { return mode_; } - - private: - // Slab about to be released. - const Slab* const slab_; - - // the pool and the class id of the slab. If the slab is already released, - // the classId is invalid. - const PoolId pid_; - const ClassId victim_; - - // Active allocations in this slab. Non-zero for a slab that is marked for - // release. - const std::vector activeAllocations_; - - // Optional receiver that will receive the slab being released - ClassId receiver_{Slab::kInvalidClassId}; - - // the mode for this slab release. - const SlabReleaseMode mode_; - - void setReceiver(ClassId receiver) noexcept { receiver_ = receiver; } - - friend class MemoryPool; +public: + // non copyable + SlabReleaseContext(const SlabReleaseContext &) = delete; + SlabReleaseContext &operator=(const SlabReleaseContext &) = delete; + + // movable + SlabReleaseContext(SlabReleaseContext &&) = default; + SlabReleaseContext &operator=(SlabReleaseContext &&) = delete; + + // create a context where the slab is already released. + SlabReleaseContext(const Slab *slab, PoolId pid, ClassId cid, SlabReleaseMode m) + : SlabReleaseContext(slab, pid, cid, {}, m) { + } + + // create a context where the slab needs the user to free up some active + // allocations for slab release. + SlabReleaseContext(const Slab *slab, PoolId pid, ClassId cid, std::vector allocations, SlabReleaseMode m) + : slab_(slab), pid_(pid), victim_(cid), activeAllocations_(std::move(allocations)), mode_(m) { + } + + // create a context where the slab is already released. 
+ // + // also specify the receiver to receive the slab + SlabReleaseContext(const Slab *slab, PoolId pid, ClassId victim, ClassId receiver) + : SlabReleaseContext(slab, pid, victim, {}, receiver) { + } + + // create a context where the slab needs the user to free up some active + // allocations for slab release. + // + // also specify the receiver to receive the slab. + SlabReleaseContext(const Slab *slab, PoolId pid, ClassId victim, std::vector allocations, ClassId receiver) + : slab_(slab), pid_(pid), victim_(victim), activeAllocations_(std::move(allocations)), receiver_(receiver), + mode_(SlabReleaseMode::kRebalance) { + } + + // @return true if the slab has already been released and there are no + // active allocations to be freed. + bool isReleased() const noexcept { + return activeAllocations_.empty(); + } + + // @return true if the slab release context specifies a receiver to receive + // the released slab + bool hasValidReceiver() const noexcept { + return receiver_ != Slab::kInvalidClassId; + } + + PoolId getPoolId() const noexcept { + return pid_; + } + ClassId getClassId() const noexcept { + return victim_; + } + + ClassId getReceiverClassId() const noexcept { + return receiver_; + } + + // @return returns a list of active allocations. If the vector is empty + // it means no active allocations are associated with this slab. + const std::vector &getActiveAllocations() const noexcept { + return activeAllocations_; + } + + // @return pointer to slab marked for release + const Slab *getSlab() const noexcept { + return slab_; + } + + // @return the mode for this slab release context. + SlabReleaseMode getMode() const noexcept { + return mode_; + } + +private: + // Slab about to be released. + const Slab *const slab_; + + // the pool and the class id of the slab. If the slab is already released, + // the classId is invalid. + const PoolId pid_; + const ClassId victim_; + + // Active allocations in this slab. 
Non-zero for a slab that is marked for + // release. + const std::vector activeAllocations_; + + // Optional receiver that will receive the slab being released + ClassId receiver_ {Slab::kInvalidClassId}; + + // the mode for this slab release. + const SlabReleaseMode mode_; + + void setReceiver(ClassId receiver) noexcept { + receiver_ = receiver; + } + + friend class MemoryPool; }; -} // namespace cachelib -} // namespace facebook +} // namespace cachelib +} // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/SlabAllocator.h b/mooncake-store/include/cachelib_memory_allocator/SlabAllocator.h index ecee6fa9b..ebccb8e43 100644 --- a/mooncake-store/include/cachelib_memory_allocator/SlabAllocator.h +++ b/mooncake-store/include/cachelib_memory_allocator/SlabAllocator.h @@ -36,192 +36,181 @@ namespace cachelib { // allocator is also responsible for providing memory for the slab headers for // each slab. class SlabAllocator { - public: - - // See Feishu document. - SlabAllocator(void* headerMemoryStart, - size_t headerMemorySize, - void* slabMemoryStart, - size_t slabMemorySize); - - // free up and unmap the mapped memory if the allocator was created with - // one. - ~SlabAllocator(); - - SlabAllocator(const SlabAllocator&) = delete; - SlabAllocator& operator=(const SlabAllocator&) = delete; - - using LockHolder = std::unique_lock; - - // return true if any more slabs can be allocated from the slab allocator at - // this point of time. - bool allSlabsAllocated() const { - LockHolder l(lock_); - return allMemorySlabbed() && freeSlabs_.empty(); - } - - // grab an empty slab from the slab allocator if one is available. - // - // @param id the pool id. - // @return pointer to a new slab of memory. - Slab* makeNewSlab(PoolId id); - - // frees a used slab back to the slab allocator. - // - // @throw throws std::runtime_error if the slab is invalid - void freeSlab(Slab* slab); - - // returns the number of slabs that the cache can hold. 
- unsigned int getNumUsableSlabs() const noexcept; - - // returns the SlabHeader for the memory address or nullptr if the memory - // is invalid. Hotly accessed for getting alloc info - FOLLY_ALWAYS_INLINE SlabHeader* getSlabHeader( - const void* memory) const noexcept { - const auto* slab = getSlabForMemory(memory); - if (LIKELY(isValidSlab(slab))) { - const auto slabIndex = static_cast(slab - slabMemoryStart_); - return getSlabHeader(slabIndex); - } - return nullptr; - } - - // return the SlabHeader for the given slab or nullptr if the slab is - // invalid - SlabHeader* getSlabHeader(const Slab* const slab) const noexcept; - - // returns true if ptr points to memory in the slab and the slab is a valid - // slab, false otherwise. - bool isMemoryInSlab(const void* ptr, const Slab* slab) const noexcept; - - // true if the slab is a valid allocated slab in the memory belonging to this - // allocator. - FOLLY_ALWAYS_INLINE bool isValidSlab(const Slab* slab) const noexcept { - // suppress TSAN race error, this is harmless because nextSlabAllocation_ - // cannot go backwards and slab can't become invalid once it is valid - // folly::annotate_ignore_thread_sanitizer_guard g(__FILE__, __LINE__); - return slab >= slabMemoryStart_ && slab < nextSlabAllocation_ && - getSlabForMemory(static_cast(slab)) == slab; - } - - // returns the slab in which the memory resides, irrespective of the - // validity of the memory. The caller can use isValidSlab to check if the - // returned slab is valid. - FOLLY_ALWAYS_INLINE const Slab* getSlabForMemory( - const void* memory) const noexcept { - // returns the closest slab boundary for the memory address. 
- return reinterpret_cast(reinterpret_cast(memory) & - kAddressMask); - } - - using SlabIdx = uint32_t; - - // returns the index of the slab from the start of the slab memory - SlabIdx slabIdx(const Slab* const slab) const noexcept { - if (slab == nullptr) { - return kNullSlabIdx; - } - // We should never be querying for a slab that is not valid or beyond - // nextSlabAllocation_. - XDCHECK(slab == nextSlabAllocation_ || isValidSlab(slab)); - return static_cast(slab - slabMemoryStart_); - } - - // returns the slab corresponding to the idx, irrespective of the validity of - // the memory. The caller can use isValidSlab to check if the returned slab is - // valid. - Slab* getSlabForIdx(const SlabIdx idx) const noexcept { - if (idx == kNullSlabIdx) { - return nullptr; - } - return &slabMemoryStart_[idx]; - } - - private: - // null Slab* presenttation. With 4M Slab size, a valid slab index would never - // reach 2^16 - 1; - static constexpr SlabIdx kNullSlabIdx = std::numeric_limits::max(); - - // returns first byte after the end of memory region we own. - const Slab* getSlabMemoryEnd() const noexcept { - return reinterpret_cast(reinterpret_cast(slabMemoryStart_) + - slabMemorySize_); - } - - // returns true if we have slabbed all the memory that is available to us. - // false otherwise. - bool allMemorySlabbed() const noexcept { - return nextSlabAllocation_ == getSlabMemoryEnd(); - } - - FOLLY_ALWAYS_INLINE SlabHeader* getSlabHeader( - unsigned int slabIndex) const noexcept { - return reinterpret_cast(headerMemoryStart_) + slabIndex; - } - - // implementation of makeNewSlab that takes care of locking, free list and - // carving out new slabs. - // @return pointer to slab or nullptr if no more slabs can be allocated. - Slab* makeNewSlabImpl(); - - // Initialize the header for the given slab and pool - void initializeHeader(Slab* slab, PoolId id); - - // shutsdown the memory locker if it is still running. 
- void stopMemoryLocker(); - - // lock serializing access to nextSlabAllocation_, freeSlabs_. - mutable std::mutex lock_; - - // the current sizes of different memory pools from the slab allocator's - // perspective. This is bumped up during makeNewSlab based on the poolId and - // bumped down when the slab is released through freeSlab. - std::array, std::numeric_limits::max()> - memoryPoolSize_{{}}; - - // list of allocated slabs that are not in use. - std::vector freeSlabs_; - - // start of the slab header memory region - void* const headerMemoryStart_{nullptr}; - - // size of the slab header memory region - const size_t headerMemorySize_; - - // beginning of the slab memory region - Slab* const slabMemoryStart_{nullptr}; - - // size of memory aligned to slab size - const size_t slabMemorySize_; - - // the memory address up to which we have converted into slabs. - Slab* nextSlabAllocation_{nullptr}; - - // boolean atomic that represents whether the allocator can allocate any - // more slabs without holding any locks. - std::atomic canAllocate_{true}; - - // thread that does back-ground job of paging in and locking the memory if - // enabled. - std::thread memoryLocker_; - - // signals the locker thread to stop if we need to shutdown this instance. - std::atomic stopLocking_{false}; - - // amount of time to sleep in between each step to spread out the page - // faults over a period of time. - static constexpr unsigned int kLockSleepMS = 100; - - // number of pages to touch in eash step. - static constexpr size_t kPagesPerStep = 10000; - - static_assert((Slab::kSize & (Slab::kSize - 1)) == 0, - "Slab size is not power of two"); - - // mask for all addresses belonging to slab aligned to Slab::kSize; - static constexpr uint64_t kAddressMask = - std::numeric_limits::max() - - (static_cast(1) << Slab::kNumSlabBits) + 1; +public: + // See Feishu document. 
+ SlabAllocator(void *headerMemoryStart, size_t headerMemorySize, void *slabMemoryStart, size_t slabMemorySize); + + // free up and unmap the mapped memory if the allocator was created with + // one. + ~SlabAllocator(); + + SlabAllocator(const SlabAllocator &) = delete; + SlabAllocator &operator=(const SlabAllocator &) = delete; + + using LockHolder = std::unique_lock; + + // return true if any more slabs can be allocated from the slab allocator at + // this point of time. + bool allSlabsAllocated() const { + LockHolder l(lock_); + return allMemorySlabbed() && freeSlabs_.empty(); + } + + // grab an empty slab from the slab allocator if one is available. + // + // @param id the pool id. + // @return pointer to a new slab of memory. + Slab *makeNewSlab(PoolId id); + + // frees a used slab back to the slab allocator. + // + // @throw throws std::runtime_error if the slab is invalid + void freeSlab(Slab *slab); + + // returns the number of slabs that the cache can hold. + unsigned int getNumUsableSlabs() const noexcept; + + // returns the SlabHeader for the memory address or nullptr if the memory + // is invalid. Hotly accessed for getting alloc info + FOLLY_ALWAYS_INLINE SlabHeader *getSlabHeader(const void *memory) const noexcept { + const auto *slab = getSlabForMemory(memory); + if (LIKELY(isValidSlab(slab))) { + const auto slabIndex = static_cast(slab - slabMemoryStart_); + return getSlabHeader(slabIndex); + } + return nullptr; + } + + // return the SlabHeader for the given slab or nullptr if the slab is + // invalid + SlabHeader *getSlabHeader(const Slab *const slab) const noexcept; + + // returns true if ptr points to memory in the slab and the slab is a valid + // slab, false otherwise. + bool isMemoryInSlab(const void *ptr, const Slab *slab) const noexcept; + + // true if the slab is a valid allocated slab in the memory belonging to + // this allocator. 
+ FOLLY_ALWAYS_INLINE bool isValidSlab(const Slab *slab) const noexcept { + // suppress TSAN race error, this is harmless because + // nextSlabAllocation_ cannot go backwards and slab can't become invalid + // once it is valid folly::annotate_ignore_thread_sanitizer_guard + // g(__FILE__, __LINE__); + return slab >= slabMemoryStart_ && slab < nextSlabAllocation_ && + getSlabForMemory(static_cast(slab)) == slab; + } + + // returns the slab in which the memory resides, irrespective of the + // validity of the memory. The caller can use isValidSlab to check if the + // returned slab is valid. + FOLLY_ALWAYS_INLINE const Slab *getSlabForMemory(const void *memory) const noexcept { + // returns the closest slab boundary for the memory address. + return reinterpret_cast(reinterpret_cast(memory) & kAddressMask); + } + + using SlabIdx = uint32_t; + + // returns the index of the slab from the start of the slab memory + SlabIdx slabIdx(const Slab *const slab) const noexcept { + if (slab == nullptr) { + return kNullSlabIdx; + } + // We should never be querying for a slab that is not valid or beyond + // nextSlabAllocation_. + XDCHECK(slab == nextSlabAllocation_ || isValidSlab(slab)); + return static_cast(slab - slabMemoryStart_); + } + + // returns the slab corresponding to the idx, irrespective of the validity + // of the memory. The caller can use isValidSlab to check if the returned + // slab is valid. + Slab *getSlabForIdx(const SlabIdx idx) const noexcept { + if (idx == kNullSlabIdx) { + return nullptr; + } + return &slabMemoryStart_[idx]; + } + +private: + // null Slab* presenttation. With 4M Slab size, a valid slab index would + // never reach 2^16 - 1; + static constexpr SlabIdx kNullSlabIdx = std::numeric_limits::max(); + + // returns first byte after the end of memory region we own. 
+ const Slab *getSlabMemoryEnd() const noexcept { + return reinterpret_cast(reinterpret_cast(slabMemoryStart_) + slabMemorySize_); + } + + // returns true if we have slabbed all the memory that is available to us. + // false otherwise. + bool allMemorySlabbed() const noexcept { + return nextSlabAllocation_ == getSlabMemoryEnd(); + } + + FOLLY_ALWAYS_INLINE SlabHeader *getSlabHeader(unsigned int slabIndex) const noexcept { + return reinterpret_cast(headerMemoryStart_) + slabIndex; + } + + // implementation of makeNewSlab that takes care of locking, free list and + // carving out new slabs. + // @return pointer to slab or nullptr if no more slabs can be allocated. + Slab *makeNewSlabImpl(); + + // Initialize the header for the given slab and pool + void initializeHeader(Slab *slab, PoolId id); + + // shutsdown the memory locker if it is still running. + void stopMemoryLocker(); + + // lock serializing access to nextSlabAllocation_, freeSlabs_. + mutable std::mutex lock_; + + // the current sizes of different memory pools from the slab allocator's + // perspective. This is bumped up during makeNewSlab based on the poolId and + // bumped down when the slab is released through freeSlab. + std::array, std::numeric_limits::max()> memoryPoolSize_ {{}}; + + // list of allocated slabs that are not in use. + std::vector freeSlabs_; + + // start of the slab header memory region + void *const headerMemoryStart_ {nullptr}; + + // size of the slab header memory region + const size_t headerMemorySize_; + + // beginning of the slab memory region + Slab *const slabMemoryStart_ {nullptr}; + + // size of memory aligned to slab size + const size_t slabMemorySize_; + + // the memory address up to which we have converted into slabs. + Slab *nextSlabAllocation_ {nullptr}; + + // boolean atomic that represents whether the allocator can allocate any + // more slabs without holding any locks. 
+ std::atomic canAllocate_ {true}; + + // thread that does back-ground job of paging in and locking the memory if + // enabled. + std::thread memoryLocker_; + + // signals the locker thread to stop if we need to shutdown this instance. + std::atomic stopLocking_ {false}; + + // amount of time to sleep in between each step to spread out the page + // faults over a period of time. + static constexpr unsigned int kLockSleepMS = 100; + + // number of pages to touch in eash step. + static constexpr size_t kPagesPerStep = 10000; + + static_assert((Slab::kSize & (Slab::kSize - 1)) == 0, "Slab size is not power of two"); + + // mask for all addresses belonging to slab aligned to Slab::kSize; + static constexpr uint64_t kAddressMask = + std::numeric_limits::max() - (static_cast(1) << Slab::kNumSlabBits) + 1; }; } // namespace cachelib } // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/common/CompilerUtils.h b/mooncake-store/include/cachelib_memory_allocator/common/CompilerUtils.h index 347e721a8..affc1c1f0 100644 --- a/mooncake-store/include/cachelib_memory_allocator/common/CompilerUtils.h +++ b/mooncake-store/include/cachelib_memory_allocator/common/CompilerUtils.h @@ -17,7 +17,7 @@ #pragma once #include #define CACHELIB_PACKED_ATTR __attribute__((__packed__)) -#define CACHELIB_INLINE __attribute__((__always_inline__)) +#define CACHELIB_INLINE __attribute__((__always_inline__)) namespace facebook { namespace cachelib { @@ -28,7 +28,7 @@ namespace cachelib { */ template struct IsShmSafe { - static constexpr bool value = false; + static constexpr bool value = false; }; namespace detail { @@ -40,25 +40,21 @@ struct CheckSize {}; * Certifies that the type named by 'Name' is of the given size and is * safe to store in shared memory or nvm. 
*/ -#define CACHELIB_SHM_CERTIFY(Name, size) \ - template <> \ - struct facebook::cachelib::IsShmSafe { \ - static constexpr bool value = true; \ - }; \ - constexpr facebook::cachelib::detail::CheckSize \ - FB_ANONYMOUS_VARIABLE(checkSize) = \ - facebook::cachelib::detail::CheckSize(); \ - static_assert(std::is_standard_layout::value, \ - #Name "must be standard layout") +#define CACHELIB_SHM_CERTIFY(Name, size) \ + template <> \ + struct facebook::cachelib::IsShmSafe { \ + static constexpr bool value = true; \ + }; \ + constexpr facebook::cachelib::detail::CheckSize FB_ANONYMOUS_VARIABLE(checkSize) = \ + facebook::cachelib::detail::CheckSize(); \ + static_assert(std::is_standard_layout::value, #Name "must be standard layout") // convenience struct for getting the number of bits in a byte. template struct NumBits { - static constexpr unsigned int kBitsPerByte = 8; - static constexpr uint8_t value = - static_cast(sizeof(T) * kBitsPerByte); - static_assert(sizeof(T) * kBitsPerByte <= UINT8_MAX, - "number of bits in this structure larger than max uint8_t"); + static constexpr unsigned int kBitsPerByte = 8; + static constexpr uint8_t value = static_cast(sizeof(T) * kBitsPerByte); + static_assert(sizeof(T) * kBitsPerByte <= UINT8_MAX, "number of bits in this structure larger than max uint8_t"); }; } // namespace cachelib } // namespace facebook diff --git a/mooncake-store/include/cachelib_memory_allocator/common/Exceptions.h b/mooncake-store/include/cachelib_memory_allocator/common/Exceptions.h index 2efab42f8..43e8170d5 100644 --- a/mooncake-store/include/cachelib_memory_allocator/common/Exceptions.h +++ b/mooncake-store/include/cachelib_memory_allocator/common/Exceptions.h @@ -20,71 +20,74 @@ #include #ifndef CACHELIB_CHECK_THROW -#define CACHELIB_CHECK_THROW(cond, msg) \ - do { \ - if (UNLIKELY(!(cond))) { \ - XLOG(ERR, "CHECK FAILED: " #cond "," msg); \ - throw std::invalid_argument(msg); \ - } \ - } while (0) +#define CACHELIB_CHECK_THROW(cond, msg) \ + do { \ + if 
(UNLIKELY(!(cond))) { \ + XLOG(ERR, "CHECK FAILED: " #cond "," msg); \ + throw std::invalid_argument(msg); \ + } \ + } while (0) #endif #ifndef CACHELIB_CHECK_THROWF -#define CACHELIB_CHECK_THROWF(cond, fmt, arg1, ...) \ - do { \ - if (UNLIKELY(!(cond))) { \ - XLOGF(ERR, "CHECK FAILED: " #cond "," fmt, arg1, ##__VA_ARGS__); \ - throw std::invalid_argument(fmt::format(fmt, arg1, ##__VA_ARGS__)); \ - } \ - } while (0) +#define CACHELIB_CHECK_THROWF(cond, fmt, arg1, ...) \ + do { \ + if (UNLIKELY(!(cond))) { \ + XLOGF(ERR, "CHECK FAILED: " #cond "," fmt, arg1, ##__VA_ARGS__); \ + throw std::invalid_argument(fmt::format(fmt, arg1, ##__VA_ARGS__)); \ + } \ + } while (0) #endif namespace facebook { namespace cachelib { namespace exception { class OutOfMemory : public std::bad_alloc { - public: - OutOfMemory(std::string what) : what_{std::move(what)} {} +public: + OutOfMemory(std::string what) : what_ {std::move(what)} { + } - const char* what() const noexcept override { return what_.c_str(); } + const char *what() const noexcept override { + return what_.c_str(); + } - private: - const std::string what_; +private: + const std::string what_; }; class RefcountOverflow : public std::overflow_error { - public: - using std::overflow_error::overflow_error; +public: + using std::overflow_error::overflow_error; }; class RefcountUnderflow : public std::underflow_error { - public: - using std::underflow_error::underflow_error; +public: + using std::underflow_error::underflow_error; }; class SlabReleaseAborted : public std::runtime_error { - public: - using std::runtime_error::runtime_error; +public: + using std::runtime_error::runtime_error; }; class ChainedItemInvalid : public std::runtime_error { - public: - using std::runtime_error::runtime_error; +public: + using std::runtime_error::runtime_error; }; // An allocation error. This could be a genuine std::bad_alloc from // the global allocator, or it can be an internal allocation error // from the backing cachelib item. 
class ObjectCacheAllocationError : public OutOfMemory { - public: - using OutOfMemory::OutOfMemory; +public: + using OutOfMemory::OutOfMemory; }; // Bad arguments were fed into deallocate(). This indicates the alloc // argument was invalid, or the size was different from the originally // requested size. class ObjectCacheDeallocationBadArgs : public std::invalid_argument { - using std::invalid_argument::invalid_argument; + using std::invalid_argument::invalid_argument; }; } // namespace exception } // namespace cachelib diff --git a/mooncake-store/include/cachelib_memory_allocator/common/Throttler.h b/mooncake-store/include/cachelib_memory_allocator/common/Throttler.h index 046e737f3..53bd0dd52 100644 --- a/mooncake-store/include/cachelib_memory_allocator/common/Throttler.h +++ b/mooncake-store/include/cachelib_memory_allocator/common/Throttler.h @@ -30,79 +30,83 @@ namespace cachelib { namespace util { class Throttler { - public: - // Throttler is used to do certain amount of work before yielding. We call - // throttle every time we work() and throttler will sleep if we work() too - // much. To do this, we have two options. 1) sleepMs 2) workMs. SleepMs - // indicates the time we yield when we decide to be throttled. WorkMs - // indicates the time we run un-throttled. - - // The callback to be called when the time is checked for throttling - using ThrottleCb = std::function; - - // this config indicates that we sleep for sleepMs time every - // workMs time at least. - struct Config { - // time we yield when throttled - uint64_t sleepMs = 10; - - // time period of uninterrupted work. - uint64_t workMs = 5; - - // return true if the config indicates we need to run throttling logic. - bool needsThrottling() const noexcept { return sleepMs != 0; } - - static Config makeNoThrottleConfig() { - // setting to 0 on sleepMs means we dont need to throttle. 
- return Config{.sleepMs = 0, .workMs = 0}; - } - - std::map serialize() const { - std::map configMap; - configMap["sleepMs"] = std::to_string(sleepMs); - configMap["workMs"] = std::to_string(workMs); - return configMap; - } - }; - - // returns true if throttled, false otherwise - bool throttle() { - if (!config_.needsThrottling() || ++counter_ % kSpinLimit) { - return false; - } - - uint64_t curr = util::getCurrentTimeMs(); - if (throttleCb_) { - throttleCb_(std::chrono::milliseconds(curr)); - } - if (curr - currWorkStartMs_ > config_.workMs) { - /* sleep override */ - std::this_thread::sleep_for(std::chrono::milliseconds(config_.sleepMs)); - // start the time period when we don't throttle - currWorkStartMs_ = util::getCurrentTimeMs(); - ++throttleCounter_; - return true; - } - return false; - } - - uint64_t numThrottles() const noexcept { return throttleCounter_; } - - explicit Throttler(Config config, ThrottleCb&& throttleCb = nullptr) - : config_(std::move(config)), - currWorkStartMs_(util::getCurrentTimeMs()), - throttleCb_(std::move(throttleCb)) {} - explicit Throttler() : Throttler(Config{}) {} - - private: - // number of spins before we attempt to call time - static constexpr const uint64_t kSpinLimit = 1024; - - Config config_; - uint64_t currWorkStartMs_; // time when we started to not throttle - uint64_t counter_{0}; // counter to track the calls. - uint64_t throttleCounter_{0}; // number of times we've throttled - ThrottleCb throttleCb_; +public: + // Throttler is used to do certain amount of work before yielding. We call + // throttle every time we work() and throttler will sleep if we work() too + // much. To do this, we have two options. 1) sleepMs 2) workMs. SleepMs + // indicates the time we yield when we decide to be throttled. WorkMs + // indicates the time we run un-throttled. 
+ + // The callback to be called when the time is checked for throttling + using ThrottleCb = std::function; + + // this config indicates that we sleep for sleepMs time every + // workMs time at least. + struct Config { + // time we yield when throttled + uint64_t sleepMs = 10; + + // time period of uninterrupted work. + uint64_t workMs = 5; + + // return true if the config indicates we need to run throttling logic. + bool needsThrottling() const noexcept { + return sleepMs != 0; + } + + static Config makeNoThrottleConfig() { + // setting to 0 on sleepMs means we dont need to throttle. + return Config {.sleepMs = 0, .workMs = 0}; + } + + std::map serialize() const { + std::map configMap; + configMap["sleepMs"] = std::to_string(sleepMs); + configMap["workMs"] = std::to_string(workMs); + return configMap; + } + }; + + // returns true if throttled, false otherwise + bool throttle() { + if (!config_.needsThrottling() || ++counter_ % kSpinLimit) { + return false; + } + + uint64_t curr = util::getCurrentTimeMs(); + if (throttleCb_) { + throttleCb_(std::chrono::milliseconds(curr)); + } + if (curr - currWorkStartMs_ > config_.workMs) { + /* sleep override */ + std::this_thread::sleep_for(std::chrono::milliseconds(config_.sleepMs)); + // start the time period when we don't throttle + currWorkStartMs_ = util::getCurrentTimeMs(); + ++throttleCounter_; + return true; + } + return false; + } + + uint64_t numThrottles() const noexcept { + return throttleCounter_; + } + + explicit Throttler(Config config, ThrottleCb &&throttleCb = nullptr) + : config_(std::move(config)), currWorkStartMs_(util::getCurrentTimeMs()), throttleCb_(std::move(throttleCb)) { + } + explicit Throttler() : Throttler(Config {}) { + } + +private: + // number of spins before we attempt to call time + static constexpr const uint64_t kSpinLimit = 1024; + + Config config_; + uint64_t currWorkStartMs_; // time when we started to not throttle + uint64_t counter_ {0}; // counter to track the calls. 
+ uint64_t throttleCounter_ {0}; // number of times we've throttled + ThrottleCb throttleCb_; }; } // namespace util diff --git a/mooncake-store/include/cachelib_memory_allocator/common/Time.h b/mooncake-store/include/cachelib_memory_allocator/common/Time.h index 4646dfe38..b52710fdc 100644 --- a/mooncake-store/include/cachelib_memory_allocator/common/Time.h +++ b/mooncake-store/include/cachelib_memory_allocator/common/Time.h @@ -27,77 +27,81 @@ namespace util { // through the vdso. This is faster than std::chrono::system_clock::now and // counting it as seconds since epoch. inline uint32_t getCurrentTimeSec() { - // time in seconds since epoch will fit in 32 bit. We use this primarily for - // storing in cache. - return static_cast(std::time(nullptr)); + // time in seconds since epoch will fit in 32 bit. We use this primarily for + // storing in cache. + return static_cast(std::time(nullptr)); } // For nano second granularity, std::chrono::steady_clock seems to do a fine // job. inline uint64_t getCurrentTimeMs() { - auto ret = std::chrono::steady_clock::now().time_since_epoch(); - return std::chrono::duration_cast(ret).count(); + auto ret = std::chrono::steady_clock::now().time_since_epoch(); + return std::chrono::duration_cast(ret).count(); } inline uint64_t getCurrentTimeNs() { - auto ret = std::chrono::steady_clock::now().time_since_epoch(); - return std::chrono::duration_cast(ret).count(); + auto ret = std::chrono::steady_clock::now().time_since_epoch(); + return std::chrono::duration_cast(ret).count(); } inline uint32_t getSteadyCurrentTimeSec() { - auto ret = std::chrono::steady_clock::now().time_since_epoch(); - return std::chrono::duration_cast(ret).count(); + auto ret = std::chrono::steady_clock::now().time_since_epoch(); + return std::chrono::duration_cast(ret).count(); } class Timer { - using steady_clock = std::chrono::steady_clock; - class Finish { - public: - explicit Finish(Timer* t) : timer_(t) {} - ~Finish() { timer_->pause(); } - Timer* timer_; 
- }; + using steady_clock = std::chrono::steady_clock; + class Finish { + public: + explicit Finish(Timer *t) : timer_(t) { + } + ~Finish() { + timer_->pause(); + } + Timer *timer_; + }; - public: - steady_clock::duration getDuration() const { return duration_; } +public: + steady_clock::duration getDuration() const { + return duration_; + } - uint32_t getDurationSec() const { - return std::chrono::duration_cast(duration_).count(); - } + uint32_t getDurationSec() const { + return std::chrono::duration_cast(duration_).count(); + } - uint64_t getDurationMs() const { - return std::chrono::duration_cast(duration_) - .count(); - } + uint64_t getDurationMs() const { + return std::chrono::duration_cast(duration_).count(); + } - void startOrResume() { - if (started_) { - throw std::runtime_error("already stated"); - } - started_ = true; - start_ = steady_clock::now(); - } + void startOrResume() { + if (started_) { + throw std::runtime_error("already stated"); + } + started_ = true; + start_ = steady_clock::now(); + } - // automatically call pause() when out of scope - Finish scopedStartOrResume() { - startOrResume(); - return Finish{this}; - } + // automatically call pause() when out of scope + Finish scopedStartOrResume() { + startOrResume(); + return Finish {this}; + } - steady_clock::duration pause() { - if (!started_) { - throw std::runtime_error("not stated yet"); - } - started_ = false; - auto d = steady_clock::now() - start_; - duration_ += d; - return d; - } + steady_clock::duration pause() { + if (!started_) { + throw std::runtime_error("not stated yet"); + } + started_ = false; + auto d = steady_clock::now() - start_; + duration_ += d; + return d; + } - private: - steady_clock::duration duration_ = steady_clock::duration::zero(); - steady_clock::time_point start_; - bool started_{false}; +private: + steady_clock::duration duration_ = steady_clock::duration::zero(); + steady_clock::time_point start_; + bool started_ {false}; }; } // namespace util diff --git 
a/mooncake-store/include/cachelib_memory_allocator/common/Utils.h b/mooncake-store/include/cachelib_memory_allocator/common/Utils.h index fd8e271f3..578e18f01 100644 --- a/mooncake-store/include/cachelib_memory_allocator/common/Utils.h +++ b/mooncake-store/include/cachelib_memory_allocator/common/Utils.h @@ -22,10 +22,9 @@ namespace util { // @return size aligned up to the next multiple of _alignment_ template -std::enable_if_t::value, T> getAlignedSize( - T size, uint32_t alignment) { - const T rem = size % alignment; - return rem == 0 ? size : size + alignment - rem; +std::enable_if_t::value, T> getAlignedSize(T size, uint32_t alignment) { + const T rem = size % alignment; + return rem == 0 ? size : size + alignment - rem; } } // namespace util diff --git a/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/logging/xlog.h b/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/logging/xlog.h index 63ffb82b1..5484ea702 100644 --- a/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/logging/xlog.h +++ b/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/logging/xlog.h @@ -65,45 +65,28 @@ #define FOLLY_XLOG_MIN_LEVEL MIN_LEVEL #endif -#define XLOG(level, ...) \ - XLOG_IMPL( \ - ::folly::LogLevel::level, \ - ::folly::LogStreamProcessor::APPEND, \ - ##__VA_ARGS__) +#define XLOG(level, ...) XLOG_IMPL(::folly::LogLevel::level, ::folly::LogStreamProcessor::APPEND, ##__VA_ARGS__) /** * Log a message if and only if the specified condition predicate evaluates * to true. Note that the condition is *only* evaluated if the log-level check * passes. */ -#define XLOG_IF(level, cond, ...) \ - XLOG_IF_IMPL( \ - ::folly::LogLevel::level, \ - cond, \ - ::folly::LogStreamProcessor::APPEND, \ - ##__VA_ARGS__) +#define XLOG_IF(level, cond, ...) \ + XLOG_IF_IMPL(::folly::LogLevel::level, cond, ::folly::LogStreamProcessor::APPEND, ##__VA_ARGS__) /** * Log a message to this file's default log category, using a format string. 
*/ -#define XLOGF(level, fmt, ...) \ - XLOG_IMPL( \ - ::folly::LogLevel::level, \ - ::folly::LogStreamProcessor::FORMAT, \ - fmt, \ - ##__VA_ARGS__) +#define XLOGF(level, fmt, ...) \ + XLOG_IMPL(::folly::LogLevel::level, ::folly::LogStreamProcessor::FORMAT, fmt, ##__VA_ARGS__) /** * Log a message using a format string if and only if the specified condition * predicate evaluates to true. Note that the condition is *only* evaluated * if the log-level check passes. */ -#define XLOGF_IF(level, cond, fmt, ...) \ - XLOG_IF_IMPL( \ - ::folly::LogLevel::level, \ - cond, \ - ::folly::LogStreamProcessor::FORMAT, \ - fmt, \ - ##__VA_ARGS__) +#define XLOGF_IF(level, cond, fmt, ...) \ + XLOG_IF_IMPL(::folly::LogLevel::level, cond, ::folly::LogStreamProcessor::FORMAT, fmt, ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param ms @@ -111,16 +94,15 @@ * * Note that this is threadsafe. */ -#define XLOG_EVERY_MS(level, ms, ...) \ - XLOG_IF( \ - level, \ - [__folly_detail_xlog_ms = ms] { \ - static ::folly::logging::IntervalRateLimiter \ - folly_detail_xlog_limiter( \ - 1, std::chrono::milliseconds(__folly_detail_xlog_ms)); \ - return folly_detail_xlog_limiter.check(); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_MS(level, ms, ...) \ + XLOG_IF( \ + level, \ + [__folly_detail_xlog_ms = ms] { \ + static ::folly::logging::IntervalRateLimiter folly_detail_xlog_limiter( \ + 1, std::chrono::milliseconds(__folly_detail_xlog_ms)); \ + return folly_detail_xlog_limiter.check(); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param ms @@ -128,17 +110,16 @@ * * Note that this is threadsafe. */ -#define XLOG_EVERY_MS_IF(level, cond, ms, ...) 
\ - XLOG_IF( \ - level, \ - (cond) && \ - [__folly_detail_xlog_ms = ms] { \ - static ::folly::logging::IntervalRateLimiter \ - folly_detail_xlog_limiter( \ - 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ - return folly_detail_xlog_limiter.check(); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_MS_IF(level, cond, ms, ...) \ + XLOG_IF( \ + level, \ + (cond) && \ + [__folly_detail_xlog_ms = ms] { \ + static ::folly::logging::IntervalRateLimiter folly_detail_xlog_limiter( \ + 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ + return folly_detail_xlog_limiter.check(); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except log a message if the specified condition @@ -146,17 +127,16 @@ * * Note that this is threadsafe. */ -#define XLOG_EVERY_MS_OR(level, cond, ms, ...) \ - XLOG_IF( \ - level, \ - (cond) || \ - [__folly_detail_xlog_ms = ms] { \ - static ::folly::logging::IntervalRateLimiter \ - folly_detail_xlog_limiter( \ - 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ - return folly_detail_xlog_limiter.check(); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_MS_OR(level, cond, ms, ...) \ + XLOG_IF( \ + level, \ + (cond) || \ + [__folly_detail_xlog_ms = ms] { \ + static ::folly::logging::IntervalRateLimiter folly_detail_xlog_limiter( \ + 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ + return folly_detail_xlog_limiter.check(); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOGF(...) except only log a message every @param ms @@ -164,18 +144,16 @@ * * Note that this is threadsafe. */ -#define XLOGF_EVERY_MS_IF(level, cond, ms, fmt, ...) \ - XLOGF_IF( \ - level, \ - (cond) && \ - [__folly_detail_xlog_ms = ms] { \ - static ::folly::logging::IntervalRateLimiter \ - folly_detail_xlog_limiter( \ - 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ - return folly_detail_xlog_limiter.check(); \ - }(), \ - fmt, \ - ##__VA_ARGS__) +#define XLOGF_EVERY_MS_IF(level, cond, ms, fmt, ...) 
\ + XLOGF_IF( \ + level, \ + (cond) && \ + [__folly_detail_xlog_ms = ms] { \ + static ::folly::logging::IntervalRateLimiter folly_detail_xlog_limiter( \ + 1, ::std::chrono::milliseconds(__folly_detail_xlog_ms)); \ + return folly_detail_xlog_limiter.check(); \ + }(), \ + fmt, ##__VA_ARGS__) /** * Similar to XLOGF(...) except only log a message every @param ms @@ -183,8 +161,7 @@ * * Note that this is threadsafe. */ -#define XLOGF_EVERY_MS(level, ms, fmt, ...) \ - XLOGF_EVERY_MS_IF(level, true, ms, fmt, ##__VA_ARGS__) +#define XLOGF_EVERY_MS(level, ms, fmt, ...) XLOGF_EVERY_MS_IF(level, true, ms, fmt, ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param n @@ -196,14 +173,14 @@ * contention, leading to possible over-logging or under-logging * effects. */ -#define XLOG_EVERY_N(level, n, ...) \ - XLOG_IF( \ - level, \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNImpl(n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_N(level, n, ...) \ + XLOG_IF( \ + level, \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOGF(...) except only log a message every @param n @@ -215,15 +192,14 @@ * contention, leading to possible over-logging or under-logging * effects. */ -#define XLOGF_EVERY_N(level, n, fmt, ...) \ - XLOGF_IF( \ - level, \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNImpl(n); \ - }(), \ - fmt, \ - ##__VA_ARGS__) +#define XLOGF_EVERY_N(level, n, fmt, ...) \ + XLOGF_IF( \ + level, \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNImpl(n); \ + }(), \ + fmt, ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param n @@ -236,15 +212,15 @@ * contention, leading to possible over-logging or under-logging * effects. */ -#define XLOG_EVERY_N_IF(level, cond, n, ...) 
\ - XLOG_IF( \ - level, \ - (cond) && \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNImpl(n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_N_IF(level, cond, n, ...) \ + XLOG_IF( \ + level, \ + (cond) && \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except it logs a message if the condition predicate @@ -256,15 +232,15 @@ * contention, leading to possible over-logging or under-logging * effects. */ -#define XLOG_EVERY_N_OR(level, cond, n, ...) \ - XLOG_IF( \ - level, \ - (cond) || \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNImpl(n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_N_OR(level, cond, n, ...) \ + XLOG_IF( \ + level, \ + (cond) || \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOGF(...) except only log a message every @param n @@ -277,16 +253,15 @@ * contention, leading to possible over-logging or under-logging * effects. */ -#define XLOGF_EVERY_N_IF(level, cond, n, fmt, ...) \ - XLOGF_IF( \ - level, \ - (cond) && \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNImpl(n); \ - }(), \ - fmt, \ - ##__VA_ARGS__) +#define XLOGF_EVERY_N_IF(level, cond, n, fmt, ...) \ + XLOGF_IF( \ + level, \ + (cond) && \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNImpl(n); \ + }(), \ + fmt, ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param n @@ -297,14 +272,14 @@ * schenarios of XLOG_EVERY_N(...) are avoided, traded off for * the performance degradation of atomic-rmw operations. */ -#define XLOG_EVERY_N_EXACT(level, n, ...) 
\ - XLOG_IF( \ - level, \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNExactImpl(n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_N_EXACT(level, n, ...) \ + XLOG_IF( \ + level, \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNExactImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message every @param n @@ -322,15 +297,14 @@ * single thread-local map to control TLS overhead, at the cost * of a small runtime performance hit. */ -#define XLOG_EVERY_N_THREAD(level, n, ...) \ - XLOG_IF( \ - level, \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogEveryNThreadImpl( \ - n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_EVERY_N_THREAD(level, n, ...) \ + XLOG_IF( \ + level, \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogEveryNThreadImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except only log at most @param count messages @@ -338,16 +312,15 @@ * * The internal counters are process-global and threadsafe. */ -#define XLOG_N_PER_MS(level, count, ms, ...) \ - XLOG_IF( \ - level, \ - [] { \ - static ::folly::logging::IntervalRateLimiter \ - folly_detail_xlog_limiter( \ - (count), ::std::chrono::milliseconds(ms)); \ - return folly_detail_xlog_limiter.check(); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_N_PER_MS(level, count, ms, ...) \ + XLOG_IF( \ + level, \ + [] { \ + static ::folly::logging::IntervalRateLimiter folly_detail_xlog_limiter((count), \ + ::std::chrono::milliseconds(ms)); \ + return folly_detail_xlog_limiter.check(); \ + }(), \ + ##__VA_ARGS__) /** * Similar to XLOG(...) except only log a message the first n times, exactly. @@ -355,14 +328,14 @@ * The internal counter is process-global and threadsafe and exchanges are * atomic. */ -#define XLOG_FIRST_N(level, n, ...) 
\ - XLOG_IF( \ - level, \ - [&] { \ - struct folly_detail_xlog_tag {}; \ - return ::folly::detail::xlogFirstNExactImpl(n); \ - }(), \ - ##__VA_ARGS__) +#define XLOG_FIRST_N(level, n, ...) \ + XLOG_IF( \ + level, \ + [&] { \ + struct folly_detail_xlog_tag {}; \ + return ::folly::detail::xlogFirstNExactImpl(n); \ + }(), \ + ##__VA_ARGS__) /** * FOLLY_XLOG_STRIP_PREFIXES can be defined to a string containing a @@ -376,19 +349,14 @@ * though.) */ #ifdef FOLLY_XLOG_STRIP_PREFIXES -#define XLOG_FILENAME \ - (static_cast( \ - ::folly::xlogStripFilename(__FILE__, FOLLY_XLOG_STRIP_PREFIXES))) +#define XLOG_FILENAME (static_cast(::folly::xlogStripFilename(__FILE__, FOLLY_XLOG_STRIP_PREFIXES))) #else -#define XLOG_FILENAME (static_cast(__FILE__)) +#define XLOG_FILENAME (static_cast(__FILE__)) #endif -#define XLOG_IMPL(level, type, ...) \ - XLOG_ACTUAL_IMPL( \ - level, true, ::folly::isLogLevelFatal(level), type, ##__VA_ARGS__) +#define XLOG_IMPL(level, type, ...) XLOG_ACTUAL_IMPL(level, true, ::folly::isLogLevelFatal(level), type, ##__VA_ARGS__) -#define XLOG_IF_IMPL(level, cond, type, ...) \ - XLOG_ACTUAL_IMPL(level, cond, false, type, ##__VA_ARGS__) +#define XLOG_IF_IMPL(level, cond, type, ...) XLOG_ACTUAL_IMPL(level, cond, false, type, ##__VA_ARGS__) /** * Helper macro used to implement XLOG() and XLOGF() @@ -456,10 +424,9 @@ * XLOG_IS_ON_IMPL_HELPER() must still be invoked first for fatal log levels * in order to initialize folly::detail::custom::xlogFileScopeInfo. */ -#define XLOG_IS_ON_IMPL(level) \ - ((((level) >= ::folly::LogLevel::FOLLY_XLOG_MIN_LEVEL) && \ - XLOG_IS_ON_IMPL_HELPER(level)) || \ - ((level) >= ::folly::kMinFatalLogLevel)) +#define XLOG_IS_ON_IMPL(level) \ + ((((level) >= ::folly::LogLevel::FOLLY_XLOG_MIN_LEVEL) && XLOG_IS_ON_IMPL_HELPER(level)) || \ + ((level) >= ::folly::kMinFatalLogLevel)) /** * Helper macro to implement of XLOG_IS_ON() @@ -476,29 +443,25 @@ * * See XlogLevelInfo for the implementation details. 
*/ -#define XLOG_IS_ON_IMPL_HELPER(level) \ - ([] { \ - static ::folly::XlogLevelInfo \ - folly_detail_xlog_level; \ - constexpr auto* folly_detail_xlog_filename = XLOG_FILENAME; \ - constexpr folly::StringPiece folly_detail_xlog_category = \ - ::folly::detail::custom::getXlogCategoryName( \ - folly_detail_xlog_filename, 0); \ - return folly_detail_xlog_level.check( \ - (level), \ - folly_detail_xlog_category, \ - ::folly::detail::custom::isXlogCategoryOverridden(0), \ - &::folly::detail::custom::xlogFileScopeInfo); \ - }()) +#define XLOG_IS_ON_IMPL_HELPER(level) \ + ([] { \ + static ::folly::XlogLevelInfo folly_detail_xlog_level; \ + constexpr auto *folly_detail_xlog_filename = XLOG_FILENAME; \ + constexpr folly::StringPiece folly_detail_xlog_category = \ + ::folly::detail::custom::getXlogCategoryName(folly_detail_xlog_filename, 0); \ + return folly_detail_xlog_level.check((level), folly_detail_xlog_category, \ + ::folly::detail::custom::isXlogCategoryOverridden(0), \ + &::folly::detail::custom::xlogFileScopeInfo); \ + }()) /** * Get the name of the log category that will be used by XLOG() statements * in this file. */ -#define XLOG_GET_CATEGORY_NAME() \ - (::folly::detail::custom::isXlogCategoryOverridden(0) \ - ? ::folly::detail::custom::getXlogCategoryName(XLOG_FILENAME, 0) \ - : ::folly::getXlogCategoryNameForFile(XLOG_FILENAME)) +#define XLOG_GET_CATEGORY_NAME() \ + (::folly::detail::custom::isXlogCategoryOverridden(0) \ + ? ::folly::detail::custom::getXlogCategoryName(XLOG_FILENAME, 0) \ + : ::folly::getXlogCategoryNameForFile(XLOG_FILENAME)) /** * Get a pointer to the LogCategory that will be used by XLOG() statements in @@ -508,8 +471,7 @@ * This must be implemented as a macro since it uses __FILE__, and that must * expand to the correct filename based on where the macro is used. 
*/ -#define XLOG_GET_CATEGORY() \ - (::folly::LoggerDB::get().getCategory(XLOG_GET_CATEGORY_NAME())) +#define XLOG_GET_CATEGORY() (::folly::LoggerDB::get().getCategory(XLOG_GET_CATEGORY_NAME())) /** * XLOG_SET_CATEGORY_NAME() can be used to explicitly define the log category @@ -524,10 +486,8 @@ * XLOG_SET_CATEGORY_NAME() cannot be used inside header files. */ #ifdef __INCLUDE_LEVEL__ -#define XLOG_SET_CATEGORY_CHECK \ - static_assert( \ - __INCLUDE_LEVEL__ == 0, \ - "XLOG_SET_CATEGORY_NAME() should not be used in header files"); +#define XLOG_SET_CATEGORY_CHECK \ + static_assert(__INCLUDE_LEVEL__ == 0, "XLOG_SET_CATEGORY_NAME() should not be used in header files"); #else #define XLOG_SET_CATEGORY_CHECK #endif @@ -540,28 +500,25 @@ // versions of gcc. #define XLOG_SET_CATEGORY_NAME(category) #else -#define XLOG_SET_CATEGORY_NAME(category) \ - namespace folly { \ - namespace detail { \ - namespace custom { \ - namespace { \ - struct xlog_correct_usage; \ - static_assert( \ - ::std::is_same< \ - xlog_correct_usage, \ - ::folly::detail::custom::xlog_correct_usage>::value, \ - "XLOG_SET_CATEGORY_NAME() should not be used within namespace scope"); \ - XLOG_SET_CATEGORY_CHECK \ - FOLLY_CONSTEVAL inline StringPiece getXlogCategoryName(StringPiece, int) { \ - return category; \ - } \ - FOLLY_CONSTEVAL inline bool isXlogCategoryOverridden(int) { \ - return true; \ - } \ - } \ - } \ - } \ - } +#define XLOG_SET_CATEGORY_NAME(category) \ + namespace folly { \ + namespace detail { \ + namespace custom { \ + namespace { \ + struct xlog_correct_usage; \ + static_assert(::std::is_same::value, \ + "XLOG_SET_CATEGORY_NAME() should not be used within namespace scope"); \ + XLOG_SET_CATEGORY_CHECK \ + FOLLY_CONSTEVAL inline StringPiece getXlogCategoryName(StringPiece, int) { \ + return category; \ + } \ + FOLLY_CONSTEVAL inline bool isXlogCategoryOverridden(int) { \ + return true; \ + } \ + } \ + } \ + } \ + } #endif /** @@ -571,12 +528,7 @@ * false. 
Unlike assert() CHECK statements are always enabled, regardless of * the setting of NDEBUG. */ -#define XCHECK(cond, ...) \ - XLOG_IF( \ - FATAL, \ - FOLLY_UNLIKELY(!(cond)), \ - "Check failed: " #cond " ", \ - ##__VA_ARGS__) +#define XCHECK(cond, ...) XLOG_IF(FATAL, FOLLY_UNLIKELY(!(cond)), "Check failed: " #cond " ", ##__VA_ARGS__) #define XCHECK_OP(op, arg1, arg2, ...) static_cast(0) @@ -607,8 +559,7 @@ * be evaluated in release builds but log a message without crashing the * program. */ -#define XDCHECK(cond, ...) \ - (!::folly::kIsDebug) ? static_cast(0) : XCHECK(cond, ##__VA_ARGS__) +#define XDCHECK(cond, ...) (!::folly::kIsDebug) ? static_cast(0) : XCHECK(cond, ##__VA_ARGS__) /* * It would be nice to rely solely on folly::kIsDebug here rather than NDEBUG. @@ -616,9 +567,9 @@ * much simpler to simply change the definition of XDCHECK_OP() based on NDEBUG. */ #ifdef NDEBUG -#define XDCHECK_OP(op, arg1, arg2, ...) \ - while (false) \ - XCHECK_OP(op, arg1, arg2, ##__VA_ARGS__) +#define XDCHECK_OP(op, arg1, arg2, ...) \ + while (false) \ + XCHECK_OP(op, arg1, arg2, ##__VA_ARGS__) #else #define XDCHECK_OP(op, arg1, arg2, ...) 
XCHECK_OP(op, arg1, arg2, ##__VA_ARGS__) #endif diff --git a/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/portability/Config.h b/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/portability/Config.h index 83a723a18..2b6b98282 100644 --- a/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/portability/Config.h +++ b/mooncake-store/include/cachelib_memory_allocator/fake_include/folly/portability/Config.h @@ -33,7 +33,7 @@ #endif #ifdef __APPLE__ -#include // @manual +#include // @manual #include // @manual #include // @manual #endif diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/args.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/args.h index 562e8ab11..e3719eafa 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/args.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/args.h @@ -8,8 +8,8 @@ #ifndef FMT_ARGS_H_ #define FMT_ARGS_H_ -#include // std::reference_wrapper -#include // std::unique_ptr +#include // std::reference_wrapper +#include // std::unique_ptr #include #include "core.h" @@ -18,47 +18,57 @@ FMT_BEGIN_NAMESPACE namespace detail { -template struct is_reference_wrapper : std::false_type {}; +template +struct is_reference_wrapper : std::false_type {}; template struct is_reference_wrapper> : std::true_type {}; -template const T& unwrap(const T& v) { return v; } -template const T& unwrap(const std::reference_wrapper& v) { - return static_cast(v); +template +const T &unwrap(const T &v) { + return v; +} +template +const T &unwrap(const std::reference_wrapper &v) { + return static_cast(v); } class dynamic_arg_list { - // Workaround for clang's -Wweak-vtables. Unlike for regular classes, for - // templates it doesn't complain about inability to deduce single translation - // unit for placing vtable. So storage_node_base is made a fake template. 
- template struct node { - virtual ~node() = default; - std::unique_ptr> next; - }; - - template struct typed_node : node<> { - T value; - - template - FMT_CONSTEXPR typed_node(const Arg& arg) : value(arg) {} - - template - FMT_CONSTEXPR typed_node(const basic_string_view& arg) - : value(arg.data(), arg.size()) {} - }; - - std::unique_ptr> head_; - - public: - template const T& push(const Arg& arg) { - auto new_node = std::unique_ptr>(new typed_node(arg)); - auto& value = new_node->value; - new_node->next = std::move(head_); - head_ = std::move(new_node); - return value; - } + // Workaround for clang's -Wweak-vtables. Unlike for regular classes, for + // templates it doesn't complain about inability to deduce single + // translation unit for placing vtable. So storage_node_base is made a fake + // template. + template + struct node { + virtual ~node() = default; + std::unique_ptr> next; + }; + + template + struct typed_node : node<> { + T value; + + template + FMT_CONSTEXPR typed_node(const Arg &arg) : value(arg) { + } + + template + FMT_CONSTEXPR typed_node(const basic_string_view &arg) : value(arg.data(), arg.size()) { + } + }; + + std::unique_ptr> head_; + +public: + template + const T &push(const Arg &arg) { + auto new_node = std::unique_ptr>(new typed_node(arg)); + auto &value = new_node->value; + new_node->next = std::move(head_); + head_ = std::move(new_node); + return value; + } }; -} // namespace detail +} // namespace detail /** \rst @@ -77,156 +87,148 @@ class dynamic_format_arg_store : public basic_format_args #endif { - private: - using char_type = typename Context::char_type; - - template struct need_copy { - static constexpr detail::type mapped_type = - detail::mapped_type_constant::value; - - enum { - value = !(detail::is_reference_wrapper::value || - std::is_same>::value || - std::is_same>::value || - (mapped_type != detail::type::cstring_type && - mapped_type != detail::type::string_type && - mapped_type != detail::type::custom_type)) - }; - }; - 
- template - using stored_type = conditional_t::value && - !has_formatter::value && - !detail::is_reference_wrapper::value, - std::basic_string, T>; - - // Storage of basic_format_arg must be contiguous. - std::vector> data_; - std::vector> named_info_; - - // Storage of arguments not fitting into basic_format_arg must grow - // without relocation because items in data_ refer to it. - detail::dynamic_arg_list dynamic_args_; - - friend class basic_format_args; - - unsigned long long get_types() const { - return detail::is_unpacked_bit | data_.size() | - (named_info_.empty() - ? 0ULL - : static_cast(detail::has_named_args_bit)); - } - - const basic_format_arg* data() const { - return named_info_.empty() ? data_.data() : data_.data() + 1; - } - - template void emplace_arg(const T& arg) { - data_.emplace_back(detail::make_arg(arg)); - } - - template - void emplace_arg(const detail::named_arg& arg) { - if (named_info_.empty()) { - constexpr const detail::named_arg_info* zero_ptr{nullptr}; - data_.insert(data_.begin(), {zero_ptr, 0}); - } - data_.emplace_back(detail::make_arg(detail::unwrap(arg.value))); - auto pop_one = [](std::vector>* data) { - data->pop_back(); - }; - std::unique_ptr>, decltype(pop_one)> - guard{&data_, pop_one}; - named_info_.push_back({arg.name, static_cast(data_.size() - 2u)}); - data_[0].value_.named_args = {named_info_.data(), named_info_.size()}; - guard.release(); - } - - public: - /** - \rst - Adds an argument into the dynamic store for later passing to a formatting - function. - - Note that custom types and string types (but not string views) are copied - into the store dynamically allocating memory if necessary. 
- - **Example**:: - - fmt::dynamic_format_arg_store store; - store.push_back(42); - store.push_back("abc"); - store.push_back(1.5f); - std::string result = fmt::vformat("{} and {} and {}", store); - \endrst - */ - template void push_back(const T& arg) { - if (detail::const_check(need_copy::value)) - emplace_arg(dynamic_args_.push>(arg)); - else - emplace_arg(detail::unwrap(arg)); - } - - /** - \rst - Adds a reference to the argument into the dynamic store for later passing to - a formatting function. - - **Example**:: - - fmt::dynamic_format_arg_store store; - char band[] = "Rolling Stones"; - store.push_back(std::cref(band)); - band[9] = 'c'; // Changing str affects the output. - std::string result = fmt::vformat("{}", store); - // result == "Rolling Scones" - \endrst - */ - template void push_back(std::reference_wrapper arg) { - static_assert( - need_copy::value, - "objects of built-in types and string views are always copied"); - emplace_arg(arg.get()); - } - - /** - Adds named argument into the dynamic store for later passing to a formatting - function. ``std::reference_wrapper`` is supported to avoid copying of the - argument. The name is always copied into the store. - */ - template - void push_back(const detail::named_arg& arg) { - const char_type* arg_name = - dynamic_args_.push>(arg.name).c_str(); - if (detail::const_check(need_copy::value)) { - emplace_arg( - fmt::arg(arg_name, dynamic_args_.push>(arg.value))); - } else { - emplace_arg(fmt::arg(arg_name, arg.value)); - } - } - - /** Erase all elements from the store */ - void clear() { - data_.clear(); - named_info_.clear(); - dynamic_args_ = detail::dynamic_arg_list(); - } - - /** - \rst - Reserves space to store at least *new_cap* arguments including - *new_cap_named* named arguments. 
- \endrst - */ - void reserve(size_t new_cap, size_t new_cap_named) { - FMT_ASSERT(new_cap >= new_cap_named, - "Set of arguments includes set of named arguments"); - data_.reserve(new_cap); - named_info_.reserve(new_cap_named); - } +private: + using char_type = typename Context::char_type; + + template + struct need_copy { + static constexpr detail::type mapped_type = detail::mapped_type_constant::value; + + enum { + value = !(detail::is_reference_wrapper::value || std::is_same>::value || + std::is_same>::value || + (mapped_type != detail::type::cstring_type && mapped_type != detail::type::string_type && + mapped_type != detail::type::custom_type)) + }; + }; + + template + using stored_type = conditional_t::value && !has_formatter::value && + !detail::is_reference_wrapper::value, + std::basic_string, T>; + + // Storage of basic_format_arg must be contiguous. + std::vector> data_; + std::vector> named_info_; + + // Storage of arguments not fitting into basic_format_arg must grow + // without relocation because items in data_ refer to it. + detail::dynamic_arg_list dynamic_args_; + + friend class basic_format_args; + + unsigned long long get_types() const { + return detail::is_unpacked_bit | data_.size() | + (named_info_.empty() ? 0ULL : static_cast(detail::has_named_args_bit)); + } + + const basic_format_arg *data() const { + return named_info_.empty() ? 
data_.data() : data_.data() + 1; + } + + template + void emplace_arg(const T &arg) { + data_.emplace_back(detail::make_arg(arg)); + } + + template + void emplace_arg(const detail::named_arg &arg) { + if (named_info_.empty()) { + constexpr const detail::named_arg_info *zero_ptr {nullptr}; + data_.insert(data_.begin(), {zero_ptr, 0}); + } + data_.emplace_back(detail::make_arg(detail::unwrap(arg.value))); + auto pop_one = [](std::vector> *data) { + data->pop_back(); + }; + std::unique_ptr>, decltype(pop_one)> guard {&data_, pop_one}; + named_info_.push_back({arg.name, static_cast(data_.size() - 2u)}); + data_[0].value_.named_args = {named_info_.data(), named_info_.size()}; + guard.release(); + } + +public: + /** + \rst + Adds an argument into the dynamic store for later passing to a formatting + function. + + Note that custom types and string types (but not string views) are copied + into the store dynamically allocating memory if necessary. + + **Example**:: + + fmt::dynamic_format_arg_store store; + store.push_back(42); + store.push_back("abc"); + store.push_back(1.5f); + std::string result = fmt::vformat("{} and {} and {}", store); + \endrst + */ + template + void push_back(const T &arg) { + if (detail::const_check(need_copy::value)) + emplace_arg(dynamic_args_.push>(arg)); + else + emplace_arg(detail::unwrap(arg)); + } + + /** + \rst + Adds a reference to the argument into the dynamic store for later passing + to a formatting function. + + **Example**:: + + fmt::dynamic_format_arg_store store; + char band[] = "Rolling Stones"; + store.push_back(std::cref(band)); + band[9] = 'c'; // Changing str affects the output. 
+ std::string result = fmt::vformat("{}", store); + // result == "Rolling Scones" + \endrst + */ + template + void push_back(std::reference_wrapper arg) { + static_assert(need_copy::value, "objects of built-in types and string views are always copied"); + emplace_arg(arg.get()); + } + + /** + Adds named argument into the dynamic store for later passing to a + formatting function. ``std::reference_wrapper`` is supported to avoid + copying of the argument. The name is always copied into the store. + */ + template + void push_back(const detail::named_arg &arg) { + const char_type *arg_name = dynamic_args_.push>(arg.name).c_str(); + if (detail::const_check(need_copy::value)) { + emplace_arg(fmt::arg(arg_name, dynamic_args_.push>(arg.value))); + } else { + emplace_arg(fmt::arg(arg_name, arg.value)); + } + } + + /** Erase all elements from the store */ + void clear() { + data_.clear(); + named_info_.clear(); + dynamic_args_ = detail::dynamic_arg_list(); + } + + /** + \rst + Reserves space to store at least *new_cap* arguments including + *new_cap_named* named arguments. + \endrst + */ + void reserve(size_t new_cap, size_t new_cap_named) { + FMT_ASSERT(new_cap >= new_cap_named, "Set of arguments includes set of named arguments"); + data_.reserve(new_cap); + named_info_.reserve(new_cap_named); + } }; FMT_END_NAMESPACE -#endif // FMT_ARGS_H_ +#endif // FMT_ARGS_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/chrono.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/chrono.h index c024fd710..92a94db51 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/chrono.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/chrono.h @@ -20,7 +20,7 @@ FMT_BEGIN_NAMESPACE // Enable safe chrono durations, unless explicitly disabled. 
#ifndef FMT_SAFE_DURATION_CAST -# define FMT_SAFE_DURATION_CAST 1 +#define FMT_SAFE_DURATION_CAST 1 #endif #if FMT_SAFE_DURATION_CAST @@ -34,27 +34,26 @@ namespace safe_duration_cast { template ::value && - std::numeric_limits::is_signed == - std::numeric_limits::is_signed)> -FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { - ec = 0; - using F = std::numeric_limits; - using T = std::numeric_limits; - static_assert(F::is_integer, "From must be integral"); - static_assert(T::is_integer, "To must be integral"); - - // A and B are both signed, or both unsigned. - if (F::digits <= T::digits) { - // From fits in To without any problem. - } else { - // From does not always fit in To, resort to a dynamic check. - if (from < (T::min)() || from > (T::max)()) { - // outside range. - ec = 1; - return {}; - } - } - return static_cast(from); + std::numeric_limits::is_signed == std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int &ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + // A and B are both signed, or both unsigned. + if (F::digits <= T::digits) { + // From fits in To without any problem. + } else { + // From does not always fit in To, resort to a dynamic check. + if (from < (T::min)() || from > (T::max)()) { + // outside range. 
+ ec = 1; + return {}; + } + } + return static_cast(from); } /** @@ -63,43 +62,39 @@ FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { */ template ::value && - std::numeric_limits::is_signed != - std::numeric_limits::is_signed)> -FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { - ec = 0; - using F = std::numeric_limits; - using T = std::numeric_limits; - static_assert(F::is_integer, "From must be integral"); - static_assert(T::is_integer, "To must be integral"); - - if (detail::const_check(F::is_signed && !T::is_signed)) { - // From may be negative, not allowed! - if (fmt::detail::is_negative(from)) { - ec = 1; - return {}; - } - // From is positive. Can it always fit in To? - if (F::digits > T::digits && - from > static_cast(detail::max_value())) { - ec = 1; - return {}; - } - } - - if (!F::is_signed && T::is_signed && F::digits >= T::digits && - from > static_cast(detail::max_value())) { - ec = 1; - return {}; - } - return static_cast(from); // Lossless conversion. + std::numeric_limits::is_signed != std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int &ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + if (detail::const_check(F::is_signed && !T::is_signed)) { + // From may be negative, not allowed! + if (fmt::detail::is_negative(from)) { + ec = 1; + return {}; + } + // From is positive. Can it always fit in To? + if (F::digits > T::digits && from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + } + + if (!F::is_signed && T::is_signed && F::digits >= T::digits && from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + return static_cast(from); // Lossless conversion. 
} -template ::value)> -FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { - ec = 0; - return from; -} // function +template ::value)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int &ec) { + ec = 0; + return from; +} // function // clang-format off /** @@ -115,166 +110,154 @@ FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { * -Inf | -Inf */ // clang-format on -template ::value)> -FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { - ec = 0; - using T = std::numeric_limits; - static_assert(std::is_floating_point::value, "From must be floating"); - static_assert(std::is_floating_point::value, "To must be floating"); - - // catch the only happy case - if (std::isfinite(from)) { - if (from >= T::lowest() && from <= (T::max)()) { - return static_cast(from); - } - // not within range. - ec = 1; - return {}; - } - - // nan and inf will be preserved - return static_cast(from); -} // function - -template ::value)> -FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { - ec = 0; - static_assert(std::is_floating_point::value, "From must be floating"); - return from; +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int &ec) { + ec = 0; + using T = std::numeric_limits; + static_assert(std::is_floating_point::value, "From must be floating"); + static_assert(std::is_floating_point::value, "To must be floating"); + + // catch the only happy case + if (std::isfinite(from)) { + if (from >= T::lowest() && from <= (T::max)()) { + return static_cast(from); + } + // not within range. 
+ ec = 1; + return {}; + } + + // nan and inf will be preserved + return static_cast(from); +} // function + +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int &ec) { + ec = 0; + static_assert(std::is_floating_point::value, "From must be floating"); + return from; } /** * safe duration cast between integral durations */ -template ::value), +template ::value), FMT_ENABLE_IF(std::is_integral::value)> -To safe_duration_cast(std::chrono::duration from, - int& ec) { - using From = std::chrono::duration; - ec = 0; - // the basic idea is that we need to convert from count() in the from type - // to count() in the To type, by multiplying it with this: - struct Factor - : std::ratio_divide {}; - - static_assert(Factor::num > 0, "num must be positive"); - static_assert(Factor::den > 0, "den must be positive"); - - // the conversion is like this: multiply from.count() with Factor::num - // /Factor::den and convert it to To::rep, all this without - // overflow/underflow. let's start by finding a suitable type that can hold - // both To, From and Factor::num - using IntermediateRep = - typename std::common_type::type; - - // safe conversion to IntermediateRep - IntermediateRep count = - lossless_integral_conversion(from.count(), ec); - if (ec) return {}; - // multiply with Factor::num without overflow or underflow - if (detail::const_check(Factor::num != 1)) { - const auto max1 = detail::max_value() / Factor::num; - if (count > max1) { - ec = 1; - return {}; - } - const auto min1 = - (std::numeric_limits::min)() / Factor::num; - if (count < min1) { - ec = 1; - return {}; - } - count *= Factor::num; - } - - if (detail::const_check(Factor::den != 1)) count /= Factor::den; - auto tocount = lossless_integral_conversion(count, ec); - return ec ? 
To() : To(tocount); +To safe_duration_cast(std::chrono::duration from, int &ec) { + using From = std::chrono::duration; + ec = 0; + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // safe conversion to IntermediateRep + IntermediateRep count = lossless_integral_conversion(from.count(), ec); + if (ec) + return {}; + // multiply with Factor::num without overflow or underflow + if (detail::const_check(Factor::num != 1)) { + const auto max1 = detail::max_value() / Factor::num; + if (count > max1) { + ec = 1; + return {}; + } + const auto min1 = (std::numeric_limits::min)() / Factor::num; + if (count < min1) { + ec = 1; + return {}; + } + count *= Factor::num; + } + + if (detail::const_check(Factor::den != 1)) + count /= Factor::den; + auto tocount = lossless_integral_conversion(count, ec); + return ec ? To() : To(tocount); } /** * safe duration_cast between floating point durations */ -template ::value), +template ::value), FMT_ENABLE_IF(std::is_floating_point::value)> -To safe_duration_cast(std::chrono::duration from, - int& ec) { - using From = std::chrono::duration; - ec = 0; - if (std::isnan(from.count())) { - // nan in, gives nan out. easy. - return To{std::numeric_limits::quiet_NaN()}; - } - // maybe we should also check if from is denormal, and decide what to do about - // it. - - // +-inf should be preserved. 
- if (std::isinf(from.count())) { - return To{from.count()}; - } - - // the basic idea is that we need to convert from count() in the from type - // to count() in the To type, by multiplying it with this: - struct Factor - : std::ratio_divide {}; - - static_assert(Factor::num > 0, "num must be positive"); - static_assert(Factor::den > 0, "den must be positive"); - - // the conversion is like this: multiply from.count() with Factor::num - // /Factor::den and convert it to To::rep, all this without - // overflow/underflow. let's start by finding a suitable type that can hold - // both To, From and Factor::num - using IntermediateRep = - typename std::common_type::type; - - // force conversion of From::rep -> IntermediateRep to be safe, - // even if it will never happen be narrowing in this context. - IntermediateRep count = - safe_float_conversion(from.count(), ec); - if (ec) { - return {}; - } - - // multiply with Factor::num without overflow or underflow - if (Factor::num != 1) { - constexpr auto max1 = detail::max_value() / - static_cast(Factor::num); - if (count > max1) { - ec = 1; - return {}; - } - constexpr auto min1 = std::numeric_limits::lowest() / - static_cast(Factor::num); - if (count < min1) { - ec = 1; - return {}; - } - count *= static_cast(Factor::num); - } - - // this can't go wrong, right? den>0 is checked earlier. - if (Factor::den != 1) { - using common_t = typename std::common_type::type; - count /= static_cast(Factor::den); - } - - // convert to the to type, safely - using ToRep = typename To::rep; - - const ToRep tocount = safe_float_conversion(count, ec); - if (ec) { - return {}; - } - return To{tocount}; +To safe_duration_cast(std::chrono::duration from, int &ec) { + using From = std::chrono::duration; + ec = 0; + if (std::isnan(from.count())) { + // nan in, gives nan out. easy. + return To {std::numeric_limits::quiet_NaN()}; + } + // maybe we should also check if from is denormal, and decide what to do + // about it. 
+ + // +-inf should be preserved. + if (std::isinf(from.count())) { + return To {from.count()}; + } + + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // force conversion of From::rep -> IntermediateRep to be safe, + // even if it will never happen be narrowing in this context. + IntermediateRep count = safe_float_conversion(from.count(), ec); + if (ec) { + return {}; + } + + // multiply with Factor::num without overflow or underflow + if (Factor::num != 1) { + constexpr auto max1 = detail::max_value() / static_cast(Factor::num); + if (count > max1) { + ec = 1; + return {}; + } + constexpr auto min1 = + std::numeric_limits::lowest() / static_cast(Factor::num); + if (count < min1) { + ec = 1; + return {}; + } + count *= static_cast(Factor::num); + } + + // this can't go wrong, right? den>0 is checked earlier. + if (Factor::den != 1) { + using common_t = typename std::common_type::type; + count /= static_cast(Factor::den); + } + + // convert to the to type, safely + using ToRep = typename To::rep; + + const ToRep tocount = safe_float_conversion(count, ec); + if (ec) { + return {}; + } + return To {tocount}; } -} // namespace safe_duration_cast +} // namespace safe_duration_cast #endif // Prevents expansion of a preceding token as a function-style macro. 
@@ -282,81 +265,88 @@ To safe_duration_cast(std::chrono::duration from, #define FMT_NOMACRO namespace detail { -template struct null {}; -inline null<> localtime_r FMT_NOMACRO(...) { return null<>(); } -inline null<> localtime_s(...) { return null<>(); } -inline null<> gmtime_r(...) { return null<>(); } -inline null<> gmtime_s(...) { return null<>(); } - -inline auto do_write(const std::tm& time, const std::locale& loc, char format, - char modifier) -> std::string { - auto&& os = std::ostringstream(); - os.imbue(loc); - using iterator = std::ostreambuf_iterator; - const auto& facet = std::use_facet>(loc); - auto end = facet.put(os, os, ' ', &time, format, modifier); - if (end.failed()) FMT_THROW(format_error("failed to format time")); - auto str = os.str(); - if (!detail::is_utf8() || loc == std::locale::classic()) return str; - // char16_t and char32_t codecvts are broken in MSVC (linkage errors) and - // gcc-4. -#if FMT_MSC_VER != 0 || \ - (defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)) - // The _GLIBCXX_USE_DUAL_ABI macro is always defined in libstdc++ from gcc-5 - // and newer. - using code_unit = wchar_t; +template +struct null {}; +inline null<> localtime_r FMT_NOMACRO(...) { + return null<>(); +} +inline null<> localtime_s(...) { + return null<>(); +} +inline null<> gmtime_r(...) { + return null<>(); +} +inline null<> gmtime_s(...) { + return null<>(); +} + +inline auto do_write(const std::tm &time, const std::locale &loc, char format, char modifier) -> std::string { + auto &&os = std::ostringstream(); + os.imbue(loc); + using iterator = std::ostreambuf_iterator; + const auto &facet = std::use_facet>(loc); + auto end = facet.put(os, os, ' ', &time, format, modifier); + if (end.failed()) + FMT_THROW(format_error("failed to format time")); + auto str = os.str(); + if (!detail::is_utf8() || loc == std::locale::classic()) + return str; + // char16_t and char32_t codecvts are broken in MSVC (linkage errors) + // and gcc-4. 
+#if FMT_MSC_VER != 0 || (defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)) + // The _GLIBCXX_USE_DUAL_ABI macro is always defined in libstdc++ from gcc-5 + // and newer. + using code_unit = wchar_t; #else - using code_unit = char32_t; + using code_unit = char32_t; #endif - auto& f = std::use_facet>(loc); - auto mb = std::mbstate_t(); - const char* from_next = nullptr; - code_unit* to_next = nullptr; - constexpr size_t buf_size = 32; - code_unit buf[buf_size] = {}; - auto result = f.in(mb, str.data(), str.data() + str.size(), from_next, buf, - buf + buf_size, to_next); - if (result != std::codecvt_base::ok) - FMT_THROW(format_error("failed to format time")); - str.clear(); - for (code_unit* p = buf; p != to_next; ++p) { - uint32_t c = static_cast(*p); - if (sizeof(code_unit) == 2 && c >= 0xd800 && c <= 0xdfff) { - // surrogate pair - ++p; - if (p == to_next || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) { - FMT_THROW(format_error("failed to format time")); - } - c = (c << 10) + static_cast(*p) - 0x35fdc00; - } - if (c < 0x80) { - str.push_back(static_cast(c)); - } else if (c < 0x800) { - str.push_back(static_cast(0xc0 | (c >> 6))); - str.push_back(static_cast(0x80 | (c & 0x3f))); - } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { - str.push_back(static_cast(0xe0 | (c >> 12))); - str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); - str.push_back(static_cast(0x80 | (c & 0x3f))); - } else if (c >= 0x10000 && c <= 0x10ffff) { - str.push_back(static_cast(0xf0 | (c >> 18))); - str.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); - str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); - str.push_back(static_cast(0x80 | (c & 0x3f))); - } else { - FMT_THROW(format_error("failed to format time")); - } - } - return str; + auto &f = std::use_facet>(loc); + auto mb = std::mbstate_t(); + const char *from_next = nullptr; + code_unit *to_next = nullptr; + constexpr size_t buf_size = 32; + code_unit buf[buf_size] = {}; + auto result 
= f.in(mb, str.data(), str.data() + str.size(), from_next, buf, buf + buf_size, to_next); + if (result != std::codecvt_base::ok) + FMT_THROW(format_error("failed to format time")); + str.clear(); + for (code_unit *p = buf; p != to_next; ++p) { + uint32_t c = static_cast(*p); + if (sizeof(code_unit) == 2 && c >= 0xd800 && c <= 0xdfff) { + // surrogate pair + ++p; + if (p == to_next || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) { + FMT_THROW(format_error("failed to format time")); + } + c = (c << 10) + static_cast(*p) - 0x35fdc00; + } + if (c < 0x80) { + str.push_back(static_cast(c)); + } else if (c < 0x800) { + str.push_back(static_cast(0xc0 | (c >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { + str.push_back(static_cast(0xe0 | (c >> 12))); + str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else if (c >= 0x10000 && c <= 0x10ffff) { + str.push_back(static_cast(0xf0 | (c >> 18))); + str.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); + str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else { + FMT_THROW(format_error("failed to format time")); + } + } + return str; } template -auto write(OutputIt out, const std::tm& time, const std::locale& loc, - char format, char modifier = 0) -> OutputIt { - auto str = do_write(time, loc, format, modifier); - return std::copy(str.begin(), str.end(), out); +auto write(OutputIt out, const std::tm &time, const std::locale &loc, char format, char modifier = 0) -> OutputIt { + auto str = do_write(time, loc, format, modifier); + return std::copy(str.begin(), str.end(), out); } -} // namespace detail +} // namespace detail FMT_MODULE_EXPORT_BEGIN @@ -366,44 +356,50 @@ FMT_MODULE_EXPORT_BEGIN thread-safe on most platforms. 
*/ inline std::tm localtime(std::time_t time) { - struct dispatcher { - std::time_t time_; - std::tm tm_; + struct dispatcher { + std::time_t time_; + std::tm tm_; - dispatcher(std::time_t t) : time_(t) {} + dispatcher(std::time_t t) : time_(t) { + } - bool run() { - using namespace fmt::detail; - return handle(localtime_r(&time_, &tm_)); - } + bool run() { + using namespace fmt::detail; + return handle(localtime_r(&time_, &tm_)); + } - bool handle(std::tm* tm) { return tm != nullptr; } + bool handle(std::tm *tm) { + return tm != nullptr; + } - bool handle(detail::null<>) { - using namespace fmt::detail; - return fallback(localtime_s(&tm_, &time_)); - } + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(localtime_s(&tm_, &time_)); + } - bool fallback(int res) { return res == 0; } + bool fallback(int res) { + return res == 0; + } #if !FMT_MSC_VER - bool fallback(detail::null<>) { - using namespace fmt::detail; - std::tm* tm = std::localtime(&time_); - if (tm) tm_ = *tm; - return tm != nullptr; - } + bool fallback(detail::null<>) { + using namespace fmt::detail; + std::tm *tm = std::localtime(&time_); + if (tm) + tm_ = *tm; + return tm != nullptr; + } #endif - }; - dispatcher lt(time); - // Too big time values may be unsupported. - if (!lt.run()) FMT_THROW(format_error("time_t value out of range")); - return lt.tm_; + }; + dispatcher lt(time); + // Too big time values may be unsupported. + if (!lt.run()) + FMT_THROW(format_error("time_t value out of range")); + return lt.tm_; } -inline std::tm localtime( - std::chrono::time_point time_point) { - return localtime(std::chrono::system_clock::to_time_t(time_point)); +inline std::tm localtime(std::chrono::time_point time_point) { + return localtime(std::chrono::system_clock::to_time_t(time_point)); } /** @@ -412,735 +408,840 @@ inline std::tm localtime( function is thread-safe on most platforms. 
*/ inline std::tm gmtime(std::time_t time) { - struct dispatcher { - std::time_t time_; - std::tm tm_; + struct dispatcher { + std::time_t time_; + std::tm tm_; - dispatcher(std::time_t t) : time_(t) {} + dispatcher(std::time_t t) : time_(t) { + } - bool run() { - using namespace fmt::detail; - return handle(gmtime_r(&time_, &tm_)); - } + bool run() { + using namespace fmt::detail; + return handle(gmtime_r(&time_, &tm_)); + } - bool handle(std::tm* tm) { return tm != nullptr; } + bool handle(std::tm *tm) { + return tm != nullptr; + } - bool handle(detail::null<>) { - using namespace fmt::detail; - return fallback(gmtime_s(&tm_, &time_)); - } + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(gmtime_s(&tm_, &time_)); + } - bool fallback(int res) { return res == 0; } + bool fallback(int res) { + return res == 0; + } #if !FMT_MSC_VER - bool fallback(detail::null<>) { - std::tm* tm = std::gmtime(&time_); - if (tm) tm_ = *tm; - return tm != nullptr; - } + bool fallback(detail::null<>) { + std::tm *tm = std::gmtime(&time_); + if (tm) + tm_ = *tm; + return tm != nullptr; + } #endif - }; - dispatcher gt(time); - // Too big time values may be unsupported. - if (!gt.run()) FMT_THROW(format_error("time_t value out of range")); - return gt.tm_; + }; + dispatcher gt(time); + // Too big time values may be unsupported. 
+ if (!gt.run()) + FMT_THROW(format_error("time_t value out of range")); + return gt.tm_; } -inline std::tm gmtime( - std::chrono::time_point time_point) { - return gmtime(std::chrono::system_clock::to_time_t(time_point)); +inline std::tm gmtime(std::chrono::time_point time_point) { + return gmtime(std::chrono::system_clock::to_time_t(time_point)); } FMT_BEGIN_DETAIL_NAMESPACE -inline size_t strftime(char* str, size_t count, const char* format, - const std::tm* time) { - // Assign to a pointer to suppress GCCs -Wformat-nonliteral - // First assign the nullptr to suppress -Wsuggest-attribute=format - std::size_t (*strftime)(char*, std::size_t, const char*, const std::tm*) = - nullptr; - strftime = std::strftime; - return strftime(str, count, format, time); +inline size_t strftime(char *str, size_t count, const char *format, const std::tm *time) { + // Assign to a pointer to suppress GCCs -Wformat-nonliteral + // First assign the nullptr to suppress -Wsuggest-attribute=format + std::size_t (*strftime)(char *, std::size_t, const char *, const std::tm *) = nullptr; + strftime = std::strftime; + return strftime(str, count, format, time); } -inline size_t strftime(wchar_t* str, size_t count, const wchar_t* format, - const std::tm* time) { - // See above - std::size_t (*wcsftime)(wchar_t*, std::size_t, const wchar_t*, - const std::tm*) = nullptr; - wcsftime = std::wcsftime; - return wcsftime(str, count, format, time); +inline size_t strftime(wchar_t *str, size_t count, const wchar_t *format, const std::tm *time) { + // See above + std::size_t (*wcsftime)(wchar_t *, std::size_t, const wchar_t *, const std::tm *) = nullptr; + wcsftime = std::wcsftime; + return wcsftime(str, count, format, time); } FMT_END_DETAIL_NAMESPACE template -struct formatter, - Char> : formatter { - FMT_CONSTEXPR formatter() { - this->specs = {default_specs, sizeof(default_specs) / sizeof(Char)}; - } - - template - FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { - auto it = 
ctx.begin(); - if (it != ctx.end() && *it == ':') ++it; - auto end = it; - while (end != ctx.end() && *end != '}') ++end; - if (end != it) this->specs = {it, detail::to_unsigned(end - it)}; - return end; - } - - template - auto format(std::chrono::time_point val, - FormatContext& ctx) -> decltype(ctx.out()) { - std::tm time = localtime(val); - return formatter::format(time, ctx); - } - - static constexpr Char default_specs[] = {'%', 'Y', '-', '%', 'm', '-', - '%', 'd', ' ', '%', 'H', ':', - '%', 'M', ':', '%', 'S'}; +struct formatter, Char> : formatter { + FMT_CONSTEXPR formatter() { + this->specs = {default_specs, sizeof(default_specs) / sizeof(Char)}; + } + + template + FMT_CONSTEXPR auto parse(ParseContext &ctx) -> decltype(ctx.begin()) { + auto it = ctx.begin(); + if (it != ctx.end() && *it == ':') + ++it; + auto end = it; + while (end != ctx.end() && *end != '}') + ++end; + if (end != it) + this->specs = {it, detail::to_unsigned(end - it)}; + return end; + } + + template + auto format(std::chrono::time_point val, FormatContext &ctx) -> decltype(ctx.out()) { + std::tm time = localtime(val); + return formatter::format(time, ctx); + } + + static constexpr Char default_specs[] = {'%', 'Y', '-', '%', 'm', '-', '%', 'd', ' ', + '%', 'H', ':', '%', 'M', ':', '%', 'S'}; }; template -constexpr Char - formatter, - Char>::default_specs[]; - -template struct formatter { - template - FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { - auto it = ctx.begin(); - if (it != ctx.end() && *it == ':') ++it; - auto end = it; - while (end != ctx.end() && *end != '}') ++end; - specs = {it, detail::to_unsigned(end - it)}; - return end; - } - - template - auto format(const std::tm& tm, FormatContext& ctx) const - -> decltype(ctx.out()) { - basic_memory_buffer tm_format; - tm_format.append(specs.begin(), specs.end()); - // By appending an extra space we can distinguish an empty result that - // indicates insufficient buffer size from a guaranteed non-empty result - 
// https://github.com/fmtlib/fmt/issues/2238 - tm_format.push_back(' '); - tm_format.push_back('\0'); - basic_memory_buffer buf; - size_t start = buf.size(); - for (;;) { - size_t size = buf.capacity() - start; - size_t count = detail::strftime(&buf[start], size, &tm_format[0], &tm); - if (count != 0) { - buf.resize(start + count); - break; - } - const size_t MIN_GROWTH = 10; - buf.reserve(buf.capacity() + (size > MIN_GROWTH ? size : MIN_GROWTH)); - } - // Remove the extra space. - return std::copy(buf.begin(), buf.end() - 1, ctx.out()); - } - - basic_string_view specs; +constexpr Char formatter, Char>::default_specs[]; + +template +struct formatter { + template + FMT_CONSTEXPR auto parse(ParseContext &ctx) -> decltype(ctx.begin()) { + auto it = ctx.begin(); + if (it != ctx.end() && *it == ':') + ++it; + auto end = it; + while (end != ctx.end() && *end != '}') + ++end; + specs = {it, detail::to_unsigned(end - it)}; + return end; + } + + template + auto format(const std::tm &tm, FormatContext &ctx) const -> decltype(ctx.out()) { + basic_memory_buffer tm_format; + tm_format.append(specs.begin(), specs.end()); + // By appending an extra space we can distinguish an empty result that + // indicates insufficient buffer size from a guaranteed non-empty result + // https://github.com/fmtlib/fmt/issues/2238 + tm_format.push_back(' '); + tm_format.push_back('\0'); + basic_memory_buffer buf; + size_t start = buf.size(); + for (;;) { + size_t size = buf.capacity() - start; + size_t count = detail::strftime(&buf[start], size, &tm_format[0], &tm); + if (count != 0) { + buf.resize(start + count); + break; + } + const size_t MIN_GROWTH = 10; + buf.reserve(buf.capacity() + (size > MIN_GROWTH ? size : MIN_GROWTH)); + } + // Remove the extra space. 
+ return std::copy(buf.begin(), buf.end() - 1, ctx.out()); + } + + basic_string_view specs; }; FMT_BEGIN_DETAIL_NAMESPACE -template FMT_CONSTEXPR inline const char* get_units() { - if (std::is_same::value) return "as"; - if (std::is_same::value) return "fs"; - if (std::is_same::value) return "ps"; - if (std::is_same::value) return "ns"; - if (std::is_same::value) return "µs"; - if (std::is_same::value) return "ms"; - if (std::is_same::value) return "cs"; - if (std::is_same::value) return "ds"; - if (std::is_same>::value) return "s"; - if (std::is_same::value) return "das"; - if (std::is_same::value) return "hs"; - if (std::is_same::value) return "ks"; - if (std::is_same::value) return "Ms"; - if (std::is_same::value) return "Gs"; - if (std::is_same::value) return "Ts"; - if (std::is_same::value) return "Ps"; - if (std::is_same::value) return "Es"; - if (std::is_same>::value) return "m"; - if (std::is_same>::value) return "h"; - return nullptr; +template +FMT_CONSTEXPR inline const char *get_units() { + if (std::is_same::value) + return "as"; + if (std::is_same::value) + return "fs"; + if (std::is_same::value) + return "ps"; + if (std::is_same::value) + return "ns"; + if (std::is_same::value) + return "µs"; + if (std::is_same::value) + return "ms"; + if (std::is_same::value) + return "cs"; + if (std::is_same::value) + return "ds"; + if (std::is_same>::value) + return "s"; + if (std::is_same::value) + return "das"; + if (std::is_same::value) + return "hs"; + if (std::is_same::value) + return "ks"; + if (std::is_same::value) + return "Ms"; + if (std::is_same::value) + return "Gs"; + if (std::is_same::value) + return "Ts"; + if (std::is_same::value) + return "Ps"; + if (std::is_same::value) + return "Es"; + if (std::is_same>::value) + return "m"; + if (std::is_same>::value) + return "h"; + return nullptr; } enum class numeric_system { - standard, - // Alternative numeric system, e.g. 十二 instead of 12 in ja_JP locale. 
- alternative + standard, + // Alternative numeric system, e.g. 十二 instead of 12 in ja_JP locale. + alternative }; // Parses a put_time-like format string and invokes handler actions. template -FMT_CONSTEXPR const Char* parse_chrono_format(const Char* begin, - const Char* end, - Handler&& handler) { - auto ptr = begin; - while (ptr != end) { - auto c = *ptr; - if (c == '}') break; - if (c != '%') { - ++ptr; - continue; - } - if (begin != ptr) handler.on_text(begin, ptr); - ++ptr; // consume '%' - if (ptr == end) FMT_THROW(format_error("invalid format")); - c = *ptr++; - switch (c) { - case '%': - handler.on_text(ptr - 1, ptr); - break; - case 'n': { - const Char newline[] = {'\n'}; - handler.on_text(newline, newline + 1); - break; - } - case 't': { - const Char tab[] = {'\t'}; - handler.on_text(tab, tab + 1); - break; - } - // Day of the week: - case 'a': - handler.on_abbr_weekday(); - break; - case 'A': - handler.on_full_weekday(); - break; - case 'w': - handler.on_dec0_weekday(numeric_system::standard); - break; - case 'u': - handler.on_dec1_weekday(numeric_system::standard); - break; - // Month: - case 'b': - handler.on_abbr_month(); - break; - case 'B': - handler.on_full_month(); - break; - // Hour, minute, second: - case 'H': - handler.on_24_hour(numeric_system::standard); - break; - case 'I': - handler.on_12_hour(numeric_system::standard); - break; - case 'M': - handler.on_minute(numeric_system::standard); - break; - case 'S': - handler.on_second(numeric_system::standard); - break; - // Other: - case 'c': - handler.on_datetime(numeric_system::standard); - break; - case 'x': - handler.on_loc_date(numeric_system::standard); - break; - case 'X': - handler.on_loc_time(numeric_system::standard); - break; - case 'D': - handler.on_us_date(); - break; - case 'F': - handler.on_iso_date(); - break; - case 'r': - handler.on_12_hour_time(); - break; - case 'R': - handler.on_24_hour_time(); - break; - case 'T': - handler.on_iso_time(); - break; - case 'p': - 
handler.on_am_pm(); - break; - case 'Q': - handler.on_duration_value(); - break; - case 'q': - handler.on_duration_unit(); - break; - case 'z': - handler.on_utc_offset(); - break; - case 'Z': - handler.on_tz_name(); - break; - // Alternative representation: - case 'E': { - if (ptr == end) FMT_THROW(format_error("invalid format")); - c = *ptr++; - switch (c) { - case 'c': - handler.on_datetime(numeric_system::alternative); - break; - case 'x': - handler.on_loc_date(numeric_system::alternative); - break; - case 'X': - handler.on_loc_time(numeric_system::alternative); - break; - default: - FMT_THROW(format_error("invalid format")); - } - break; - } - case 'O': - if (ptr == end) FMT_THROW(format_error("invalid format")); - c = *ptr++; - switch (c) { - case 'w': - handler.on_dec0_weekday(numeric_system::alternative); - break; - case 'u': - handler.on_dec1_weekday(numeric_system::alternative); - break; - case 'H': - handler.on_24_hour(numeric_system::alternative); - break; - case 'I': - handler.on_12_hour(numeric_system::alternative); - break; - case 'M': - handler.on_minute(numeric_system::alternative); - break; - case 'S': - handler.on_second(numeric_system::alternative); - break; - default: - FMT_THROW(format_error("invalid format")); - } - break; - default: - FMT_THROW(format_error("invalid format")); - } - begin = ptr; - } - if (begin != ptr) handler.on_text(begin, ptr); - return ptr; +FMT_CONSTEXPR const Char *parse_chrono_format(const Char *begin, const Char *end, Handler &&handler) { + auto ptr = begin; + while (ptr != end) { + auto c = *ptr; + if (c == '}') + break; + if (c != '%') { + ++ptr; + continue; + } + if (begin != ptr) + handler.on_text(begin, ptr); + ++ptr; // consume '%' + if (ptr == end) + FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case '%': + handler.on_text(ptr - 1, ptr); + break; + case 'n': { + const Char newline[] = {'\n'}; + handler.on_text(newline, newline + 1); + break; + } + case 't': { + const Char tab[] = 
{'\t'}; + handler.on_text(tab, tab + 1); + break; + } + // Day of the week: + case 'a': + handler.on_abbr_weekday(); + break; + case 'A': + handler.on_full_weekday(); + break; + case 'w': + handler.on_dec0_weekday(numeric_system::standard); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::standard); + break; + // Month: + case 'b': + handler.on_abbr_month(); + break; + case 'B': + handler.on_full_month(); + break; + // Hour, minute, second: + case 'H': + handler.on_24_hour(numeric_system::standard); + break; + case 'I': + handler.on_12_hour(numeric_system::standard); + break; + case 'M': + handler.on_minute(numeric_system::standard); + break; + case 'S': + handler.on_second(numeric_system::standard); + break; + // Other: + case 'c': + handler.on_datetime(numeric_system::standard); + break; + case 'x': + handler.on_loc_date(numeric_system::standard); + break; + case 'X': + handler.on_loc_time(numeric_system::standard); + break; + case 'D': + handler.on_us_date(); + break; + case 'F': + handler.on_iso_date(); + break; + case 'r': + handler.on_12_hour_time(); + break; + case 'R': + handler.on_24_hour_time(); + break; + case 'T': + handler.on_iso_time(); + break; + case 'p': + handler.on_am_pm(); + break; + case 'Q': + handler.on_duration_value(); + break; + case 'q': + handler.on_duration_unit(); + break; + case 'z': + handler.on_utc_offset(); + break; + case 'Z': + handler.on_tz_name(); + break; + // Alternative representation: + case 'E': { + if (ptr == end) + FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'c': + handler.on_datetime(numeric_system::alternative); + break; + case 'x': + handler.on_loc_date(numeric_system::alternative); + break; + case 'X': + handler.on_loc_time(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + } + case 'O': + if (ptr == end) + FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'w': + 
handler.on_dec0_weekday(numeric_system::alternative); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::alternative); + break; + case 'H': + handler.on_24_hour(numeric_system::alternative); + break; + case 'I': + handler.on_12_hour(numeric_system::alternative); + break; + case 'M': + handler.on_minute(numeric_system::alternative); + break; + case 'S': + handler.on_second(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + default: + FMT_THROW(format_error("invalid format")); + } + begin = ptr; + } + if (begin != ptr) + handler.on_text(begin, ptr); + return ptr; } -template struct null_chrono_spec_handler { - FMT_CONSTEXPR void unsupported() { - static_cast(this)->unsupported(); - } - FMT_CONSTEXPR void on_abbr_weekday() { unsupported(); } - FMT_CONSTEXPR void on_full_weekday() { unsupported(); } - FMT_CONSTEXPR void on_dec0_weekday(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_dec1_weekday(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_abbr_month() { unsupported(); } - FMT_CONSTEXPR void on_full_month() { unsupported(); } - FMT_CONSTEXPR void on_24_hour(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_12_hour(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_minute(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_second(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_datetime(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_loc_date(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_loc_time(numeric_system) { unsupported(); } - FMT_CONSTEXPR void on_us_date() { unsupported(); } - FMT_CONSTEXPR void on_iso_date() { unsupported(); } - FMT_CONSTEXPR void on_12_hour_time() { unsupported(); } - FMT_CONSTEXPR void on_24_hour_time() { unsupported(); } - FMT_CONSTEXPR void on_iso_time() { unsupported(); } - FMT_CONSTEXPR void on_am_pm() { unsupported(); } - FMT_CONSTEXPR void on_duration_value() { unsupported(); } - 
FMT_CONSTEXPR void on_duration_unit() { unsupported(); } - FMT_CONSTEXPR void on_utc_offset() { unsupported(); } - FMT_CONSTEXPR void on_tz_name() { unsupported(); } +template +struct null_chrono_spec_handler { + FMT_CONSTEXPR void unsupported() { + static_cast(this)->unsupported(); + } + FMT_CONSTEXPR void on_abbr_weekday() { + unsupported(); + } + FMT_CONSTEXPR void on_full_weekday() { + unsupported(); + } + FMT_CONSTEXPR void on_dec0_weekday(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_dec1_weekday(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_abbr_month() { + unsupported(); + } + FMT_CONSTEXPR void on_full_month() { + unsupported(); + } + FMT_CONSTEXPR void on_24_hour(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_12_hour(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_minute(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_second(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_datetime(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_loc_date(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_loc_time(numeric_system) { + unsupported(); + } + FMT_CONSTEXPR void on_us_date() { + unsupported(); + } + FMT_CONSTEXPR void on_iso_date() { + unsupported(); + } + FMT_CONSTEXPR void on_12_hour_time() { + unsupported(); + } + FMT_CONSTEXPR void on_24_hour_time() { + unsupported(); + } + FMT_CONSTEXPR void on_iso_time() { + unsupported(); + } + FMT_CONSTEXPR void on_am_pm() { + unsupported(); + } + FMT_CONSTEXPR void on_duration_value() { + unsupported(); + } + FMT_CONSTEXPR void on_duration_unit() { + unsupported(); + } + FMT_CONSTEXPR void on_utc_offset() { + unsupported(); + } + FMT_CONSTEXPR void on_tz_name() { + unsupported(); + } }; struct chrono_format_checker : null_chrono_spec_handler { - FMT_NORETURN void unsupported() { FMT_THROW(format_error("no date")); } - - template - FMT_CONSTEXPR void on_text(const Char*, const Char*) {} - FMT_CONSTEXPR 
void on_24_hour(numeric_system) {} - FMT_CONSTEXPR void on_12_hour(numeric_system) {} - FMT_CONSTEXPR void on_minute(numeric_system) {} - FMT_CONSTEXPR void on_second(numeric_system) {} - FMT_CONSTEXPR void on_12_hour_time() {} - FMT_CONSTEXPR void on_24_hour_time() {} - FMT_CONSTEXPR void on_iso_time() {} - FMT_CONSTEXPR void on_am_pm() {} - FMT_CONSTEXPR void on_duration_value() {} - FMT_CONSTEXPR void on_duration_unit() {} + FMT_NORETURN void unsupported() { + FMT_THROW(format_error("no date")); + } + + template + FMT_CONSTEXPR void on_text(const Char *, const Char *) { + } + FMT_CONSTEXPR void on_24_hour(numeric_system) { + } + FMT_CONSTEXPR void on_12_hour(numeric_system) { + } + FMT_CONSTEXPR void on_minute(numeric_system) { + } + FMT_CONSTEXPR void on_second(numeric_system) { + } + FMT_CONSTEXPR void on_12_hour_time() { + } + FMT_CONSTEXPR void on_24_hour_time() { + } + FMT_CONSTEXPR void on_iso_time() { + } + FMT_CONSTEXPR void on_am_pm() { + } + FMT_CONSTEXPR void on_duration_value() { + } + FMT_CONSTEXPR void on_duration_unit() { + } }; template ::value)> inline bool isnan(T) { - return false; + return false; } template ::value)> inline bool isnan(T value) { - return std::isnan(value); + return std::isnan(value); } template ::value)> inline bool isfinite(T) { - return true; + return true; } template ::value)> inline bool isfinite(T value) { - return std::isfinite(value); + return std::isfinite(value); } // Converts value to int and checks that it's in the range [0, upper). 
template ::value)> inline int to_nonnegative_int(T value, int upper) { - FMT_ASSERT(value >= 0 && to_unsigned(value) <= to_unsigned(upper), - "invalid value"); - (void)upper; - return static_cast(value); + FMT_ASSERT(value >= 0 && to_unsigned(value) <= to_unsigned(upper), "invalid value"); + (void)upper; + return static_cast(value); } template ::value)> inline int to_nonnegative_int(T value, int upper) { - FMT_ASSERT( - std::isnan(value) || (value >= 0 && value <= static_cast(upper)), - "invalid value"); - (void)upper; - return static_cast(value); + FMT_ASSERT(std::isnan(value) || (value >= 0 && value <= static_cast(upper)), "invalid value"); + (void)upper; + return static_cast(value); } template ::value)> inline T mod(T x, int y) { - return x % static_cast(y); + return x % static_cast(y); } template ::value)> inline T mod(T x, int y) { - return std::fmod(x, static_cast(y)); + return std::fmod(x, static_cast(y)); } // If T is an integral type, maps T to its unsigned counterpart, otherwise // leaves it unchanged (unlike std::make_unsigned). template ::value> struct make_unsigned_or_unchanged { - using type = T; + using type = T; }; -template struct make_unsigned_or_unchanged { - using type = typename std::make_unsigned::type; +template +struct make_unsigned_or_unchanged { + using type = typename std::make_unsigned::type; }; #if FMT_SAFE_DURATION_CAST // throwing version of safe_duration_cast template To fmt_safe_duration_cast(std::chrono::duration from) { - int ec; - To to = safe_duration_cast::safe_duration_cast(from, ec); - if (ec) FMT_THROW(format_error("cannot format duration")); - return to; + int ec; + To to = safe_duration_cast::safe_duration_cast(from, ec); + if (ec) + FMT_THROW(format_error("cannot format duration")); + return to; } #endif -template ::value)> -inline std::chrono::duration get_milliseconds( - std::chrono::duration d) { - // this may overflow and/or the result may not fit in the - // target type. 
+template ::value)> +inline std::chrono::duration get_milliseconds(std::chrono::duration d) { + // this may overflow and/or the result may not fit in the + // target type. #if FMT_SAFE_DURATION_CAST - using CommonSecondsType = - typename std::common_type::type; - const auto d_as_common = fmt_safe_duration_cast(d); - const auto d_as_whole_seconds = - fmt_safe_duration_cast(d_as_common); - // this conversion should be nonproblematic - const auto diff = d_as_common - d_as_whole_seconds; - const auto ms = - fmt_safe_duration_cast>(diff); - return ms; + using CommonSecondsType = typename std::common_type::type; + const auto d_as_common = fmt_safe_duration_cast(d); + const auto d_as_whole_seconds = fmt_safe_duration_cast(d_as_common); + // this conversion should be nonproblematic + const auto diff = d_as_common - d_as_whole_seconds; + const auto ms = fmt_safe_duration_cast>(diff); + return ms; #else - auto s = std::chrono::duration_cast(d); - return std::chrono::duration_cast(d - s); + auto s = std::chrono::duration_cast(d); + return std::chrono::duration_cast(d - s); #endif } -template ::value)> -inline std::chrono::duration get_milliseconds( - std::chrono::duration d) { - using common_type = typename std::common_type::type; - auto ms = mod(d.count() * static_cast(Period::num) / - static_cast(Period::den) * 1000, - 1000); - return std::chrono::duration(static_cast(ms)); +template ::value)> +inline std::chrono::duration get_milliseconds(std::chrono::duration d) { + using common_type = typename std::common_type::type; + auto ms = + mod(d.count() * static_cast(Period::num) / static_cast(Period::den) * 1000, 1000); + return std::chrono::duration(static_cast(ms)); } -template ::value)> +template ::value)> OutputIt format_duration_value(OutputIt out, Rep val, int) { - return write(out, val); + return write(out, val); } -template ::value)> +template ::value)> OutputIt format_duration_value(OutputIt out, Rep val, int precision) { - auto specs = basic_format_specs(); - 
specs.precision = precision; - specs.type = precision > 0 ? 'f' : 'g'; - return write(out, val, specs); + auto specs = basic_format_specs(); + specs.precision = precision; + specs.type = precision > 0 ? 'f' : 'g'; + return write(out, val, specs); } template OutputIt copy_unit(string_view unit, OutputIt out, Char) { - return std::copy(unit.begin(), unit.end(), out); + return std::copy(unit.begin(), unit.end(), out); } template OutputIt copy_unit(string_view unit, OutputIt out, wchar_t) { - // This works when wchar_t is UTF-32 because units only contain characters - // that have the same representation in UTF-16 and UTF-32. - utf8_to_utf16 u(unit); - return std::copy(u.c_str(), u.c_str() + u.size(), out); + // This works when wchar_t is UTF-32 because units only contain characters + // that have the same representation in UTF-16 and UTF-32. + utf8_to_utf16 u(unit); + return std::copy(u.c_str(), u.c_str() + u.size(), out); } template OutputIt format_duration_unit(OutputIt out) { - if (const char* unit = get_units()) - return copy_unit(string_view(unit), out, Char()); - *out++ = '['; - out = write(out, Period::num); - if (const_check(Period::den != 1)) { - *out++ = '/'; - out = write(out, Period::den); - } - *out++ = ']'; - *out++ = 's'; - return out; + if (const char *unit = get_units()) + return copy_unit(string_view(unit), out, Char()); + *out++ = '['; + out = write(out, Period::num); + if (const_check(Period::den != 1)) { + *out++ = '/'; + out = write(out, Period::den); + } + *out++ = ']'; + *out++ = 's'; + return out; } -template +template struct chrono_formatter { - FormatContext& context; - OutputIt out; - int precision; - bool localized = false; - // rep is unsigned to avoid overflow. 
- using rep = - conditional_t::value && sizeof(Rep) < sizeof(int), - unsigned, typename make_unsigned_or_unchanged::type>; - rep val; - using seconds = std::chrono::duration; - seconds s; - using milliseconds = std::chrono::duration; - bool negative; - - using char_type = typename FormatContext::char_type; - - explicit chrono_formatter(FormatContext& ctx, OutputIt o, - std::chrono::duration d) - : context(ctx), - out(o), - val(static_cast(d.count())), - negative(false) { - if (d.count() < 0) { - val = 0 - val; - negative = true; - } - - // this may overflow and/or the result may not fit in the - // target type. + FormatContext &context; + OutputIt out; + int precision; + bool localized = false; + // rep is unsigned to avoid overflow. + using rep = conditional_t::value && sizeof(Rep) < sizeof(int), unsigned, + typename make_unsigned_or_unchanged::type>; + rep val; + using seconds = std::chrono::duration; + seconds s; + using milliseconds = std::chrono::duration; + bool negative; + + using char_type = typename FormatContext::char_type; + + explicit chrono_formatter(FormatContext &ctx, OutputIt o, std::chrono::duration d) + : context(ctx), out(o), val(static_cast(d.count())), negative(false) { + if (d.count() < 0) { + val = 0 - val; + negative = true; + } + + // this may overflow and/or the result may not fit in the + // target type. #if FMT_SAFE_DURATION_CAST - // might need checked conversion (rep!=Rep) - auto tmpval = std::chrono::duration(val); - s = fmt_safe_duration_cast(tmpval); + // might need checked conversion (rep!=Rep) + auto tmpval = std::chrono::duration(val); + s = fmt_safe_duration_cast(tmpval); #else - s = std::chrono::duration_cast( - std::chrono::duration(val)); + s = std::chrono::duration_cast(std::chrono::duration(val)); #endif - } - - // returns true if nan or inf, writes to out. 
- bool handle_nan_inf() { - if (isfinite(val)) { - return false; - } - if (isnan(val)) { - write_nan(); - return true; - } - // must be +-inf - if (val > 0) { - write_pinf(); - } else { - write_ninf(); - } - return true; - } - - Rep hour() const { return static_cast(mod((s.count() / 3600), 24)); } - - Rep hour12() const { - Rep hour = static_cast(mod((s.count() / 3600), 12)); - return hour <= 0 ? 12 : hour; - } - - Rep minute() const { return static_cast(mod((s.count() / 60), 60)); } - Rep second() const { return static_cast(mod(s.count(), 60)); } - - std::tm time() const { - auto time = std::tm(); - time.tm_hour = to_nonnegative_int(hour(), 24); - time.tm_min = to_nonnegative_int(minute(), 60); - time.tm_sec = to_nonnegative_int(second(), 60); - return time; - } - - void write_sign() { - if (negative) { - *out++ = '-'; - negative = false; - } - } - - void write(Rep value, int width) { - write_sign(); - if (isnan(value)) return write_nan(); - uint32_or_64_or_128_t n = - to_unsigned(to_nonnegative_int(value, max_value())); - int num_digits = detail::count_digits(n); - if (width > num_digits) out = std::fill_n(out, width - num_digits, '0'); - out = format_decimal(out, n, num_digits).end; - } - - void write_nan() { std::copy_n("nan", 3, out); } - void write_pinf() { std::copy_n("inf", 3, out); } - void write_ninf() { std::copy_n("-inf", 4, out); } - - void format_localized(const tm& time, char format, char modifier = 0) { - if (isnan(val)) return write_nan(); - const auto& loc = localized ? context.locale().template get() - : std::locale::classic(); - out = detail::write(out, time, loc, format, modifier); - } - - void on_text(const char_type* begin, const char_type* end) { - std::copy(begin, end, out); - } - - // These are not implemented because durations don't have date information. 
- void on_abbr_weekday() {} - void on_full_weekday() {} - void on_dec0_weekday(numeric_system) {} - void on_dec1_weekday(numeric_system) {} - void on_abbr_month() {} - void on_full_month() {} - void on_datetime(numeric_system) {} - void on_loc_date(numeric_system) {} - void on_loc_time(numeric_system) {} - void on_us_date() {} - void on_iso_date() {} - void on_utc_offset() {} - void on_tz_name() {} - - void on_24_hour(numeric_system ns) { - if (handle_nan_inf()) return; - - if (ns == numeric_system::standard) return write(hour(), 2); - auto time = tm(); - time.tm_hour = to_nonnegative_int(hour(), 24); - format_localized(time, 'H', 'O'); - } - - void on_12_hour(numeric_system ns) { - if (handle_nan_inf()) return; - - if (ns == numeric_system::standard) return write(hour12(), 2); - auto time = tm(); - time.tm_hour = to_nonnegative_int(hour12(), 12); - format_localized(time, 'I', 'O'); - } - - void on_minute(numeric_system ns) { - if (handle_nan_inf()) return; - - if (ns == numeric_system::standard) return write(minute(), 2); - auto time = tm(); - time.tm_min = to_nonnegative_int(minute(), 60); - format_localized(time, 'M', 'O'); - } - - void on_second(numeric_system ns) { - if (handle_nan_inf()) return; - - if (ns == numeric_system::standard) { - write(second(), 2); + } + + // returns true if nan or inf, writes to out. + bool handle_nan_inf() { + if (isfinite(val)) { + return false; + } + if (isnan(val)) { + write_nan(); + return true; + } + // must be +-inf + if (val > 0) { + write_pinf(); + } else { + write_ninf(); + } + return true; + } + + Rep hour() const { + return static_cast(mod((s.count() / 3600), 24)); + } + + Rep hour12() const { + Rep hour = static_cast(mod((s.count() / 3600), 12)); + return hour <= 0 ? 
12 : hour; + } + + Rep minute() const { + return static_cast(mod((s.count() / 60), 60)); + } + Rep second() const { + return static_cast(mod(s.count(), 60)); + } + + std::tm time() const { + auto time = std::tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + time.tm_min = to_nonnegative_int(minute(), 60); + time.tm_sec = to_nonnegative_int(second(), 60); + return time; + } + + void write_sign() { + if (negative) { + *out++ = '-'; + negative = false; + } + } + + void write(Rep value, int width) { + write_sign(); + if (isnan(value)) + return write_nan(); + uint32_or_64_or_128_t n = to_unsigned(to_nonnegative_int(value, max_value())); + int num_digits = detail::count_digits(n); + if (width > num_digits) + out = std::fill_n(out, width - num_digits, '0'); + out = format_decimal(out, n, num_digits).end; + } + + void write_nan() { + std::copy_n("nan", 3, out); + } + void write_pinf() { + std::copy_n("inf", 3, out); + } + void write_ninf() { + std::copy_n("-inf", 4, out); + } + + void format_localized(const tm &time, char format, char modifier = 0) { + if (isnan(val)) + return write_nan(); + const auto &loc = localized ? context.locale().template get() : std::locale::classic(); + out = detail::write(out, time, loc, format, modifier); + } + + void on_text(const char_type *begin, const char_type *end) { + std::copy(begin, end, out); + } + + // These are not implemented because durations don't have date information. 
+ void on_abbr_weekday() { + } + void on_full_weekday() { + } + void on_dec0_weekday(numeric_system) { + } + void on_dec1_weekday(numeric_system) { + } + void on_abbr_month() { + } + void on_full_month() { + } + void on_datetime(numeric_system) { + } + void on_loc_date(numeric_system) { + } + void on_loc_time(numeric_system) { + } + void on_us_date() { + } + void on_iso_date() { + } + void on_utc_offset() { + } + void on_tz_name() { + } + + void on_24_hour(numeric_system ns) { + if (handle_nan_inf()) + return; + + if (ns == numeric_system::standard) + return write(hour(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + format_localized(time, 'H', 'O'); + } + + void on_12_hour(numeric_system ns) { + if (handle_nan_inf()) + return; + + if (ns == numeric_system::standard) + return write(hour12(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour12(), 12); + format_localized(time, 'I', 'O'); + } + + void on_minute(numeric_system ns) { + if (handle_nan_inf()) + return; + + if (ns == numeric_system::standard) + return write(minute(), 2); + auto time = tm(); + time.tm_min = to_nonnegative_int(minute(), 60); + format_localized(time, 'M', 'O'); + } + + void on_second(numeric_system ns) { + if (handle_nan_inf()) + return; + + if (ns == numeric_system::standard) { + write(second(), 2); #if FMT_SAFE_DURATION_CAST - // convert rep->Rep - using duration_rep = std::chrono::duration; - using duration_Rep = std::chrono::duration; - auto tmpval = fmt_safe_duration_cast(duration_rep{val}); + // convert rep->Rep + using duration_rep = std::chrono::duration; + using duration_Rep = std::chrono::duration; + auto tmpval = fmt_safe_duration_cast(duration_rep {val}); #else - auto tmpval = std::chrono::duration(val); + auto tmpval = std::chrono::duration(val); #endif - auto ms = get_milliseconds(tmpval); - if (ms != std::chrono::milliseconds(0)) { - *out++ = '.'; - write(ms.count(), 3); - } - return; - } - auto time = tm(); - time.tm_sec = 
to_nonnegative_int(second(), 60); - format_localized(time, 'S', 'O'); - } - - void on_12_hour_time() { - if (handle_nan_inf()) return; - format_localized(time(), 'r'); - } - - void on_24_hour_time() { - if (handle_nan_inf()) { - *out++ = ':'; - handle_nan_inf(); - return; - } - - write(hour(), 2); - *out++ = ':'; - write(minute(), 2); - } - - void on_iso_time() { - on_24_hour_time(); - *out++ = ':'; - if (handle_nan_inf()) return; - write(second(), 2); - } - - void on_am_pm() { - if (handle_nan_inf()) return; - format_localized(time(), 'p'); - } - - void on_duration_value() { - if (handle_nan_inf()) return; - write_sign(); - out = format_duration_value(out, val, precision); - } - - void on_duration_unit() { - out = format_duration_unit(out); - } + auto ms = get_milliseconds(tmpval); + if (ms != std::chrono::milliseconds(0)) { + *out++ = '.'; + write(ms.count(), 3); + } + return; + } + auto time = tm(); + time.tm_sec = to_nonnegative_int(second(), 60); + format_localized(time, 'S', 'O'); + } + + void on_12_hour_time() { + if (handle_nan_inf()) + return; + format_localized(time(), 'r'); + } + + void on_24_hour_time() { + if (handle_nan_inf()) { + *out++ = ':'; + handle_nan_inf(); + return; + } + + write(hour(), 2); + *out++ = ':'; + write(minute(), 2); + } + + void on_iso_time() { + on_24_hour_time(); + *out++ = ':'; + if (handle_nan_inf()) + return; + write(second(), 2); + } + + void on_am_pm() { + if (handle_nan_inf()) + return; + format_localized(time(), 'p'); + } + + void on_duration_value() { + if (handle_nan_inf()) + return; + write_sign(); + out = format_duration_value(out, val, precision); + } + + void on_duration_unit() { + out = format_duration_unit(out); + } }; FMT_END_DETAIL_NAMESPACE @@ -1150,159 +1251,167 @@ using weekday = std::chrono::weekday; #else // A fallback version of weekday. class weekday { - private: - unsigned char value; - - public: - weekday() = default; - explicit constexpr weekday(unsigned wd) noexcept - : value(static_cast(wd != 7 ? 
wd : 0)) {} - constexpr unsigned c_encoding() const noexcept { return value; } +private: + unsigned char value; + +public: + weekday() = default; + explicit constexpr weekday(unsigned wd) noexcept : value(static_cast(wd != 7 ? wd : 0)) { + } + constexpr unsigned c_encoding() const noexcept { + return value; + } }; #endif // A rudimentary weekday formatter. -template <> struct formatter { - private: - bool localized = false; - - public: - FMT_CONSTEXPR auto parse(format_parse_context& ctx) -> decltype(ctx.begin()) { - auto begin = ctx.begin(), end = ctx.end(); - if (begin != end && *begin == 'L') { - ++begin; - localized = true; - } - return begin; - } - - auto format(weekday wd, format_context& ctx) -> decltype(ctx.out()) { - auto time = std::tm(); - time.tm_wday = static_cast(wd.c_encoding()); - const auto& loc = localized ? ctx.locale().template get() - : std::locale::classic(); - return detail::write(ctx.out(), time, loc, 'a'); - } +template <> +struct formatter { +private: + bool localized = false; + +public: + FMT_CONSTEXPR auto parse(format_parse_context &ctx) -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + return begin; + } + + auto format(weekday wd, format_context &ctx) -> decltype(ctx.out()) { + auto time = std::tm(); + time.tm_wday = static_cast(wd.c_encoding()); + const auto &loc = localized ? 
ctx.locale().template get() : std::locale::classic(); + return detail::write(ctx.out(), time, loc, 'a'); + } }; template struct formatter, Char> { - private: - basic_format_specs specs; - int precision = -1; - using arg_ref_type = detail::arg_ref; - arg_ref_type width_ref; - arg_ref_type precision_ref; - bool localized = false; - basic_string_view format_str; - using duration = std::chrono::duration; - - struct spec_handler { - formatter& f; - basic_format_parse_context& context; - basic_string_view format_str; - - template FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) { - context.check_arg_id(arg_id); - return arg_ref_type(arg_id); - } - - FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view arg_id) { - context.check_arg_id(arg_id); - return arg_ref_type(arg_id); - } - - FMT_CONSTEXPR arg_ref_type make_arg_ref(detail::auto_id) { - return arg_ref_type(context.next_arg_id()); - } - - void on_error(const char* msg) { FMT_THROW(format_error(msg)); } - FMT_CONSTEXPR void on_fill(basic_string_view fill) { - f.specs.fill = fill; - } - FMT_CONSTEXPR void on_align(align_t align) { f.specs.align = align; } - FMT_CONSTEXPR void on_width(int width) { f.specs.width = width; } - FMT_CONSTEXPR void on_precision(int _precision) { - f.precision = _precision; - } - FMT_CONSTEXPR void end_precision() {} - - template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { - f.width_ref = make_arg_ref(arg_id); - } - - template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { - f.precision_ref = make_arg_ref(arg_id); - } - }; - - using iterator = typename basic_format_parse_context::iterator; - struct parse_range { - iterator begin; - iterator end; - }; - - FMT_CONSTEXPR parse_range do_parse(basic_format_parse_context& ctx) { - auto begin = ctx.begin(), end = ctx.end(); - if (begin == end || *begin == '}') return {begin, begin}; - spec_handler handler{*this, ctx, format_str}; - begin = detail::parse_align(begin, end, handler); - if (begin == end) return {begin, begin}; - begin = 
detail::parse_width(begin, end, handler); - if (begin == end) return {begin, begin}; - if (*begin == '.') { - if (std::is_floating_point::value) - begin = detail::parse_precision(begin, end, handler); - else - handler.on_error("precision not allowed for this argument type"); - } - if (begin != end && *begin == 'L') { - ++begin; - localized = true; - } - end = parse_chrono_format(begin, end, detail::chrono_format_checker()); - return {begin, end}; - } - - public: - FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) - -> decltype(ctx.begin()) { - auto range = do_parse(ctx); - format_str = basic_string_view( - &*range.begin, detail::to_unsigned(range.end - range.begin)); - return range.end; - } - - template - auto format(const duration& d, FormatContext& ctx) const - -> decltype(ctx.out()) { - auto specs_copy = specs; - auto precision_copy = precision; - auto begin = format_str.begin(), end = format_str.end(); - // As a possible future optimization, we could avoid extra copying if width - // is not specified. 
- basic_memory_buffer buf; - auto out = std::back_inserter(buf); - detail::handle_dynamic_spec(specs_copy.width, - width_ref, ctx); - detail::handle_dynamic_spec(precision_copy, - precision_ref, ctx); - if (begin == end || *begin == '}') { - out = detail::format_duration_value(out, d.count(), precision_copy); - detail::format_duration_unit(out); - } else { - detail::chrono_formatter f( - ctx, out, d); - f.precision = precision_copy; - f.localized = localized; - detail::parse_chrono_format(begin, end, f); - } - return detail::write( - ctx.out(), basic_string_view(buf.data(), buf.size()), specs_copy); - } +private: + basic_format_specs specs; + int precision = -1; + using arg_ref_type = detail::arg_ref; + arg_ref_type width_ref; + arg_ref_type precision_ref; + bool localized = false; + basic_string_view format_str; + using duration = std::chrono::duration; + + struct spec_handler { + formatter &f; + basic_format_parse_context &context; + basic_string_view format_str; + + template + FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(detail::auto_id) { + return arg_ref_type(context.next_arg_id()); + } + + void on_error(const char *msg) { + FMT_THROW(format_error(msg)); + } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + f.specs.fill = fill; + } + FMT_CONSTEXPR void on_align(align_t align) { + f.specs.align = align; + } + FMT_CONSTEXPR void on_width(int width) { + f.specs.width = width; + } + FMT_CONSTEXPR void on_precision(int _precision) { + f.precision = _precision; + } + FMT_CONSTEXPR void end_precision() { + } + + template + FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + f.width_ref = make_arg_ref(arg_id); + } + + template + FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + f.precision_ref = 
make_arg_ref(arg_id); + } + }; + + using iterator = typename basic_format_parse_context::iterator; + struct parse_range { + iterator begin; + iterator end; + }; + + FMT_CONSTEXPR parse_range do_parse(basic_format_parse_context &ctx) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end || *begin == '}') + return {begin, begin}; + spec_handler handler {*this, ctx, format_str}; + begin = detail::parse_align(begin, end, handler); + if (begin == end) + return {begin, begin}; + begin = detail::parse_width(begin, end, handler); + if (begin == end) + return {begin, begin}; + if (*begin == '.') { + if (std::is_floating_point::value) + begin = detail::parse_precision(begin, end, handler); + else + handler.on_error("precision not allowed for this argument type"); + } + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + end = parse_chrono_format(begin, end, detail::chrono_format_checker()); + return {begin, end}; + } + +public: + FMT_CONSTEXPR auto parse(basic_format_parse_context &ctx) -> decltype(ctx.begin()) { + auto range = do_parse(ctx); + format_str = basic_string_view(&*range.begin, detail::to_unsigned(range.end - range.begin)); + return range.end; + } + + template + auto format(const duration &d, FormatContext &ctx) const -> decltype(ctx.out()) { + auto specs_copy = specs; + auto precision_copy = precision; + auto begin = format_str.begin(), end = format_str.end(); + // As a possible future optimization, we could avoid extra copying if + // width is not specified. 
+ basic_memory_buffer buf; + auto out = std::back_inserter(buf); + detail::handle_dynamic_spec(specs_copy.width, width_ref, ctx); + detail::handle_dynamic_spec(precision_copy, precision_ref, ctx); + if (begin == end || *begin == '}') { + out = detail::format_duration_value(out, d.count(), precision_copy); + detail::format_duration_unit(out); + } else { + detail::chrono_formatter f(ctx, out, d); + f.precision = precision_copy; + f.localized = localized; + detail::parse_chrono_format(begin, end, f); + } + return detail::write(ctx.out(), basic_string_view(buf.data(), buf.size()), specs_copy); + } }; FMT_MODULE_EXPORT_END FMT_END_NAMESPACE -#endif // FMT_CHRONO_H_ +#endif // FMT_CHRONO_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/color.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/color.h index 3d5490e87..a065e4632 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/color.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/color.h @@ -12,514 +12,507 @@ // __declspec(deprecated) is broken in some MSVC versions. 
#if FMT_MSC_VER -# define FMT_DEPRECATED_NONMSVC +#define FMT_DEPRECATED_NONMSVC #else -# define FMT_DEPRECATED_NONMSVC FMT_DEPRECATED +#define FMT_DEPRECATED_NONMSVC FMT_DEPRECATED #endif FMT_BEGIN_NAMESPACE FMT_MODULE_EXPORT_BEGIN enum class color : uint32_t { - alice_blue = 0xF0F8FF, // rgb(240,248,255) - antique_white = 0xFAEBD7, // rgb(250,235,215) - aqua = 0x00FFFF, // rgb(0,255,255) - aquamarine = 0x7FFFD4, // rgb(127,255,212) - azure = 0xF0FFFF, // rgb(240,255,255) - beige = 0xF5F5DC, // rgb(245,245,220) - bisque = 0xFFE4C4, // rgb(255,228,196) - black = 0x000000, // rgb(0,0,0) - blanched_almond = 0xFFEBCD, // rgb(255,235,205) - blue = 0x0000FF, // rgb(0,0,255) - blue_violet = 0x8A2BE2, // rgb(138,43,226) - brown = 0xA52A2A, // rgb(165,42,42) - burly_wood = 0xDEB887, // rgb(222,184,135) - cadet_blue = 0x5F9EA0, // rgb(95,158,160) - chartreuse = 0x7FFF00, // rgb(127,255,0) - chocolate = 0xD2691E, // rgb(210,105,30) - coral = 0xFF7F50, // rgb(255,127,80) - cornflower_blue = 0x6495ED, // rgb(100,149,237) - cornsilk = 0xFFF8DC, // rgb(255,248,220) - crimson = 0xDC143C, // rgb(220,20,60) - cyan = 0x00FFFF, // rgb(0,255,255) - dark_blue = 0x00008B, // rgb(0,0,139) - dark_cyan = 0x008B8B, // rgb(0,139,139) - dark_golden_rod = 0xB8860B, // rgb(184,134,11) - dark_gray = 0xA9A9A9, // rgb(169,169,169) - dark_green = 0x006400, // rgb(0,100,0) - dark_khaki = 0xBDB76B, // rgb(189,183,107) - dark_magenta = 0x8B008B, // rgb(139,0,139) - dark_olive_green = 0x556B2F, // rgb(85,107,47) - dark_orange = 0xFF8C00, // rgb(255,140,0) - dark_orchid = 0x9932CC, // rgb(153,50,204) - dark_red = 0x8B0000, // rgb(139,0,0) - dark_salmon = 0xE9967A, // rgb(233,150,122) - dark_sea_green = 0x8FBC8F, // rgb(143,188,143) - dark_slate_blue = 0x483D8B, // rgb(72,61,139) - dark_slate_gray = 0x2F4F4F, // rgb(47,79,79) - dark_turquoise = 0x00CED1, // rgb(0,206,209) - dark_violet = 0x9400D3, // rgb(148,0,211) - deep_pink = 0xFF1493, // rgb(255,20,147) - deep_sky_blue = 0x00BFFF, // rgb(0,191,255) - 
dim_gray = 0x696969, // rgb(105,105,105) - dodger_blue = 0x1E90FF, // rgb(30,144,255) - fire_brick = 0xB22222, // rgb(178,34,34) - floral_white = 0xFFFAF0, // rgb(255,250,240) - forest_green = 0x228B22, // rgb(34,139,34) - fuchsia = 0xFF00FF, // rgb(255,0,255) - gainsboro = 0xDCDCDC, // rgb(220,220,220) - ghost_white = 0xF8F8FF, // rgb(248,248,255) - gold = 0xFFD700, // rgb(255,215,0) - golden_rod = 0xDAA520, // rgb(218,165,32) - gray = 0x808080, // rgb(128,128,128) - green = 0x008000, // rgb(0,128,0) - green_yellow = 0xADFF2F, // rgb(173,255,47) - honey_dew = 0xF0FFF0, // rgb(240,255,240) - hot_pink = 0xFF69B4, // rgb(255,105,180) - indian_red = 0xCD5C5C, // rgb(205,92,92) - indigo = 0x4B0082, // rgb(75,0,130) - ivory = 0xFFFFF0, // rgb(255,255,240) - khaki = 0xF0E68C, // rgb(240,230,140) - lavender = 0xE6E6FA, // rgb(230,230,250) - lavender_blush = 0xFFF0F5, // rgb(255,240,245) - lawn_green = 0x7CFC00, // rgb(124,252,0) - lemon_chiffon = 0xFFFACD, // rgb(255,250,205) - light_blue = 0xADD8E6, // rgb(173,216,230) - light_coral = 0xF08080, // rgb(240,128,128) - light_cyan = 0xE0FFFF, // rgb(224,255,255) - light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210) - light_gray = 0xD3D3D3, // rgb(211,211,211) - light_green = 0x90EE90, // rgb(144,238,144) - light_pink = 0xFFB6C1, // rgb(255,182,193) - light_salmon = 0xFFA07A, // rgb(255,160,122) - light_sea_green = 0x20B2AA, // rgb(32,178,170) - light_sky_blue = 0x87CEFA, // rgb(135,206,250) - light_slate_gray = 0x778899, // rgb(119,136,153) - light_steel_blue = 0xB0C4DE, // rgb(176,196,222) - light_yellow = 0xFFFFE0, // rgb(255,255,224) - lime = 0x00FF00, // rgb(0,255,0) - lime_green = 0x32CD32, // rgb(50,205,50) - linen = 0xFAF0E6, // rgb(250,240,230) - magenta = 0xFF00FF, // rgb(255,0,255) - maroon = 0x800000, // rgb(128,0,0) - medium_aquamarine = 0x66CDAA, // rgb(102,205,170) - medium_blue = 0x0000CD, // rgb(0,0,205) - medium_orchid = 0xBA55D3, // rgb(186,85,211) - medium_purple = 0x9370DB, // rgb(147,112,219) - 
medium_sea_green = 0x3CB371, // rgb(60,179,113) - medium_slate_blue = 0x7B68EE, // rgb(123,104,238) - medium_spring_green = 0x00FA9A, // rgb(0,250,154) - medium_turquoise = 0x48D1CC, // rgb(72,209,204) - medium_violet_red = 0xC71585, // rgb(199,21,133) - midnight_blue = 0x191970, // rgb(25,25,112) - mint_cream = 0xF5FFFA, // rgb(245,255,250) - misty_rose = 0xFFE4E1, // rgb(255,228,225) - moccasin = 0xFFE4B5, // rgb(255,228,181) - navajo_white = 0xFFDEAD, // rgb(255,222,173) - navy = 0x000080, // rgb(0,0,128) - old_lace = 0xFDF5E6, // rgb(253,245,230) - olive = 0x808000, // rgb(128,128,0) - olive_drab = 0x6B8E23, // rgb(107,142,35) - orange = 0xFFA500, // rgb(255,165,0) - orange_red = 0xFF4500, // rgb(255,69,0) - orchid = 0xDA70D6, // rgb(218,112,214) - pale_golden_rod = 0xEEE8AA, // rgb(238,232,170) - pale_green = 0x98FB98, // rgb(152,251,152) - pale_turquoise = 0xAFEEEE, // rgb(175,238,238) - pale_violet_red = 0xDB7093, // rgb(219,112,147) - papaya_whip = 0xFFEFD5, // rgb(255,239,213) - peach_puff = 0xFFDAB9, // rgb(255,218,185) - peru = 0xCD853F, // rgb(205,133,63) - pink = 0xFFC0CB, // rgb(255,192,203) - plum = 0xDDA0DD, // rgb(221,160,221) - powder_blue = 0xB0E0E6, // rgb(176,224,230) - purple = 0x800080, // rgb(128,0,128) - rebecca_purple = 0x663399, // rgb(102,51,153) - red = 0xFF0000, // rgb(255,0,0) - rosy_brown = 0xBC8F8F, // rgb(188,143,143) - royal_blue = 0x4169E1, // rgb(65,105,225) - saddle_brown = 0x8B4513, // rgb(139,69,19) - salmon = 0xFA8072, // rgb(250,128,114) - sandy_brown = 0xF4A460, // rgb(244,164,96) - sea_green = 0x2E8B57, // rgb(46,139,87) - sea_shell = 0xFFF5EE, // rgb(255,245,238) - sienna = 0xA0522D, // rgb(160,82,45) - silver = 0xC0C0C0, // rgb(192,192,192) - sky_blue = 0x87CEEB, // rgb(135,206,235) - slate_blue = 0x6A5ACD, // rgb(106,90,205) - slate_gray = 0x708090, // rgb(112,128,144) - snow = 0xFFFAFA, // rgb(255,250,250) - spring_green = 0x00FF7F, // rgb(0,255,127) - steel_blue = 0x4682B4, // rgb(70,130,180) - tan = 0xD2B48C, // 
rgb(210,180,140) - teal = 0x008080, // rgb(0,128,128) - thistle = 0xD8BFD8, // rgb(216,191,216) - tomato = 0xFF6347, // rgb(255,99,71) - turquoise = 0x40E0D0, // rgb(64,224,208) - violet = 0xEE82EE, // rgb(238,130,238) - wheat = 0xF5DEB3, // rgb(245,222,179) - white = 0xFFFFFF, // rgb(255,255,255) - white_smoke = 0xF5F5F5, // rgb(245,245,245) - yellow = 0xFFFF00, // rgb(255,255,0) - yellow_green = 0x9ACD32 // rgb(154,205,50) -}; // enum class color + alice_blue = 0xF0F8FF, // rgb(240,248,255) + antique_white = 0xFAEBD7, // rgb(250,235,215) + aqua = 0x00FFFF, // rgb(0,255,255) + aquamarine = 0x7FFFD4, // rgb(127,255,212) + azure = 0xF0FFFF, // rgb(240,255,255) + beige = 0xF5F5DC, // rgb(245,245,220) + bisque = 0xFFE4C4, // rgb(255,228,196) + black = 0x000000, // rgb(0,0,0) + blanched_almond = 0xFFEBCD, // rgb(255,235,205) + blue = 0x0000FF, // rgb(0,0,255) + blue_violet = 0x8A2BE2, // rgb(138,43,226) + brown = 0xA52A2A, // rgb(165,42,42) + burly_wood = 0xDEB887, // rgb(222,184,135) + cadet_blue = 0x5F9EA0, // rgb(95,158,160) + chartreuse = 0x7FFF00, // rgb(127,255,0) + chocolate = 0xD2691E, // rgb(210,105,30) + coral = 0xFF7F50, // rgb(255,127,80) + cornflower_blue = 0x6495ED, // rgb(100,149,237) + cornsilk = 0xFFF8DC, // rgb(255,248,220) + crimson = 0xDC143C, // rgb(220,20,60) + cyan = 0x00FFFF, // rgb(0,255,255) + dark_blue = 0x00008B, // rgb(0,0,139) + dark_cyan = 0x008B8B, // rgb(0,139,139) + dark_golden_rod = 0xB8860B, // rgb(184,134,11) + dark_gray = 0xA9A9A9, // rgb(169,169,169) + dark_green = 0x006400, // rgb(0,100,0) + dark_khaki = 0xBDB76B, // rgb(189,183,107) + dark_magenta = 0x8B008B, // rgb(139,0,139) + dark_olive_green = 0x556B2F, // rgb(85,107,47) + dark_orange = 0xFF8C00, // rgb(255,140,0) + dark_orchid = 0x9932CC, // rgb(153,50,204) + dark_red = 0x8B0000, // rgb(139,0,0) + dark_salmon = 0xE9967A, // rgb(233,150,122) + dark_sea_green = 0x8FBC8F, // rgb(143,188,143) + dark_slate_blue = 0x483D8B, // rgb(72,61,139) + dark_slate_gray = 0x2F4F4F, // 
rgb(47,79,79) + dark_turquoise = 0x00CED1, // rgb(0,206,209) + dark_violet = 0x9400D3, // rgb(148,0,211) + deep_pink = 0xFF1493, // rgb(255,20,147) + deep_sky_blue = 0x00BFFF, // rgb(0,191,255) + dim_gray = 0x696969, // rgb(105,105,105) + dodger_blue = 0x1E90FF, // rgb(30,144,255) + fire_brick = 0xB22222, // rgb(178,34,34) + floral_white = 0xFFFAF0, // rgb(255,250,240) + forest_green = 0x228B22, // rgb(34,139,34) + fuchsia = 0xFF00FF, // rgb(255,0,255) + gainsboro = 0xDCDCDC, // rgb(220,220,220) + ghost_white = 0xF8F8FF, // rgb(248,248,255) + gold = 0xFFD700, // rgb(255,215,0) + golden_rod = 0xDAA520, // rgb(218,165,32) + gray = 0x808080, // rgb(128,128,128) + green = 0x008000, // rgb(0,128,0) + green_yellow = 0xADFF2F, // rgb(173,255,47) + honey_dew = 0xF0FFF0, // rgb(240,255,240) + hot_pink = 0xFF69B4, // rgb(255,105,180) + indian_red = 0xCD5C5C, // rgb(205,92,92) + indigo = 0x4B0082, // rgb(75,0,130) + ivory = 0xFFFFF0, // rgb(255,255,240) + khaki = 0xF0E68C, // rgb(240,230,140) + lavender = 0xE6E6FA, // rgb(230,230,250) + lavender_blush = 0xFFF0F5, // rgb(255,240,245) + lawn_green = 0x7CFC00, // rgb(124,252,0) + lemon_chiffon = 0xFFFACD, // rgb(255,250,205) + light_blue = 0xADD8E6, // rgb(173,216,230) + light_coral = 0xF08080, // rgb(240,128,128) + light_cyan = 0xE0FFFF, // rgb(224,255,255) + light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210) + light_gray = 0xD3D3D3, // rgb(211,211,211) + light_green = 0x90EE90, // rgb(144,238,144) + light_pink = 0xFFB6C1, // rgb(255,182,193) + light_salmon = 0xFFA07A, // rgb(255,160,122) + light_sea_green = 0x20B2AA, // rgb(32,178,170) + light_sky_blue = 0x87CEFA, // rgb(135,206,250) + light_slate_gray = 0x778899, // rgb(119,136,153) + light_steel_blue = 0xB0C4DE, // rgb(176,196,222) + light_yellow = 0xFFFFE0, // rgb(255,255,224) + lime = 0x00FF00, // rgb(0,255,0) + lime_green = 0x32CD32, // rgb(50,205,50) + linen = 0xFAF0E6, // rgb(250,240,230) + magenta = 0xFF00FF, // rgb(255,0,255) + maroon = 0x800000, // rgb(128,0,0) 
+ medium_aquamarine = 0x66CDAA, // rgb(102,205,170) + medium_blue = 0x0000CD, // rgb(0,0,205) + medium_orchid = 0xBA55D3, // rgb(186,85,211) + medium_purple = 0x9370DB, // rgb(147,112,219) + medium_sea_green = 0x3CB371, // rgb(60,179,113) + medium_slate_blue = 0x7B68EE, // rgb(123,104,238) + medium_spring_green = 0x00FA9A, // rgb(0,250,154) + medium_turquoise = 0x48D1CC, // rgb(72,209,204) + medium_violet_red = 0xC71585, // rgb(199,21,133) + midnight_blue = 0x191970, // rgb(25,25,112) + mint_cream = 0xF5FFFA, // rgb(245,255,250) + misty_rose = 0xFFE4E1, // rgb(255,228,225) + moccasin = 0xFFE4B5, // rgb(255,228,181) + navajo_white = 0xFFDEAD, // rgb(255,222,173) + navy = 0x000080, // rgb(0,0,128) + old_lace = 0xFDF5E6, // rgb(253,245,230) + olive = 0x808000, // rgb(128,128,0) + olive_drab = 0x6B8E23, // rgb(107,142,35) + orange = 0xFFA500, // rgb(255,165,0) + orange_red = 0xFF4500, // rgb(255,69,0) + orchid = 0xDA70D6, // rgb(218,112,214) + pale_golden_rod = 0xEEE8AA, // rgb(238,232,170) + pale_green = 0x98FB98, // rgb(152,251,152) + pale_turquoise = 0xAFEEEE, // rgb(175,238,238) + pale_violet_red = 0xDB7093, // rgb(219,112,147) + papaya_whip = 0xFFEFD5, // rgb(255,239,213) + peach_puff = 0xFFDAB9, // rgb(255,218,185) + peru = 0xCD853F, // rgb(205,133,63) + pink = 0xFFC0CB, // rgb(255,192,203) + plum = 0xDDA0DD, // rgb(221,160,221) + powder_blue = 0xB0E0E6, // rgb(176,224,230) + purple = 0x800080, // rgb(128,0,128) + rebecca_purple = 0x663399, // rgb(102,51,153) + red = 0xFF0000, // rgb(255,0,0) + rosy_brown = 0xBC8F8F, // rgb(188,143,143) + royal_blue = 0x4169E1, // rgb(65,105,225) + saddle_brown = 0x8B4513, // rgb(139,69,19) + salmon = 0xFA8072, // rgb(250,128,114) + sandy_brown = 0xF4A460, // rgb(244,164,96) + sea_green = 0x2E8B57, // rgb(46,139,87) + sea_shell = 0xFFF5EE, // rgb(255,245,238) + sienna = 0xA0522D, // rgb(160,82,45) + silver = 0xC0C0C0, // rgb(192,192,192) + sky_blue = 0x87CEEB, // rgb(135,206,235) + slate_blue = 0x6A5ACD, // rgb(106,90,205) + 
slate_gray = 0x708090, // rgb(112,128,144) + snow = 0xFFFAFA, // rgb(255,250,250) + spring_green = 0x00FF7F, // rgb(0,255,127) + steel_blue = 0x4682B4, // rgb(70,130,180) + tan = 0xD2B48C, // rgb(210,180,140) + teal = 0x008080, // rgb(0,128,128) + thistle = 0xD8BFD8, // rgb(216,191,216) + tomato = 0xFF6347, // rgb(255,99,71) + turquoise = 0x40E0D0, // rgb(64,224,208) + violet = 0xEE82EE, // rgb(238,130,238) + wheat = 0xF5DEB3, // rgb(245,222,179) + white = 0xFFFFFF, // rgb(255,255,255) + white_smoke = 0xF5F5F5, // rgb(245,245,245) + yellow = 0xFFFF00, // rgb(255,255,0) + yellow_green = 0x9ACD32 // rgb(154,205,50) +}; // enum class color enum class terminal_color : uint8_t { - black = 30, - red, - green, - yellow, - blue, - magenta, - cyan, - white, - bright_black = 90, - bright_red, - bright_green, - bright_yellow, - bright_blue, - bright_magenta, - bright_cyan, - bright_white + black = 30, + red, + green, + yellow, + blue, + magenta, + cyan, + white, + bright_black = 90, + bright_red, + bright_green, + bright_yellow, + bright_blue, + bright_magenta, + bright_cyan, + bright_white }; -enum class emphasis : uint8_t { - bold = 1, - italic = 1 << 1, - underline = 1 << 2, - strikethrough = 1 << 3 -}; +enum class emphasis : uint8_t { bold = 1, italic = 1 << 1, underline = 1 << 2, strikethrough = 1 << 3 }; // rgb is a struct for red, green and blue colors. // Using the name "rgb" makes some editors show the color in a tooltip. 
struct rgb { - FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {} - FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {} - FMT_CONSTEXPR rgb(uint32_t hex) - : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {} - FMT_CONSTEXPR rgb(color hex) - : r((uint32_t(hex) >> 16) & 0xFF), - g((uint32_t(hex) >> 8) & 0xFF), - b(uint32_t(hex) & 0xFF) {} - uint8_t r; - uint8_t g; - uint8_t b; + FMT_CONSTEXPR rgb() : r(0), g(0), b(0) { + } + FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) { + } + FMT_CONSTEXPR rgb(uint32_t hex) : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) { + } + FMT_CONSTEXPR rgb(color hex) + : r((uint32_t(hex) >> 16) & 0xFF), g((uint32_t(hex) >> 8) & 0xFF), b(uint32_t(hex) & 0xFF) { + } + uint8_t r; + uint8_t g; + uint8_t b; }; FMT_BEGIN_DETAIL_NAMESPACE // color is a struct of either a rgb color or a terminal color. struct color_type { - FMT_CONSTEXPR color_type() FMT_NOEXCEPT : is_rgb(), value{} {} - FMT_CONSTEXPR color_type(color rgb_color) FMT_NOEXCEPT : is_rgb(true), - value{} { - value.rgb_color = static_cast(rgb_color); - } - FMT_CONSTEXPR color_type(rgb rgb_color) FMT_NOEXCEPT : is_rgb(true), value{} { - value.rgb_color = (static_cast(rgb_color.r) << 16) | - (static_cast(rgb_color.g) << 8) | rgb_color.b; - } - FMT_CONSTEXPR color_type(terminal_color term_color) FMT_NOEXCEPT : is_rgb(), - value{} { - value.term_color = static_cast(term_color); - } - bool is_rgb; - union color_union { - uint8_t term_color; - uint32_t rgb_color; - } value; + FMT_CONSTEXPR color_type() FMT_NOEXCEPT : is_rgb(), value {} { + } + FMT_CONSTEXPR color_type(color rgb_color) FMT_NOEXCEPT : is_rgb(true), value {} { + value.rgb_color = static_cast(rgb_color); + } + FMT_CONSTEXPR color_type(rgb rgb_color) FMT_NOEXCEPT : is_rgb(true), value {} { + value.rgb_color = + (static_cast(rgb_color.r) << 16) | (static_cast(rgb_color.g) << 8) | rgb_color.b; + } + FMT_CONSTEXPR color_type(terminal_color term_color) 
FMT_NOEXCEPT : is_rgb(), value {} { + value.term_color = static_cast(term_color); + } + bool is_rgb; + union color_union { + uint8_t term_color; + uint32_t rgb_color; + } value; }; FMT_END_DETAIL_NAMESPACE /** A text style consisting of foreground and background colors and emphasis. */ class text_style { - public: - FMT_CONSTEXPR text_style(emphasis em = emphasis()) FMT_NOEXCEPT - : set_foreground_color(), - set_background_color(), - ems(em) {} - - FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) { - if (!set_foreground_color) { - set_foreground_color = rhs.set_foreground_color; - foreground_color = rhs.foreground_color; - } else if (rhs.set_foreground_color) { - if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) - FMT_THROW(format_error("can't OR a terminal color")); - foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color; - } - - if (!set_background_color) { - set_background_color = rhs.set_background_color; - background_color = rhs.background_color; - } else if (rhs.set_background_color) { - if (!background_color.is_rgb || !rhs.background_color.is_rgb) - FMT_THROW(format_error("can't OR a terminal color")); - background_color.value.rgb_color |= rhs.background_color.value.rgb_color; - } - - ems = static_cast(static_cast(ems) | - static_cast(rhs.ems)); - return *this; - } - - friend FMT_CONSTEXPR text_style operator|(text_style lhs, - const text_style& rhs) { - return lhs |= rhs; - } - - FMT_DEPRECATED_NONMSVC FMT_CONSTEXPR text_style& operator&=( - const text_style& rhs) { - return and_assign(rhs); - } - - FMT_DEPRECATED_NONMSVC friend FMT_CONSTEXPR text_style - operator&(text_style lhs, const text_style& rhs) { - return lhs.and_assign(rhs); - } - - FMT_CONSTEXPR bool has_foreground() const FMT_NOEXCEPT { - return set_foreground_color; - } - FMT_CONSTEXPR bool has_background() const FMT_NOEXCEPT { - return set_background_color; - } - FMT_CONSTEXPR bool has_emphasis() const FMT_NOEXCEPT { - return static_cast(ems) != 0; - } 
- FMT_CONSTEXPR detail::color_type get_foreground() const FMT_NOEXCEPT { - FMT_ASSERT(has_foreground(), "no foreground specified for this style"); - return foreground_color; - } - FMT_CONSTEXPR detail::color_type get_background() const FMT_NOEXCEPT { - FMT_ASSERT(has_background(), "no background specified for this style"); - return background_color; - } - FMT_CONSTEXPR emphasis get_emphasis() const FMT_NOEXCEPT { - FMT_ASSERT(has_emphasis(), "no emphasis specified for this style"); - return ems; - } - - private: - FMT_CONSTEXPR text_style(bool is_foreground, - detail::color_type text_color) FMT_NOEXCEPT - : set_foreground_color(), - set_background_color(), - ems() { - if (is_foreground) { - foreground_color = text_color; - set_foreground_color = true; - } else { - background_color = text_color; - set_background_color = true; - } - } - - // DEPRECATED! - FMT_CONSTEXPR text_style& and_assign(const text_style& rhs) { - if (!set_foreground_color) { - set_foreground_color = rhs.set_foreground_color; - foreground_color = rhs.foreground_color; - } else if (rhs.set_foreground_color) { - if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) - FMT_THROW(format_error("can't AND a terminal color")); - foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color; - } - - if (!set_background_color) { - set_background_color = rhs.set_background_color; - background_color = rhs.background_color; - } else if (rhs.set_background_color) { - if (!background_color.is_rgb || !rhs.background_color.is_rgb) - FMT_THROW(format_error("can't AND a terminal color")); - background_color.value.rgb_color &= rhs.background_color.value.rgb_color; - } - - ems = static_cast(static_cast(ems) & - static_cast(rhs.ems)); - return *this; - } - - friend FMT_CONSTEXPR_DECL text_style fg(detail::color_type foreground) - FMT_NOEXCEPT; - - friend FMT_CONSTEXPR_DECL text_style bg(detail::color_type background) - FMT_NOEXCEPT; - - detail::color_type foreground_color; - detail::color_type 
background_color; - bool set_foreground_color; - bool set_background_color; - emphasis ems; +public: + FMT_CONSTEXPR text_style(emphasis em = emphasis()) FMT_NOEXCEPT : set_foreground_color(), + set_background_color(), + ems(em) { + } + + FMT_CONSTEXPR text_style &operator|=(const text_style &rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + background_color.value.rgb_color |= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) | static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR text_style operator|(text_style lhs, const text_style &rhs) { + return lhs |= rhs; + } + + FMT_DEPRECATED_NONMSVC FMT_CONSTEXPR text_style &operator&=(const text_style &rhs) { + return and_assign(rhs); + } + + FMT_DEPRECATED_NONMSVC friend FMT_CONSTEXPR text_style operator&(text_style lhs, const text_style &rhs) { + return lhs.and_assign(rhs); + } + + FMT_CONSTEXPR bool has_foreground() const FMT_NOEXCEPT { + return set_foreground_color; + } + FMT_CONSTEXPR bool has_background() const FMT_NOEXCEPT { + return set_background_color; + } + FMT_CONSTEXPR bool has_emphasis() const FMT_NOEXCEPT { + return static_cast(ems) != 0; + } + FMT_CONSTEXPR detail::color_type get_foreground() const FMT_NOEXCEPT { + FMT_ASSERT(has_foreground(), "no foreground specified for this style"); + return foreground_color; + } + FMT_CONSTEXPR detail::color_type 
get_background() const FMT_NOEXCEPT { + FMT_ASSERT(has_background(), "no background specified for this style"); + return background_color; + } + FMT_CONSTEXPR emphasis get_emphasis() const FMT_NOEXCEPT { + FMT_ASSERT(has_emphasis(), "no emphasis specified for this style"); + return ems; + } + +private: + FMT_CONSTEXPR text_style(bool is_foreground, detail::color_type text_color) FMT_NOEXCEPT : set_foreground_color(), + set_background_color(), + ems() { + if (is_foreground) { + foreground_color = text_color; + set_foreground_color = true; + } else { + background_color = text_color; + set_background_color = true; + } + } + + // DEPRECATED! + FMT_CONSTEXPR text_style &and_assign(const text_style &rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + background_color.value.rgb_color &= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) & static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR_DECL text_style fg(detail::color_type foreground) FMT_NOEXCEPT; + + friend FMT_CONSTEXPR_DECL text_style bg(detail::color_type background) FMT_NOEXCEPT; + + detail::color_type foreground_color; + detail::color_type background_color; + bool set_foreground_color; + bool set_background_color; + emphasis ems; }; /** Creates a text style from the foreground (text) color. 
*/ FMT_CONSTEXPR inline text_style fg(detail::color_type foreground) FMT_NOEXCEPT { - return text_style(true, foreground); + return text_style(true, foreground); } /** Creates a text style from the background color. */ FMT_CONSTEXPR inline text_style bg(detail::color_type background) FMT_NOEXCEPT { - return text_style(false, background); + return text_style(false, background); } -FMT_CONSTEXPR inline text_style operator|(emphasis lhs, - emphasis rhs) FMT_NOEXCEPT { - return text_style(lhs) | rhs; +FMT_CONSTEXPR inline text_style operator|(emphasis lhs, emphasis rhs) FMT_NOEXCEPT { + return text_style(lhs) | rhs; } FMT_BEGIN_DETAIL_NAMESPACE -template struct ansi_color_escape { - FMT_CONSTEXPR ansi_color_escape(detail::color_type text_color, - const char* esc) FMT_NOEXCEPT { - // If we have a terminal color, we need to output another escape code - // sequence. - if (!text_color.is_rgb) { - bool is_background = esc == string_view("\x1b[48;2;"); - uint32_t value = text_color.value.term_color; - // Background ASCII codes are the same as the foreground ones but with - // 10 more. 
- if (is_background) value += 10u; - - size_t index = 0; - buffer[index++] = static_cast('\x1b'); - buffer[index++] = static_cast('['); - - if (value >= 100u) { - buffer[index++] = static_cast('1'); - value %= 100u; - } - buffer[index++] = static_cast('0' + value / 10u); - buffer[index++] = static_cast('0' + value % 10u); - - buffer[index++] = static_cast('m'); - buffer[index++] = static_cast('\0'); - return; - } - - for (int i = 0; i < 7; i++) { - buffer[i] = static_cast(esc[i]); - } - rgb color(text_color.value.rgb_color); - to_esc(color.r, buffer + 7, ';'); - to_esc(color.g, buffer + 11, ';'); - to_esc(color.b, buffer + 15, 'm'); - buffer[19] = static_cast(0); - } - FMT_CONSTEXPR ansi_color_escape(emphasis em) FMT_NOEXCEPT { - uint8_t em_codes[4] = {}; - uint8_t em_bits = static_cast(em); - if (em_bits & static_cast(emphasis::bold)) em_codes[0] = 1; - if (em_bits & static_cast(emphasis::italic)) em_codes[1] = 3; - if (em_bits & static_cast(emphasis::underline)) em_codes[2] = 4; - if (em_bits & static_cast(emphasis::strikethrough)) - em_codes[3] = 9; - - size_t index = 0; - for (int i = 0; i < 4; ++i) { - if (!em_codes[i]) continue; - buffer[index++] = static_cast('\x1b'); - buffer[index++] = static_cast('['); - buffer[index++] = static_cast('0' + em_codes[i]); - buffer[index++] = static_cast('m'); - } - buffer[index++] = static_cast(0); - } - FMT_CONSTEXPR operator const Char*() const FMT_NOEXCEPT { return buffer; } - - FMT_CONSTEXPR const Char* begin() const FMT_NOEXCEPT { return buffer; } - FMT_CONSTEXPR_CHAR_TRAITS const Char* end() const FMT_NOEXCEPT { - return buffer + std::char_traits::length(buffer); - } - - private: - Char buffer[7u + 3u * 4u + 1u]; - - static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out, - char delimiter) FMT_NOEXCEPT { - out[0] = static_cast('0' + c / 100); - out[1] = static_cast('0' + c / 10 % 10); - out[2] = static_cast('0' + c % 10); - out[3] = static_cast(delimiter); - } +template +struct ansi_color_escape { + FMT_CONSTEXPR 
ansi_color_escape(detail::color_type text_color, const char *esc) FMT_NOEXCEPT { + // If we have a terminal color, we need to output another escape code + // sequence. + if (!text_color.is_rgb) { + bool is_background = esc == string_view("\x1b[48;2;"); + uint32_t value = text_color.value.term_color; + // Background ASCII codes are the same as the foreground ones but + // with 10 more. + if (is_background) + value += 10u; + + size_t index = 0; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + + if (value >= 100u) { + buffer[index++] = static_cast('1'); + value %= 100u; + } + buffer[index++] = static_cast('0' + value / 10u); + buffer[index++] = static_cast('0' + value % 10u); + + buffer[index++] = static_cast('m'); + buffer[index++] = static_cast('\0'); + return; + } + + for (int i = 0; i < 7; i++) { + buffer[i] = static_cast(esc[i]); + } + rgb color(text_color.value.rgb_color); + to_esc(color.r, buffer + 7, ';'); + to_esc(color.g, buffer + 11, ';'); + to_esc(color.b, buffer + 15, 'm'); + buffer[19] = static_cast(0); + } + FMT_CONSTEXPR ansi_color_escape(emphasis em) FMT_NOEXCEPT { + uint8_t em_codes[4] = {}; + uint8_t em_bits = static_cast(em); + if (em_bits & static_cast(emphasis::bold)) + em_codes[0] = 1; + if (em_bits & static_cast(emphasis::italic)) + em_codes[1] = 3; + if (em_bits & static_cast(emphasis::underline)) + em_codes[2] = 4; + if (em_bits & static_cast(emphasis::strikethrough)) + em_codes[3] = 9; + + size_t index = 0; + for (int i = 0; i < 4; ++i) { + if (!em_codes[i]) + continue; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + buffer[index++] = static_cast('0' + em_codes[i]); + buffer[index++] = static_cast('m'); + } + buffer[index++] = static_cast(0); + } + FMT_CONSTEXPR operator const Char *() const FMT_NOEXCEPT { + return buffer; + } + + FMT_CONSTEXPR const Char *begin() const FMT_NOEXCEPT { + return buffer; + } + FMT_CONSTEXPR_CHAR_TRAITS const Char *end() const FMT_NOEXCEPT { + 
return buffer + std::char_traits::length(buffer); + } + +private: + Char buffer[7u + 3u * 4u + 1u]; + + static FMT_CONSTEXPR void to_esc(uint8_t c, Char *out, char delimiter) FMT_NOEXCEPT { + out[0] = static_cast('0' + c / 100); + out[1] = static_cast('0' + c / 10 % 10); + out[2] = static_cast('0' + c % 10); + out[3] = static_cast(delimiter); + } }; template -FMT_CONSTEXPR ansi_color_escape make_foreground_color( - detail::color_type foreground) FMT_NOEXCEPT { - return ansi_color_escape(foreground, "\x1b[38;2;"); +FMT_CONSTEXPR ansi_color_escape make_foreground_color(detail::color_type foreground) FMT_NOEXCEPT { + return ansi_color_escape(foreground, "\x1b[38;2;"); } template -FMT_CONSTEXPR ansi_color_escape make_background_color( - detail::color_type background) FMT_NOEXCEPT { - return ansi_color_escape(background, "\x1b[48;2;"); +FMT_CONSTEXPR ansi_color_escape make_background_color(detail::color_type background) FMT_NOEXCEPT { + return ansi_color_escape(background, "\x1b[48;2;"); } template FMT_CONSTEXPR ansi_color_escape make_emphasis(emphasis em) FMT_NOEXCEPT { - return ansi_color_escape(em); + return ansi_color_escape(em); } template -inline void fputs(const Char* chars, FILE* stream) FMT_NOEXCEPT { - std::fputs(chars, stream); +inline void fputs(const Char *chars, FILE *stream) FMT_NOEXCEPT { + std::fputs(chars, stream); } template <> -inline void fputs(const wchar_t* chars, FILE* stream) FMT_NOEXCEPT { - std::fputws(chars, stream); +inline void fputs(const wchar_t *chars, FILE *stream) FMT_NOEXCEPT { + std::fputws(chars, stream); } -template inline void reset_color(FILE* stream) FMT_NOEXCEPT { - fputs("\x1b[0m", stream); +template +inline void reset_color(FILE *stream) FMT_NOEXCEPT { + fputs("\x1b[0m", stream); } -template <> inline void reset_color(FILE* stream) FMT_NOEXCEPT { - fputs(L"\x1b[0m", stream); +template <> +inline void reset_color(FILE *stream) FMT_NOEXCEPT { + fputs(L"\x1b[0m", stream); } template -inline void reset_color(buffer& buffer) 
FMT_NOEXCEPT { - auto reset_color = string_view("\x1b[0m"); - buffer.append(reset_color.begin(), reset_color.end()); +inline void reset_color(buffer &buffer) FMT_NOEXCEPT { + auto reset_color = string_view("\x1b[0m"); + buffer.append(reset_color.begin(), reset_color.end()); } template -void vformat_to(buffer& buf, const text_style& ts, - basic_string_view format_str, +void vformat_to(buffer &buf, const text_style &ts, basic_string_view format_str, basic_format_args>> args) { - bool has_style = false; - if (ts.has_emphasis()) { - has_style = true; - auto emphasis = detail::make_emphasis(ts.get_emphasis()); - buf.append(emphasis.begin(), emphasis.end()); - } - if (ts.has_foreground()) { - has_style = true; - auto foreground = detail::make_foreground_color(ts.get_foreground()); - buf.append(foreground.begin(), foreground.end()); - } - if (ts.has_background()) { - has_style = true; - auto background = detail::make_background_color(ts.get_background()); - buf.append(background.begin(), background.end()); - } - detail::vformat_to(buf, format_str, args, {}); - if (has_style) detail::reset_color(buf); + bool has_style = false; + if (ts.has_emphasis()) { + has_style = true; + auto emphasis = detail::make_emphasis(ts.get_emphasis()); + buf.append(emphasis.begin(), emphasis.end()); + } + if (ts.has_foreground()) { + has_style = true; + auto foreground = detail::make_foreground_color(ts.get_foreground()); + buf.append(foreground.begin(), foreground.end()); + } + if (ts.has_background()) { + has_style = true; + auto background = detail::make_background_color(ts.get_background()); + buf.append(background.begin(), background.end()); + } + detail::vformat_to(buf, format_str, args, {}); + if (has_style) + detail::reset_color(buf); } FMT_END_DETAIL_NAMESPACE template > -void vprint(std::FILE* f, const text_style& ts, const S& format, +void vprint(std::FILE *f, const text_style &ts, const S &format, basic_format_args>> args) { - basic_memory_buffer buf; - detail::vformat_to(buf, ts, 
to_string_view(format), args); - buf.push_back(Char(0)); - detail::fputs(buf.data(), f); + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format), args); + buf.push_back(Char(0)); + detail::fputs(buf.data(), f); } /** @@ -533,12 +526,9 @@ void vprint(std::FILE* f, const text_style& ts, const S& format, "Elapsed time: {0:.2f} seconds", 1.23); \endrst */ -template ::value)> -void print(std::FILE* f, const text_style& ts, const S& format_str, - const Args&... args) { - vprint(f, ts, format_str, - fmt::make_args_checked(format_str, args...)); +template ::value)> +void print(std::FILE *f, const text_style &ts, const S &format_str, const Args &...args) { + vprint(f, ts, format_str, fmt::make_args_checked(format_str, args...)); } /** @@ -552,19 +542,17 @@ void print(std::FILE* f, const text_style& ts, const S& format_str, "Elapsed time: {0:.2f} seconds", 1.23); \endrst */ -template ::value)> -void print(const text_style& ts, const S& format_str, const Args&... args) { - return print(stdout, ts, format_str, args...); +template ::value)> +void print(const text_style &ts, const S &format_str, const Args &...args) { + return print(stdout, ts, format_str, args...); } template > -inline std::basic_string vformat( - const text_style& ts, const S& format_str, - basic_format_args>> args) { - basic_memory_buffer buf; - detail::vformat_to(buf, ts, to_string_view(format_str), args); - return fmt::to_string(buf); +inline std::basic_string vformat(const text_style &ts, const S &format_str, + basic_format_args>> args) { + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format_str), args); + return fmt::to_string(buf); } /** @@ -580,23 +568,19 @@ inline std::basic_string vformat( \endrst */ template > -inline std::basic_string format(const text_style& ts, const S& format_str, - const Args&... 
args) { - return fmt::vformat(ts, to_string_view(format_str), - fmt::make_args_checked(format_str, args...)); +inline std::basic_string format(const text_style &ts, const S &format_str, const Args &...args) { + return fmt::vformat(ts, to_string_view(format_str), fmt::make_args_checked(format_str, args...)); } /** Formats a string with the given text_style and writes the output to ``out``. */ -template ::value)> -OutputIt vformat_to( - OutputIt out, const text_style& ts, basic_string_view format_str, - basic_format_args>> args) { - auto&& buf = detail::get_buffer(out); - detail::vformat_to(buf, ts, format_str, args); - return detail::get_iterator(buf); +template ::value)> +OutputIt vformat_to(OutputIt out, const text_style &ts, basic_string_view format_str, + basic_format_args>> args) { + auto &&buf = detail::get_buffer(out); + detail::vformat_to(buf, ts, format_str, args); + return detail::get_iterator(buf); } /** @@ -612,16 +596,13 @@ OutputIt vformat_to( \endrst */ template >::value&& - detail::is_string::value> -inline auto format_to(OutputIt out, const text_style& ts, const S& format_str, - Args&&... 
args) -> + bool enable = detail::is_output_iterator>::value &&detail::is_string::value> +inline auto format_to(OutputIt out, const text_style &ts, const S &format_str, Args &&...args) -> typename std::enable_if::type { - return vformat_to(out, ts, to_string_view(format_str), - fmt::make_args_checked(format_str, args...)); + return vformat_to(out, ts, to_string_view(format_str), fmt::make_args_checked(format_str, args...)); } FMT_MODULE_EXPORT_END FMT_END_NAMESPACE -#endif // FMT_COLOR_H_ +#endif // FMT_COLOR_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/compile.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/compile.h index 00000c92e..24b317b26 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/compile.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/compile.h @@ -16,125 +16,142 @@ namespace detail { // An output iterator that counts the number of objects written to it and // discards them. class counting_iterator { - private: - size_t count_; - - public: - using iterator_category = std::output_iterator_tag; - using difference_type = std::ptrdiff_t; - using pointer = void; - using reference = void; - using _Unchecked_type = counting_iterator; // Mark iterator as checked. 
- - struct value_type { - template void operator=(const T&) {} - }; - - counting_iterator() : count_(0) {} - - size_t count() const { return count_; } - - counting_iterator& operator++() { - ++count_; - return *this; - } - counting_iterator operator++(int) { - auto it = *this; - ++*this; - return it; - } - - friend counting_iterator operator+(counting_iterator it, difference_type n) { - it.count_ += static_cast(n); - return it; - } - - value_type operator*() const { return {}; } +private: + size_t count_; + +public: + using iterator_category = std::output_iterator_tag; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + using _Unchecked_type = counting_iterator; // Mark iterator as checked. + + struct value_type { + template + void operator=(const T &) { + } + }; + + counting_iterator() : count_(0) { + } + + size_t count() const { + return count_; + } + + counting_iterator &operator++() { + ++count_; + return *this; + } + counting_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } + + friend counting_iterator operator+(counting_iterator it, difference_type n) { + it.count_ += static_cast(n); + return it; + } + + value_type operator*() const { + return {}; + } }; template -inline counting_iterator copy_str(InputIt begin, InputIt end, - counting_iterator it) { - return it + (end - begin); +inline counting_iterator copy_str(InputIt begin, InputIt end, counting_iterator it) { + return it + (end - begin); } -template class truncating_iterator_base { - protected: - OutputIt out_; - size_t limit_; - size_t count_ = 0; - - truncating_iterator_base() : out_(), limit_(0) {} - - truncating_iterator_base(OutputIt out, size_t limit) - : out_(out), limit_(limit) {} - - public: - using iterator_category = std::output_iterator_tag; - using value_type = typename std::iterator_traits::value_type; - using difference_type = std::ptrdiff_t; - using pointer = void; - using reference = void; - using _Unchecked_type = - 
truncating_iterator_base; // Mark iterator as checked. - - OutputIt base() const { return out_; } - size_t count() const { return count_; } +template +class truncating_iterator_base { +protected: + OutputIt out_; + size_t limit_; + size_t count_ = 0; + + truncating_iterator_base() : out_(), limit_(0) { + } + + truncating_iterator_base(OutputIt out, size_t limit) : out_(out), limit_(limit) { + } + +public: + using iterator_category = std::output_iterator_tag; + using value_type = typename std::iterator_traits::value_type; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + using _Unchecked_type = truncating_iterator_base; // Mark iterator as checked. + + OutputIt base() const { + return out_; + } + size_t count() const { + return count_; + } }; // An output iterator that truncates the output and counts the number of objects // written to it. template ::value_type>::type> + typename Enable = typename std::is_void::value_type>::type> class truncating_iterator; template -class truncating_iterator - : public truncating_iterator_base { - mutable typename truncating_iterator_base::value_type blackhole_; +class truncating_iterator : public truncating_iterator_base { + mutable typename truncating_iterator_base::value_type blackhole_; - public: - using value_type = typename truncating_iterator_base::value_type; +public: + using value_type = typename truncating_iterator_base::value_type; - truncating_iterator() = default; + truncating_iterator() = default; - truncating_iterator(OutputIt out, size_t limit) - : truncating_iterator_base(out, limit) {} + truncating_iterator(OutputIt out, size_t limit) : truncating_iterator_base(out, limit) { + } - truncating_iterator& operator++() { - if (this->count_++ < this->limit_) ++this->out_; - return *this; - } + truncating_iterator &operator++() { + if (this->count_++ < this->limit_) + ++this->out_; + return *this; + } - truncating_iterator operator++(int) { - auto it = *this; - ++*this; - return 
it; - } + truncating_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } - value_type& operator*() const { - return this->count_ < this->limit_ ? *this->out_ : blackhole_; - } + value_type &operator*() const { + return this->count_ < this->limit_ ? *this->out_ : blackhole_; + } }; template -class truncating_iterator - : public truncating_iterator_base { - public: - truncating_iterator() = default; - - truncating_iterator(OutputIt out, size_t limit) - : truncating_iterator_base(out, limit) {} - - template truncating_iterator& operator=(T val) { - if (this->count_++ < this->limit_) *this->out_++ = val; - return *this; - } - - truncating_iterator& operator++() { return *this; } - truncating_iterator& operator++(int) { return *this; } - truncating_iterator& operator*() { return *this; } +class truncating_iterator : public truncating_iterator_base { +public: + truncating_iterator() = default; + + truncating_iterator(OutputIt out, size_t limit) : truncating_iterator_base(out, limit) { + } + + template + truncating_iterator &operator=(T val) { + if (this->count_++ < this->limit_) + *this->out_++ = val; + return *this; + } + + truncating_iterator &operator++() { + return *this; + } + truncating_iterator &operator++(int) { + return *this; + } + truncating_iterator &operator*() { + return *this; + } }; // A compile-time string which is compiled into fast formatting code. 
@@ -157,175 +174,177 @@ struct is_compiled_string : std::is_base_of {}; \endrst */ #ifdef __cpp_if_constexpr -# define FMT_COMPILE(s) \ - FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit) +#define FMT_COMPILE(s) FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit) #else -# define FMT_COMPILE(s) FMT_STRING(s) +#define FMT_COMPILE(s) FMT_STRING(s) #endif #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS -template Str> +template Str> struct udl_compiled_string : compiled_string { - using char_type = Char; - constexpr operator basic_string_view() const { - return {Str.data, N - 1}; - } + using char_type = Char; + constexpr operator basic_string_view() const { + return {Str.data, N - 1}; + } }; #endif template -const T& first(const T& value, const Tail&...) { - return value; +const T &first(const T &value, const Tail &...) { + return value; } #ifdef __cpp_if_constexpr -template struct type_list {}; +template +struct type_list {}; // Returns a reference to the argument at index N from [first, rest...]. template -constexpr const auto& get([[maybe_unused]] const T& first, - [[maybe_unused]] const Args&... 
rest) { - static_assert(N < 1 + sizeof...(Args), "index is out of bounds"); - if constexpr (N == 0) - return first; - else - return get(rest...); +constexpr const auto &get([[maybe_unused]] const T &first, [[maybe_unused]] const Args &...rest) { + static_assert(N < 1 + sizeof...(Args), "index is out of bounds"); + if constexpr (N == 0) + return first; + else + return get(rest...); } template -constexpr int get_arg_index_by_name(basic_string_view name, - type_list) { - return get_arg_index_by_name(name); +constexpr int get_arg_index_by_name(basic_string_view name, type_list) { + return get_arg_index_by_name(name); } -template struct get_type_impl; +template +struct get_type_impl; -template struct get_type_impl> { - using type = remove_cvref_t(std::declval()...))>; +template +struct get_type_impl> { + using type = remove_cvref_t(std::declval()...))>; }; template using get_type = typename get_type_impl::type; -template struct is_compiled_format : std::false_type {}; - -template struct text { - basic_string_view data; - using char_type = Char; +template +struct is_compiled_format : std::false_type {}; - template - constexpr OutputIt format(OutputIt out, const Args&...) const { - return write(out, data); - } +template +struct text { + basic_string_view data; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args &...) const { + return write(out, data); + } }; template struct is_compiled_format> : std::true_type {}; template -constexpr text make_text(basic_string_view s, size_t pos, - size_t size) { - return {{&s[pos], size}}; +constexpr text make_text(basic_string_view s, size_t pos, size_t size) { + return {{&s[pos], size}}; } -template struct code_unit { - Char value; - using char_type = Char; - - template - constexpr OutputIt format(OutputIt out, const Args&...) 
const { - return write(out, value); - } +template +struct code_unit { + Char value; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args &...) const { + return write(out, value); + } }; // This ensures that the argument type is convertible to `const T&`. template -constexpr const T& get_arg_checked(const Args&... args) { - const auto& arg = get(args...); - if constexpr (detail::is_named_arg>()) { - return arg.value; - } else { - return arg; - } +constexpr const T &get_arg_checked(const Args &...args) { + const auto &arg = get(args...); + if constexpr (detail::is_named_arg>()) { + return arg.value; + } else { + return arg; + } } template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument N. -template struct field { - using char_type = Char; +template +struct field { + using char_type = Char; - template - constexpr OutputIt format(OutputIt out, const Args&... args) const { - return write(out, get_arg_checked(args...)); - } + template + constexpr OutputIt format(OutputIt out, const Args &...args) const { + return write(out, get_arg_checked(args...)); + } }; template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument with name. -template struct runtime_named_field { - using char_type = Char; - basic_string_view name; - - template - constexpr static bool try_format_argument( - OutputIt& out, - // [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9 - [[maybe_unused]] basic_string_view arg_name, const T& arg) { - if constexpr (is_named_arg::type>::value) { - if (arg_name == arg.name) { - out = write(out, arg.value); - return true; - } - } - return false; - } - - template - constexpr OutputIt format(OutputIt out, const Args&... 
args) const { - bool found = (try_format_argument(out, name, args) || ...); - if (!found) { - throw format_error("argument with specified name is not found"); - } - return out; - } +template +struct runtime_named_field { + using char_type = Char; + basic_string_view name; + + template + constexpr static bool try_format_argument(OutputIt &out, + // [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9 + [[maybe_unused]] basic_string_view arg_name, const T &arg) { + if constexpr (is_named_arg::type>::value) { + if (arg_name == arg.name) { + out = write(out, arg.value); + return true; + } + } + return false; + } + + template + constexpr OutputIt format(OutputIt out, const Args &...args) const { + bool found = (try_format_argument(out, name, args) || ...); + if (!found) { + throw format_error("argument with specified name is not found"); + } + return out; + } }; template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument N and has format specifiers. -template struct spec_field { - using char_type = Char; - formatter fmt; - - template - constexpr FMT_INLINE OutputIt format(OutputIt out, - const Args&... args) const { - const auto& vargs = - fmt::make_format_args>(args...); - basic_format_context ctx(out, vargs); - return fmt.format(get_arg_checked(args...), ctx); - } +template +struct spec_field { + using char_type = Char; + formatter fmt; + + template + constexpr FMT_INLINE OutputIt format(OutputIt out, const Args &...args) const { + const auto &vargs = fmt::make_format_args>(args...); + basic_format_context ctx(out, vargs); + return fmt.format(get_arg_checked(args...), ctx); + } }; template struct is_compiled_format> : std::true_type {}; -template struct concat { - L lhs; - R rhs; - using char_type = typename L::char_type; - - template - constexpr OutputIt format(OutputIt out, const Args&... 
args) const { - out = lhs.format(out, args...); - return rhs.format(out, args...); - } +template +struct concat { + L lhs; + R rhs; + using char_type = typename L::char_type; + + template + constexpr OutputIt format(OutputIt out, const Args &...args) const { + out = lhs.format(out, args...); + return rhs.format(out, args...); + } }; template @@ -333,17 +352,18 @@ struct is_compiled_format> : std::true_type {}; template constexpr concat make_concat(L lhs, R rhs) { - return {lhs, rhs}; + return {lhs, rhs}; } struct unknown_format {}; template constexpr size_t parse_text(basic_string_view str, size_t pos) { - for (size_t size = str.size(); pos != size; ++pos) { - if (str[pos] == '{' || str[pos] == '}') break; - } - return pos; + for (size_t size = str.size(); pos != size; ++pos) { + if (str[pos] == '{' || str[pos] == '}') + break; + } + return pos; } template @@ -351,289 +371,252 @@ constexpr auto compile_format_string(S format_str); template constexpr auto parse_tail(T head, S format_str) { - if constexpr (POS != - basic_string_view(format_str).size()) { - constexpr auto tail = compile_format_string(format_str); - if constexpr (std::is_same, - unknown_format>()) - return tail; - else - return make_concat(head, tail); - } else { - return head; - } + if constexpr (POS != basic_string_view(format_str).size()) { + constexpr auto tail = compile_format_string(format_str); + if constexpr (std::is_same, unknown_format>()) + return tail; + else + return make_concat(head, tail); + } else { + return head; + } } -template struct parse_specs_result { - formatter fmt; - size_t end; - int next_arg_id; +template +struct parse_specs_result { + formatter fmt; + size_t end; + int next_arg_id; }; constexpr int manual_indexing_id = -1; template -constexpr parse_specs_result parse_specs(basic_string_view str, - size_t pos, int next_arg_id) { - str.remove_prefix(pos); - auto ctx = basic_format_parse_context(str, {}, next_arg_id); - auto f = formatter(); - auto end = f.parse(ctx); - return 
{f, pos + fmt::detail::to_unsigned(end - str.data()) + 1, - next_arg_id == 0 ? manual_indexing_id : ctx.next_arg_id()}; +constexpr parse_specs_result parse_specs(basic_string_view str, size_t pos, int next_arg_id) { + str.remove_prefix(pos); + auto ctx = basic_format_parse_context(str, {}, next_arg_id); + auto f = formatter(); + auto end = f.parse(ctx); + return {f, pos + fmt::detail::to_unsigned(end - str.data()) + 1, + next_arg_id == 0 ? manual_indexing_id : ctx.next_arg_id()}; } -template struct arg_id_handler { - arg_ref arg_id; - - constexpr int operator()() { - FMT_ASSERT(false, "handler cannot be used with automatic indexing"); - return 0; - } - constexpr int operator()(int id) { - arg_id = arg_ref(id); - return 0; - } - constexpr int operator()(basic_string_view id) { - arg_id = arg_ref(id); - return 0; - } - - constexpr void on_error(const char* message) { throw format_error(message); } +template +struct arg_id_handler { + arg_ref arg_id; + + constexpr int operator()() { + FMT_ASSERT(false, "handler cannot be used with automatic indexing"); + return 0; + } + constexpr int operator()(int id) { + arg_id = arg_ref(id); + return 0; + } + constexpr int operator()(basic_string_view id) { + arg_id = arg_ref(id); + return 0; + } + + constexpr void on_error(const char *message) { + throw format_error(message); + } }; -template struct parse_arg_id_result { - arg_ref arg_id; - const Char* arg_id_end; +template +struct parse_arg_id_result { + arg_ref arg_id; + const Char *arg_id_end; }; template -constexpr auto parse_arg_id(const Char* begin, const Char* end) { - auto handler = arg_id_handler{arg_ref{}}; - auto arg_id_end = parse_arg_id(begin, end, handler); - return parse_arg_id_result{handler.arg_id, arg_id_end}; +constexpr auto parse_arg_id(const Char *begin, const Char *end) { + auto handler = arg_id_handler {arg_ref {}}; + auto arg_id_end = parse_arg_id(begin, end, handler); + return parse_arg_id_result {handler.arg_id, arg_id_end}; } -template struct field_type 
{ - using type = remove_cvref_t; +template +struct field_type { + using type = remove_cvref_t; }; template struct field_type::value>> { - using type = remove_cvref_t; + using type = remove_cvref_t; }; -template +template constexpr auto parse_replacement_field_then_tail(S format_str) { - using char_type = typename S::char_type; - constexpr auto str = basic_string_view(format_str); - constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type(); - if constexpr (c == '}') { - return parse_tail( - field::type, ARG_INDEX>(), - format_str); - } else if constexpr (c == ':') { - constexpr auto result = parse_specs::type>( - str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID); - return parse_tail( - spec_field::type, ARG_INDEX>{ - result.fmt}, - format_str); - } + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type(); + if constexpr (c == '}') { + return parse_tail(field::type, ARG_INDEX>(), + format_str); + } else if constexpr (c == ':') { + constexpr auto result = + parse_specs::type>(str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID); + return parse_tail( + spec_field::type, ARG_INDEX> {result.fmt}, format_str); + } } // Compiles a non-empty format string and returns the compiled representation // or unknown_format() on unrecognized input. 
template constexpr auto compile_format_string(S format_str) { - using char_type = typename S::char_type; - constexpr auto str = basic_string_view(format_str); - if constexpr (str[POS] == '{') { - if constexpr (POS + 1 == str.size()) - throw format_error("unmatched '{' in format string"); - if constexpr (str[POS + 1] == '{') { - return parse_tail(make_text(str, POS, 1), format_str); - } else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') { - static_assert(ID != manual_indexing_id, - "cannot switch from manual to automatic argument indexing"); - constexpr auto next_id = - ID != manual_indexing_id ? ID + 1 : manual_indexing_id; - return parse_replacement_field_then_tail, Args, - POS + 1, ID, next_id>( - format_str); - } else { - constexpr auto arg_id_result = - parse_arg_id(str.data() + POS + 1, str.data() + str.size()); - constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data(); - constexpr char_type c = - arg_id_end_pos != str.size() ? str[arg_id_end_pos] : char_type(); - static_assert(c == '}' || c == ':', "missing '}' in format string"); - if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) { - static_assert( - ID == manual_indexing_id || ID == 0, - "cannot switch from automatic to manual argument indexing"); - constexpr auto arg_index = arg_id_result.arg_id.val.index; - return parse_replacement_field_then_tail, - Args, arg_id_end_pos, - arg_index, manual_indexing_id>( - format_str); - } else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) { - constexpr auto arg_index = - get_arg_index_by_name(arg_id_result.arg_id.val.name, Args{}); - if constexpr (arg_index != invalid_arg_index) { - constexpr auto next_id = - ID != manual_indexing_id ? 
ID + 1 : manual_indexing_id; - return parse_replacement_field_then_tail< - decltype(get_type::value), Args, arg_id_end_pos, - arg_index, next_id>(format_str); - } else { - if constexpr (c == '}') { - return parse_tail( - runtime_named_field{arg_id_result.arg_id.val.name}, - format_str); - } else if constexpr (c == ':') { - return unknown_format(); // no type info for specs parsing - } - } - } - } - } else if constexpr (str[POS] == '}') { - if constexpr (POS + 1 == str.size()) - throw format_error("unmatched '}' in format string"); - return parse_tail(make_text(str, POS, 1), format_str); - } else { - constexpr auto end = parse_text(str, POS + 1); - if constexpr (end - POS > 1) { - return parse_tail(make_text(str, POS, end - POS), - format_str); - } else { - return parse_tail(code_unit{str[POS]}, - format_str); - } - } + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + if constexpr (str[POS] == '{') { + if constexpr (POS + 1 == str.size()) + throw format_error("unmatched '{' in format string"); + if constexpr (str[POS + 1] == '{') { + return parse_tail(make_text(str, POS, 1), format_str); + } else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') { + static_assert(ID != manual_indexing_id, "cannot switch from manual to automatic argument indexing"); + constexpr auto next_id = ID != manual_indexing_id ? ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail, Args, POS + 1, ID, next_id>(format_str); + } else { + constexpr auto arg_id_result = parse_arg_id(str.data() + POS + 1, str.data() + str.size()); + constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data(); + constexpr char_type c = arg_id_end_pos != str.size() ? 
str[arg_id_end_pos] : char_type(); + static_assert(c == '}' || c == ':', "missing '}' in format string"); + if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) { + static_assert(ID == manual_indexing_id || ID == 0, + "cannot switch from automatic to manual argument indexing"); + constexpr auto arg_index = arg_id_result.arg_id.val.index; + return parse_replacement_field_then_tail, Args, arg_id_end_pos, arg_index, + manual_indexing_id>(format_str); + } else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) { + constexpr auto arg_index = get_arg_index_by_name(arg_id_result.arg_id.val.name, Args {}); + if constexpr (arg_index != invalid_arg_index) { + constexpr auto next_id = ID != manual_indexing_id ? ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail::value), Args, + arg_id_end_pos, arg_index, next_id>(format_str); + } else { + if constexpr (c == '}') { + return parse_tail( + runtime_named_field {arg_id_result.arg_id.val.name}, format_str); + } else if constexpr (c == ':') { + return unknown_format(); // no type info for specs + // parsing + } + } + } + } + } else if constexpr (str[POS] == '}') { + if constexpr (POS + 1 == str.size()) + throw format_error("unmatched '}' in format string"); + return parse_tail(make_text(str, POS, 1), format_str); + } else { + constexpr auto end = parse_text(str, POS + 1); + if constexpr (end - POS > 1) { + return parse_tail(make_text(str, POS, end - POS), format_str); + } else { + return parse_tail(code_unit {str[POS]}, format_str); + } + } } -template ::value)> +template ::value)> constexpr auto compile(S format_str) { - constexpr auto str = basic_string_view(format_str); - if constexpr (str.size() == 0) { - return detail::make_text(str, 0, 0); - } else { - constexpr auto result = - detail::compile_format_string, 0, 0>( - format_str); - return result; - } + constexpr auto str = basic_string_view(format_str); + if constexpr (str.size() == 0) { + return detail::make_text(str, 0, 0); + } else 
{ + constexpr auto result = detail::compile_format_string, 0, 0>(format_str); + return result; + } } -#endif // __cpp_if_constexpr -} // namespace detail +#endif // __cpp_if_constexpr +} // namespace detail FMT_MODULE_EXPORT_BEGIN #ifdef __cpp_if_constexpr -template ::value)> -FMT_INLINE std::basic_string format(const CompiledFormat& cf, - const Args&... args) { - auto s = std::basic_string(); - cf.format(std::back_inserter(s), args...); - return s; +FMT_INLINE std::basic_string format(const CompiledFormat &cf, const Args &...args) { + auto s = std::basic_string(); + cf.format(std::back_inserter(s), args...); + return s; } template ::value)> -constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat& cf, - const Args&... args) { - return cf.format(out, args...); +constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat &cf, const Args &...args) { + return cf.format(out, args...); } -template ::value)> -FMT_INLINE std::basic_string format(const S&, - Args&&... 
args) { - if constexpr (std::is_same::value) { - constexpr auto str = basic_string_view(S()); - if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') { - const auto& first = detail::first(args...); - if constexpr (detail::is_named_arg< - remove_cvref_t>::value) { - return fmt::to_string(first.value); - } else { - return fmt::to_string(first); - } - } - } - constexpr auto compiled = detail::compile(S()); - if constexpr (std::is_same, - detail::unknown_format>()) { - return format(static_cast>(S()), - std::forward(args)...); - } else { - return format(compiled, std::forward(args)...); - } +template ::value)> +FMT_INLINE std::basic_string format(const S &, Args &&...args) { + if constexpr (std::is_same::value) { + constexpr auto str = basic_string_view(S()); + if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') { + const auto &first = detail::first(args...); + if constexpr (detail::is_named_arg>::value) { + return fmt::to_string(first.value); + } else { + return fmt::to_string(first); + } + } + } + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, detail::unknown_format>()) { + return format(static_cast>(S()), std::forward(args)...); + } else { + return format(compiled, std::forward(args)...); + } } -template ::value)> -FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S&, Args&&... 
args) { - constexpr auto compiled = detail::compile(S()); - if constexpr (std::is_same, - detail::unknown_format>()) { - return format_to(out, - static_cast>(S()), - std::forward(args)...); - } else { - return format_to(out, compiled, std::forward(args)...); - } +template ::value)> +FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S &, Args &&...args) { + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, detail::unknown_format>()) { + return format_to(out, static_cast>(S()), std::forward(args)...); + } else { + return format_to(out, compiled, std::forward(args)...); + } } #endif -template ::value)> -format_to_n_result format_to_n(OutputIt out, size_t n, - const S& format_str, Args&&... args) { - auto it = format_to(detail::truncating_iterator(out, n), format_str, - std::forward(args)...); - return {it.base(), it.count()}; +template ::value)> +format_to_n_result format_to_n(OutputIt out, size_t n, const S &format_str, Args &&...args) { + auto it = format_to(detail::truncating_iterator(out, n), format_str, std::forward(args)...); + return {it.base(), it.count()}; } -template ::value)> -size_t formatted_size(const S& format_str, const Args&... args) { - return format_to(detail::counting_iterator(), format_str, args...).count(); +template ::value)> +size_t formatted_size(const S &format_str, const Args &...args) { + return format_to(detail::counting_iterator(), format_str, args...).count(); } -template ::value)> -void print(std::FILE* f, const S& format_str, const Args&... args) { - memory_buffer buffer; - format_to(std::back_inserter(buffer), format_str, args...); - detail::print(f, {buffer.data(), buffer.size()}); +template ::value)> +void print(std::FILE *f, const S &format_str, const Args &...args) { + memory_buffer buffer; + format_to(std::back_inserter(buffer), format_str, args...); + detail::print(f, {buffer.data(), buffer.size()}); } -template ::value)> -void print(const S& format_str, const Args&... 
args) { - print(stdout, format_str, args...); +template ::value)> +void print(const S &format_str, const Args &...args) { + print(stdout, format_str, args...); } #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS inline namespace literals { template -constexpr detail::udl_compiled_string< - remove_cvref_t, - sizeof(Str.data) / sizeof(decltype(Str.data[0])), Str> +constexpr detail::udl_compiled_string, + sizeof(Str.data) / sizeof(decltype(Str.data[0])), Str> operator""_cf() { - return {}; + return {}; } -} // namespace literals +} // namespace literals #endif FMT_MODULE_EXPORT_END FMT_END_NAMESPACE -#endif // FMT_COMPILE_H_ +#endif // FMT_COMPILE_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/core.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/core.h index d058398ac..af3f91d79 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/core.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/core.h @@ -8,7 +8,7 @@ #ifndef FMT_CORE_H_ #define FMT_CORE_H_ -#include // std::FILE +#include // std::FILE #include #include #include @@ -19,292 +19,279 @@ #define FMT_VERSION 80001 #ifdef __clang__ -# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) +#define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) #else -# define FMT_CLANG_VERSION 0 +#define FMT_CLANG_VERSION 0 #endif #if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) -# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# define FMT_GCC_PRAGMA(arg) _Pragma(arg) +#define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#define FMT_GCC_PRAGMA(arg) _Pragma(arg) #else -# define FMT_GCC_VERSION 0 -# define FMT_GCC_PRAGMA(arg) +#define FMT_GCC_VERSION 0 +#define FMT_GCC_PRAGMA(arg) #endif #if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) -# define FMT_HAS_GXX_CXX11 FMT_GCC_VERSION +#define FMT_HAS_GXX_CXX11 FMT_GCC_VERSION #else -# define FMT_HAS_GXX_CXX11 0 +#define 
FMT_HAS_GXX_CXX11 0 #endif #if defined(__INTEL_COMPILER) -# define FMT_ICC_VERSION __INTEL_COMPILER +#define FMT_ICC_VERSION __INTEL_COMPILER #else -# define FMT_ICC_VERSION 0 +#define FMT_ICC_VERSION 0 #endif #ifdef __NVCC__ -# define FMT_NVCC __NVCC__ +#define FMT_NVCC __NVCC__ #else -# define FMT_NVCC 0 +#define FMT_NVCC 0 #endif #ifdef _MSC_VER -# define FMT_MSC_VER _MSC_VER -# define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) +#define FMT_MSC_VER _MSC_VER +#define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) #else -# define FMT_MSC_VER 0 -# define FMT_MSC_WARNING(...) +#define FMT_MSC_VER 0 +#define FMT_MSC_WARNING(...) #endif #ifdef __has_feature -# define FMT_HAS_FEATURE(x) __has_feature(x) +#define FMT_HAS_FEATURE(x) __has_feature(x) #else -# define FMT_HAS_FEATURE(x) 0 +#define FMT_HAS_FEATURE(x) 0 #endif -#if defined(__has_include) && \ - (!defined(__INTELLISENSE__) || FMT_MSC_VER > 1900) && \ +#if defined(__has_include) && (!defined(__INTELLISENSE__) || FMT_MSC_VER > 1900) && \ (!FMT_ICC_VERSION || FMT_ICC_VERSION >= 1600) -# define FMT_HAS_INCLUDE(x) __has_include(x) +#define FMT_HAS_INCLUDE(x) __has_include(x) #else -# define FMT_HAS_INCLUDE(x) 0 +#define FMT_HAS_INCLUDE(x) 0 #endif #ifdef __has_cpp_attribute -# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) #else -# define FMT_HAS_CPP_ATTRIBUTE(x) 0 +#define FMT_HAS_CPP_ATTRIBUTE(x) 0 #endif -#define FMT_HAS_CPP14_ATTRIBUTE(attribute) \ - (__cplusplus >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) +#define FMT_HAS_CPP14_ATTRIBUTE(attribute) (__cplusplus >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) -#define FMT_HAS_CPP17_ATTRIBUTE(attribute) \ - (__cplusplus >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) +#define FMT_HAS_CPP17_ATTRIBUTE(attribute) (__cplusplus >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) // Check if relaxed C++14 constexpr is supported. 
// GCC doesn't allow throw in constexpr until version 6 (bug 67371). #ifndef FMT_USE_CONSTEXPR -# define FMT_USE_CONSTEXPR \ - (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1910 || \ - (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L)) && \ - !FMT_NVCC && !FMT_ICC_VERSION +#define FMT_USE_CONSTEXPR \ + (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1910 || \ + (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L)) && \ + !FMT_NVCC && !FMT_ICC_VERSION #endif #if FMT_USE_CONSTEXPR -# define FMT_CONSTEXPR constexpr -# define FMT_CONSTEXPR_DECL constexpr +#define FMT_CONSTEXPR constexpr +#define FMT_CONSTEXPR_DECL constexpr #else -# define FMT_CONSTEXPR -# define FMT_CONSTEXPR_DECL +#define FMT_CONSTEXPR +#define FMT_CONSTEXPR_DECL #endif // Check if constexpr std::char_traits<>::compare,length is supported. #if defined(__GLIBCXX__) -# if __cplusplus >= 201703L && defined(_GLIBCXX_RELEASE) && \ - _GLIBCXX_RELEASE >= 7 // GCC 7+ libstdc++ has _GLIBCXX_RELEASE. -# define FMT_CONSTEXPR_CHAR_TRAITS constexpr -# endif -#elif defined(_LIBCPP_VERSION) && __cplusplus >= 201703L && \ - _LIBCPP_VERSION >= 4000 -# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#if __cplusplus >= 201703L && defined(_GLIBCXX_RELEASE) && \ + _GLIBCXX_RELEASE >= 7 // GCC 7+ libstdc++ has _GLIBCXX_RELEASE. 
+#define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#endif +#elif defined(_LIBCPP_VERSION) && __cplusplus >= 201703L && _LIBCPP_VERSION >= 4000 +#define FMT_CONSTEXPR_CHAR_TRAITS constexpr #elif FMT_MSC_VER >= 1914 && _MSVC_LANG >= 201703L -# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#define FMT_CONSTEXPR_CHAR_TRAITS constexpr #endif #ifndef FMT_CONSTEXPR_CHAR_TRAITS -# define FMT_CONSTEXPR_CHAR_TRAITS +#define FMT_CONSTEXPR_CHAR_TRAITS #endif #ifndef FMT_OVERRIDE -# if FMT_HAS_FEATURE(cxx_override_control) || \ - (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 -# define FMT_OVERRIDE override -# else -# define FMT_OVERRIDE -# endif +#if FMT_HAS_FEATURE(cxx_override_control) || (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 +#define FMT_OVERRIDE override +#else +#define FMT_OVERRIDE +#endif #endif // Check if exceptions are disabled. #ifndef FMT_EXCEPTIONS -# if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \ - FMT_MSC_VER && !_HAS_EXCEPTIONS -# define FMT_EXCEPTIONS 0 -# else -# define FMT_EXCEPTIONS 1 -# endif +#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || FMT_MSC_VER && !_HAS_EXCEPTIONS +#define FMT_EXCEPTIONS 0 +#else +#define FMT_EXCEPTIONS 1 +#endif #endif // Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature). 
#ifndef FMT_USE_NOEXCEPT -# define FMT_USE_NOEXCEPT 0 +#define FMT_USE_NOEXCEPT 0 #endif -#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \ - (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 -# define FMT_DETECTED_NOEXCEPT noexcept -# define FMT_HAS_CXX11_NOEXCEPT 1 +#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \ + FMT_MSC_VER >= 1900 +#define FMT_DETECTED_NOEXCEPT noexcept +#define FMT_HAS_CXX11_NOEXCEPT 1 #else -# define FMT_DETECTED_NOEXCEPT throw() -# define FMT_HAS_CXX11_NOEXCEPT 0 +#define FMT_DETECTED_NOEXCEPT throw() +#define FMT_HAS_CXX11_NOEXCEPT 0 #endif #ifndef FMT_NOEXCEPT -# if FMT_EXCEPTIONS || FMT_HAS_CXX11_NOEXCEPT -# define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT -# else -# define FMT_NOEXCEPT -# endif +#if FMT_EXCEPTIONS || FMT_HAS_CXX11_NOEXCEPT +#define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT +#else +#define FMT_NOEXCEPT +#endif #endif // [[noreturn]] is disabled on MSVC and NVCC because of bogus unreachable code // warnings. 
-#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER && \ - !FMT_NVCC -# define FMT_NORETURN [[noreturn]] +#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER && !FMT_NVCC +#define FMT_NORETURN [[noreturn]] #else -# define FMT_NORETURN +#define FMT_NORETURN #endif #ifndef FMT_MAYBE_UNUSED -# if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) -# define FMT_MAYBE_UNUSED [[maybe_unused]] -# else -# define FMT_MAYBE_UNUSED -# endif +#if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) +#define FMT_MAYBE_UNUSED [[maybe_unused]] +#else +#define FMT_MAYBE_UNUSED +#endif #endif #if __cplusplus == 201103L || __cplusplus == 201402L -# if defined(__INTEL_COMPILER) || defined(__PGI) -# define FMT_FALLTHROUGH -# elif defined(__clang__) -# define FMT_FALLTHROUGH [[clang::fallthrough]] -# elif FMT_GCC_VERSION >= 700 && \ - (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) -# define FMT_FALLTHROUGH [[gnu::fallthrough]] -# else -# define FMT_FALLTHROUGH -# endif -#elif FMT_HAS_CPP17_ATTRIBUTE(fallthrough) || \ - (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) -# define FMT_FALLTHROUGH [[fallthrough]] +#if defined(__INTEL_COMPILER) || defined(__PGI) +#define FMT_FALLTHROUGH +#elif defined(__clang__) +#define FMT_FALLTHROUGH [[clang::fallthrough]] +#elif FMT_GCC_VERSION >= 700 && (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) +#define FMT_FALLTHROUGH [[gnu::fallthrough]] #else -# define FMT_FALLTHROUGH +#define FMT_FALLTHROUGH +#endif +#elif FMT_HAS_CPP17_ATTRIBUTE(fallthrough) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +#define FMT_FALLTHROUGH [[fallthrough]] +#else +#define FMT_FALLTHROUGH #endif #ifndef FMT_USE_FLOAT -# define FMT_USE_FLOAT 1 +#define FMT_USE_FLOAT 1 #endif #ifndef FMT_USE_DOUBLE -# define FMT_USE_DOUBLE 1 +#define FMT_USE_DOUBLE 1 #endif #ifndef FMT_USE_LONG_DOUBLE -# define FMT_USE_LONG_DOUBLE 1 +#define FMT_USE_LONG_DOUBLE 1 #endif #ifndef FMT_INLINE -# if FMT_GCC_VERSION || FMT_CLANG_VERSION -# define FMT_INLINE inline 
__attribute__((always_inline)) -# else -# define FMT_INLINE inline -# endif +#if FMT_GCC_VERSION || FMT_CLANG_VERSION +#define FMT_INLINE inline __attribute__((always_inline)) +#else +#define FMT_INLINE inline +#endif #endif #ifndef FMT_USE_INLINE_NAMESPACES -# if FMT_HAS_FEATURE(cxx_inline_namespaces) || FMT_GCC_VERSION >= 404 || \ - (FMT_MSC_VER >= 1900 && (!defined(_MANAGED) || !_MANAGED)) -# define FMT_USE_INLINE_NAMESPACES 1 -# else -# define FMT_USE_INLINE_NAMESPACES 0 -# endif +#if FMT_HAS_FEATURE(cxx_inline_namespaces) || FMT_GCC_VERSION >= 404 || \ + (FMT_MSC_VER >= 1900 && (!defined(_MANAGED) || !_MANAGED)) +#define FMT_USE_INLINE_NAMESPACES 1 +#else +#define FMT_USE_INLINE_NAMESPACES 0 +#endif #endif #ifndef FMT_BEGIN_NAMESPACE -# if FMT_USE_INLINE_NAMESPACES -# define FMT_INLINE_NAMESPACE inline namespace -# define FMT_END_NAMESPACE \ - } \ - } -# else -# define FMT_INLINE_NAMESPACE namespace -# define FMT_END_NAMESPACE \ - } \ - using namespace v8; \ - } -# endif -# define FMT_BEGIN_NAMESPACE \ - namespace fmt { \ - FMT_INLINE_NAMESPACE v8 { +#if FMT_USE_INLINE_NAMESPACES +#define FMT_INLINE_NAMESPACE inline namespace +#define FMT_END_NAMESPACE \ + } \ + } +#else +#define FMT_INLINE_NAMESPACE namespace +#define FMT_END_NAMESPACE \ + } \ + using namespace v8; \ + } +#endif +#define FMT_BEGIN_NAMESPACE \ + namespace fmt { \ + FMT_INLINE_NAMESPACE v8 { #endif #ifndef FMT_MODULE_EXPORT -# define FMT_MODULE_EXPORT -# define FMT_MODULE_EXPORT_BEGIN -# define FMT_MODULE_EXPORT_END -# define FMT_BEGIN_DETAIL_NAMESPACE namespace detail { -# define FMT_END_DETAIL_NAMESPACE } +#define FMT_MODULE_EXPORT +#define FMT_MODULE_EXPORT_BEGIN +#define FMT_MODULE_EXPORT_END +#define FMT_BEGIN_DETAIL_NAMESPACE namespace detail { +#define FMT_END_DETAIL_NAMESPACE } #endif #if !defined(FMT_HEADER_ONLY) && defined(_WIN32) -# define FMT_CLASS_API FMT_MSC_WARNING(suppress : 4275) -# ifdef FMT_EXPORT -# define FMT_API __declspec(dllexport) -# elif defined(FMT_SHARED) -# define 
FMT_API __declspec(dllimport) -# endif +#define FMT_CLASS_API FMT_MSC_WARNING(suppress : 4275) +#ifdef FMT_EXPORT +#define FMT_API __declspec(dllexport) +#elif defined(FMT_SHARED) +#define FMT_API __declspec(dllimport) +#endif #else -# define FMT_CLASS_API -# if defined(FMT_EXPORT) || defined(FMT_SHARED) -# if defined(__GNUC__) || defined(__clang__) -# define FMT_API __attribute__((visibility("default"))) -# endif -# endif +#define FMT_CLASS_API +#if defined(FMT_EXPORT) || defined(FMT_SHARED) +#if defined(__GNUC__) || defined(__clang__) +#define FMT_API __attribute__((visibility("default"))) +#endif +#endif #endif #ifndef FMT_API -# define FMT_API +#define FMT_API #endif #if FMT_GCC_VERSION -# define FMT_GCC_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#define FMT_GCC_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) #else -# define FMT_GCC_VISIBILITY_HIDDEN +#define FMT_GCC_VISIBILITY_HIDDEN #endif // libc++ supports string_view in pre-c++17. -#if (FMT_HAS_INCLUDE() && \ - (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \ +#if (FMT_HAS_INCLUDE() && (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \ (defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910) -# include -# define FMT_USE_STRING_VIEW +#include +#define FMT_USE_STRING_VIEW #elif FMT_HAS_INCLUDE("experimental/string_view") && __cplusplus >= 201402L -# include -# define FMT_USE_EXPERIMENTAL_STRING_VIEW +#include +#define FMT_USE_EXPERIMENTAL_STRING_VIEW #endif #ifndef FMT_UNICODE -# define FMT_UNICODE !FMT_MSC_VER +#define FMT_UNICODE !FMT_MSC_VER #endif #ifndef FMT_CONSTEVAL -# if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && \ - __cplusplus > 201703L) || \ - (defined(__cpp_consteval) && \ - !FMT_MSC_VER) // consteval is broken in MSVC. 
-# define FMT_CONSTEVAL consteval -# define FMT_HAS_CONSTEVAL -# else -# define FMT_CONSTEVAL -# endif +#if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && __cplusplus > 201703L) || \ + (defined(__cpp_consteval) && !FMT_MSC_VER) // consteval is broken in MSVC. +#define FMT_CONSTEVAL consteval +#define FMT_HAS_CONSTEVAL +#else +#define FMT_CONSTEVAL +#endif #endif #ifndef FMT_USE_NONTYPE_TEMPLATE_PARAMETERS -# if defined(__cpp_nontype_template_args) && \ - ((FMT_GCC_VERSION >= 903 && __cplusplus >= 201709L) || \ - __cpp_nontype_template_args >= 201911L) -# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 1 -# else -# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 0 -# endif +#if defined(__cpp_nontype_template_args) && \ + ((FMT_GCC_VERSION >= 903 && __cplusplus >= 201709L) || __cpp_nontype_template_args >= 201911L) +#define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 1 +#else +#define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 0 +#endif #endif // Enable minimal optimizations for more compact code in debug mode. @@ -321,108 +308,118 @@ template using enable_if_t = typename std::enable_if::type; template using conditional_t = typename std::conditional::type; -template using bool_constant = std::integral_constant; +template +using bool_constant = std::integral_constant; template using remove_reference_t = typename std::remove_reference::type; template using remove_cvref_t = typename std::remove_cv>::type; -template struct type_identity { using type = T; }; -template using type_identity_t = typename type_identity::type; +template +struct type_identity { + using type = T; +}; +template +using type_identity_t = typename type_identity::type; struct monostate { - constexpr monostate() {} + constexpr monostate() { + } }; // Suppress "unused variable" warnings with the method described in // https://herbsutter.com/2009/10/18/mailbag-shutting-up-compiler-warnings/. // (void)var does not work on many Intel compilers. -template FMT_CONSTEXPR void ignore_unused(const T&...) 
{} +template +FMT_CONSTEXPR void ignore_unused(const T &...) { +} // An enable_if helper to be used in template parameters which results in much // shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed // to workaround a bug in MSVC 2019 (see #1140 and #1186). #ifdef FMT_DOC -# define FMT_ENABLE_IF(...) +#define FMT_ENABLE_IF(...) #else -# define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 +#define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 #endif FMT_BEGIN_DETAIL_NAMESPACE constexpr FMT_INLINE auto is_constant_evaluated() FMT_NOEXCEPT -> bool { #ifdef __cpp_lib_is_constant_evaluated - return std::is_constant_evaluated(); + return std::is_constant_evaluated(); #else - return false; + return false; #endif } // A function to suppress "conditional expression is constant" warnings. -template constexpr auto const_check(T value) -> T { return value; } +template +constexpr auto const_check(T value) -> T { + return value; +} -FMT_NORETURN FMT_API void assert_fail(const char* file, int line, - const char* message); +FMT_NORETURN FMT_API void assert_fail(const char *file, int line, const char *message); #ifndef FMT_ASSERT -# ifdef NDEBUG +#ifdef NDEBUG // FMT_ASSERT is not empty to avoid -Werror=empty-body. -# define FMT_ASSERT(condition, message) \ - ::fmt::ignore_unused((condition), (message)) -# else -# define FMT_ASSERT(condition, message) \ - ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ - ? (void)0 \ - : ::fmt::detail::assert_fail(__FILE__, __LINE__, (message))) -# endif +#define FMT_ASSERT(condition, message) ::fmt::ignore_unused((condition), (message)) +#else +#define FMT_ASSERT(condition, message) \ + ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ + ? 
(void)0 \ + : ::fmt::detail::assert_fail(__FILE__, __LINE__, (message))) +#endif #endif #if defined(FMT_USE_STRING_VIEW) -template using std_string_view = std::basic_string_view; +template +using std_string_view = std::basic_string_view; #elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW) template using std_string_view = std::experimental::basic_string_view; #else -template struct std_string_view {}; +template +struct std_string_view {}; #endif #ifdef FMT_USE_INT128 // Do nothing. -#elif defined(__SIZEOF_INT128__) && !FMT_NVCC && \ - !(FMT_CLANG_VERSION && FMT_MSC_VER) -# define FMT_USE_INT128 1 +#elif defined(__SIZEOF_INT128__) && !FMT_NVCC && !(FMT_CLANG_VERSION && FMT_MSC_VER) +#define FMT_USE_INT128 1 using int128_t = __int128_t; using uint128_t = __uint128_t; -template inline auto convert_for_visit(T value) -> T { - return value; +template +inline auto convert_for_visit(T value) -> T { + return value; } #else -# define FMT_USE_INT128 0 +#define FMT_USE_INT128 0 #endif #if !FMT_USE_INT128 enum class int128_t {}; enum class uint128_t {}; // Reduce template instantiations. -template inline auto convert_for_visit(T) -> monostate { - return {}; +template +inline auto convert_for_visit(T) -> monostate { + return {}; } #endif // Casts a nonnegative integer to unsigned. template -FMT_CONSTEXPR auto to_unsigned(Int value) -> - typename std::make_unsigned::type { - FMT_ASSERT(value >= 0, "negative value"); - return static_cast::type>(value); +FMT_CONSTEXPR auto to_unsigned(Int value) -> typename std::make_unsigned::type { + FMT_ASSERT(value >= 0, "negative value"); + return static_cast::type>(value); } FMT_MSC_WARNING(suppress : 4566) constexpr unsigned char micro[] = "\u00B5"; constexpr auto is_utf8() -> bool { - // Avoid buggy sign extensions in MSVC's constant evaluation mode. 
- // https://developercommunity.visualstudio.com/t/C-difference-in-behavior-for-unsigned/1233612 - using uchar = unsigned char; - return FMT_UNICODE || (sizeof(micro) == 3 && uchar(micro[0]) == 0xC2 && - uchar(micro[1]) == 0xB5); + // Avoid buggy sign extensions in MSVC's constant evaluation mode. + // https://developercommunity.visualstudio.com/t/C-difference-in-behavior-for-unsigned/1233612 + using uchar = unsigned char; + return FMT_UNICODE || (sizeof(micro) == 3 && uchar(micro[0]) == 0xC2 && uchar(micro[1]) == 0xB5); } FMT_END_DETAIL_NAMESPACE @@ -433,125 +430,127 @@ FMT_END_DETAIL_NAMESPACE compiled with a different ``-std`` option than the client code (which is not recommended). */ -template class basic_string_view { - private: - const Char* data_; - size_t size_; - - public: - using value_type = Char; - using iterator = const Char*; - - constexpr basic_string_view() FMT_NOEXCEPT : data_(nullptr), size_(0) {} - - /** Constructs a string reference object from a C string and a size. */ - constexpr basic_string_view(const Char* s, size_t count) FMT_NOEXCEPT - : data_(s), - size_(count) {} - - /** - \rst - Constructs a string reference object from a C string computing - the size with ``std::char_traits::length``. - \endrst - */ - FMT_CONSTEXPR_CHAR_TRAITS - FMT_INLINE - basic_string_view(const Char* s) : data_(s) { - if (detail::const_check(std::is_same::value && - !detail::is_constant_evaluated())) - size_ = std::strlen(reinterpret_cast(s)); - else - size_ = std::char_traits::length(s); - } - - /** Constructs a string reference from a ``std::basic_string`` object. */ - template - FMT_CONSTEXPR basic_string_view( - const std::basic_string& s) FMT_NOEXCEPT - : data_(s.data()), - size_(s.size()) {} - - template >::value)> - FMT_CONSTEXPR basic_string_view(S s) FMT_NOEXCEPT : data_(s.data()), - size_(s.size()) {} - - /** Returns a pointer to the string data. */ - constexpr auto data() const -> const Char* { return data_; } - - /** Returns the string size. 
*/ - constexpr auto size() const -> size_t { return size_; } - - constexpr auto begin() const -> iterator { return data_; } - constexpr auto end() const -> iterator { return data_ + size_; } - - constexpr auto operator[](size_t pos) const -> const Char& { - return data_[pos]; - } - - FMT_CONSTEXPR void remove_prefix(size_t n) { - data_ += n; - size_ -= n; - } - - // Lexicographically compare this string reference to other. - FMT_CONSTEXPR_CHAR_TRAITS auto compare(basic_string_view other) const -> int { - size_t str_size = size_ < other.size_ ? size_ : other.size_; - int result = std::char_traits::compare(data_, other.data_, str_size); - if (result == 0) - result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1); - return result; - } - - FMT_CONSTEXPR_CHAR_TRAITS friend auto operator==(basic_string_view lhs, - basic_string_view rhs) - -> bool { - return lhs.compare(rhs) == 0; - } - friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { - return lhs.compare(rhs) != 0; - } - friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { - return lhs.compare(rhs) < 0; - } - friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { - return lhs.compare(rhs) <= 0; - } - friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { - return lhs.compare(rhs) > 0; - } - friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { - return lhs.compare(rhs) >= 0; - } +template +class basic_string_view { +private: + const Char *data_; + size_t size_; + +public: + using value_type = Char; + using iterator = const Char *; + + constexpr basic_string_view() FMT_NOEXCEPT : data_(nullptr), size_(0) { + } + + /** Constructs a string reference object from a C string and a size. 
*/ + constexpr basic_string_view(const Char *s, size_t count) FMT_NOEXCEPT : data_(s), size_(count) { + } + + /** + \rst + Constructs a string reference object from a C string computing + the size with ``std::char_traits::length``. + \endrst + */ + FMT_CONSTEXPR_CHAR_TRAITS + FMT_INLINE + basic_string_view(const Char *s) : data_(s) { + if (detail::const_check(std::is_same::value && !detail::is_constant_evaluated())) + size_ = std::strlen(reinterpret_cast(s)); + else + size_ = std::char_traits::length(s); + } + + /** Constructs a string reference from a ``std::basic_string`` object. */ + template + FMT_CONSTEXPR basic_string_view(const std::basic_string &s) FMT_NOEXCEPT : data_(s.data()), + size_(s.size()) { + } + + template >::value)> + FMT_CONSTEXPR basic_string_view(S s) FMT_NOEXCEPT : data_(s.data()), size_(s.size()) { + } + + /** Returns a pointer to the string data. */ + constexpr auto data() const -> const Char * { + return data_; + } + + /** Returns the string size. */ + constexpr auto size() const -> size_t { + return size_; + } + + constexpr auto begin() const -> iterator { + return data_; + } + constexpr auto end() const -> iterator { + return data_ + size_; + } + + constexpr auto operator[](size_t pos) const -> const Char & { + return data_[pos]; + } + + FMT_CONSTEXPR void remove_prefix(size_t n) { + data_ += n; + size_ -= n; + } + + // Lexicographically compare this string reference to other. + FMT_CONSTEXPR_CHAR_TRAITS auto compare(basic_string_view other) const -> int { + size_t str_size = size_ < other.size_ ? size_ : other.size_; + int result = std::char_traits::compare(data_, other.data_, str_size); + if (result == 0) + result = size_ == other.size_ ? 0 : (size_ < other.size_ ? 
-1 : 1); + return result; + } + + FMT_CONSTEXPR_CHAR_TRAITS friend auto operator==(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) == 0; + } + friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) != 0; + } + friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) < 0; + } + friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) <= 0; + } + friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) > 0; + } + friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) >= 0; + } }; using string_view = basic_string_view; /** Specifies if ``T`` is a character type. Can be specialized by users. */ -template struct is_char : std::false_type {}; -template <> struct is_char : std::true_type {}; +template +struct is_char : std::false_type {}; +template <> +struct is_char : std::true_type {}; // Returns a string view of `s`. template ::value)> -FMT_INLINE auto to_string_view(const Char* s) -> basic_string_view { - return s; +FMT_INLINE auto to_string_view(const Char *s) -> basic_string_view { + return s; } template -inline auto to_string_view(const std::basic_string& s) - -> basic_string_view { - return s; +inline auto to_string_view(const std::basic_string &s) -> basic_string_view { + return s; } template -constexpr auto to_string_view(basic_string_view s) - -> basic_string_view { - return s; +constexpr auto to_string_view(basic_string_view s) -> basic_string_view { + return s; } -template >::value)> -inline auto to_string_view(detail::std_string_view s) - -> basic_string_view { - return s; +template >::value)> +inline auto to_string_view(detail::std_string_view s) -> basic_string_view { + return s; } // A base class for compile-time strings. 
It is defined in the fmt namespace to @@ -562,9 +561,8 @@ template struct is_compile_string : std::is_base_of {}; template ::value)> -constexpr auto to_string_view(const S& s) - -> basic_string_view { - return basic_string_view(s); +constexpr auto to_string_view(const S &s) -> basic_string_view { + return basic_string_view(s); } FMT_BEGIN_DETAIL_NAMESPACE @@ -576,38 +574,40 @@ using fmt::v8::to_string_view; // It should be a constexpr function but MSVC 2017 fails to compile it in // enable_if and MSVC 2015 fails to compile it as an alias template. template -struct is_string : std::is_class()))> { -}; +struct is_string : std::is_class()))> {}; -template struct char_t_impl {}; -template struct char_t_impl::value>> { - using result = decltype(to_string_view(std::declval())); - using type = typename result::value_type; +template +struct char_t_impl {}; +template +struct char_t_impl::value>> { + using result = decltype(to_string_view(std::declval())); + using type = typename result::value_type; }; // Reports a compile-time error if S is not a valid format string. template ::value)> -FMT_INLINE void check_format_string(const S&) { +FMT_INLINE void check_format_string(const S &) { #ifdef FMT_ENFORCE_COMPILE_STRING - static_assert(is_compile_string::value, - "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " - "FMT_STRING."); + static_assert(is_compile_string::value, "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " + "FMT_STRING."); #endif } template ::value)> void check_format_string(S); struct error_handler { - constexpr error_handler() = default; - constexpr error_handler(const error_handler&) = default; + constexpr error_handler() = default; + constexpr error_handler(const error_handler &) = default; - // This function is intentionally not constexpr to give a compile-time error. - FMT_NORETURN FMT_API void on_error(const char* message); + // This function is intentionally not constexpr to give a compile-time + // error. 
+ FMT_NORETURN FMT_API void on_error(const char *message); }; FMT_END_DETAIL_NAMESPACE /** String's character type. */ -template using char_t = typename detail::char_t_impl::type; +template +using char_t = typename detail::char_t_impl::type; /** \rst @@ -618,92 +618,99 @@ template using char_t = typename detail::char_t_impl::type; */ template class basic_format_parse_context : private ErrorHandler { - private: - basic_string_view format_str_; - int next_arg_id_; - - public: - using char_type = Char; - using iterator = typename basic_string_view::iterator; - - explicit constexpr basic_format_parse_context( - basic_string_view format_str, ErrorHandler eh = {}, - int next_arg_id = 0) - : ErrorHandler(eh), format_str_(format_str), next_arg_id_(next_arg_id) {} - - /** - Returns an iterator to the beginning of the format string range being - parsed. - */ - constexpr auto begin() const FMT_NOEXCEPT -> iterator { - return format_str_.begin(); - } - - /** - Returns an iterator past the end of the format string range being parsed. - */ - constexpr auto end() const FMT_NOEXCEPT -> iterator { - return format_str_.end(); - } - - /** Advances the begin iterator to ``it``. */ - FMT_CONSTEXPR void advance_to(iterator it) { - format_str_.remove_prefix(detail::to_unsigned(it - begin())); - } - - /** - Reports an error if using the manual argument indexing; otherwise returns - the next argument index and switches to the automatic indexing. - */ - FMT_CONSTEXPR auto next_arg_id() -> int { - // Don't check if the argument id is valid to avoid overhead and because it - // will be checked during formatting anyway. - if (next_arg_id_ >= 0) return next_arg_id_++; - on_error("cannot switch from manual to automatic argument indexing"); - return 0; - } - - /** - Reports an error if using the automatic argument indexing; otherwise - switches to the manual indexing. 
- */ - FMT_CONSTEXPR void check_arg_id(int) { - if (next_arg_id_ > 0) - on_error("cannot switch from automatic to manual argument indexing"); - else - next_arg_id_ = -1; - } - - FMT_CONSTEXPR void check_arg_id(basic_string_view) {} - - FMT_CONSTEXPR void on_error(const char* message) { - ErrorHandler::on_error(message); - } - - constexpr auto error_handler() const -> ErrorHandler { return *this; } +private: + basic_string_view format_str_; + int next_arg_id_; + +public: + using char_type = Char; + using iterator = typename basic_string_view::iterator; + + explicit constexpr basic_format_parse_context(basic_string_view format_str, ErrorHandler eh = {}, + int next_arg_id = 0) + : ErrorHandler(eh), format_str_(format_str), next_arg_id_(next_arg_id) { + } + + /** + Returns an iterator to the beginning of the format string range being + parsed. + */ + constexpr auto begin() const FMT_NOEXCEPT -> iterator { + return format_str_.begin(); + } + + /** + Returns an iterator past the end of the format string range being parsed. + */ + constexpr auto end() const FMT_NOEXCEPT -> iterator { + return format_str_.end(); + } + + /** Advances the begin iterator to ``it``. */ + FMT_CONSTEXPR void advance_to(iterator it) { + format_str_.remove_prefix(detail::to_unsigned(it - begin())); + } + + /** + Reports an error if using the manual argument indexing; otherwise returns + the next argument index and switches to the automatic indexing. + */ + FMT_CONSTEXPR auto next_arg_id() -> int { + // Don't check if the argument id is valid to avoid overhead and because + // it will be checked during formatting anyway. + if (next_arg_id_ >= 0) + return next_arg_id_++; + on_error("cannot switch from manual to automatic argument indexing"); + return 0; + } + + /** + Reports an error if using the automatic argument indexing; otherwise + switches to the manual indexing. 
+ */ + FMT_CONSTEXPR void check_arg_id(int) { + if (next_arg_id_ > 0) + on_error("cannot switch from automatic to manual argument indexing"); + else + next_arg_id_ = -1; + } + + FMT_CONSTEXPR void check_arg_id(basic_string_view) { + } + + FMT_CONSTEXPR void on_error(const char *message) { + ErrorHandler::on_error(message); + } + + constexpr auto error_handler() const -> ErrorHandler { + return *this; + } }; using format_parse_context = basic_format_parse_context; -template class basic_format_arg; -template class basic_format_args; -template class dynamic_format_arg_store; +template +class basic_format_arg; +template +class basic_format_args; +template +class dynamic_format_arg_store; // A formatter for objects of type T. template struct formatter { - // A deleted default constructor indicates a disabled formatter. - formatter() = delete; + // A deleted default constructor indicates a disabled formatter. + formatter() = delete; }; // Specifies if T has an enabled formatter specialization. A type can be // formattable even if it doesn't have a formatter e.g. via a conversion. template -using has_formatter = - std::is_constructible>; +using has_formatter = std::is_constructible>; // Checks whether T is a container with contiguous storage. -template struct is_contiguous : std::false_type {}; +template +struct is_contiguous : std::false_type {}; template struct is_contiguous> : std::true_type {}; @@ -713,31 +720,30 @@ FMT_BEGIN_DETAIL_NAMESPACE // Extracts a reference to the container from back_insert_iterator. 
template -inline auto get_container(std::back_insert_iterator it) - -> Container& { - using bi_iterator = std::back_insert_iterator; - struct accessor : bi_iterator { - accessor(bi_iterator iter) : bi_iterator(iter) {} - using bi_iterator::container; - }; - return *accessor(it).container; +inline auto get_container(std::back_insert_iterator it) -> Container & { + using bi_iterator = std::back_insert_iterator; + struct accessor : bi_iterator { + accessor(bi_iterator iter) : bi_iterator(iter) { + } + using bi_iterator::container; + }; + return *accessor(it).container; } template -FMT_CONSTEXPR auto copy_str(InputIt begin, InputIt end, OutputIt out) - -> OutputIt { - while (begin != end) *out++ = static_cast(*begin++); - return out; +FMT_CONSTEXPR auto copy_str(InputIt begin, InputIt end, OutputIt out) -> OutputIt { + while (begin != end) + *out++ = static_cast(*begin++); + return out; } template ::value)> -FMT_CONSTEXPR auto copy_str(const Char* begin, const Char* end, Char* out) - -> Char* { - if (is_constant_evaluated()) - return copy_str(begin, end, out); - auto size = to_unsigned(end - begin); - memcpy(out, begin, size); - return out + size; +FMT_CONSTEXPR auto copy_str(const Char *begin, const Char *end, Char *out) -> Char * { + if (is_constant_evaluated()) + return copy_str(begin, end, out); + auto size = to_unsigned(end - begin); + memcpy(out, begin, size); + return out + size; } /** @@ -746,336 +752,401 @@ FMT_CONSTEXPR auto copy_str(const Char* begin, const Char* end, Char* out) class and shouldn't be used directly, only via `~fmt::basic_memory_buffer`. \endrst */ -template class buffer { - private: - T* ptr_; - size_t size_; - size_t capacity_; - - protected: - // Don't initialize ptr_ since it is not accessed to save a few cycles. 
- FMT_MSC_WARNING(suppress : 26495) - buffer(size_t sz) FMT_NOEXCEPT : size_(sz), capacity_(sz) {} - - buffer(T* p = nullptr, size_t sz = 0, size_t cap = 0) FMT_NOEXCEPT - : ptr_(p), - size_(sz), - capacity_(cap) {} - - ~buffer() = default; - buffer(buffer&&) = default; - - /** Sets the buffer data and capacity. */ - void set(T* buf_data, size_t buf_capacity) FMT_NOEXCEPT { - ptr_ = buf_data; - capacity_ = buf_capacity; - } - - /** Increases the buffer capacity to hold at least *capacity* elements. */ - virtual void grow(size_t capacity) = 0; - - public: - using value_type = T; - using const_reference = const T&; - - buffer(const buffer&) = delete; - void operator=(const buffer&) = delete; - - auto begin() FMT_NOEXCEPT -> T* { return ptr_; } - auto end() FMT_NOEXCEPT -> T* { return ptr_ + size_; } - - auto begin() const FMT_NOEXCEPT -> const T* { return ptr_; } - auto end() const FMT_NOEXCEPT -> const T* { return ptr_ + size_; } - - /** Returns the size of this buffer. */ - auto size() const FMT_NOEXCEPT -> size_t { return size_; } - - /** Returns the capacity of this buffer. */ - auto capacity() const FMT_NOEXCEPT -> size_t { return capacity_; } - - /** Returns a pointer to the buffer data. */ - auto data() FMT_NOEXCEPT -> T* { return ptr_; } - - /** Returns a pointer to the buffer data. */ - auto data() const FMT_NOEXCEPT -> const T* { return ptr_; } - - /** Clears this buffer. */ - void clear() { size_ = 0; } - - // Tries resizing the buffer to contain *count* elements. If T is a POD type - // the new elements may not be initialized. - void try_resize(size_t count) { - try_reserve(count); - size_ = count <= capacity_ ? count : capacity_; - } - - // Tries increasing the buffer capacity to *new_capacity*. It can increase the - // capacity by a smaller amount than requested but guarantees there is space - // for at least one additional element either by increasing the capacity or by - // flushing the buffer if it is full. 
- void try_reserve(size_t new_capacity) { - if (new_capacity > capacity_) grow(new_capacity); - } - - void push_back(const T& value) { - try_reserve(size_ + 1); - ptr_[size_++] = value; - } - - /** Appends data to the end of the buffer. */ - template void append(const U* begin, const U* end); - - template auto operator[](I index) -> T& { return ptr_[index]; } - template auto operator[](I index) const -> const T& { - return ptr_[index]; - } +template +class buffer { +private: + T *ptr_; + size_t size_; + size_t capacity_; + +protected: + // Don't initialize ptr_ since it is not accessed to save a few cycles. + FMT_MSC_WARNING(suppress : 26495) + buffer(size_t sz) FMT_NOEXCEPT : size_(sz), capacity_(sz) { + } + + buffer(T *p = nullptr, size_t sz = 0, size_t cap = 0) FMT_NOEXCEPT : ptr_(p), size_(sz), capacity_(cap) { + } + + ~buffer() = default; + buffer(buffer &&) = default; + + /** Sets the buffer data and capacity. */ + void set(T *buf_data, size_t buf_capacity) FMT_NOEXCEPT { + ptr_ = buf_data; + capacity_ = buf_capacity; + } + + /** Increases the buffer capacity to hold at least *capacity* elements. */ + virtual void grow(size_t capacity) = 0; + +public: + using value_type = T; + using const_reference = const T &; + + buffer(const buffer &) = delete; + void operator=(const buffer &) = delete; + + auto begin() FMT_NOEXCEPT -> T * { + return ptr_; + } + auto end() FMT_NOEXCEPT -> T * { + return ptr_ + size_; + } + + auto begin() const FMT_NOEXCEPT -> const T * { + return ptr_; + } + auto end() const FMT_NOEXCEPT -> const T * { + return ptr_ + size_; + } + + /** Returns the size of this buffer. */ + auto size() const FMT_NOEXCEPT -> size_t { + return size_; + } + + /** Returns the capacity of this buffer. */ + auto capacity() const FMT_NOEXCEPT -> size_t { + return capacity_; + } + + /** Returns a pointer to the buffer data. */ + auto data() FMT_NOEXCEPT -> T * { + return ptr_; + } + + /** Returns a pointer to the buffer data. 
*/ + auto data() const FMT_NOEXCEPT -> const T * { + return ptr_; + } + + /** Clears this buffer. */ + void clear() { + size_ = 0; + } + + // Tries resizing the buffer to contain *count* elements. If T is a POD type + // the new elements may not be initialized. + void try_resize(size_t count) { + try_reserve(count); + size_ = count <= capacity_ ? count : capacity_; + } + + // Tries increasing the buffer capacity to *new_capacity*. It can increase + // the capacity by a smaller amount than requested but guarantees there is + // space for at least one additional element either by increasing the + // capacity or by flushing the buffer if it is full. + void try_reserve(size_t new_capacity) { + if (new_capacity > capacity_) + grow(new_capacity); + } + + void push_back(const T &value) { + try_reserve(size_ + 1); + ptr_[size_++] = value; + } + + /** Appends data to the end of the buffer. */ + template + void append(const U *begin, const U *end); + + template + auto operator[](I index) -> T & { + return ptr_[index]; + } + template + auto operator[](I index) const -> const T & { + return ptr_[index]; + } }; struct buffer_traits { - explicit buffer_traits(size_t) {} - auto count() const -> size_t { return 0; } - auto limit(size_t size) -> size_t { return size; } + explicit buffer_traits(size_t) { + } + auto count() const -> size_t { + return 0; + } + auto limit(size_t size) -> size_t { + return size; + } }; class fixed_buffer_traits { - private: - size_t count_ = 0; - size_t limit_; - - public: - explicit fixed_buffer_traits(size_t limit) : limit_(limit) {} - auto count() const -> size_t { return count_; } - auto limit(size_t size) -> size_t { - size_t n = limit_ > count_ ? limit_ - count_ : 0; - count_ += size; - return size < n ? 
size : n; - } +private: + size_t count_ = 0; + size_t limit_; + +public: + explicit fixed_buffer_traits(size_t limit) : limit_(limit) { + } + auto count() const -> size_t { + return count_; + } + auto limit(size_t size) -> size_t { + size_t n = limit_ > count_ ? limit_ - count_ : 0; + count_ += size; + return size < n ? size : n; + } }; // A buffer that writes to an output iterator when flushed. template class iterator_buffer final : public Traits, public buffer { - private: - OutputIt out_; - enum { buffer_size = 256 }; - T data_[buffer_size]; - - protected: - void grow(size_t) final FMT_OVERRIDE { - if (this->size() == buffer_size) flush(); - } - - void flush() { - auto size = this->size(); - this->clear(); - out_ = copy_str(data_, data_ + this->limit(size), out_); - } - - public: - explicit iterator_buffer(OutputIt out, size_t n = buffer_size) - : Traits(n), buffer(data_, 0, buffer_size), out_(out) {} - iterator_buffer(iterator_buffer&& other) - : Traits(other), buffer(data_, 0, buffer_size), out_(other.out_) {} - ~iterator_buffer() { flush(); } - - auto out() -> OutputIt { - flush(); - return out_; - } - auto count() const -> size_t { return Traits::count() + this->size(); } +private: + OutputIt out_; + enum { buffer_size = 256 }; + T data_[buffer_size]; + +protected: + void grow(size_t) final FMT_OVERRIDE { + if (this->size() == buffer_size) + flush(); + } + + void flush() { + auto size = this->size(); + this->clear(); + out_ = copy_str(data_, data_ + this->limit(size), out_); + } + +public: + explicit iterator_buffer(OutputIt out, size_t n = buffer_size) + : Traits(n), buffer(data_, 0, buffer_size), out_(out) { + } + iterator_buffer(iterator_buffer &&other) : Traits(other), buffer(data_, 0, buffer_size), out_(other.out_) { + } + ~iterator_buffer() { + flush(); + } + + auto out() -> OutputIt { + flush(); + return out_; + } + auto count() const -> size_t { + return Traits::count() + this->size(); + } }; -template class iterator_buffer final : public buffer { - 
protected: - void grow(size_t) final FMT_OVERRIDE {} - - public: - explicit iterator_buffer(T* out, size_t = 0) : buffer(out, 0, ~size_t()) {} - - auto out() -> T* { return &*this->end(); } +template +class iterator_buffer final : public buffer { +protected: + void grow(size_t) final FMT_OVERRIDE { + } + +public: + explicit iterator_buffer(T *out, size_t = 0) : buffer(out, 0, ~size_t()) { + } + + auto out() -> T * { + return &*this->end(); + } }; // A buffer that writes to a container with the contiguous storage. template class iterator_buffer, - enable_if_t::value, - typename Container::value_type>> + enable_if_t::value, typename Container::value_type>> final : public buffer { - private: - Container& container_; - - protected: - void grow(size_t capacity) final FMT_OVERRIDE { - container_.resize(capacity); - this->set(&container_[0], capacity); - } - - public: - explicit iterator_buffer(Container& c) - : buffer(c.size()), container_(c) {} - explicit iterator_buffer(std::back_insert_iterator out, size_t = 0) - : iterator_buffer(get_container(out)) {} - auto out() -> std::back_insert_iterator { - return std::back_inserter(container_); - } +private: + Container &container_; + +protected: + void grow(size_t capacity) final FMT_OVERRIDE { + container_.resize(capacity); + this->set(&container_[0], capacity); + } + +public: + explicit iterator_buffer(Container &c) : buffer(c.size()), container_(c) { + } + explicit iterator_buffer(std::back_insert_iterator out, size_t = 0) + : iterator_buffer(get_container(out)) { + } + auto out() -> std::back_insert_iterator { + return std::back_inserter(container_); + } }; // A buffer that counts the number of code units written discarding the output. 
-template class counting_buffer final : public buffer { - private: - enum { buffer_size = 256 }; - T data_[buffer_size]; - size_t count_ = 0; - - protected: - void grow(size_t) final FMT_OVERRIDE { - if (this->size() != buffer_size) return; - count_ += this->size(); - this->clear(); - } - - public: - counting_buffer() : buffer(data_, 0, buffer_size) {} - - auto count() -> size_t { return count_ + this->size(); } +template +class counting_buffer final : public buffer { +private: + enum { buffer_size = 256 }; + T data_[buffer_size]; + size_t count_ = 0; + +protected: + void grow(size_t) final FMT_OVERRIDE { + if (this->size() != buffer_size) + return; + count_ += this->size(); + this->clear(); + } + +public: + counting_buffer() : buffer(data_, 0, buffer_size) { + } + + auto count() -> size_t { + return count_ + this->size(); + } }; template -using buffer_appender = conditional_t::value, appender, - std::back_insert_iterator>>; +using buffer_appender = conditional_t::value, appender, std::back_insert_iterator>>; // Maps an output iterator to a buffer. template auto get_buffer(OutputIt out) -> iterator_buffer { - return iterator_buffer(out); + return iterator_buffer(out); } template -auto get_iterator(Buffer& buf) -> decltype(buf.out()) { - return buf.out(); +auto get_iterator(Buffer &buf) -> decltype(buf.out()) { + return buf.out(); } -template auto get_iterator(buffer& buf) -> buffer_appender { - return buffer_appender(buf); +template +auto get_iterator(buffer &buf) -> buffer_appender { + return buffer_appender(buf); } template struct fallback_formatter { - fallback_formatter() = delete; + fallback_formatter() = delete; }; // Specifies if T has an enabled fallback_formatter specialization. 
template -using has_fallback_formatter = - std::is_constructible>; +using has_fallback_formatter = std::is_constructible>; struct view {}; -template struct named_arg : view { - const Char* name; - const T& value; - named_arg(const Char* n, const T& v) : name(n), value(v) {} +template +struct named_arg : view { + const Char *name; + const T &value; + named_arg(const Char *n, const T &v) : name(n), value(v) { + } }; -template struct named_arg_info { - const Char* name; - int id; +template +struct named_arg_info { + const Char *name; + int id; }; template struct arg_data { - // args_[0].named_args points to named_args_ to avoid bloating format_args. - // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. - T args_[1 + (NUM_ARGS != 0 ? NUM_ARGS : +1)]; - named_arg_info named_args_[NUM_NAMED_ARGS]; - - template - arg_data(const U&... init) : args_{T(named_args_, NUM_NAMED_ARGS), init...} {} - arg_data(const arg_data& other) = delete; - auto args() const -> const T* { return args_ + 1; } - auto named_args() -> named_arg_info* { return named_args_; } + // args_[0].named_args points to named_args_ to avoid bloating format_args. + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches + // warning. + T args_[1 + (NUM_ARGS != 0 ? NUM_ARGS : +1)]; + named_arg_info named_args_[NUM_NAMED_ARGS]; + + template + arg_data(const U &...init) : args_ {T(named_args_, NUM_NAMED_ARGS), init...} { + } + arg_data(const arg_data &other) = delete; + auto args() const -> const T * { + return args_ + 1; + } + auto named_args() -> named_arg_info * { + return named_args_; + } }; template struct arg_data { - // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. - T args_[NUM_ARGS != 0 ? NUM_ARGS : +1]; - - template - FMT_CONSTEXPR FMT_INLINE arg_data(const U&... 
init) : args_{init...} {} - FMT_CONSTEXPR FMT_INLINE auto args() const -> const T* { return args_; } - FMT_CONSTEXPR FMT_INLINE auto named_args() -> std::nullptr_t { - return nullptr; - } + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches + // warning. + T args_[NUM_ARGS != 0 ? NUM_ARGS : +1]; + + template + FMT_CONSTEXPR FMT_INLINE arg_data(const U &...init) : args_ {init...} { + } + FMT_CONSTEXPR FMT_INLINE auto args() const -> const T * { + return args_; + } + FMT_CONSTEXPR FMT_INLINE auto named_args() -> std::nullptr_t { + return nullptr; + } }; template -inline void init_named_args(named_arg_info*, int, int) {} +inline void init_named_args(named_arg_info *, int, int) { +} -template struct is_named_arg : std::false_type {}; -template struct is_statically_named_arg : std::false_type {}; +template +struct is_named_arg : std::false_type {}; +template +struct is_statically_named_arg : std::false_type {}; template struct is_named_arg> : std::true_type {}; -template ::value)> -void init_named_args(named_arg_info* named_args, int arg_count, - int named_arg_count, const T&, const Tail&... args) { - init_named_args(named_args, arg_count + 1, named_arg_count, args...); +template ::value)> +void init_named_args(named_arg_info *named_args, int arg_count, int named_arg_count, const T &, + const Tail &...args) { + init_named_args(named_args, arg_count + 1, named_arg_count, args...); } -template ::value)> -void init_named_args(named_arg_info* named_args, int arg_count, - int named_arg_count, const T& arg, const Tail&... 
args) { - named_args[named_arg_count++] = {arg.name, arg_count}; - init_named_args(named_args, arg_count + 1, named_arg_count, args...); +template ::value)> +void init_named_args(named_arg_info *named_args, int arg_count, int named_arg_count, const T &arg, + const Tail &...args) { + named_args[named_arg_count++] = {arg.name, arg_count}; + init_named_args(named_args, arg_count + 1, named_arg_count, args...); } template -FMT_CONSTEXPR FMT_INLINE void init_named_args(std::nullptr_t, int, int, - const Args&...) {} +FMT_CONSTEXPR FMT_INLINE void init_named_args(std::nullptr_t, int, int, const Args &...) { +} -template constexpr auto count() -> size_t { return B ? 1 : 0; } -template constexpr auto count() -> size_t { - return (B1 ? 1 : 0) + count(); +template +constexpr auto count() -> size_t { + return B ? 1 : 0; +} +template +constexpr auto count() -> size_t { + return (B1 ? 1 : 0) + count(); } -template constexpr auto count_named_args() -> size_t { - return count::value...>(); +template +constexpr auto count_named_args() -> size_t { + return count::value...>(); } enum class type { - none_type, - // Integer types should go first, - int_type, - uint_type, - long_long_type, - ulong_long_type, - int128_type, - uint128_type, - bool_type, - char_type, - last_integer_type = char_type, - // followed by floating-point types. - float_type, - double_type, - long_double_type, - last_numeric_type = long_double_type, - cstring_type, - string_type, - pointer_type, - custom_type + none_type, + // Integer types should go first, + int_type, + uint_type, + long_long_type, + ulong_long_type, + int128_type, + uint128_type, + bool_type, + char_type, + last_integer_type = char_type, + // followed by floating-point types. + float_type, + double_type, + long_double_type, + last_numeric_type = long_double_type, + cstring_type, + string_type, + pointer_type, + custom_type }; // Maps core type T to the corresponding type enum constant. 
template struct type_constant : std::integral_constant {}; -#define FMT_TYPE_CONSTANT(Type, constant) \ - template \ - struct type_constant \ - : std::integral_constant {} +#define FMT_TYPE_CONSTANT(Type, constant) \ + template \ + struct type_constant : std::integral_constant {} FMT_TYPE_CONSTANT(int, int_type); FMT_TYPE_CONSTANT(unsigned, uint_type); @@ -1088,107 +1159,123 @@ FMT_TYPE_CONSTANT(Char, char_type); FMT_TYPE_CONSTANT(float, float_type); FMT_TYPE_CONSTANT(double, double_type); FMT_TYPE_CONSTANT(long double, long_double_type); -FMT_TYPE_CONSTANT(const Char*, cstring_type); +FMT_TYPE_CONSTANT(const Char *, cstring_type); FMT_TYPE_CONSTANT(basic_string_view, string_type); -FMT_TYPE_CONSTANT(const void*, pointer_type); +FMT_TYPE_CONSTANT(const void *, pointer_type); constexpr bool is_integral_type(type t) { - return t > type::none_type && t <= type::last_integer_type; + return t > type::none_type && t <= type::last_integer_type; } constexpr bool is_arithmetic_type(type t) { - return t > type::none_type && t <= type::last_numeric_type; + return t > type::none_type && t <= type::last_numeric_type; } -template struct string_value { - const Char* data; - size_t size; +template +struct string_value { + const Char *data; + size_t size; }; -template struct named_arg_value { - const named_arg_info* data; - size_t size; +template +struct named_arg_value { + const named_arg_info *data; + size_t size; }; -template struct custom_value { - using parse_context = typename Context::parse_context_type; - const void* value; - void (*format)(const void* arg, parse_context& parse_ctx, Context& ctx); +template +struct custom_value { + using parse_context = typename Context::parse_context_type; + const void *value; + void (*format)(const void *arg, parse_context &parse_ctx, Context &ctx); }; // A formatting argument value. 
-template class value { - public: - using char_type = typename Context::char_type; - - union { - monostate no_value; - int int_value; - unsigned uint_value; - long long long_long_value; - unsigned long long ulong_long_value; - int128_t int128_value; - uint128_t uint128_value; - bool bool_value; - char_type char_value; - float float_value; - double double_value; - long double long_double_value; - const void* pointer; - string_value string; - custom_value custom; - named_arg_value named_args; - }; - - constexpr FMT_INLINE value() : no_value() {} - constexpr FMT_INLINE value(int val) : int_value(val) {} - constexpr FMT_INLINE value(unsigned val) : uint_value(val) {} - constexpr FMT_INLINE value(long long val) : long_long_value(val) {} - constexpr FMT_INLINE value(unsigned long long val) : ulong_long_value(val) {} - FMT_INLINE value(int128_t val) : int128_value(val) {} - FMT_INLINE value(uint128_t val) : uint128_value(val) {} - FMT_INLINE value(float val) : float_value(val) {} - FMT_INLINE value(double val) : double_value(val) {} - FMT_INLINE value(long double val) : long_double_value(val) {} - constexpr FMT_INLINE value(bool val) : bool_value(val) {} - constexpr FMT_INLINE value(char_type val) : char_value(val) {} - FMT_CONSTEXPR FMT_INLINE value(const char_type* val) { - string.data = val; - if (is_constant_evaluated()) string.size = {}; - } - FMT_CONSTEXPR FMT_INLINE value(basic_string_view val) { - string.data = val.data(); - string.size = val.size(); - } - FMT_INLINE value(const void* val) : pointer(val) {} - FMT_INLINE value(const named_arg_info* args, size_t size) - : named_args{args, size} {} - - template FMT_CONSTEXPR FMT_INLINE value(const T& val) { - custom.value = &val; - // Get the formatter type through the context to allow different contexts - // have different extension points, e.g. `formatter` for `format` and - // `printf_formatter` for `printf`. 
- custom.format = format_custom_arg< - T, conditional_t::value, - typename Context::template formatter_type, - fallback_formatter>>; - } - - private: - // Formats an argument of a custom type, such as a user-defined class. - template - static void format_custom_arg(const void* arg, - typename Context::parse_context_type& parse_ctx, - Context& ctx) { - Formatter f; - parse_ctx.advance_to(f.parse(parse_ctx)); - ctx.advance_to(f.format(*static_cast(arg), ctx)); - } +template +class value { +public: + using char_type = typename Context::char_type; + + union { + monostate no_value; + int int_value; + unsigned uint_value; + long long long_long_value; + unsigned long long ulong_long_value; + int128_t int128_value; + uint128_t uint128_value; + bool bool_value; + char_type char_value; + float float_value; + double double_value; + long double long_double_value; + const void *pointer; + string_value string; + custom_value custom; + named_arg_value named_args; + }; + + constexpr FMT_INLINE value() : no_value() { + } + constexpr FMT_INLINE value(int val) : int_value(val) { + } + constexpr FMT_INLINE value(unsigned val) : uint_value(val) { + } + constexpr FMT_INLINE value(long long val) : long_long_value(val) { + } + constexpr FMT_INLINE value(unsigned long long val) : ulong_long_value(val) { + } + FMT_INLINE value(int128_t val) : int128_value(val) { + } + FMT_INLINE value(uint128_t val) : uint128_value(val) { + } + FMT_INLINE value(float val) : float_value(val) { + } + FMT_INLINE value(double val) : double_value(val) { + } + FMT_INLINE value(long double val) : long_double_value(val) { + } + constexpr FMT_INLINE value(bool val) : bool_value(val) { + } + constexpr FMT_INLINE value(char_type val) : char_value(val) { + } + FMT_CONSTEXPR FMT_INLINE value(const char_type *val) { + string.data = val; + if (is_constant_evaluated()) + string.size = {}; + } + FMT_CONSTEXPR FMT_INLINE value(basic_string_view val) { + string.data = val.data(); + string.size = val.size(); + } + FMT_INLINE 
value(const void *val) : pointer(val) { + } + FMT_INLINE value(const named_arg_info *args, size_t size) : named_args {args, size} { + } + + template + FMT_CONSTEXPR FMT_INLINE value(const T &val) { + custom.value = &val; + // Get the formatter type through the context to allow different + // contexts have different extension points, e.g. `formatter` for + // `format` and `printf_formatter` for `printf`. + custom.format = format_custom_arg< + T, conditional_t::value, typename Context::template formatter_type, + fallback_formatter>>; + } + +private: + // Formats an argument of a custom type, such as a user-defined class. + template + static void format_custom_arg(const void *arg, typename Context::parse_context_type &parse_ctx, Context &ctx) { + Formatter f; + parse_ctx.advance_to(f.parse(parse_ctx)); + ctx.advance_to(f.format(*static_cast(arg), ctx)); + } }; template -FMT_CONSTEXPR auto make_arg(const T& value) -> basic_format_arg; +FMT_CONSTEXPR auto make_arg(const T &value) -> basic_format_arg; // To minimize the number of types we need to deal with, long is translated // either to int or to long long depending on its size. @@ -1199,152 +1286,162 @@ using ulong_type = conditional_t; struct unformattable {}; // Maps formatting arguments to core types. 
-template struct arg_mapper { - using char_type = typename Context::char_type; - - FMT_CONSTEXPR FMT_INLINE auto map(signed char val) -> int { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned char val) -> unsigned { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(short val) -> int { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned short val) -> unsigned { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(int val) -> int { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned val) -> unsigned { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(long val) -> long_type { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned long val) -> ulong_type { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(long long val) -> long long { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned long long val) - -> unsigned long long { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(int128_t val) -> int128_t { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(uint128_t val) -> uint128_t { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(bool val) -> bool { return val; } - - template ::value)> - FMT_CONSTEXPR FMT_INLINE auto map(T val) -> char_type { - static_assert( - std::is_same::value || std::is_same::value, - "mixing character types is disallowed"); - return val; - } - - FMT_CONSTEXPR FMT_INLINE auto map(float val) -> float { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(double val) -> double { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(long double val) -> long double { - return val; - } - - FMT_CONSTEXPR FMT_INLINE auto map(char_type* val) -> const char_type* { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(const char_type* val) -> const char_type* { - return val; - } - template ::value)> - FMT_CONSTEXPR FMT_INLINE auto map(const T& val) - -> basic_string_view { - static_assert(std::is_same>::value, - "mixing character types is disallowed"); - return to_string_view(val); - } - template , T>::value && - 
!is_string::value && !has_formatter::value && - !has_fallback_formatter::value)> - FMT_CONSTEXPR FMT_INLINE auto map(const T& val) - -> basic_string_view { - return basic_string_view(val); - } - template < - typename T, - FMT_ENABLE_IF( - std::is_constructible, T>::value && - !std::is_constructible, T>::value && - !is_string::value && !has_formatter::value && - !has_fallback_formatter::value)> - FMT_CONSTEXPR FMT_INLINE auto map(const T& val) - -> basic_string_view { - return std_string_view(val); - } - FMT_CONSTEXPR FMT_INLINE auto map(const signed char* val) -> const char* { - static_assert(std::is_same::value, "invalid string type"); - return reinterpret_cast(val); - } - FMT_CONSTEXPR FMT_INLINE auto map(const unsigned char* val) -> const char* { - static_assert(std::is_same::value, "invalid string type"); - return reinterpret_cast(val); - } - FMT_CONSTEXPR FMT_INLINE auto map(signed char* val) -> const char* { - const auto* const_val = val; - return map(const_val); - } - FMT_CONSTEXPR FMT_INLINE auto map(unsigned char* val) -> const char* { - const auto* const_val = val; - return map(const_val); - } - - FMT_CONSTEXPR FMT_INLINE auto map(void* val) -> const void* { return val; } - FMT_CONSTEXPR FMT_INLINE auto map(const void* val) -> const void* { - return val; - } - FMT_CONSTEXPR FMT_INLINE auto map(std::nullptr_t val) -> const void* { - return val; - } - - // We use SFINAE instead of a const T* parameter to avoid conflicting with - // the C array overload. - template - FMT_CONSTEXPR auto map(T) -> enable_if_t::value, int> { - // Formatting of arbitrary pointers is disallowed. If you want to output - // a pointer cast it to "void *" or "const void *". In particular, this - // forbids formatting of "[const] volatile char *" which is printed as bool - // by iostreams. 
- static_assert(!sizeof(T), "formatting of non-void pointers is disallowed"); - return 0; - } - - template - FMT_CONSTEXPR FMT_INLINE auto map(const T (&values)[N]) -> const T (&)[N] { - return values; - } - - template ::value && - !has_formatter::value && - !has_fallback_formatter::value)> - FMT_CONSTEXPR FMT_INLINE auto map(const T& val) - -> decltype(std::declval().map( - static_cast::type>(val))) { - return map(static_cast::type>(val)); - } - template ::value && !is_char::value && - (has_formatter::value || - has_fallback_formatter::value))> - FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> const T& { - return val; - } - - template ::value)> - FMT_CONSTEXPR FMT_INLINE auto map(const T& named_arg) - -> decltype(std::declval().map(named_arg.value)) { - return map(named_arg.value); - } - - auto map(...) -> unformattable { return {}; } +template +struct arg_mapper { + using char_type = typename Context::char_type; + + FMT_CONSTEXPR FMT_INLINE auto map(signed char val) -> int { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned char val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(short val) -> int { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned short val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int val) -> int { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(long val) -> long_type { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long val) -> ulong_type { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(long long val) -> long long { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long long val) -> unsigned long long { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int128_t val) -> int128_t { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(uint128_t val) -> uint128_t { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(bool val) -> bool { + 
return val; + } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(T val) -> char_type { + static_assert(std::is_same::value || std::is_same::value, + "mixing character types is disallowed"); + return val; + } + + FMT_CONSTEXPR FMT_INLINE auto map(float val) -> float { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(double val) -> double { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(long double val) -> long double { + return val; + } + + FMT_CONSTEXPR FMT_INLINE auto map(char_type *val) -> const char_type * { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(const char_type *val) -> const char_type * { + return val; + } + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T &val) -> basic_string_view { + static_assert(std::is_same>::value, "mixing character types is disallowed"); + return to_string_view(val); + } + template , T>::value && !is_string::value && + !has_formatter::value && !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T &val) -> basic_string_view { + return basic_string_view(val); + } + template , T>::value && + !std::is_constructible, T>::value && !is_string::value && + !has_formatter::value && !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T &val) -> basic_string_view { + return std_string_view(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(const signed char *val) -> const char * { + static_assert(std::is_same::value, "invalid string type"); + return reinterpret_cast(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(const unsigned char *val) -> const char * { + static_assert(std::is_same::value, "invalid string type"); + return reinterpret_cast(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(signed char *val) -> const char * { + const auto *const_val = val; + return map(const_val); + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned char *val) -> const char * { + const auto *const_val = val; + return map(const_val); + } + + FMT_CONSTEXPR FMT_INLINE auto map(void 
*val) -> const void * { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(const void *val) -> const void * { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(std::nullptr_t val) -> const void * { + return val; + } + + // We use SFINAE instead of a const T* parameter to avoid conflicting with + // the C array overload. + template + FMT_CONSTEXPR auto map(T) -> enable_if_t::value, int> { + // Formatting of arbitrary pointers is disallowed. If you want to output + // a pointer cast it to "void *" or "const void *". In particular, this + // forbids formatting of "[const] volatile char *" which is printed as + // bool by iostreams. + static_assert(!sizeof(T), "formatting of non-void pointers is disallowed"); + return 0; + } + + template + FMT_CONSTEXPR FMT_INLINE auto map(const T (&values)[N]) -> const T (&)[N] { + return values; + } + + template ::value && !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T &val) + -> decltype(std::declval().map(static_cast::type>(val))) { + return map(static_cast::type>(val)); + } + template ::value && !is_char::value && + (has_formatter::value || has_fallback_formatter::value))> + FMT_CONSTEXPR FMT_INLINE auto map(const T &val) -> const T & { + return val; + } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T &named_arg) -> decltype(std::declval().map(named_arg.value)) { + return map(named_arg.value); + } + + auto map(...) -> unformattable { + return {}; + } }; // A type constant after applying arg_mapper. template using mapped_type_constant = - type_constant().map(std::declval())), - typename Context::char_type>; + type_constant().map(std::declval())), typename Context::char_type>; enum { packed_arg_bits = 4 }; // Maximum number of arguments with packed types. @@ -1357,83 +1454,87 @@ FMT_END_DETAIL_NAMESPACE // An output iterator that appends to a buffer. // It is used to reduce symbol sizes for the common case. 
class appender : public std::back_insert_iterator> { - using base = std::back_insert_iterator>; - - template - friend auto get_buffer(appender out) -> detail::buffer& { - return detail::get_container(out); - } - - public: - using std::back_insert_iterator>::back_insert_iterator; - appender(base it) : base(it) {} - using _Unchecked_type = appender; // Mark iterator as checked. - - auto operator++() -> appender& { - base::operator++(); - return *this; - } - - auto operator++(int) -> appender { - auto tmp = *this; - ++*this; - return tmp; - } + using base = std::back_insert_iterator>; + + template + friend auto get_buffer(appender out) -> detail::buffer & { + return detail::get_container(out); + } + +public: + using std::back_insert_iterator>::back_insert_iterator; + appender(base it) : base(it) { + } + using _Unchecked_type = appender; // Mark iterator as checked. + + auto operator++() -> appender & { + base::operator++(); + return *this; + } + + auto operator++(int) -> appender { + auto tmp = *this; + ++*this; + return tmp; + } }; // A formatting argument. It is a trivially copyable/constructible type to // allow storage in basic_memory_buffer. 
-template class basic_format_arg { - private: - detail::value value_; - detail::type type_; +template +class basic_format_arg { +private: + detail::value value_; + detail::type type_; - template - friend FMT_CONSTEXPR auto detail::make_arg(const T& value) - -> basic_format_arg; + template + friend FMT_CONSTEXPR auto detail::make_arg(const T &value) -> basic_format_arg; - template - friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, - const basic_format_arg& arg) - -> decltype(vis(0)); + template + friend FMT_CONSTEXPR auto visit_format_arg(Visitor &&vis, const basic_format_arg &arg) -> decltype(vis(0)); - friend class basic_format_args; - friend class dynamic_format_arg_store; + friend class basic_format_args; + friend class dynamic_format_arg_store; - using char_type = typename Context::char_type; + using char_type = typename Context::char_type; - template - friend struct detail::arg_data; + template + friend struct detail::arg_data; - basic_format_arg(const detail::named_arg_info* args, size_t size) - : value_(args, size) {} + basic_format_arg(const detail::named_arg_info *args, size_t size) : value_(args, size) { + } - public: - class handle { - public: - explicit handle(detail::custom_value custom) : custom_(custom) {} +public: + class handle { + public: + explicit handle(detail::custom_value custom) : custom_(custom) { + } - void format(typename Context::parse_context_type& parse_ctx, - Context& ctx) const { - custom_.format(custom_.value, parse_ctx, ctx); - } + void format(typename Context::parse_context_type &parse_ctx, Context &ctx) const { + custom_.format(custom_.value, parse_ctx, ctx); + } - private: - detail::custom_value custom_; - }; + private: + detail::custom_value custom_; + }; - constexpr basic_format_arg() : type_(detail::type::none_type) {} + constexpr basic_format_arg() : type_(detail::type::none_type) { + } - constexpr explicit operator bool() const FMT_NOEXCEPT { - return type_ != detail::type::none_type; - } + constexpr explicit 
operator bool() const FMT_NOEXCEPT { + return type_ != detail::type::none_type; + } - auto type() const -> detail::type { return type_; } + auto type() const -> detail::type { + return type_; + } - auto is_integral() const -> bool { return detail::is_integral_type(type_); } - auto is_arithmetic() const -> bool { - return detail::is_arithmetic_type(type_); - } + auto is_integral() const -> bool { + return detail::is_integral_type(type_); + } + auto is_arithmetic() const -> bool { + return detail::is_arithmetic_type(type_); + } }; /** @@ -1444,61 +1545,65 @@ template class basic_format_arg { \endrst */ template -FMT_CONSTEXPR FMT_INLINE auto visit_format_arg( - Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)) { - switch (arg.type_) { - case detail::type::none_type: - break; - case detail::type::int_type: - return vis(arg.value_.int_value); - case detail::type::uint_type: - return vis(arg.value_.uint_value); - case detail::type::long_long_type: - return vis(arg.value_.long_long_value); - case detail::type::ulong_long_type: - return vis(arg.value_.ulong_long_value); - case detail::type::int128_type: - return vis(detail::convert_for_visit(arg.value_.int128_value)); - case detail::type::uint128_type: - return vis(detail::convert_for_visit(arg.value_.uint128_value)); - case detail::type::bool_type: - return vis(arg.value_.bool_value); - case detail::type::char_type: - return vis(arg.value_.char_value); - case detail::type::float_type: - return vis(arg.value_.float_value); - case detail::type::double_type: - return vis(arg.value_.double_value); - case detail::type::long_double_type: - return vis(arg.value_.long_double_value); - case detail::type::cstring_type: - return vis(arg.value_.string.data); - case detail::type::string_type: - using sv = basic_string_view; - return vis(sv(arg.value_.string.data, arg.value_.string.size)); - case detail::type::pointer_type: - return vis(arg.value_.pointer); - case detail::type::custom_type: - return vis(typename 
basic_format_arg::handle(arg.value_.custom)); - } - return vis(monostate()); +FMT_CONSTEXPR FMT_INLINE auto visit_format_arg(Visitor &&vis, const basic_format_arg &arg) + -> decltype(vis(0)) { + switch (arg.type_) { + case detail::type::none_type: + break; + case detail::type::int_type: + return vis(arg.value_.int_value); + case detail::type::uint_type: + return vis(arg.value_.uint_value); + case detail::type::long_long_type: + return vis(arg.value_.long_long_value); + case detail::type::ulong_long_type: + return vis(arg.value_.ulong_long_value); + case detail::type::int128_type: + return vis(detail::convert_for_visit(arg.value_.int128_value)); + case detail::type::uint128_type: + return vis(detail::convert_for_visit(arg.value_.uint128_value)); + case detail::type::bool_type: + return vis(arg.value_.bool_value); + case detail::type::char_type: + return vis(arg.value_.char_value); + case detail::type::float_type: + return vis(arg.value_.float_value); + case detail::type::double_type: + return vis(arg.value_.double_value); + case detail::type::long_double_type: + return vis(arg.value_.long_double_value); + case detail::type::cstring_type: + return vis(arg.value_.string.data); + case detail::type::string_type: + using sv = basic_string_view; + return vis(sv(arg.value_.string.data, arg.value_.string.size)); + case detail::type::pointer_type: + return vis(arg.value_.pointer); + case detail::type::custom_type: + return vis(typename basic_format_arg::handle(arg.value_.custom)); + } + return vis(monostate()); } FMT_BEGIN_DETAIL_NAMESPACE template auto copy_str(InputIt begin, InputIt end, appender out) -> appender { - get_container(out).append(begin, end); - return out; + get_container(out).append(begin, end); + return out; } #if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 // A workaround for gcc 4.8 to make void_t work in a SFINAE context. 
-template struct void_t_impl { using type = void; }; +template +struct void_t_impl { + using type = void; +}; template using void_t = typename detail::void_t_impl::type; #else -template using void_t = void; +template +using void_t = void; #endif template @@ -1507,146 +1612,156 @@ struct is_output_iterator : std::false_type {}; template struct is_output_iterator< It, T, - void_t::iterator_category, - decltype(*std::declval() = std::declval())>> + void_t::iterator_category, decltype(*std::declval() = std::declval())>> : std::true_type {}; template struct is_back_insert_iterator : std::false_type {}; template -struct is_back_insert_iterator> - : std::true_type {}; +struct is_back_insert_iterator> : std::true_type {}; template struct is_contiguous_back_insert_iterator : std::false_type {}; template -struct is_contiguous_back_insert_iterator> - : is_contiguous {}; +struct is_contiguous_back_insert_iterator> : is_contiguous {}; template <> struct is_contiguous_back_insert_iterator : std::true_type {}; // A type-erased reference to an std::locale to avoid heavy include. class locale_ref { - private: - const void* locale_; // A type-erased pointer to std::locale. +private: + const void *locale_; // A type-erased pointer to std::locale. 
- public: - constexpr locale_ref() : locale_(nullptr) {} - template explicit locale_ref(const Locale& loc); +public: + constexpr locale_ref() : locale_(nullptr) { + } + template + explicit locale_ref(const Locale &loc); - explicit operator bool() const FMT_NOEXCEPT { return locale_ != nullptr; } + explicit operator bool() const FMT_NOEXCEPT { + return locale_ != nullptr; + } - template auto get() const -> Locale; + template + auto get() const -> Locale; }; -template constexpr auto encode_types() -> unsigned long long { - return 0; +template +constexpr auto encode_types() -> unsigned long long { + return 0; } template constexpr auto encode_types() -> unsigned long long { - return static_cast(mapped_type_constant::value) | - (encode_types() << packed_arg_bits); + return static_cast(mapped_type_constant::value) | + (encode_types() << packed_arg_bits); } template -FMT_CONSTEXPR auto make_arg(const T& value) -> basic_format_arg { - basic_format_arg arg; - arg.type_ = mapped_type_constant::value; - arg.value_ = arg_mapper().map(value); - return arg; +FMT_CONSTEXPR auto make_arg(const T &value) -> basic_format_arg { + basic_format_arg arg; + arg.type_ = mapped_type_constant::value; + arg.value_ = arg_mapper().map(value); + return arg; } // The type template parameter is there to avoid an ODR violation when using // a fallback formatter in one translation unit and an implicit conversion in // another (not recommended). -template -FMT_CONSTEXPR FMT_INLINE auto make_arg(const T& val) -> value { - const auto& arg = arg_mapper().map(val); - static_assert( - !std::is_same::value, - "Cannot format an argument. 
To make type T formattable provide a " - "formatter specialization: https://fmt.dev/latest/api.html#udt"); - return {arg}; -} - -template -inline auto make_arg(const T& value) -> basic_format_arg { - return make_arg(value); +template +FMT_CONSTEXPR FMT_INLINE auto make_arg(const T &val) -> value { + const auto &arg = arg_mapper().map(val); + static_assert(!std::is_same::value, + "Cannot format an argument. To make type T formattable provide a " + "formatter specialization: https://fmt.dev/latest/api.html#udt"); + return {arg}; +} + +template +inline auto make_arg(const T &value) -> basic_format_arg { + return make_arg(value); } FMT_END_DETAIL_NAMESPACE // Formatting context. -template class basic_format_context { - public: - /** The character type for the output. */ - using char_type = Char; - - private: - OutputIt out_; - basic_format_args args_; - detail::locale_ref loc_; - - public: - using iterator = OutputIt; - using format_arg = basic_format_arg; - using parse_context_type = basic_format_parse_context; - template using formatter_type = formatter; - - basic_format_context(basic_format_context&&) = default; - basic_format_context(const basic_format_context&) = delete; - void operator=(const basic_format_context&) = delete; - /** - Constructs a ``basic_format_context`` object. References to the arguments are - stored in the object so make sure they have appropriate lifetimes. 
- */ - constexpr basic_format_context( - OutputIt out, basic_format_args ctx_args, - detail::locale_ref loc = detail::locale_ref()) - : out_(out), args_(ctx_args), loc_(loc) {} - - constexpr auto arg(int id) const -> format_arg { return args_.get(id); } - FMT_CONSTEXPR auto arg(basic_string_view name) -> format_arg { - return args_.get(name); - } - FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { - return args_.get_id(name); - } - auto args() const -> const basic_format_args& { - return args_; - } - - FMT_CONSTEXPR auto error_handler() -> detail::error_handler { return {}; } - void on_error(const char* message) { error_handler().on_error(message); } - - // Returns an iterator to the beginning of the output range. - FMT_CONSTEXPR auto out() -> iterator { return out_; } - - // Advances the begin iterator to ``it``. - void advance_to(iterator it) { - if (!detail::is_back_insert_iterator()) out_ = it; - } - - FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } +template +class basic_format_context { +public: + /** The character type for the output. */ + using char_type = Char; + +private: + OutputIt out_; + basic_format_args args_; + detail::locale_ref loc_; + +public: + using iterator = OutputIt; + using format_arg = basic_format_arg; + using parse_context_type = basic_format_parse_context; + template + using formatter_type = formatter; + + basic_format_context(basic_format_context &&) = default; + basic_format_context(const basic_format_context &) = delete; + void operator=(const basic_format_context &) = delete; + /** + Constructs a ``basic_format_context`` object. References to the arguments + are stored in the object so make sure they have appropriate lifetimes. 
+ */ + constexpr basic_format_context(OutputIt out, basic_format_args ctx_args, + detail::locale_ref loc = detail::locale_ref()) + : out_(out), args_(ctx_args), loc_(loc) { + } + + constexpr auto arg(int id) const -> format_arg { + return args_.get(id); + } + FMT_CONSTEXPR auto arg(basic_string_view name) -> format_arg { + return args_.get(name); + } + FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { + return args_.get_id(name); + } + auto args() const -> const basic_format_args & { + return args_; + } + + FMT_CONSTEXPR auto error_handler() -> detail::error_handler { + return {}; + } + void on_error(const char *message) { + error_handler().on_error(message); + } + + // Returns an iterator to the beginning of the output range. + FMT_CONSTEXPR auto out() -> iterator { + return out_; + } + + // Advances the begin iterator to ``it``. + void advance_to(iterator it) { + if (!detail::is_back_insert_iterator()) + out_ = it; + } + + FMT_CONSTEXPR auto locale() -> detail::locale_ref { + return loc_; + } }; template -using buffer_context = - basic_format_context, Char>; +using buffer_context = basic_format_context, Char>; using format_context = buffer_context; // Workaround an alias issue: https://stackoverflow.com/q/62767544/471164. 
-#define FMT_BUFFER_CONTEXT(Char) \ - basic_format_context, Char> +#define FMT_BUFFER_CONTEXT(Char) basic_format_context, Char> template -using is_formattable = bool_constant< - !std::is_same>().map( - std::declval())), - detail::unformattable>::value && - !detail::has_fallback_formatter::value>; +using is_formattable = + bool_constant>().map(std::declval())), + detail::unformattable>::value && + !detail::has_fallback_formatter::value>; /** \rst @@ -1662,38 +1777,30 @@ class format_arg_store : public basic_format_args #endif { - private: - static const size_t num_args = sizeof...(Args); - static const size_t num_named_args = detail::count_named_args(); - static const bool is_packed = num_args <= detail::max_packed_args; - - using value_type = conditional_t, - basic_format_arg>; - - detail::arg_data - data_; - - friend class basic_format_args; - - static constexpr unsigned long long desc = - (is_packed ? detail::encode_types() - : detail::is_unpacked_bit | num_args) | - (num_named_args != 0 - ? static_cast(detail::has_named_args_bit) - : 0); - - public: - FMT_CONSTEXPR FMT_INLINE format_arg_store(const Args&... args) - : +private: + static const size_t num_args = sizeof...(Args); + static const size_t num_named_args = detail::count_named_args(); + static const bool is_packed = num_args <= detail::max_packed_args; + + using value_type = conditional_t, basic_format_arg>; + + detail::arg_data data_; + + friend class basic_format_args; + + static constexpr unsigned long long desc = + (is_packed ? detail::encode_types() : detail::is_unpacked_bit | num_args) | + (num_named_args != 0 ? 
static_cast(detail::has_named_args_bit) : 0); + +public: + FMT_CONSTEXPR FMT_INLINE format_arg_store(const Args &...args) + : #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 - basic_format_args(*this), + basic_format_args(*this), #endif - data_{detail::make_arg< - is_packed, Context, - detail::mapped_type_constant::value>(args)...} { - detail::init_named_args(data_.named_args(), 0, 0, args...); - } + data_ {detail::make_arg::value>(args)...} { + detail::init_named_args(data_.named_args(), 0, 0, args...); + } }; /** @@ -1705,9 +1812,8 @@ class format_arg_store \endrst */ template -constexpr auto make_format_args(const Args&... args) - -> format_arg_store { - return {args...}; +constexpr auto make_format_args(const Args &...args) -> format_arg_store { + return {args...}; } /** @@ -1722,9 +1828,9 @@ constexpr auto make_format_args(const Args&... args) \endrst */ template -inline auto arg(const Char* name, const T& arg) -> detail::named_arg { - static_assert(!detail::is_named_arg(), "nested named arguments"); - return {name, arg}; +inline auto arg(const Char *name, const T &arg) -> detail::named_arg { + static_assert(!detail::is_named_arg(), "nested named arguments"); + return {name, arg}; } /** @@ -1737,115 +1843,120 @@ inline auto arg(const Char* name, const T& arg) -> detail::named_arg { format_args args = make_format_args(42); // Error: dangling reference \endrst */ -template class basic_format_args { - public: - using size_type = int; - using format_arg = basic_format_arg; - - private: - // A descriptor that contains information about formatting arguments. - // If the number of arguments is less or equal to max_packed_args then - // argument types are passed in the descriptor. This reduces binary code size - // per formatting function call. - unsigned long long desc_; - union { - // If is_packed() returns true then argument values are stored in values_; - // otherwise they are stored in args_. 
This is done to improve cache - // locality and reduce compiled code size since storing larger objects - // may require more code (at least on x86-64) even if the same amount of - // data is actually copied to stack. It saves ~10% on the bloat test. - const detail::value* values_; - const format_arg* args_; - }; - - constexpr auto is_packed() const -> bool { - return (desc_ & detail::is_unpacked_bit) == 0; - } - auto has_named_args() const -> bool { - return (desc_ & detail::has_named_args_bit) != 0; - } - - FMT_CONSTEXPR auto type(int index) const -> detail::type { - int shift = index * detail::packed_arg_bits; - unsigned int mask = (1 << detail::packed_arg_bits) - 1; - return static_cast((desc_ >> shift) & mask); - } - - constexpr FMT_INLINE basic_format_args(unsigned long long desc, - const detail::value* values) - : desc_(desc), values_(values) {} - constexpr basic_format_args(unsigned long long desc, const format_arg* args) - : desc_(desc), args_(args) {} - - public: - constexpr basic_format_args() : desc_(0), args_(nullptr) {} - - /** - \rst - Constructs a `basic_format_args` object from `~fmt::format_arg_store`. - \endrst - */ - template - constexpr FMT_INLINE basic_format_args( - const format_arg_store& store) - : basic_format_args(format_arg_store::desc, - store.data_.args()) {} - - /** - \rst - Constructs a `basic_format_args` object from - `~fmt::dynamic_format_arg_store`. - \endrst - */ - constexpr FMT_INLINE basic_format_args( - const dynamic_format_arg_store& store) - : basic_format_args(store.get_types(), store.data()) {} - - /** - \rst - Constructs a `basic_format_args` object from a dynamic set of arguments. - \endrst - */ - constexpr basic_format_args(const format_arg* args, int count) - : basic_format_args(detail::is_unpacked_bit | detail::to_unsigned(count), - args) {} - - /** Returns the argument with the specified id. 
*/ - FMT_CONSTEXPR auto get(int id) const -> format_arg { - format_arg arg; - if (!is_packed()) { - if (id < max_size()) arg = args_[id]; - return arg; - } - if (id >= detail::max_packed_args) return arg; - arg.type_ = type(id); - if (arg.type_ == detail::type::none_type) return arg; - arg.value_ = values_[id]; - return arg; - } - - template - auto get(basic_string_view name) const -> format_arg { - int id = get_id(name); - return id >= 0 ? get(id) : format_arg(); - } - - template - auto get_id(basic_string_view name) const -> int { - if (!has_named_args()) return -1; - const auto& named_args = - (is_packed() ? values_[-1] : args_[-1].value_).named_args; - for (size_t i = 0; i < named_args.size; ++i) { - if (named_args.data[i].name == name) return named_args.data[i].id; - } - return -1; - } - - auto max_size() const -> int { - unsigned long long max_packed = detail::max_packed_args; - return static_cast(is_packed() ? max_packed - : desc_ & ~detail::is_unpacked_bit); - } +template +class basic_format_args { +public: + using size_type = int; + using format_arg = basic_format_arg; + +private: + // A descriptor that contains information about formatting arguments. + // If the number of arguments is less or equal to max_packed_args then + // argument types are passed in the descriptor. This reduces binary code + // size per formatting function call. + unsigned long long desc_; + union { + // If is_packed() returns true then argument values are stored in + // values_; otherwise they are stored in args_. This is done to improve + // cache locality and reduce compiled code size since storing larger + // objects may require more code (at least on x86-64) even if the same + // amount of data is actually copied to stack. It saves ~10% on the + // bloat test. 
+ const detail::value *values_; + const format_arg *args_; + }; + + constexpr auto is_packed() const -> bool { + return (desc_ & detail::is_unpacked_bit) == 0; + } + auto has_named_args() const -> bool { + return (desc_ & detail::has_named_args_bit) != 0; + } + + FMT_CONSTEXPR auto type(int index) const -> detail::type { + int shift = index * detail::packed_arg_bits; + unsigned int mask = (1 << detail::packed_arg_bits) - 1; + return static_cast((desc_ >> shift) & mask); + } + + constexpr FMT_INLINE basic_format_args(unsigned long long desc, const detail::value *values) + : desc_(desc), values_(values) { + } + constexpr basic_format_args(unsigned long long desc, const format_arg *args) : desc_(desc), args_(args) { + } + +public: + constexpr basic_format_args() : desc_(0), args_(nullptr) { + } + + /** + \rst + Constructs a `basic_format_args` object from `~fmt::format_arg_store`. + \endrst + */ + template + constexpr FMT_INLINE basic_format_args(const format_arg_store &store) + : basic_format_args(format_arg_store::desc, store.data_.args()) { + } + + /** + \rst + Constructs a `basic_format_args` object from + `~fmt::dynamic_format_arg_store`. + \endrst + */ + constexpr FMT_INLINE basic_format_args(const dynamic_format_arg_store &store) + : basic_format_args(store.get_types(), store.data()) { + } + + /** + \rst + Constructs a `basic_format_args` object from a dynamic set of arguments. + \endrst + */ + constexpr basic_format_args(const format_arg *args, int count) + : basic_format_args(detail::is_unpacked_bit | detail::to_unsigned(count), args) { + } + + /** Returns the argument with the specified id. 
*/ + FMT_CONSTEXPR auto get(int id) const -> format_arg { + format_arg arg; + if (!is_packed()) { + if (id < max_size()) + arg = args_[id]; + return arg; + } + if (id >= detail::max_packed_args) + return arg; + arg.type_ = type(id); + if (arg.type_ == detail::type::none_type) + return arg; + arg.value_ = values_[id]; + return arg; + } + + template + auto get(basic_string_view name) const -> format_arg { + int id = get_id(name); + return id >= 0 ? get(id) : format_arg(); + } + + template + auto get_id(basic_string_view name) const -> int { + if (!has_named_args()) + return -1; + const auto &named_args = (is_packed() ? values_[-1] : args_[-1].value_).named_args; + for (size_t i = 0; i < named_args.size; ++i) { + if (named_args.data[i].name == name) + return named_args.data[i].id; + } + return -1; + } + + auto max_size() const -> int { + unsigned long long max_packed = detail::max_packed_args; + return static_cast(is_packed() ? max_packed : desc_ & ~detail::is_unpacked_bit); + } }; /** An alias to ``basic_format_args``. */ @@ -1866,52 +1977,57 @@ using sign_t = sign::type; FMT_BEGIN_DETAIL_NAMESPACE -void throw_format_error(const char* message); +void throw_format_error(const char *message); // Workaround an array initialization issue in gcc 4.8. 
-template struct fill_t { - private: - enum { max_size = 4 }; - Char data_[max_size] = {Char(' '), Char(0), Char(0), Char(0)}; - unsigned char size_ = 1; - - public: - FMT_CONSTEXPR void operator=(basic_string_view s) { - auto size = s.size(); - if (size > max_size) return throw_format_error("invalid fill"); - for (size_t i = 0; i < size; ++i) data_[i] = s[i]; - size_ = static_cast(size); - } - - constexpr auto size() const -> size_t { return size_; } - constexpr auto data() const -> const Char* { return data_; } - - FMT_CONSTEXPR auto operator[](size_t index) -> Char& { return data_[index]; } - FMT_CONSTEXPR auto operator[](size_t index) const -> const Char& { - return data_[index]; - } +template +struct fill_t { +private: + enum { max_size = 4 }; + Char data_[max_size] = {Char(' '), Char(0), Char(0), Char(0)}; + unsigned char size_ = 1; + +public: + FMT_CONSTEXPR void operator=(basic_string_view s) { + auto size = s.size(); + if (size > max_size) + return throw_format_error("invalid fill"); + for (size_t i = 0; i < size; ++i) + data_[i] = s[i]; + size_ = static_cast(size); + } + + constexpr auto size() const -> size_t { + return size_; + } + constexpr auto data() const -> const Char * { + return data_; + } + + FMT_CONSTEXPR auto operator[](size_t index) -> Char & { + return data_[index]; + } + FMT_CONSTEXPR auto operator[](size_t index) const -> const Char & { + return data_[index]; + } }; FMT_END_DETAIL_NAMESPACE // Format specifiers for built-in and string types. -template struct basic_format_specs { - int width; - int precision; - char type; - align_t align : 4; - sign_t sign : 3; - bool alt : 1; // Alternate form ('#'). 
- bool localized : 1; - detail::fill_t fill; - - constexpr basic_format_specs() - : width(0), - precision(-1), - type(0), - align(align::none), - sign(sign::none), - alt(false), - localized(false) {} +template +struct basic_format_specs { + int width; + int precision; + char type; + align_t align : 4; + sign_t sign : 3; + bool alt : 1; // Alternate form ('#'). + bool localized : 1; + detail::fill_t fill; + + constexpr basic_format_specs() + : width(0), precision(-1), type(0), align(align::none), sign(sign::none), alt(false), localized(false) { + } }; using format_specs = basic_format_specs; @@ -1921,28 +2037,32 @@ FMT_BEGIN_DETAIL_NAMESPACE enum class arg_id_kind { none, index, name }; // An argument reference. -template struct arg_ref { - FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {} - - FMT_CONSTEXPR explicit arg_ref(int index) - : kind(arg_id_kind::index), val(index) {} - FMT_CONSTEXPR explicit arg_ref(basic_string_view name) - : kind(arg_id_kind::name), val(name) {} - - FMT_CONSTEXPR auto operator=(int idx) -> arg_ref& { - kind = arg_id_kind::index; - val.index = idx; - return *this; - } - - arg_id_kind kind; - union value { - FMT_CONSTEXPR value(int id = 0) : index{id} {} - FMT_CONSTEXPR value(basic_string_view n) : name(n) {} - - int index; - basic_string_view name; - } val; +template +struct arg_ref { + FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() { + } + + FMT_CONSTEXPR explicit arg_ref(int index) : kind(arg_id_kind::index), val(index) { + } + FMT_CONSTEXPR explicit arg_ref(basic_string_view name) : kind(arg_id_kind::name), val(name) { + } + + FMT_CONSTEXPR auto operator=(int idx) -> arg_ref & { + kind = arg_id_kind::index; + val.index = idx; + return *this; + } + + arg_id_kind kind; + union value { + FMT_CONSTEXPR value(int id = 0) : index {id} { + } + FMT_CONSTEXPR value(basic_string_view n) : name(n) { + } + + int index; + basic_string_view name; + } val; }; // Format specifiers with width and precision resolved at formatting 
rather @@ -1950,499 +2070,524 @@ template struct arg_ref { // different sets of arguments (precompilation of format strings). template struct dynamic_format_specs : basic_format_specs { - arg_ref width_ref; - arg_ref precision_ref; + arg_ref width_ref; + arg_ref precision_ref; }; struct auto_id {}; // A format specifier handler that sets fields in basic_format_specs. -template class specs_setter { - protected: - basic_format_specs& specs_; - - public: - explicit FMT_CONSTEXPR specs_setter(basic_format_specs& specs) - : specs_(specs) {} - - FMT_CONSTEXPR specs_setter(const specs_setter& other) - : specs_(other.specs_) {} - - FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; } - FMT_CONSTEXPR void on_fill(basic_string_view fill) { - specs_.fill = fill; - } - FMT_CONSTEXPR void on_sign(sign_t s) { specs_.sign = s; } - FMT_CONSTEXPR void on_hash() { specs_.alt = true; } - FMT_CONSTEXPR void on_localized() { specs_.localized = true; } - - FMT_CONSTEXPR void on_zero() { - if (specs_.align == align::none) specs_.align = align::numeric; - specs_.fill[0] = Char('0'); - } - - FMT_CONSTEXPR void on_width(int width) { specs_.width = width; } - FMT_CONSTEXPR void on_precision(int precision) { - specs_.precision = precision; - } - FMT_CONSTEXPR void end_precision() {} - - FMT_CONSTEXPR void on_type(Char type) { - specs_.type = static_cast(type); - } +template +class specs_setter { +protected: + basic_format_specs &specs_; + +public: + explicit FMT_CONSTEXPR specs_setter(basic_format_specs &specs) : specs_(specs) { + } + + FMT_CONSTEXPR specs_setter(const specs_setter &other) : specs_(other.specs_) { + } + + FMT_CONSTEXPR void on_align(align_t align) { + specs_.align = align; + } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + specs_.fill = fill; + } + FMT_CONSTEXPR void on_sign(sign_t s) { + specs_.sign = s; + } + FMT_CONSTEXPR void on_hash() { + specs_.alt = true; + } + FMT_CONSTEXPR void on_localized() { + specs_.localized = true; + } + + 
FMT_CONSTEXPR void on_zero() { + if (specs_.align == align::none) + specs_.align = align::numeric; + specs_.fill[0] = Char('0'); + } + + FMT_CONSTEXPR void on_width(int width) { + specs_.width = width; + } + FMT_CONSTEXPR void on_precision(int precision) { + specs_.precision = precision; + } + FMT_CONSTEXPR void end_precision() { + } + + FMT_CONSTEXPR void on_type(Char type) { + specs_.type = static_cast(type); + } }; // Format spec handler that saves references to arguments representing dynamic // width and precision to be resolved at formatting time. template -class dynamic_specs_handler - : public specs_setter { - public: - using char_type = typename ParseContext::char_type; - - FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs& specs, - ParseContext& ctx) - : specs_setter(specs), specs_(specs), context_(ctx) {} - - FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler& other) - : specs_setter(other), - specs_(other.specs_), - context_(other.context_) {} - - template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { - specs_.width_ref = make_arg_ref(arg_id); - } - - template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { - specs_.precision_ref = make_arg_ref(arg_id); - } - - FMT_CONSTEXPR void on_error(const char* message) { - context_.on_error(message); - } - - private: - dynamic_format_specs& specs_; - ParseContext& context_; - - using arg_ref_type = arg_ref; - - FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type { - context_.check_arg_id(arg_id); - return arg_ref_type(arg_id); - } - - FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type { - return arg_ref_type(context_.next_arg_id()); - } - - FMT_CONSTEXPR auto make_arg_ref(basic_string_view arg_id) - -> arg_ref_type { - context_.check_arg_id(arg_id); - basic_string_view format_str( - context_.begin(), to_unsigned(context_.end() - context_.begin())); - return arg_ref_type(arg_id); - } +class dynamic_specs_handler : public specs_setter { +public: + using char_type = typename 
ParseContext::char_type; + + FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs &specs, ParseContext &ctx) + : specs_setter(specs), specs_(specs), context_(ctx) { + } + + FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler &other) + : specs_setter(other), specs_(other.specs_), context_(other.context_) { + } + + template + FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + specs_.width_ref = make_arg_ref(arg_id); + } + + template + FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + specs_.precision_ref = make_arg_ref(arg_id); + } + + FMT_CONSTEXPR void on_error(const char *message) { + context_.on_error(message); + } + +private: + dynamic_format_specs &specs_; + ParseContext &context_; + + using arg_ref_type = arg_ref; + + FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type { + context_.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type { + return arg_ref_type(context_.next_arg_id()); + } + + FMT_CONSTEXPR auto make_arg_ref(basic_string_view arg_id) -> arg_ref_type { + context_.check_arg_id(arg_id); + basic_string_view format_str(context_.begin(), to_unsigned(context_.end() - context_.begin())); + return arg_ref_type(arg_id); + } }; -template constexpr bool is_ascii_letter(Char c) { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); +template +constexpr bool is_ascii_letter(Char c) { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } // Converts a character to ASCII. Returns a number > 127 on conversion failure. 
template ::value)> constexpr auto to_ascii(Char value) -> Char { - return value; + return value; } template ::value)> -constexpr auto to_ascii(Char value) -> - typename std::underlying_type::type { - return value; +constexpr auto to_ascii(Char value) -> typename std::underlying_type::type { + return value; } template -FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int { - if (const_check(sizeof(Char) != 1)) return 1; - constexpr char lengths[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 4, 0}; - int len = lengths[static_cast(*begin) >> 3]; +FMT_CONSTEXPR auto code_point_length(const Char *begin) -> int { + if (const_check(sizeof(Char) != 1)) + return 1; + constexpr char lengths[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 4, 0}; + int len = lengths[static_cast(*begin) >> 3]; - // Compute the pointer to the next character early so that the next - // iteration can start working on the next character. Neither Clang - // nor GCC figure out this reordering on their own. - return len + !len; + // Compute the pointer to the next character early so that the next + // iteration can start working on the next character. Neither Clang + // nor GCC figure out this reordering on their own. + return len + !len; } // Return the result via the out param to workaround gcc bug 77539. 
-template -FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr& out) -> bool { - for (out = first; out != last; ++out) { - if (*out == value) return true; - } - return false; +template +FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr &out) -> bool { + for (out = first; out != last; ++out) { + if (*out == value) + return true; + } + return false; } template <> -inline auto find(const char* first, const char* last, char value, - const char*& out) -> bool { - out = static_cast( - std::memchr(first, value, to_unsigned(last - first))); - return out != nullptr; +inline auto find(const char *first, const char *last, char value, const char *&out) -> bool { + out = static_cast(std::memchr(first, value, to_unsigned(last - first))); + return out != nullptr; } // Parses the range [begin, end) as an unsigned integer. This function assumes // that the range is non-empty and the first character is a digit. template -FMT_CONSTEXPR auto parse_nonnegative_int(const Char*& begin, const Char* end, - int error_value) noexcept -> int { - FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); - unsigned value = 0, prev = 0; - auto p = begin; - do { - prev = value; - value = value * 10 + unsigned(*p - '0'); - ++p; - } while (p != end && '0' <= *p && *p <= '9'); - auto num_digits = p - begin; - begin = p; - if (num_digits <= std::numeric_limits::digits10) - return static_cast(value); - // Check for overflow. - const unsigned max = to_unsigned((std::numeric_limits::max)()); - return num_digits == std::numeric_limits::digits10 + 1 && - prev * 10ull + unsigned(p[-1] - '0') <= max - ? 
static_cast(value) - : error_value; +FMT_CONSTEXPR auto parse_nonnegative_int(const Char *&begin, const Char *end, int error_value) noexcept -> int { + FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); + unsigned value = 0, prev = 0; + auto p = begin; + do { + prev = value; + value = value * 10 + unsigned(*p - '0'); + ++p; + } while (p != end && '0' <= *p && *p <= '9'); + auto num_digits = p - begin; + begin = p; + if (num_digits <= std::numeric_limits::digits10) + return static_cast(value); + // Check for overflow. + const unsigned max = to_unsigned((std::numeric_limits::max)()); + return num_digits == std::numeric_limits::digits10 + 1 && prev * 10ull + unsigned(p[-1] - '0') <= max + ? static_cast(value) + : error_value; } // Parses fill and alignment. template -FMT_CONSTEXPR auto parse_align(const Char* begin, const Char* end, - Handler&& handler) -> const Char* { - FMT_ASSERT(begin != end, ""); - auto align = align::none; - auto p = begin + code_point_length(begin); - if (p >= end) p = begin; - for (;;) { - switch (to_ascii(*p)) { - case '<': - align = align::left; - break; - case '>': - align = align::right; - break; - case '^': - align = align::center; - break; - default: - break; - } - if (align != align::none) { - if (p != begin) { - auto c = *begin; - if (c == '{') - return handler.on_error("invalid fill character '{'"), begin; - handler.on_fill(basic_string_view(begin, to_unsigned(p - begin))); - begin = p + 1; - } else - ++begin; - handler.on_align(align); - break; - } else if (p == begin) { - break; - } - p = begin; - } - return begin; -} - -template FMT_CONSTEXPR bool is_name_start(Char c) { - return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; +FMT_CONSTEXPR auto parse_align(const Char *begin, const Char *end, Handler &&handler) -> const Char * { + FMT_ASSERT(begin != end, ""); + auto align = align::none; + auto p = begin + code_point_length(begin); + if (p >= end) + p = begin; + for (;;) { + switch (to_ascii(*p)) { + 
case '<': + align = align::left; + break; + case '>': + align = align::right; + break; + case '^': + align = align::center; + break; + default: + break; + } + if (align != align::none) { + if (p != begin) { + auto c = *begin; + if (c == '{') + return handler.on_error("invalid fill character '{'"), begin; + handler.on_fill(basic_string_view(begin, to_unsigned(p - begin))); + begin = p + 1; + } else + ++begin; + handler.on_align(align); + break; + } else if (p == begin) { + break; + } + p = begin; + } + return begin; +} + +template +FMT_CONSTEXPR bool is_name_start(Char c) { + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; } template -FMT_CONSTEXPR auto do_parse_arg_id(const Char* begin, const Char* end, - IDHandler&& handler) -> const Char* { - FMT_ASSERT(begin != end, ""); - Char c = *begin; - if (c >= '0' && c <= '9') { - int index = 0; - if (c != '0') - index = - parse_nonnegative_int(begin, end, (std::numeric_limits::max)()); - else - ++begin; - if (begin == end || (*begin != '}' && *begin != ':')) - handler.on_error("invalid format string"); - else - handler(index); - return begin; - } - if (!is_name_start(c)) { - handler.on_error("invalid format string"); - return begin; - } - auto it = begin; - do { - ++it; - } while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9'))); - handler(basic_string_view(begin, to_unsigned(it - begin))); - return it; +FMT_CONSTEXPR auto do_parse_arg_id(const Char *begin, const Char *end, IDHandler &&handler) -> const Char * { + FMT_ASSERT(begin != end, ""); + Char c = *begin; + if (c >= '0' && c <= '9') { + int index = 0; + if (c != '0') + index = parse_nonnegative_int(begin, end, (std::numeric_limits::max)()); + else + ++begin; + if (begin == end || (*begin != '}' && *begin != ':')) + handler.on_error("invalid format string"); + else + handler(index); + return begin; + } + if (!is_name_start(c)) { + handler.on_error("invalid format string"); + return begin; + } + auto it = begin; + do { + ++it; + 
} while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9'))); + handler(basic_string_view(begin, to_unsigned(it - begin))); + return it; } template -FMT_CONSTEXPR FMT_INLINE auto parse_arg_id(const Char* begin, const Char* end, - IDHandler&& handler) -> const Char* { - Char c = *begin; - if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler); - handler(); - return begin; +FMT_CONSTEXPR FMT_INLINE auto parse_arg_id(const Char *begin, const Char *end, IDHandler &&handler) -> const Char * { + Char c = *begin; + if (c != '}' && c != ':') + return do_parse_arg_id(begin, end, handler); + handler(); + return begin; } template -FMT_CONSTEXPR auto parse_width(const Char* begin, const Char* end, - Handler&& handler) -> const Char* { - using detail::auto_id; - struct width_adapter { - Handler& handler; - - FMT_CONSTEXPR void operator()() { handler.on_dynamic_width(auto_id()); } - FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_width(id); } - FMT_CONSTEXPR void operator()(basic_string_view id) { - handler.on_dynamic_width(id); - } - FMT_CONSTEXPR void on_error(const char* message) { - if (message) handler.on_error(message); - } - }; - - FMT_ASSERT(begin != end, ""); - if ('0' <= *begin && *begin <= '9') { - int width = parse_nonnegative_int(begin, end, -1); - if (width != -1) - handler.on_width(width); - else - handler.on_error("number is too big"); - } else if (*begin == '{') { - ++begin; - if (begin != end) begin = parse_arg_id(begin, end, width_adapter{handler}); - if (begin == end || *begin != '}') - return handler.on_error("invalid format string"), begin; - ++begin; - } - return begin; +FMT_CONSTEXPR auto parse_width(const Char *begin, const Char *end, Handler &&handler) -> const Char * { + using detail::auto_id; + struct width_adapter { + Handler &handler; + + FMT_CONSTEXPR void operator()() { + handler.on_dynamic_width(auto_id()); + } + FMT_CONSTEXPR void operator()(int id) { + handler.on_dynamic_width(id); + } + FMT_CONSTEXPR void 
operator()(basic_string_view id) { + handler.on_dynamic_width(id); + } + FMT_CONSTEXPR void on_error(const char *message) { + if (message) + handler.on_error(message); + } + }; + + FMT_ASSERT(begin != end, ""); + if ('0' <= *begin && *begin <= '9') { + int width = parse_nonnegative_int(begin, end, -1); + if (width != -1) + handler.on_width(width); + else + handler.on_error("number is too big"); + } else if (*begin == '{') { + ++begin; + if (begin != end) + begin = parse_arg_id(begin, end, width_adapter {handler}); + if (begin == end || *begin != '}') + return handler.on_error("invalid format string"), begin; + ++begin; + } + return begin; } template -FMT_CONSTEXPR auto parse_precision(const Char* begin, const Char* end, - Handler&& handler) -> const Char* { - using detail::auto_id; - struct precision_adapter { - Handler& handler; - - FMT_CONSTEXPR void operator()() { handler.on_dynamic_precision(auto_id()); } - FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_precision(id); } - FMT_CONSTEXPR void operator()(basic_string_view id) { - handler.on_dynamic_precision(id); - } - FMT_CONSTEXPR void on_error(const char* message) { - if (message) handler.on_error(message); - } - }; - - ++begin; - auto c = begin != end ? 
*begin : Char(); - if ('0' <= c && c <= '9') { - auto precision = parse_nonnegative_int(begin, end, -1); - if (precision != -1) - handler.on_precision(precision); - else - handler.on_error("number is too big"); - } else if (c == '{') { - ++begin; - if (begin != end) - begin = parse_arg_id(begin, end, precision_adapter{handler}); - if (begin == end || *begin++ != '}') - return handler.on_error("invalid format string"), begin; - } else { - return handler.on_error("missing precision specifier"), begin; - } - handler.end_precision(); - return begin; +FMT_CONSTEXPR auto parse_precision(const Char *begin, const Char *end, Handler &&handler) -> const Char * { + using detail::auto_id; + struct precision_adapter { + Handler &handler; + + FMT_CONSTEXPR void operator()() { + handler.on_dynamic_precision(auto_id()); + } + FMT_CONSTEXPR void operator()(int id) { + handler.on_dynamic_precision(id); + } + FMT_CONSTEXPR void operator()(basic_string_view id) { + handler.on_dynamic_precision(id); + } + FMT_CONSTEXPR void on_error(const char *message) { + if (message) + handler.on_error(message); + } + }; + + ++begin; + auto c = begin != end ? *begin : Char(); + if ('0' <= c && c <= '9') { + auto precision = parse_nonnegative_int(begin, end, -1); + if (precision != -1) + handler.on_precision(precision); + else + handler.on_error("number is too big"); + } else if (c == '{') { + ++begin; + if (begin != end) + begin = parse_arg_id(begin, end, precision_adapter {handler}); + if (begin == end || *begin++ != '}') + return handler.on_error("invalid format string"), begin; + } else { + return handler.on_error("missing precision specifier"), begin; + } + handler.end_precision(); + return begin; } // Parses standard format specifiers and sends notifications about parsed // components to handler. 
template -FMT_CONSTEXPR FMT_INLINE auto parse_format_specs(const Char* begin, - const Char* end, - SpecHandler&& handler) - -> const Char* { - if (begin + 1 < end && begin[1] == '}' && is_ascii_letter(*begin) && - *begin != 'L') { - handler.on_type(*begin++); - return begin; - } - - if (begin == end) return begin; - - begin = parse_align(begin, end, handler); - if (begin == end) return begin; - - // Parse sign. - switch (to_ascii(*begin)) { - case '+': - handler.on_sign(sign::plus); - ++begin; - break; - case '-': - handler.on_sign(sign::minus); - ++begin; - break; - case ' ': - handler.on_sign(sign::space); - ++begin; - break; - default: - break; - } - if (begin == end) return begin; - - if (*begin == '#') { - handler.on_hash(); - if (++begin == end) return begin; - } - - // Parse zero flag. - if (*begin == '0') { - handler.on_zero(); - if (++begin == end) return begin; - } - - begin = parse_width(begin, end, handler); - if (begin == end) return begin; - - // Parse precision. - if (*begin == '.') { - begin = parse_precision(begin, end, handler); - if (begin == end) return begin; - } - - if (*begin == 'L') { - handler.on_localized(); - ++begin; - } - - // Parse type. - if (begin != end && *begin != '}') handler.on_type(*begin++); - return begin; +FMT_CONSTEXPR FMT_INLINE auto parse_format_specs(const Char *begin, const Char *end, SpecHandler &&handler) + -> const Char * { + if (begin + 1 < end && begin[1] == '}' && is_ascii_letter(*begin) && *begin != 'L') { + handler.on_type(*begin++); + return begin; + } + + if (begin == end) + return begin; + + begin = parse_align(begin, end, handler); + if (begin == end) + return begin; + + // Parse sign. 
+ switch (to_ascii(*begin)) { + case '+': + handler.on_sign(sign::plus); + ++begin; + break; + case '-': + handler.on_sign(sign::minus); + ++begin; + break; + case ' ': + handler.on_sign(sign::space); + ++begin; + break; + default: + break; + } + if (begin == end) + return begin; + + if (*begin == '#') { + handler.on_hash(); + if (++begin == end) + return begin; + } + + // Parse zero flag. + if (*begin == '0') { + handler.on_zero(); + if (++begin == end) + return begin; + } + + begin = parse_width(begin, end, handler); + if (begin == end) + return begin; + + // Parse precision. + if (*begin == '.') { + begin = parse_precision(begin, end, handler); + if (begin == end) + return begin; + } + + if (*begin == 'L') { + handler.on_localized(); + ++begin; + } + + // Parse type. + if (begin != end && *begin != '}') + handler.on_type(*begin++); + return begin; } template -FMT_CONSTEXPR auto parse_replacement_field(const Char* begin, const Char* end, - Handler&& handler) -> const Char* { - struct id_adapter { - Handler& handler; - int arg_id; - - FMT_CONSTEXPR void operator()() { arg_id = handler.on_arg_id(); } - FMT_CONSTEXPR void operator()(int id) { arg_id = handler.on_arg_id(id); } - FMT_CONSTEXPR void operator()(basic_string_view id) { - arg_id = handler.on_arg_id(id); - } - FMT_CONSTEXPR void on_error(const char* message) { - if (message) handler.on_error(message); - } - }; - - ++begin; - if (begin == end) return handler.on_error("invalid format string"), end; - if (*begin == '}') { - handler.on_replacement_field(handler.on_arg_id(), begin); - } else if (*begin == '{') { - handler.on_text(begin, begin + 1); - } else { - auto adapter = id_adapter{handler, 0}; - begin = parse_arg_id(begin, end, adapter); - Char c = begin != end ? 
*begin : Char(); - if (c == '}') { - handler.on_replacement_field(adapter.arg_id, begin); - } else if (c == ':') { - begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); - if (begin == end || *begin != '}') - return handler.on_error("unknown format specifier"), end; - } else { - return handler.on_error("missing '}' in format string"), end; - } - } - return begin + 1; +FMT_CONSTEXPR auto parse_replacement_field(const Char *begin, const Char *end, Handler &&handler) -> const Char * { + struct id_adapter { + Handler &handler; + int arg_id; + + FMT_CONSTEXPR void operator()() { + arg_id = handler.on_arg_id(); + } + FMT_CONSTEXPR void operator()(int id) { + arg_id = handler.on_arg_id(id); + } + FMT_CONSTEXPR void operator()(basic_string_view id) { + arg_id = handler.on_arg_id(id); + } + FMT_CONSTEXPR void on_error(const char *message) { + if (message) + handler.on_error(message); + } + }; + + ++begin; + if (begin == end) + return handler.on_error("invalid format string"), end; + if (*begin == '}') { + handler.on_replacement_field(handler.on_arg_id(), begin); + } else if (*begin == '{') { + handler.on_text(begin, begin + 1); + } else { + auto adapter = id_adapter {handler, 0}; + begin = parse_arg_id(begin, end, adapter); + Char c = begin != end ? 
*begin : Char(); + if (c == '}') { + handler.on_replacement_field(adapter.arg_id, begin); + } else if (c == ':') { + begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); + if (begin == end || *begin != '}') + return handler.on_error("unknown format specifier"), end; + } else { + return handler.on_error("missing '}' in format string"), end; + } + } + return begin + 1; } template -FMT_CONSTEXPR FMT_INLINE void parse_format_string( - basic_string_view format_str, Handler&& handler) { - // this is most likely a name-lookup defect in msvc's modules implementation - using detail::find; - - auto begin = format_str.data(); - auto end = begin + format_str.size(); - if (end - begin < 32) { - // Use a simple loop instead of memchr for small strings. - const Char* p = begin; - while (p != end) { - auto c = *p++; - if (c == '{') { - handler.on_text(begin, p - 1); - begin = p = parse_replacement_field(p - 1, end, handler); - } else if (c == '}') { - if (p == end || *p != '}') - return handler.on_error("unmatched '}' in format string"); - handler.on_text(begin, p); - begin = ++p; - } - } - handler.on_text(begin, end); - return; - } - struct writer { - FMT_CONSTEXPR void operator()(const Char* pbegin, const Char* pend) { - if (pbegin == pend) return; - for (;;) { - const Char* p = nullptr; - if (!find(pbegin, pend, Char('}'), p)) - return handler_.on_text(pbegin, pend); - ++p; - if (p == pend || *p != '}') - return handler_.on_error("unmatched '}' in format string"); - handler_.on_text(pbegin, p); - pbegin = p + 1; - } - } - Handler& handler_; - } write{handler}; - while (begin != end) { - // Doing two passes with memchr (one for '{' and another for '}') is up to - // 2.5x faster than the naive one-pass implementation on big format strings. 
- const Char* p = begin; - if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) - return write(begin, end); - write(begin, p); - begin = parse_replacement_field(p, end, handler); - } +FMT_CONSTEXPR FMT_INLINE void parse_format_string(basic_string_view format_str, Handler &&handler) { + // this is most likely a name-lookup defect in msvc's modules implementation + using detail::find; + + auto begin = format_str.data(); + auto end = begin + format_str.size(); + if (end - begin < 32) { + // Use a simple loop instead of memchr for small strings. + const Char *p = begin; + while (p != end) { + auto c = *p++; + if (c == '{') { + handler.on_text(begin, p - 1); + begin = p = parse_replacement_field(p - 1, end, handler); + } else if (c == '}') { + if (p == end || *p != '}') + return handler.on_error("unmatched '}' in format string"); + handler.on_text(begin, p); + begin = ++p; + } + } + handler.on_text(begin, end); + return; + } + struct writer { + FMT_CONSTEXPR void operator()(const Char *pbegin, const Char *pend) { + if (pbegin == pend) + return; + for (;;) { + const Char *p = nullptr; + if (!find(pbegin, pend, Char('}'), p)) + return handler_.on_text(pbegin, pend); + ++p; + if (p == pend || *p != '}') + return handler_.on_error("unmatched '}' in format string"); + handler_.on_text(pbegin, p); + pbegin = p + 1; + } + } + Handler &handler_; + } write {handler}; + while (begin != end) { + // Doing two passes with memchr (one for '{' and another for '}') is up + // to 2.5x faster than the naive one-pass implementation on big format + // strings. 
+ const Char *p = begin; + if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) + return write(begin, end); + write(begin, p); + begin = parse_replacement_field(p, end, handler); + } } template -FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) - -> decltype(ctx.begin()) { - using char_type = typename ParseContext::char_type; - using context = buffer_context; - using mapped_type = conditional_t< - mapped_type_constant::value != type::custom_type, - decltype(arg_mapper().map(std::declval())), T>; - auto f = conditional_t::value, - formatter, - fallback_formatter>(); - return f.parse(ctx); +FMT_CONSTEXPR auto parse_format_specs(ParseContext &ctx) -> decltype(ctx.begin()) { + using char_type = typename ParseContext::char_type; + using context = buffer_context; + using mapped_type = conditional_t::value != type::custom_type, + decltype(arg_mapper().map(std::declval())), T>; + auto f = conditional_t::value, formatter, + fallback_formatter>(); + return f.parse(ctx); } // A parse context with extra argument id checks. It is only used at compile @@ -2450,191 +2595,194 @@ FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) // and would be redundant since argument ids are checked when arguments are // retrieved anyway. 
template -class compile_parse_context - : public basic_format_parse_context { - private: - int num_args_; - using base = basic_format_parse_context; - - public: - explicit FMT_CONSTEXPR compile_parse_context( - basic_string_view format_str, - int num_args = (std::numeric_limits::max)(), ErrorHandler eh = {}) - : base(format_str, eh), num_args_(num_args) {} - - FMT_CONSTEXPR auto next_arg_id() -> int { - int id = base::next_arg_id(); - if (id >= num_args_) this->on_error("argument not found"); - return id; - } - - FMT_CONSTEXPR void check_arg_id(int id) { - base::check_arg_id(id); - if (id >= num_args_) this->on_error("argument not found"); - } - using base::check_arg_id; +class compile_parse_context : public basic_format_parse_context { +private: + int num_args_; + using base = basic_format_parse_context; + +public: + explicit FMT_CONSTEXPR compile_parse_context(basic_string_view format_str, + int num_args = (std::numeric_limits::max)(), ErrorHandler eh = {}) + : base(format_str, eh), num_args_(num_args) { + } + + FMT_CONSTEXPR auto next_arg_id() -> int { + int id = base::next_arg_id(); + if (id >= num_args_) + this->on_error("argument not found"); + return id; + } + + FMT_CONSTEXPR void check_arg_id(int id) { + base::check_arg_id(id); + if (id >= num_args_) + this->on_error("argument not found"); + } + using base::check_arg_id; }; template -FMT_CONSTEXPR void check_int_type_spec(char spec, ErrorHandler&& eh) { - switch (spec) { - case 0: - case 'd': - case 'x': - case 'X': - case 'b': - case 'B': - case 'o': - case 'c': - break; - default: - eh.on_error("invalid type specifier"); - break; - } +FMT_CONSTEXPR void check_int_type_spec(char spec, ErrorHandler &&eh) { + switch (spec) { + case 0: + case 'd': + case 'x': + case 'X': + case 'b': + case 'B': + case 'o': + case 'c': + break; + default: + eh.on_error("invalid type specifier"); + break; + } } // Checks char specs and returns true if the type spec is char (and not int). 
template -FMT_CONSTEXPR auto check_char_specs(const basic_format_specs& specs, - ErrorHandler&& eh = {}) -> bool { - if (specs.type && specs.type != 'c') { - check_int_type_spec(specs.type, eh); - return false; - } - if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) - eh.on_error("invalid format specifier for char"); - return true; +FMT_CONSTEXPR auto check_char_specs(const basic_format_specs &specs, ErrorHandler &&eh = {}) -> bool { + if (specs.type && specs.type != 'c') { + check_int_type_spec(specs.type, eh); + return false; + } + if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) + eh.on_error("invalid format specifier for char"); + return true; } // A floating-point presentation format. enum class float_format : unsigned char { - general, // General: exponent notation or fixed point based on magnitude. - exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. - fixed, // Fixed point with the default precision of 6, e.g. 0.0012. - hex + general, // General: exponent notation or fixed point based on magnitude. + exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. + fixed, // Fixed point with the default precision of 6, e.g. 0.0012. 
+ hex }; struct float_specs { - int precision; - float_format format : 8; - sign_t sign : 8; - bool upper : 1; - bool locale : 1; - bool binary32 : 1; - bool use_grisu : 1; - bool showpoint : 1; + int precision; + float_format format : 8; + sign_t sign : 8; + bool upper : 1; + bool locale : 1; + bool binary32 : 1; + bool use_grisu : 1; + bool showpoint : 1; }; template -FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs& specs, - ErrorHandler&& eh = {}) - -> float_specs { - auto result = float_specs(); - result.showpoint = specs.alt; - result.locale = specs.localized; - switch (specs.type) { - case 0: - result.format = float_format::general; - break; - case 'G': - result.upper = true; - FMT_FALLTHROUGH; - case 'g': - result.format = float_format::general; - break; - case 'E': - result.upper = true; - FMT_FALLTHROUGH; - case 'e': - result.format = float_format::exp; - result.showpoint |= specs.precision != 0; - break; - case 'F': - result.upper = true; - FMT_FALLTHROUGH; - case 'f': - result.format = float_format::fixed; - result.showpoint |= specs.precision != 0; - break; - case 'A': - result.upper = true; - FMT_FALLTHROUGH; - case 'a': - result.format = float_format::hex; - break; - default: - eh.on_error("invalid type specifier"); - break; - } - return result; +FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs &specs, ErrorHandler &&eh = {}) -> float_specs { + auto result = float_specs(); + result.showpoint = specs.alt; + result.locale = specs.localized; + switch (specs.type) { + case 0: + result.format = float_format::general; + break; + case 'G': + result.upper = true; + FMT_FALLTHROUGH; + case 'g': + result.format = float_format::general; + break; + case 'E': + result.upper = true; + FMT_FALLTHROUGH; + case 'e': + result.format = float_format::exp; + result.showpoint |= specs.precision != 0; + break; + case 'F': + result.upper = true; + FMT_FALLTHROUGH; + case 'f': + result.format = float_format::fixed; + result.showpoint |= 
specs.precision != 0; + break; + case 'A': + result.upper = true; + FMT_FALLTHROUGH; + case 'a': + result.format = float_format::hex; + break; + default: + eh.on_error("invalid type specifier"); + break; + } + return result; } template -FMT_CONSTEXPR auto check_cstring_type_spec(Char spec, ErrorHandler&& eh = {}) - -> bool { - if (spec == 0 || spec == 's') return true; - if (spec != 'p') eh.on_error("invalid type specifier"); - return false; +FMT_CONSTEXPR auto check_cstring_type_spec(Char spec, ErrorHandler &&eh = {}) -> bool { + if (spec == 0 || spec == 's') + return true; + if (spec != 'p') + eh.on_error("invalid type specifier"); + return false; } template -FMT_CONSTEXPR void check_string_type_spec(Char spec, ErrorHandler&& eh = {}) { - if (spec != 0 && spec != 's') eh.on_error("invalid type specifier"); +FMT_CONSTEXPR void check_string_type_spec(Char spec, ErrorHandler &&eh = {}) { + if (spec != 0 && spec != 's') + eh.on_error("invalid type specifier"); } template -FMT_CONSTEXPR void check_pointer_type_spec(Char spec, ErrorHandler&& eh) { - if (spec != 0 && spec != 'p') eh.on_error("invalid type specifier"); +FMT_CONSTEXPR void check_pointer_type_spec(Char spec, ErrorHandler &&eh) { + if (spec != 0 && spec != 'p') + eh.on_error("invalid type specifier"); } // A parse_format_specs handler that checks if specifiers are consistent with // the argument type. 
-template class specs_checker : public Handler { - private: - detail::type arg_type_; - - FMT_CONSTEXPR void require_numeric_argument() { - if (!is_arithmetic_type(arg_type_)) - this->on_error("format specifier requires numeric argument"); - } - - public: - FMT_CONSTEXPR specs_checker(const Handler& handler, detail::type arg_type) - : Handler(handler), arg_type_(arg_type) {} - - FMT_CONSTEXPR void on_align(align_t align) { - if (align == align::numeric) require_numeric_argument(); - Handler::on_align(align); - } - - FMT_CONSTEXPR void on_sign(sign_t s) { - require_numeric_argument(); - if (is_integral_type(arg_type_) && arg_type_ != type::int_type && - arg_type_ != type::long_long_type && arg_type_ != type::char_type) { - this->on_error("format specifier requires signed argument"); - } - Handler::on_sign(s); - } - - FMT_CONSTEXPR void on_hash() { - require_numeric_argument(); - Handler::on_hash(); - } - - FMT_CONSTEXPR void on_localized() { - require_numeric_argument(); - Handler::on_localized(); - } - - FMT_CONSTEXPR void on_zero() { - require_numeric_argument(); - Handler::on_zero(); - } - - FMT_CONSTEXPR void end_precision() { - if (is_integral_type(arg_type_) || arg_type_ == type::pointer_type) - this->on_error("precision not allowed for this argument type"); - } +template +class specs_checker : public Handler { +private: + detail::type arg_type_; + + FMT_CONSTEXPR void require_numeric_argument() { + if (!is_arithmetic_type(arg_type_)) + this->on_error("format specifier requires numeric argument"); + } + +public: + FMT_CONSTEXPR specs_checker(const Handler &handler, detail::type arg_type) : Handler(handler), arg_type_(arg_type) { + } + + FMT_CONSTEXPR void on_align(align_t align) { + if (align == align::numeric) + require_numeric_argument(); + Handler::on_align(align); + } + + FMT_CONSTEXPR void on_sign(sign_t s) { + require_numeric_argument(); + if (is_integral_type(arg_type_) && arg_type_ != type::int_type && arg_type_ != type::long_long_type && + arg_type_ 
!= type::char_type) { + this->on_error("format specifier requires signed argument"); + } + Handler::on_sign(s); + } + + FMT_CONSTEXPR void on_hash() { + require_numeric_argument(); + Handler::on_hash(); + } + + FMT_CONSTEXPR void on_localized() { + require_numeric_argument(); + Handler::on_localized(); + } + + FMT_CONSTEXPR void on_zero() { + require_numeric_argument(); + Handler::on_zero(); + } + + FMT_CONSTEXPR void end_precision() { + if (is_integral_type(arg_type_) || arg_type_ == type::pointer_type) + this->on_error("precision not allowed for this argument type"); + } }; constexpr int invalid_arg_index = -1; @@ -2642,229 +2790,232 @@ constexpr int invalid_arg_index = -1; #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS template constexpr auto get_arg_index_by_name(basic_string_view name) -> int { - if constexpr (detail::is_statically_named_arg()) { - if (name == T::name) return N; - } - if constexpr (sizeof...(Args) > 0) { - return get_arg_index_by_name(name); - } else { - (void)name; // Workaround an MSVC bug about "unused" parameter. - return invalid_arg_index; - } + if constexpr (detail::is_statically_named_arg()) { + if (name == T::name) + return N; + } + if constexpr (sizeof...(Args) > 0) { + return get_arg_index_by_name(name); + } else { + (void)name; // Workaround an MSVC bug about "unused" parameter. 
+ return invalid_arg_index; + } } #endif template FMT_CONSTEXPR auto get_arg_index_by_name(basic_string_view name) -> int { #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS - if constexpr (sizeof...(Args) > 0) { - return get_arg_index_by_name<0, Args...>(name); - } else { - (void)name; - return invalid_arg_index; - } + if constexpr (sizeof...(Args) > 0) { + return get_arg_index_by_name<0, Args...>(name); + } else { + (void)name; + return invalid_arg_index; + } #else - (void)name; - return invalid_arg_index; + (void)name; + return invalid_arg_index; #endif } template class format_string_checker { - private: - using parse_context_type = compile_parse_context; - enum { num_args = sizeof...(Args) }; - - // Format specifier parsing function. - using parse_func = const Char* (*)(parse_context_type&); - - parse_context_type context_; - parse_func parse_funcs_[num_args > 0 ? num_args : 1]; - - public: - explicit FMT_CONSTEXPR format_string_checker( - basic_string_view format_str, ErrorHandler eh) - : context_(format_str, num_args, eh), - parse_funcs_{&parse_format_specs...} {} - - FMT_CONSTEXPR void on_text(const Char*, const Char*) {} - - FMT_CONSTEXPR auto on_arg_id() -> int { return context_.next_arg_id(); } - FMT_CONSTEXPR auto on_arg_id(int id) -> int { - return context_.check_arg_id(id), id; - } - FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { +private: + using parse_context_type = compile_parse_context; + enum { num_args = sizeof...(Args) }; + + // Format specifier parsing function. + using parse_func = const Char *(*)(parse_context_type &); + + parse_context_type context_; + parse_func parse_funcs_[num_args > 0 ? 
num_args : 1]; + +public: + explicit FMT_CONSTEXPR format_string_checker(basic_string_view format_str, ErrorHandler eh) + : context_(format_str, num_args, eh), parse_funcs_ {&parse_format_specs...} { + } + + FMT_CONSTEXPR void on_text(const Char *, const Char *) { + } + + FMT_CONSTEXPR auto on_arg_id() -> int { + return context_.next_arg_id(); + } + FMT_CONSTEXPR auto on_arg_id(int id) -> int { + return context_.check_arg_id(id), id; + } + FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS - auto index = get_arg_index_by_name(id); - if (index == invalid_arg_index) on_error("named argument is not found"); - return context_.check_arg_id(index), index; + auto index = get_arg_index_by_name(id); + if (index == invalid_arg_index) + on_error("named argument is not found"); + return context_.check_arg_id(index), index; #else - (void)id; - on_error("compile-time checks for named arguments require C++20 support"); - return 0; + (void)id; + on_error("compile-time checks for named arguments require C++20 support"); + return 0; #endif - } + } - FMT_CONSTEXPR void on_replacement_field(int, const Char*) {} + FMT_CONSTEXPR void on_replacement_field(int, const Char *) { + } - FMT_CONSTEXPR auto on_format_specs(int id, const Char* begin, const Char*) - -> const Char* { - context_.advance_to(context_.begin() + (begin - &*context_.begin())); - // id >= 0 check is a workaround for gcc 10 bug (#2065). - return id >= 0 && id < num_args ? parse_funcs_[id](context_) : begin; - } + FMT_CONSTEXPR auto on_format_specs(int id, const Char *begin, const Char *) -> const Char * { + context_.advance_to(context_.begin() + (begin - &*context_.begin())); + // id >= 0 check is a workaround for gcc 10 bug (#2065). + return id >= 0 && id < num_args ? 
parse_funcs_[id](context_) : begin; + } - FMT_CONSTEXPR void on_error(const char* message) { - context_.on_error(message); - } + FMT_CONSTEXPR void on_error(const char *message) { + context_.on_error(message); + } }; -template ::value), int>> +template ::value), int>> void check_format_string(S format_str) { - FMT_CONSTEXPR auto s = to_string_view(format_str); - using checker = format_string_checker...>; - FMT_CONSTEXPR bool invalid_format = - (parse_format_string(s, checker(s, {})), true); - ignore_unused(invalid_format); + FMT_CONSTEXPR auto s = to_string_view(format_str); + using checker = format_string_checker...>; + FMT_CONSTEXPR bool invalid_format = (parse_format_string(s, checker(s, {})), true); + ignore_unused(invalid_format); } template -void vformat_to( - buffer& buf, basic_string_view fmt, - basic_format_args)> args, - locale_ref loc = {}); +void vformat_to(buffer &buf, basic_string_view fmt, + basic_format_args)> args, locale_ref loc = {}); -FMT_API void vprint_mojibake(std::FILE*, string_view, format_args); +FMT_API void vprint_mojibake(std::FILE *, string_view, format_args); #ifndef _WIN32 -inline void vprint_mojibake(std::FILE*, string_view, format_args) {} +inline void vprint_mojibake(std::FILE *, string_view, format_args) { +} #endif FMT_END_DETAIL_NAMESPACE // A formatter specialization for the core types corresponding to detail::type // constants. template -struct formatter::value != - detail::type::custom_type>> { - private: - detail::dynamic_format_specs specs_; - - public: - // Parses format specifiers stopping either at the end of the range or at the - // terminating '}'. 
- template - FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { - auto begin = ctx.begin(), end = ctx.end(); - if (begin == end) return begin; - using handler_type = detail::dynamic_specs_handler; - auto type = detail::type_constant::value; - auto checker = - detail::specs_checker(handler_type(specs_, ctx), type); - auto it = detail::parse_format_specs(begin, end, checker); - auto eh = ctx.error_handler(); - switch (type) { - case detail::type::none_type: - FMT_ASSERT(false, "invalid argument type"); - break; - case detail::type::bool_type: - if (!specs_.type || specs_.type == 's') break; - FMT_FALLTHROUGH; - case detail::type::int_type: - case detail::type::uint_type: - case detail::type::long_long_type: - case detail::type::ulong_long_type: - case detail::type::int128_type: - case detail::type::uint128_type: - detail::check_int_type_spec(specs_.type, eh); - break; - case detail::type::char_type: - detail::check_char_specs(specs_, eh); - break; - case detail::type::float_type: - if (detail::const_check(FMT_USE_FLOAT)) - detail::parse_float_type_spec(specs_, eh); - else - FMT_ASSERT(false, "float support disabled"); - break; - case detail::type::double_type: - if (detail::const_check(FMT_USE_DOUBLE)) - detail::parse_float_type_spec(specs_, eh); - else - FMT_ASSERT(false, "double support disabled"); - break; - case detail::type::long_double_type: - if (detail::const_check(FMT_USE_LONG_DOUBLE)) - detail::parse_float_type_spec(specs_, eh); - else - FMT_ASSERT(false, "long double support disabled"); - break; - case detail::type::cstring_type: - detail::check_cstring_type_spec(specs_.type, eh); - break; - case detail::type::string_type: - detail::check_string_type_spec(specs_.type, eh); - break; - case detail::type::pointer_type: - detail::check_pointer_type_spec(specs_.type, eh); - break; - case detail::type::custom_type: - // Custom format specifiers are checked in parse functions of - // formatter specializations. 
- break; - } - return it; - } - - template - FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const - -> decltype(ctx.out()); +struct formatter::value != detail::type::custom_type>> { +private: + detail::dynamic_format_specs specs_; + +public: + // Parses format specifiers stopping either at the end of the range or at + // the terminating '}'. + template + FMT_CONSTEXPR auto parse(ParseContext &ctx) -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end) + return begin; + using handler_type = detail::dynamic_specs_handler; + auto type = detail::type_constant::value; + auto checker = detail::specs_checker(handler_type(specs_, ctx), type); + auto it = detail::parse_format_specs(begin, end, checker); + auto eh = ctx.error_handler(); + switch (type) { + case detail::type::none_type: + FMT_ASSERT(false, "invalid argument type"); + break; + case detail::type::bool_type: + if (!specs_.type || specs_.type == 's') + break; + FMT_FALLTHROUGH; + case detail::type::int_type: + case detail::type::uint_type: + case detail::type::long_long_type: + case detail::type::ulong_long_type: + case detail::type::int128_type: + case detail::type::uint128_type: + detail::check_int_type_spec(specs_.type, eh); + break; + case detail::type::char_type: + detail::check_char_specs(specs_, eh); + break; + case detail::type::float_type: + if (detail::const_check(FMT_USE_FLOAT)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "float support disabled"); + break; + case detail::type::double_type: + if (detail::const_check(FMT_USE_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "double support disabled"); + break; + case detail::type::long_double_type: + if (detail::const_check(FMT_USE_LONG_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "long double support disabled"); + break; + case detail::type::cstring_type: + detail::check_cstring_type_spec(specs_.type, eh); + 
break; + case detail::type::string_type: + detail::check_string_type_spec(specs_.type, eh); + break; + case detail::type::pointer_type: + detail::check_pointer_type_spec(specs_.type, eh); + break; + case detail::type::custom_type: + // Custom format specifiers are checked in parse functions of + // formatter specializations. + break; + } + return it; + } + + template + FMT_CONSTEXPR auto format(const T &val, FormatContext &ctx) const -> decltype(ctx.out()); }; -template struct basic_runtime { basic_string_view str; }; - -template class basic_format_string { - private: - basic_string_view str_; - - public: - template >::value)> - FMT_CONSTEVAL basic_format_string(const S& s) : str_(s) { - static_assert( - detail::count< - (std::is_base_of>::value && - std::is_reference::value)...>() == 0, - "passing views as lvalues is disallowed"); +template +struct basic_runtime { + basic_string_view str; +}; + +template +class basic_format_string { +private: + basic_string_view str_; + +public: + template >::value)> + FMT_CONSTEVAL basic_format_string(const S &s) : str_(s) { + static_assert(detail::count<(std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); #ifdef FMT_HAS_CONSTEVAL - if constexpr (detail::count_named_args() == 0) { - using checker = detail::format_string_checker...>; - detail::parse_format_string(str_, checker(s, {})); - } + if constexpr (detail::count_named_args() == 0) { + using checker = detail::format_string_checker...>; + detail::parse_format_string(str_, checker(s, {})); + } #else - detail::check_format_string(s); + detail::check_format_string(s); #endif - } - basic_format_string(basic_runtime r) : str_(r.str) {} + } + basic_format_string(basic_runtime r) : str_(r.str) { + } - FMT_INLINE operator basic_string_view() const { return str_; } + FMT_INLINE operator basic_string_view() const { + return str_; + } }; #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 // Workaround broken conversion on older gcc. 
-template using format_string = string_view; -template auto runtime(const S& s) -> basic_string_view> { - return s; +template +using format_string = string_view; +template +auto runtime(const S &s) -> basic_string_view> { + return s; } #else template using format_string = basic_format_string...>; // Creates a runtime format string. -template auto runtime(const S& s) -> basic_runtime> { - return {{s}}; +template +auto runtime(const S &s) -> basic_runtime> { + return {{s}}; } #endif @@ -2882,18 +3033,17 @@ FMT_API auto vformat(string_view fmt, format_args args) -> std::string; \endrst */ template -FMT_INLINE auto format(format_string fmt, T&&... args) -> std::string { - return vformat(fmt, fmt::make_format_args(args...)); +FMT_INLINE auto format(format_string fmt, T &&...args) -> std::string { + return vformat(fmt, fmt::make_format_args(args...)); } /** Formats a string and writes the output to ``out``. */ -template ::value)> +template ::value)> auto vformat_to(OutputIt out, string_view fmt, format_args args) -> OutputIt { - using detail::get_buffer; - auto&& buf = get_buffer(out); - detail::vformat_to(buf, string_view(fmt), args, {}); - return detail::get_iterator(buf); + using detail::get_buffer; + auto &&buf = get_buffer(out); + detail::vformat_to(buf, string_view(fmt), args, {}); + return detail::get_iterator(buf); } /** @@ -2908,29 +3058,25 @@ auto vformat_to(OutputIt out, string_view fmt, format_args args) -> OutputIt { fmt::format_to(std::back_inserter(out), "{}", 42); \endrst */ -template ::value)> -FMT_INLINE auto format_to(OutputIt out, format_string fmt, T&&... args) - -> OutputIt { - return vformat_to(out, fmt, fmt::make_format_args(args...)); -} - -template struct format_to_n_result { - /** Iterator past the end of the output range. */ - OutputIt out; - /** Total (not truncated) output size. 
*/ - size_t size; +template ::value)> +FMT_INLINE auto format_to(OutputIt out, format_string fmt, T &&...args) -> OutputIt { + return vformat_to(out, fmt, fmt::make_format_args(args...)); +} + +template +struct format_to_n_result { + /** Iterator past the end of the output range. */ + OutputIt out; + /** Total (not truncated) output size. */ + size_t size; }; -template ::value)> -auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) - -> format_to_n_result { - using buffer = - detail::iterator_buffer; - auto buf = buffer(out, n); - detail::vformat_to(buf, fmt, args, {}); - return {buf.out(), buf.count()}; +template ::value)> +auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) -> format_to_n_result { + using buffer = detail::iterator_buffer; + auto buf = buffer(out, n); + detail::vformat_to(buf, fmt, args, {}); + return {buf.out(), buf.count()}; } /** @@ -2940,23 +3086,22 @@ auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) (not truncated) output size and the iterator past the end of the output range. \endrst */ -template ::value)> -FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt, - const T&... args) -> format_to_n_result { - return vformat_to_n(out, n, fmt, fmt::make_format_args(args...)); +template ::value)> +FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt, const T &...args) + -> format_to_n_result { + return vformat_to_n(out, n, fmt, fmt::make_format_args(args...)); } /** Returns the number of chars in the output of ``format(fmt, args...)``. */ template -FMT_INLINE auto formatted_size(format_string fmt, T&&... 
args) -> size_t { - auto buf = detail::counting_buffer<>(); - detail::vformat_to(buf, string_view(fmt), fmt::make_format_args(args...), {}); - return buf.count(); +FMT_INLINE auto formatted_size(format_string fmt, T &&...args) -> size_t { + auto buf = detail::counting_buffer<>(); + detail::vformat_to(buf, string_view(fmt), fmt::make_format_args(args...), {}); + return buf.count(); } FMT_API void vprint(string_view fmt, format_args args); -FMT_API void vprint(std::FILE* f, string_view fmt, format_args args); +FMT_API void vprint(std::FILE *f, string_view fmt, format_args args); /** \rst @@ -2969,10 +3114,9 @@ FMT_API void vprint(std::FILE* f, string_view fmt, format_args args); \endrst */ template -FMT_INLINE void print(format_string fmt, T&&... args) { - const auto& vargs = fmt::make_format_args(args...); - return detail::is_utf8() ? vprint(fmt, vargs) - : detail::vprint_mojibake(stdout, fmt, vargs); +FMT_INLINE void print(format_string fmt, T &&...args) { + const auto &vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? vprint(fmt, vargs) : detail::vprint_mojibake(stdout, fmt, vargs); } /** @@ -2986,10 +3130,9 @@ FMT_INLINE void print(format_string fmt, T&&... args) { \endrst */ template -FMT_INLINE void print(std::FILE* f, format_string fmt, T&&... args) { - const auto& vargs = fmt::make_format_args(args...); - return detail::is_utf8() ? vprint(f, fmt, vargs) - : detail::vprint_mojibake(f, fmt, vargs); +FMT_INLINE void print(std::FILE *f, format_string fmt, T &&...args) { + const auto &vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? 
vprint(f, fmt, vargs) : detail::vprint_mojibake(f, fmt, vargs); } FMT_MODULE_EXPORT_END @@ -2997,6 +3140,6 @@ FMT_GCC_PRAGMA("GCC pop_options") FMT_END_NAMESPACE #ifdef FMT_HEADER_ONLY -# include "format.h" +#include "format.h" #endif -#endif // FMT_CORE_H_ +#endif // FMT_CORE_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/format-inl.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/format-inl.h index 94a36d1bc..6dda203e1 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/format-inl.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/format-inl.h @@ -10,20 +10,20 @@ #include #include -#include // errno +#include // errno #include #include #include -#include // std::memmove +#include // std::memmove #include #include #ifndef FMT_STATIC_THOUSANDS_SEPARATOR -# include +#include #endif #ifdef _WIN32 -# include // _isatty +#include // _isatty #endif #include "format.h" @@ -31,569 +31,600 @@ FMT_BEGIN_NAMESPACE namespace detail { -FMT_FUNC void assert_fail(const char* file, int line, const char* message) { - // Use unchecked std::fprintf to avoid triggering another assertion when - // writing to stderr fails - std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message); - // Chosen instead of std::abort to satisfy Clang in CUDA mode during device - // code pass. - std::terminate(); +FMT_FUNC void assert_fail(const char *file, int line, const char *message) { + // Use unchecked std::fprintf to avoid triggering another assertion when + // writing to stderr fails + std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message); + // Chosen instead of std::abort to satisfy Clang in CUDA mode during device + // code pass. + std::terminate(); } #ifndef _MSC_VER -# define FMT_SNPRINTF snprintf -#else // _MSC_VER -inline int fmt_snprintf(char* buffer, size_t size, const char* format, ...) 
{ - va_list args; - va_start(args, format); - int result = vsnprintf_s(buffer, size, _TRUNCATE, format, args); - va_end(args); - return result; +#define FMT_SNPRINTF snprintf +#else // _MSC_VER +inline int fmt_snprintf(char *buffer, size_t size, const char *format, ...) { + va_list args; + va_start(args, format); + int result = vsnprintf_s(buffer, size, _TRUNCATE, format, args); + va_end(args); + return result; } -# define FMT_SNPRINTF fmt_snprintf -#endif // _MSC_VER - -FMT_FUNC void format_error_code(detail::buffer& out, int error_code, - string_view message) FMT_NOEXCEPT { - // Report error code making sure that the output fits into - // inline_buffer_size to avoid dynamic memory allocation and potential - // bad_alloc. - out.try_resize(0); - static const char SEP[] = ": "; - static const char ERROR_STR[] = "error "; - // Subtract 2 to account for terminating null characters in SEP and ERROR_STR. - size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2; - auto abs_value = static_cast>(error_code); - if (detail::is_negative(error_code)) { - abs_value = 0 - abs_value; - ++error_code_size; - } - error_code_size += detail::to_unsigned(detail::count_digits(abs_value)); - auto it = buffer_appender(out); - if (message.size() <= inline_buffer_size - error_code_size) - format_to(it, FMT_STRING("{}{}"), message, SEP); - format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code); - FMT_ASSERT(out.size() <= inline_buffer_size, ""); +#define FMT_SNPRINTF fmt_snprintf +#endif // _MSC_VER + +FMT_FUNC void format_error_code(detail::buffer &out, int error_code, string_view message) FMT_NOEXCEPT { + // Report error code making sure that the output fits into + // inline_buffer_size to avoid dynamic memory allocation and potential + // bad_alloc. + out.try_resize(0); + static const char SEP[] = ": "; + static const char ERROR_STR[] = "error "; + // Subtract 2 to account for terminating null characters in SEP and + // ERROR_STR. 
+ size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2; + auto abs_value = static_cast>(error_code); + if (detail::is_negative(error_code)) { + abs_value = 0 - abs_value; + ++error_code_size; + } + error_code_size += detail::to_unsigned(detail::count_digits(abs_value)); + auto it = buffer_appender(out); + if (message.size() <= inline_buffer_size - error_code_size) + format_to(it, FMT_STRING("{}{}"), message, SEP); + format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code); + FMT_ASSERT(out.size() <= inline_buffer_size, ""); } -FMT_FUNC void report_error(format_func func, int error_code, - const char* message) FMT_NOEXCEPT { - memory_buffer full_message; - func(full_message, error_code, message); - // Don't use fwrite_fully because the latter may throw. - if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0) - std::fputc('\n', stderr); +FMT_FUNC void report_error(format_func func, int error_code, const char *message) FMT_NOEXCEPT { + memory_buffer full_message; + func(full_message, error_code, message); + // Don't use fwrite_fully because the latter may throw. + if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0) + std::fputc('\n', stderr); } // A wrapper around fwrite that throws on error. 
-inline void fwrite_fully(const void* ptr, size_t size, size_t count, - FILE* stream) { - size_t written = std::fwrite(ptr, size, count, stream); - if (written < count) FMT_THROW(system_error(errno, "cannot write to file")); +inline void fwrite_fully(const void *ptr, size_t size, size_t count, FILE *stream) { + size_t written = std::fwrite(ptr, size, count, stream); + if (written < count) + FMT_THROW(system_error(errno, "cannot write to file")); } #ifndef FMT_STATIC_THOUSANDS_SEPARATOR template -locale_ref::locale_ref(const Locale& loc) : locale_(&loc) { - static_assert(std::is_same::value, ""); +locale_ref::locale_ref(const Locale &loc) : locale_(&loc) { + static_assert(std::is_same::value, ""); } -template Locale locale_ref::get() const { - static_assert(std::is_same::value, ""); - return locale_ ? *static_cast(locale_) : std::locale(); +template +Locale locale_ref::get() const { + static_assert(std::is_same::value, ""); + return locale_ ? *static_cast(locale_) : std::locale(); } template FMT_FUNC auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result { - auto& facet = std::use_facet>(loc.get()); - auto grouping = facet.grouping(); - auto thousands_sep = grouping.empty() ? Char() : facet.thousands_sep(); - return {std::move(grouping), thousands_sep}; + auto &facet = std::use_facet>(loc.get()); + auto grouping = facet.grouping(); + auto thousands_sep = grouping.empty() ? 
Char() : facet.thousands_sep(); + return {std::move(grouping), thousands_sep}; } -template FMT_FUNC Char decimal_point_impl(locale_ref loc) { - return std::use_facet>(loc.get()) - .decimal_point(); +template +FMT_FUNC Char decimal_point_impl(locale_ref loc) { + return std::use_facet>(loc.get()).decimal_point(); } #else template FMT_FUNC auto thousands_sep_impl(locale_ref) -> thousands_sep_result { - return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR}; + return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR}; } -template FMT_FUNC Char decimal_point_impl(locale_ref) { - return '.'; +template +FMT_FUNC Char decimal_point_impl(locale_ref) { + return '.'; } #endif -} // namespace detail +} // namespace detail #if !FMT_MSC_VER FMT_API FMT_FUNC format_error::~format_error() FMT_NOEXCEPT = default; #endif -FMT_FUNC std::system_error vsystem_error(int error_code, string_view format_str, - format_args args) { - auto ec = std::error_code(error_code, std::generic_category()); - return std::system_error(ec, vformat(format_str, args)); +FMT_FUNC std::system_error vsystem_error(int error_code, string_view format_str, format_args args) { + auto ec = std::error_code(error_code, std::generic_category()); + return std::system_error(ec, vformat(format_str, args)); } namespace detail { -template <> FMT_FUNC int count_digits<4>(detail::fallback_uintptr n) { - // fallback_uintptr is always stored in little endian. - int i = static_cast(sizeof(void*)) - 1; - while (i > 0 && n.value[i] == 0) --i; - auto char_digits = std::numeric_limits::digits / 4; - return i >= 0 ? i * char_digits + count_digits<4, unsigned>(n.value[i]) : 1; +template <> +FMT_FUNC int count_digits<4>(detail::fallback_uintptr n) { + // fallback_uintptr is always stored in little endian. + int i = static_cast(sizeof(void *)) - 1; + while (i > 0 && n.value[i] == 0) + --i; + auto char_digits = std::numeric_limits::digits / 4; + return i >= 0 ? 
i * char_digits + count_digits<4, unsigned>(n.value[i]) : 1; } #if __cplusplus < 201703L -template constexpr const char basic_data::digits[][2]; -template constexpr const char basic_data::hex_digits[]; -template constexpr const char basic_data::signs[]; -template constexpr const unsigned basic_data::prefixes[]; -template constexpr const char basic_data::left_padding_shifts[]; +template +constexpr const char basic_data::digits[][2]; +template +constexpr const char basic_data::hex_digits[]; +template +constexpr const char basic_data::signs[]; +template +constexpr const unsigned basic_data::prefixes[]; +template +constexpr const char basic_data::left_padding_shifts[]; template constexpr const char basic_data::right_padding_shifts[]; #endif -template struct bits { - static FMT_CONSTEXPR_DECL const int value = - static_cast(sizeof(T) * std::numeric_limits::digits); +template +struct bits { + static FMT_CONSTEXPR_DECL const int value = + static_cast(sizeof(T) * std::numeric_limits::digits); }; class fp; -template fp normalize(fp value); +template +fp normalize(fp value); // Lower (upper) boundary is a value half way between a floating-point value // and its predecessor (successor). Boundaries have the same exponent as the // value so only significands are stored. struct boundaries { - uint64_t lower; - uint64_t upper; + uint64_t lower; + uint64_t upper; }; // A handmade floating-point number f * pow(2, e). class fp { - private: - using significand_type = uint64_t; - - template - using is_supported_float = bool_constant; - - public: - significand_type f; - int e; - - // All sizes are in bits. - // Subtract 1 to account for an implicit most significant bit in the - // normalized form. 
- static FMT_CONSTEXPR_DECL const int double_significand_size = - std::numeric_limits::digits - 1; - static FMT_CONSTEXPR_DECL const uint64_t implicit_bit = - 1ULL << double_significand_size; - static FMT_CONSTEXPR_DECL const int significand_size = - bits::value; - - fp() : f(0), e(0) {} - fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {} - - // Constructs fp from an IEEE754 double. It is a template to prevent compile - // errors on platforms where double is not IEEE754. - template explicit fp(Double d) { assign(d); } - - // Assigns d to this and return true iff predecessor is closer than successor. - template ::value)> - bool assign(Float d) { - // Assume float is in the format [sign][exponent][significand]. - using limits = std::numeric_limits; - const int float_significand_size = limits::digits - 1; - const int exponent_size = - bits::value - float_significand_size - 1; // -1 for sign - const uint64_t float_implicit_bit = 1ULL << float_significand_size; - const uint64_t significand_mask = float_implicit_bit - 1; - const uint64_t exponent_mask = (~0ULL >> 1) & ~significand_mask; - const int exponent_bias = (1 << exponent_size) - limits::max_exponent - 1; - constexpr bool is_double = sizeof(Float) == sizeof(uint64_t); - auto u = bit_cast>(d); - f = u & significand_mask; - int biased_e = - static_cast((u & exponent_mask) >> float_significand_size); - // Predecessor is closer if d is a normalized power of 2 (f == 0) other than - // the smallest normalized number (biased_e > 1). - bool is_predecessor_closer = f == 0 && biased_e > 1; - if (biased_e != 0) - f += float_implicit_bit; - else - biased_e = 1; // Subnormals use biased exponent 1 (min exponent). 
- e = biased_e - exponent_bias - float_significand_size; - return is_predecessor_closer; - } - - template ::value)> - bool assign(Float) { - *this = fp(); - return false; - } +private: + using significand_type = uint64_t; + + template + using is_supported_float = bool_constant; + +public: + significand_type f; + int e; + + // All sizes are in bits. + // Subtract 1 to account for an implicit most significant bit in the + // normalized form. + static FMT_CONSTEXPR_DECL const int double_significand_size = std::numeric_limits::digits - 1; + static FMT_CONSTEXPR_DECL const uint64_t implicit_bit = 1ULL << double_significand_size; + static FMT_CONSTEXPR_DECL const int significand_size = bits::value; + + fp() : f(0), e(0) { + } + fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) { + } + + // Constructs fp from an IEEE754 double. It is a template to prevent compile + // errors on platforms where double is not IEEE754. + template + explicit fp(Double d) { + assign(d); + } + + // Assigns d to this and return true iff predecessor is closer than + // successor. + template ::value)> + bool assign(Float d) { + // Assume float is in the format [sign][exponent][significand]. + using limits = std::numeric_limits; + const int float_significand_size = limits::digits - 1; + const int exponent_size = bits::value - float_significand_size - 1; // -1 for sign + const uint64_t float_implicit_bit = 1ULL << float_significand_size; + const uint64_t significand_mask = float_implicit_bit - 1; + const uint64_t exponent_mask = (~0ULL >> 1) & ~significand_mask; + const int exponent_bias = (1 << exponent_size) - limits::max_exponent - 1; + constexpr bool is_double = sizeof(Float) == sizeof(uint64_t); + auto u = bit_cast>(d); + f = u & significand_mask; + int biased_e = static_cast((u & exponent_mask) >> float_significand_size); + // Predecessor is closer if d is a normalized power of 2 (f == 0) other + // than the smallest normalized number (biased_e > 1). 
+ bool is_predecessor_closer = f == 0 && biased_e > 1; + if (biased_e != 0) + f += float_implicit_bit; + else + biased_e = 1; // Subnormals use biased exponent 1 (min exponent). + e = biased_e - exponent_bias - float_significand_size; + return is_predecessor_closer; + } + + template ::value)> + bool assign(Float) { + *this = fp(); + return false; + } }; // Normalizes the value converted from double and multiplied by (1 << SHIFT). -template fp normalize(fp value) { - // Handle subnormals. - const auto shifted_implicit_bit = fp::implicit_bit << SHIFT; - while ((value.f & shifted_implicit_bit) == 0) { - value.f <<= 1; - --value.e; - } - // Subtract 1 to account for hidden bit. - const auto offset = - fp::significand_size - fp::double_significand_size - SHIFT - 1; - value.f <<= offset; - value.e -= offset; - return value; +template +fp normalize(fp value) { + // Handle subnormals. + const auto shifted_implicit_bit = fp::implicit_bit << SHIFT; + while ((value.f & shifted_implicit_bit) == 0) { + value.f <<= 1; + --value.e; + } + // Subtract 1 to account for hidden bit. + const auto offset = fp::significand_size - fp::double_significand_size - SHIFT - 1; + value.f <<= offset; + value.e -= offset; + return value; } -inline bool operator==(fp x, fp y) { return x.f == y.f && x.e == y.e; } +inline bool operator==(fp x, fp y) { + return x.f == y.f && x.e == y.e; +} // Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking. inline uint64_t multiply(uint64_t lhs, uint64_t rhs) { #if FMT_USE_INT128 - auto product = static_cast<__uint128_t>(lhs) * rhs; - auto f = static_cast(product >> 64); - return (static_cast(product) & (1ULL << 63)) != 0 ? f + 1 : f; + auto product = static_cast<__uint128_t>(lhs) * rhs; + auto f = static_cast(product >> 64); + return (static_cast(product) & (1ULL << 63)) != 0 ? f + 1 : f; #else - // Multiply 32-bit parts of significands. 
- uint64_t mask = (1ULL << 32) - 1; - uint64_t a = lhs >> 32, b = lhs & mask; - uint64_t c = rhs >> 32, d = rhs & mask; - uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d; - // Compute mid 64-bit of result and round. - uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31); - return ac + (ad >> 32) + (bc >> 32) + (mid >> 32); + // Multiply 32-bit parts of significands. + uint64_t mask = (1ULL << 32) - 1; + uint64_t a = lhs >> 32, b = lhs & mask; + uint64_t c = rhs >> 32, d = rhs & mask; + uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d; + // Compute mid 64-bit of result and round. + uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31); + return ac + (ad >> 32) + (bc >> 32) + (mid >> 32); #endif } -inline fp operator*(fp x, fp y) { return {multiply(x.f, y.f), x.e + y.e + 64}; } +inline fp operator*(fp x, fp y) { + return {multiply(x.f, y.f), x.e + y.e + 64}; +} // Returns a cached power of 10 `c_k = c_k.f * pow(2, c_k.e)` such that its // (binary) exponent satisfies `min_exponent <= c_k.e <= min_exponent + 28`. -inline fp get_cached_power(int min_exponent, int& pow10_exponent) { - // Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., 340. - // These are generated by support/compute-powers.py. 
- static constexpr const uint64_t pow10_significands[] = { - 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76, - 0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df, - 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c, - 0x8dd01fad907ffc3c, 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, - 0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57, - 0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7, - 0xa086cfcd97bf97f4, 0xef340a98172aace5, 0xb23867fb2a35b28e, - 0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, - 0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126, - 0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053, - 0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f, - 0xf8a95fcf88747d94, 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b, - 0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06, - 0xaa242499697392d3, 0xfd87b5f28300ca0e, 0xbce5086492111aeb, - 0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000, - 0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984, - 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, 0xd5d238a4abe98068, - 0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, - 0x83c7088e1aab65db, 0xc45d1df942711d9a, 0x924d692ca61be758, - 0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85, - 0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d, - 0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, 0xa59bc234db398c25, - 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2, - 0xcc20ce9bd35c78a5, 0x98165af37b2153df, 0xe2a0b5dc971f303a, - 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410, - 0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129, - 0xe7109bfba19c0c9d, 0xac2820d9623bf429, 0x80444b5e7aa7cf85, - 0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, - 0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b, - }; - - // Binary exponents of pow(10, k), for k = -348, -340, ..., 340, corresponding - // to significands above. 
- static constexpr const int16_t pow10_exponents[] = { - -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954, - -927, -901, -874, -847, -821, -794, -768, -741, -715, -688, -661, - -635, -608, -582, -555, -529, -502, -475, -449, -422, -396, -369, - -343, -316, -289, -263, -236, -210, -183, -157, -130, -103, -77, - -50, -24, 3, 30, 56, 83, 109, 136, 162, 189, 216, - 242, 269, 295, 322, 348, 375, 402, 428, 455, 481, 508, - 534, 561, 588, 614, 641, 667, 694, 720, 747, 774, 800, - 827, 853, 880, 907, 933, 960, 986, 1013, 1039, 1066}; - - const int shift = 32; - const auto significand = static_cast(data::log10_2_significand); - int index = static_cast( - ((min_exponent + fp::significand_size - 1) * (significand >> shift) + - ((int64_t(1) << shift) - 1)) // ceil - >> 32 // arithmetic shift - ); - // Decimal exponent of the first (smallest) cached power of 10. - const int first_dec_exp = -348; - // Difference between 2 consecutive decimal exponents in cached powers of 10. - const int dec_exp_step = 8; - index = (index - first_dec_exp - 1) / dec_exp_step + 1; - pow10_exponent = first_dec_exp + index * dec_exp_step; - return {pow10_significands[index], pow10_exponents[index]}; +inline fp get_cached_power(int min_exponent, int &pow10_exponent) { + // Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., + // 340. These are generated by support/compute-powers.py. 
+ static constexpr const uint64_t pow10_significands[] = { + 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76, 0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, + 0xe61acf033d1a45df, 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c, 0x8dd01fad907ffc3c, + 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, 0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57, + 0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7, 0xa086cfcd97bf97f4, 0xef340a98172aace5, + 0xb23867fb2a35b28e, 0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, 0xdbac6c247d62a584, + 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126, 0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053, + 0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f, 0xf8a95fcf88747d94, 0xb94470938fa89bcf, + 0x8a08f0f8bf0f156b, 0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06, 0xaa242499697392d3, + 0xfd87b5f28300ca0e, 0xbce5086492111aeb, 0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000, + 0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984, 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, + 0xd5d238a4abe98068, 0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, 0x83c7088e1aab65db, + 0xc45d1df942711d9a, 0x924d692ca61be758, 0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85, + 0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d, 0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, + 0xa59bc234db398c25, 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2, 0xcc20ce9bd35c78a5, + 0x98165af37b2153df, 0xe2a0b5dc971f303a, 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410, + 0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129, 0xe7109bfba19c0c9d, 0xac2820d9623bf429, + 0x80444b5e7aa7cf85, 0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, 0x9e19db92b4e31ba9, + 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b, + }; + + // Binary exponents of pow(10, k), for k = -348, -340, ..., 340, + // corresponding to significands above. 
+ static constexpr const int16_t pow10_exponents[] = { + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954, -927, -901, -874, -847, + -821, -794, -768, -741, -715, -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, + -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, -157, -130, -103, -77, -50, + -24, 3, 30, 56, 83, 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, + 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, 641, 667, 694, 720, 747, + 774, 800, 827, 853, 880, 907, 933, 960, 986, 1013, 1039, 1066}; + + const int shift = 32; + const auto significand = static_cast(data::log10_2_significand); + int index = static_cast( + ((min_exponent + fp::significand_size - 1) * (significand >> shift) + ((int64_t(1) << shift) - 1)) // ceil + >> 32 // arithmetic shift + ); + // Decimal exponent of the first (smallest) cached power of 10. + const int first_dec_exp = -348; + // Difference between 2 consecutive decimal exponents in cached powers + // of 10. + const int dec_exp_step = 8; + index = (index - first_dec_exp - 1) / dec_exp_step + 1; + pow10_exponent = first_dec_exp + index * dec_exp_step; + return {pow10_significands[index], pow10_exponents[index]}; } // A simple accumulator to hold the sums of terms in bigint::square if uint128_t // is not available. 
struct accumulator { - uint64_t lower; - uint64_t upper; - - accumulator() : lower(0), upper(0) {} - explicit operator uint32_t() const { return static_cast(lower); } - - void operator+=(uint64_t n) { - lower += n; - if (lower < n) ++upper; - } - void operator>>=(int shift) { - FMT_ASSERT(shift == 32, ""); - (void)shift; - lower = (upper << 32) | (lower >> 32); - upper >>= 32; - } + uint64_t lower; + uint64_t upper; + + accumulator() : lower(0), upper(0) { + } + explicit operator uint32_t() const { + return static_cast(lower); + } + + void operator+=(uint64_t n) { + lower += n; + if (lower < n) + ++upper; + } + void operator>>=(int shift) { + FMT_ASSERT(shift == 32, ""); + (void)shift; + lower = (upper << 32) | (lower >> 32); + upper >>= 32; + } }; class bigint { - private: - // A bigint is stored as an array of bigits (big digits), with bigit at index - // 0 being the least significant one. - using bigit = uint32_t; - using double_bigit = uint64_t; - enum { bigits_capacity = 32 }; - basic_memory_buffer bigits_; - int exp_; - - bigit operator[](int index) const { return bigits_[to_unsigned(index)]; } - bigit& operator[](int index) { return bigits_[to_unsigned(index)]; } - - static FMT_CONSTEXPR_DECL const int bigit_bits = bits::value; - - friend struct formatter; - - void subtract_bigits(int index, bigit other, bigit& borrow) { - auto result = static_cast((*this)[index]) - other - borrow; - (*this)[index] = static_cast(result); - borrow = static_cast(result >> (bigit_bits * 2 - 1)); - } - - void remove_leading_zeros() { - int num_bigits = static_cast(bigits_.size()) - 1; - while (num_bigits > 0 && (*this)[num_bigits] == 0) --num_bigits; - bigits_.resize(to_unsigned(num_bigits + 1)); - } - - // Computes *this -= other assuming aligned bigints and *this >= other. 
- void subtract_aligned(const bigint& other) { - FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints"); - FMT_ASSERT(compare(*this, other) >= 0, ""); - bigit borrow = 0; - int i = other.exp_ - exp_; - for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j) - subtract_bigits(i, other.bigits_[j], borrow); - while (borrow > 0) subtract_bigits(i, 0, borrow); - remove_leading_zeros(); - } - - void multiply(uint32_t value) { - const double_bigit wide_value = value; - bigit carry = 0; - for (size_t i = 0, n = bigits_.size(); i < n; ++i) { - double_bigit result = bigits_[i] * wide_value + carry; - bigits_[i] = static_cast(result); - carry = static_cast(result >> bigit_bits); - } - if (carry != 0) bigits_.push_back(carry); - } - - void multiply(uint64_t value) { - const bigit mask = ~bigit(0); - const double_bigit lower = value & mask; - const double_bigit upper = value >> bigit_bits; - double_bigit carry = 0; - for (size_t i = 0, n = bigits_.size(); i < n; ++i) { - double_bigit result = bigits_[i] * lower + (carry & mask); - carry = - bigits_[i] * upper + (result >> bigit_bits) + (carry >> bigit_bits); - bigits_[i] = static_cast(result); - } - while (carry != 0) { - bigits_.push_back(carry & mask); - carry >>= bigit_bits; - } - } - - public: - bigint() : exp_(0) {} - explicit bigint(uint64_t n) { assign(n); } - ~bigint() { FMT_ASSERT(bigits_.capacity() <= bigits_capacity, ""); } - - bigint(const bigint&) = delete; - void operator=(const bigint&) = delete; - - void assign(const bigint& other) { - auto size = other.bigits_.size(); - bigits_.resize(size); - auto data = other.bigits_.data(); - std::copy(data, data + size, make_checked(bigits_.data(), size)); - exp_ = other.exp_; - } - - void assign(uint64_t n) { - size_t num_bigits = 0; - do { - bigits_[num_bigits++] = n & ~bigit(0); - n >>= bigit_bits; - } while (n != 0); - bigits_.resize(num_bigits); - exp_ = 0; - } - - int num_bigits() const { return static_cast(bigits_.size()) + exp_; } - - FMT_NOINLINE bigint& 
operator<<=(int shift) { - FMT_ASSERT(shift >= 0, ""); - exp_ += shift / bigit_bits; - shift %= bigit_bits; - if (shift == 0) return *this; - bigit carry = 0; - for (size_t i = 0, n = bigits_.size(); i < n; ++i) { - bigit c = bigits_[i] >> (bigit_bits - shift); - bigits_[i] = (bigits_[i] << shift) + carry; - carry = c; - } - if (carry != 0) bigits_.push_back(carry); - return *this; - } - - template bigint& operator*=(Int value) { - FMT_ASSERT(value > 0, ""); - multiply(uint32_or_64_or_128_t(value)); - return *this; - } - - friend int compare(const bigint& lhs, const bigint& rhs) { - int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits(); - if (num_lhs_bigits != num_rhs_bigits) - return num_lhs_bigits > num_rhs_bigits ? 1 : -1; - int i = static_cast(lhs.bigits_.size()) - 1; - int j = static_cast(rhs.bigits_.size()) - 1; - int end = i - j; - if (end < 0) end = 0; - for (; i >= end; --i, --j) { - bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j]; - if (lhs_bigit != rhs_bigit) return lhs_bigit > rhs_bigit ? 1 : -1; - } - if (i != j) return i > j ? 1 : -1; - return 0; - } - - // Returns compare(lhs1 + lhs2, rhs). - friend int add_compare(const bigint& lhs1, const bigint& lhs2, - const bigint& rhs) { - int max_lhs_bigits = (std::max)(lhs1.num_bigits(), lhs2.num_bigits()); - int num_rhs_bigits = rhs.num_bigits(); - if (max_lhs_bigits + 1 < num_rhs_bigits) return -1; - if (max_lhs_bigits > num_rhs_bigits) return 1; - auto get_bigit = [](const bigint& n, int i) -> bigit { - return i >= n.exp_ && i < n.num_bigits() ? 
n[i - n.exp_] : 0; - }; - double_bigit borrow = 0; - int min_exp = (std::min)((std::min)(lhs1.exp_, lhs2.exp_), rhs.exp_); - for (int i = num_rhs_bigits - 1; i >= min_exp; --i) { - double_bigit sum = - static_cast(get_bigit(lhs1, i)) + get_bigit(lhs2, i); - bigit rhs_bigit = get_bigit(rhs, i); - if (sum > rhs_bigit + borrow) return 1; - borrow = rhs_bigit + borrow - sum; - if (borrow > 1) return -1; - borrow <<= bigit_bits; - } - return borrow != 0 ? -1 : 0; - } - - // Assigns pow(10, exp) to this bigint. - void assign_pow10(int exp) { - FMT_ASSERT(exp >= 0, ""); - if (exp == 0) return assign(1); - // Find the top bit. - int bitmask = 1; - while (exp >= bitmask) bitmask <<= 1; - bitmask >>= 1; - // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) by - // repeated squaring and multiplication. - assign(5); - bitmask >>= 1; - while (bitmask != 0) { - square(); - if ((exp & bitmask) != 0) *this *= 5; - bitmask >>= 1; - } - *this <<= exp; // Multiply by pow(2, exp) by shifting. - } - - void square() { - int num_bigits = static_cast(bigits_.size()); - int num_result_bigits = 2 * num_bigits; - basic_memory_buffer n(std::move(bigits_)); - bigits_.resize(to_unsigned(num_result_bigits)); - using accumulator_t = conditional_t; - auto sum = accumulator_t(); - for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) { - // Compute bigit at position bigit_index of the result by adding - // cross-product terms n[i] * n[j] such that i + j == bigit_index. - for (int i = 0, j = bigit_index; j >= 0; ++i, --j) { - // Most terms are multiplied twice which can be optimized in the future. - sum += static_cast(n[i]) * n[j]; - } - (*this)[bigit_index] = static_cast(sum); - sum >>= bits::value; // Compute the carry. - } - // Do the same for the top half. 
- for (int bigit_index = num_bigits; bigit_index < num_result_bigits; - ++bigit_index) { - for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;) - sum += static_cast(n[i++]) * n[j--]; - (*this)[bigit_index] = static_cast(sum); - sum >>= bits::value; - } - remove_leading_zeros(); - exp_ *= 2; - } - - // If this bigint has a bigger exponent than other, adds trailing zero to make - // exponents equal. This simplifies some operations such as subtraction. - void align(const bigint& other) { - int exp_difference = exp_ - other.exp_; - if (exp_difference <= 0) return; - int num_bigits = static_cast(bigits_.size()); - bigits_.resize(to_unsigned(num_bigits + exp_difference)); - for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j) - bigits_[j] = bigits_[i]; - std::uninitialized_fill_n(bigits_.data(), exp_difference, 0); - exp_ -= exp_difference; - } - - // Divides this bignum by divisor, assigning the remainder to this and - // returning the quotient. - int divmod_assign(const bigint& divisor) { - FMT_ASSERT(this != &divisor, ""); - if (compare(*this, divisor) < 0) return 0; - FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, ""); - align(divisor); - int quotient = 0; - do { - subtract_aligned(divisor); - ++quotient; - } while (compare(*this, divisor) >= 0); - return quotient; - } +private: + // A bigint is stored as an array of bigits (big digits), with bigit at + // index 0 being the least significant one. 
+ using bigit = uint32_t; + using double_bigit = uint64_t; + enum { bigits_capacity = 32 }; + basic_memory_buffer bigits_; + int exp_; + + bigit operator[](int index) const { + return bigits_[to_unsigned(index)]; + } + bigit &operator[](int index) { + return bigits_[to_unsigned(index)]; + } + + static FMT_CONSTEXPR_DECL const int bigit_bits = bits::value; + + friend struct formatter; + + void subtract_bigits(int index, bigit other, bigit &borrow) { + auto result = static_cast((*this)[index]) - other - borrow; + (*this)[index] = static_cast(result); + borrow = static_cast(result >> (bigit_bits * 2 - 1)); + } + + void remove_leading_zeros() { + int num_bigits = static_cast(bigits_.size()) - 1; + while (num_bigits > 0 && (*this)[num_bigits] == 0) + --num_bigits; + bigits_.resize(to_unsigned(num_bigits + 1)); + } + + // Computes *this -= other assuming aligned bigints and *this >= other. + void subtract_aligned(const bigint &other) { + FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints"); + FMT_ASSERT(compare(*this, other) >= 0, ""); + bigit borrow = 0; + int i = other.exp_ - exp_; + for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j) + subtract_bigits(i, other.bigits_[j], borrow); + while (borrow > 0) + subtract_bigits(i, 0, borrow); + remove_leading_zeros(); + } + + void multiply(uint32_t value) { + const double_bigit wide_value = value; + bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + double_bigit result = bigits_[i] * wide_value + carry; + bigits_[i] = static_cast(result); + carry = static_cast(result >> bigit_bits); + } + if (carry != 0) + bigits_.push_back(carry); + } + + void multiply(uint64_t value) { + const bigit mask = ~bigit(0); + const double_bigit lower = value & mask; + const double_bigit upper = value >> bigit_bits; + double_bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + double_bigit result = bigits_[i] * lower + (carry & mask); + carry = bigits_[i] * upper + (result >> bigit_bits) + (carry 
>> bigit_bits); + bigits_[i] = static_cast(result); + } + while (carry != 0) { + bigits_.push_back(carry & mask); + carry >>= bigit_bits; + } + } + +public: + bigint() : exp_(0) { + } + explicit bigint(uint64_t n) { + assign(n); + } + ~bigint() { + FMT_ASSERT(bigits_.capacity() <= bigits_capacity, ""); + } + + bigint(const bigint &) = delete; + void operator=(const bigint &) = delete; + + void assign(const bigint &other) { + auto size = other.bigits_.size(); + bigits_.resize(size); + auto data = other.bigits_.data(); + std::copy(data, data + size, make_checked(bigits_.data(), size)); + exp_ = other.exp_; + } + + void assign(uint64_t n) { + size_t num_bigits = 0; + do { + bigits_[num_bigits++] = n & ~bigit(0); + n >>= bigit_bits; + } while (n != 0); + bigits_.resize(num_bigits); + exp_ = 0; + } + + int num_bigits() const { + return static_cast(bigits_.size()) + exp_; + } + + FMT_NOINLINE bigint &operator<<=(int shift) { + FMT_ASSERT(shift >= 0, ""); + exp_ += shift / bigit_bits; + shift %= bigit_bits; + if (shift == 0) + return *this; + bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + bigit c = bigits_[i] >> (bigit_bits - shift); + bigits_[i] = (bigits_[i] << shift) + carry; + carry = c; + } + if (carry != 0) + bigits_.push_back(carry); + return *this; + } + + template + bigint &operator*=(Int value) { + FMT_ASSERT(value > 0, ""); + multiply(uint32_or_64_or_128_t(value)); + return *this; + } + + friend int compare(const bigint &lhs, const bigint &rhs) { + int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits(); + if (num_lhs_bigits != num_rhs_bigits) + return num_lhs_bigits > num_rhs_bigits ? 1 : -1; + int i = static_cast(lhs.bigits_.size()) - 1; + int j = static_cast(rhs.bigits_.size()) - 1; + int end = i - j; + if (end < 0) + end = 0; + for (; i >= end; --i, --j) { + bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j]; + if (lhs_bigit != rhs_bigit) + return lhs_bigit > rhs_bigit ? 1 : -1; + } + if (i != j) + return i > j ? 
1 : -1; + return 0; + } + + // Returns compare(lhs1 + lhs2, rhs). + friend int add_compare(const bigint &lhs1, const bigint &lhs2, const bigint &rhs) { + int max_lhs_bigits = (std::max)(lhs1.num_bigits(), lhs2.num_bigits()); + int num_rhs_bigits = rhs.num_bigits(); + if (max_lhs_bigits + 1 < num_rhs_bigits) + return -1; + if (max_lhs_bigits > num_rhs_bigits) + return 1; + auto get_bigit = [](const bigint &n, int i) -> bigit { + return i >= n.exp_ && i < n.num_bigits() ? n[i - n.exp_] : 0; + }; + double_bigit borrow = 0; + int min_exp = (std::min)((std::min)(lhs1.exp_, lhs2.exp_), rhs.exp_); + for (int i = num_rhs_bigits - 1; i >= min_exp; --i) { + double_bigit sum = static_cast(get_bigit(lhs1, i)) + get_bigit(lhs2, i); + bigit rhs_bigit = get_bigit(rhs, i); + if (sum > rhs_bigit + borrow) + return 1; + borrow = rhs_bigit + borrow - sum; + if (borrow > 1) + return -1; + borrow <<= bigit_bits; + } + return borrow != 0 ? -1 : 0; + } + + // Assigns pow(10, exp) to this bigint. + void assign_pow10(int exp) { + FMT_ASSERT(exp >= 0, ""); + if (exp == 0) + return assign(1); + // Find the top bit. + int bitmask = 1; + while (exp >= bitmask) + bitmask <<= 1; + bitmask >>= 1; + // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) + // by repeated squaring and multiplication. + assign(5); + bitmask >>= 1; + while (bitmask != 0) { + square(); + if ((exp & bitmask) != 0) + *this *= 5; + bitmask >>= 1; + } + *this <<= exp; // Multiply by pow(2, exp) by shifting. + } + + void square() { + int num_bigits = static_cast(bigits_.size()); + int num_result_bigits = 2 * num_bigits; + basic_memory_buffer n(std::move(bigits_)); + bigits_.resize(to_unsigned(num_result_bigits)); + using accumulator_t = conditional_t; + auto sum = accumulator_t(); + for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) { + // Compute bigit at position bigit_index of the result by adding + // cross-product terms n[i] * n[j] such that i + j == bigit_index. 
+ for (int i = 0, j = bigit_index; j >= 0; ++i, --j) { + // Most terms are multiplied twice which can be optimized in the + // future. + sum += static_cast(n[i]) * n[j]; + } + (*this)[bigit_index] = static_cast(sum); + sum >>= bits::value; // Compute the carry. + } + // Do the same for the top half. + for (int bigit_index = num_bigits; bigit_index < num_result_bigits; ++bigit_index) { + for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;) + sum += static_cast(n[i++]) * n[j--]; + (*this)[bigit_index] = static_cast(sum); + sum >>= bits::value; + } + remove_leading_zeros(); + exp_ *= 2; + } + + // If this bigint has a bigger exponent than other, adds trailing zero to + // make exponents equal. This simplifies some operations such as + // subtraction. + void align(const bigint &other) { + int exp_difference = exp_ - other.exp_; + if (exp_difference <= 0) + return; + int num_bigits = static_cast(bigits_.size()); + bigits_.resize(to_unsigned(num_bigits + exp_difference)); + for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j) + bigits_[j] = bigits_[i]; + std::uninitialized_fill_n(bigits_.data(), exp_difference, 0); + exp_ -= exp_difference; + } + + // Divides this bignum by divisor, assigning the remainder to this and + // returning the quotient. + int divmod_assign(const bigint &divisor) { + FMT_ASSERT(this != &divisor, ""); + if (compare(*this, divisor) < 0) + return 0; + FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, ""); + align(divisor); + int quotient = 0; + do { + subtract_aligned(divisor); + ++quotient; + } while (compare(*this, divisor) >= 0); + return quotient; + } }; enum class round_direction { unknown, up, down }; @@ -602,223 +633,232 @@ enum class round_direction { unknown, up, down }; // some number v and the error, returns whether v should be rounded up, down, or // whether the rounding direction can't be determined due to error. // error should be less than divisor / 2. 
-inline round_direction get_round_direction(uint64_t divisor, uint64_t remainder, - uint64_t error) { - FMT_ASSERT(remainder < divisor, ""); // divisor - remainder won't overflow. - FMT_ASSERT(error < divisor, ""); // divisor - error won't overflow. - FMT_ASSERT(error < divisor - error, ""); // error * 2 won't overflow. - // Round down if (remainder + error) * 2 <= divisor. - if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2) - return round_direction::down; - // Round up if (remainder - error) * 2 >= divisor. - if (remainder >= error && - remainder - error >= divisor - (remainder - error)) { - return round_direction::up; - } - return round_direction::unknown; +inline round_direction get_round_direction(uint64_t divisor, uint64_t remainder, uint64_t error) { + FMT_ASSERT(remainder < divisor, ""); // divisor - remainder won't overflow. + FMT_ASSERT(error < divisor, ""); // divisor - error won't overflow. + FMT_ASSERT(error < divisor - error, ""); // error * 2 won't overflow. + // Round down if (remainder + error) * 2 <= divisor. + if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2) + return round_direction::down; + // Round up if (remainder - error) * 2 >= divisor. + if (remainder >= error && remainder - error >= divisor - (remainder - error)) { + return round_direction::up; + } + return round_direction::unknown; } namespace digits { enum result { - more, // Generate more digits. - done, // Done generating digits. - error // Digit generation cancelled due to an error. + more, // Generate more digits. + done, // Done generating digits. + error // Digit generation cancelled due to an error. 
 };
 }
 
 inline uint64_t power_of_10_64(int exp) {
-  static constexpr const uint64_t data[] = {1, FMT_POWERS_OF_10(1),
-                                            FMT_POWERS_OF_10(1000000000ULL),
-                                            10000000000000000000ULL};
-  return data[exp];
+    static constexpr const uint64_t data[] = {1, FMT_POWERS_OF_10(1), FMT_POWERS_OF_10(1000000000ULL),
+                                              10000000000000000000ULL};
+    return data[exp];
 }
 
 // Generates output using the Grisu digit-gen algorithm.
 // error: the size of the region (lower, upper) outside of which numbers
 // definitely do not round to value (Delta in Grisu3).
 template <typename Handler>
-FMT_INLINE digits::result grisu_gen_digits(fp value, uint64_t error, int& exp,
-                                           Handler& handler) {
-  const fp one(1ULL << -value.e, value.e);
-  // The integral part of scaled value (p1 in Grisu) = value / one. It cannot be
-  // zero because it contains a product of two 64-bit numbers with MSB set (due
-  // to normalization) - 1, shifted right by at most 60 bits.
-  auto integral = static_cast<uint32_t>(value.f >> -one.e);
-  FMT_ASSERT(integral != 0, "");
-  FMT_ASSERT(integral == value.f >> -one.e, "");
-  // The fractional part of scaled value (p2 in Grisu) c = value % one.
-  uint64_t fractional = value.f & (one.f - 1);
-  exp = count_digits(integral);  // kappa in Grisu.
-  // Divide by 10 to prevent overflow.
-  auto result = handler.on_start(power_of_10_64(exp - 1) << -one.e,
-                                 value.f / 10, error * 10, exp);
-  if (result != digits::more) return result;
-  // Generate digits for the integral part. This can produce up to 10 digits.
-  do {
-    uint32_t digit = 0;
-    auto divmod_integral = [&](uint32_t divisor) {
-      digit = integral / divisor;
-      integral %= divisor;
-    };
-    // This optimization by Milo Yip reduces the number of integer divisions by
-    // one per iteration.
- switch (exp) { - case 10: - divmod_integral(1000000000); - break; - case 9: - divmod_integral(100000000); - break; - case 8: - divmod_integral(10000000); - break; - case 7: - divmod_integral(1000000); - break; - case 6: - divmod_integral(100000); - break; - case 5: - divmod_integral(10000); - break; - case 4: - divmod_integral(1000); - break; - case 3: - divmod_integral(100); - break; - case 2: - divmod_integral(10); - break; - case 1: - digit = integral; - integral = 0; - break; - default: - FMT_ASSERT(false, "invalid number of digits"); - } - --exp; - auto remainder = (static_cast(integral) << -one.e) + fractional; - result = handler.on_digit(static_cast('0' + digit), - power_of_10_64(exp) << -one.e, remainder, error, - exp, true); - if (result != digits::more) return result; - } while (exp > 0); - // Generate digits for the fractional part. - for (;;) { - fractional *= 10; - error *= 10; - char digit = static_cast('0' + (fractional >> -one.e)); - fractional &= one.f - 1; - --exp; - result = handler.on_digit(digit, one.f, fractional, error, exp, false); - if (result != digits::more) return result; - } +FMT_INLINE digits::result grisu_gen_digits(fp value, uint64_t error, int &exp, Handler &handler) { + const fp one(1ULL << -value.e, value.e); + // The integral part of scaled value (p1 in Grisu) = value / one. It cannot + // be zero because it contains a product of two 64-bit numbers with MSB set + // (due to normalization) - 1, shifted right by at most 60 bits. + auto integral = static_cast(value.f >> -one.e); + FMT_ASSERT(integral != 0, ""); + FMT_ASSERT(integral == value.f >> -one.e, ""); + // The fractional part of scaled value (p2 in Grisu) c = value % one. + uint64_t fractional = value.f & (one.f - 1); + exp = count_digits(integral); // kappa in Grisu. + // Divide by 10 to prevent overflow. 
+ auto result = handler.on_start(power_of_10_64(exp - 1) << -one.e, value.f / 10, error * 10, exp); + if (result != digits::more) + return result; + // Generate digits for the integral part. This can produce up to 10 digits. + do { + uint32_t digit = 0; + auto divmod_integral = [&](uint32_t divisor) { + digit = integral / divisor; + integral %= divisor; + }; + // This optimization by Milo Yip reduces the number of integer divisions + // by one per iteration. + switch (exp) { + case 10: + divmod_integral(1000000000); + break; + case 9: + divmod_integral(100000000); + break; + case 8: + divmod_integral(10000000); + break; + case 7: + divmod_integral(1000000); + break; + case 6: + divmod_integral(100000); + break; + case 5: + divmod_integral(10000); + break; + case 4: + divmod_integral(1000); + break; + case 3: + divmod_integral(100); + break; + case 2: + divmod_integral(10); + break; + case 1: + digit = integral; + integral = 0; + break; + default: + FMT_ASSERT(false, "invalid number of digits"); + } + --exp; + auto remainder = (static_cast(integral) << -one.e) + fractional; + result = handler.on_digit(static_cast('0' + digit), power_of_10_64(exp) << -one.e, remainder, error, exp, + true); + if (result != digits::more) + return result; + } while (exp > 0); + // Generate digits for the fractional part. + for (;;) { + fractional *= 10; + error *= 10; + char digit = static_cast('0' + (fractional >> -one.e)); + fractional &= one.f - 1; + --exp; + result = handler.on_digit(digit, one.f, fractional, error, exp, false); + if (result != digits::more) + return result; + } } // The fixed precision digit handler. struct fixed_handler { - char* buf; - int size; - int precision; - int exp10; - bool fixed; - - digits::result on_start(uint64_t divisor, uint64_t remainder, uint64_t error, - int& exp) { - // Non-fixed formats require at least one digit and no precision adjustment. 
- if (!fixed) return digits::more; - // Adjust fixed precision by exponent because it is relative to decimal - // point. - precision += exp + exp10; - // Check if precision is satisfied just by leading zeros, e.g. - // format("{:.2f}", 0.001) gives "0.00" without generating any digits. - if (precision > 0) return digits::more; - if (precision < 0) return digits::done; - auto dir = get_round_direction(divisor, remainder, error); - if (dir == round_direction::unknown) return digits::error; - buf[size++] = dir == round_direction::up ? '1' : '0'; - return digits::done; - } - - digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder, - uint64_t error, int, bool integral) { - FMT_ASSERT(remainder < divisor, ""); - buf[size++] = digit; - if (!integral && error >= remainder) return digits::error; - if (size < precision) return digits::more; - if (!integral) { - // Check if error * 2 < divisor with overflow prevention. - // The check is not needed for the integral part because error = 1 - // and divisor > (1 << 32) there. - if (error >= divisor || error >= divisor - error) return digits::error; - } else { - FMT_ASSERT(error == 1 && divisor > 2, ""); - } - auto dir = get_round_direction(divisor, remainder, error); - if (dir != round_direction::up) - return dir == round_direction::down ? digits::done : digits::error; - ++buf[size - 1]; - for (int i = size - 1; i > 0 && buf[i] > '9'; --i) { - buf[i] = '0'; - ++buf[i - 1]; - } - if (buf[0] > '9') { - buf[0] = '1'; - if (fixed) - buf[size++] = '0'; - else - ++exp10; - } - return digits::done; - } + char *buf; + int size; + int precision; + int exp10; + bool fixed; + + digits::result on_start(uint64_t divisor, uint64_t remainder, uint64_t error, int &exp) { + // Non-fixed formats require at least one digit and no precision + // adjustment. + if (!fixed) + return digits::more; + // Adjust fixed precision by exponent because it is relative to decimal + // point. 
+ precision += exp + exp10; + // Check if precision is satisfied just by leading zeros, e.g. + // format("{:.2f}", 0.001) gives "0.00" without generating any digits. + if (precision > 0) + return digits::more; + if (precision < 0) + return digits::done; + auto dir = get_round_direction(divisor, remainder, error); + if (dir == round_direction::unknown) + return digits::error; + buf[size++] = dir == round_direction::up ? '1' : '0'; + return digits::done; + } + + digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder, uint64_t error, int, bool integral) { + FMT_ASSERT(remainder < divisor, ""); + buf[size++] = digit; + if (!integral && error >= remainder) + return digits::error; + if (size < precision) + return digits::more; + if (!integral) { + // Check if error * 2 < divisor with overflow prevention. + // The check is not needed for the integral part because error = 1 + // and divisor > (1 << 32) there. + if (error >= divisor || error >= divisor - error) + return digits::error; + } else { + FMT_ASSERT(error == 1 && divisor > 2, ""); + } + auto dir = get_round_direction(divisor, remainder, error); + if (dir != round_direction::up) + return dir == round_direction::down ? 
digits::done : digits::error; + ++buf[size - 1]; + for (int i = size - 1; i > 0 && buf[i] > '9'; --i) { + buf[i] = '0'; + ++buf[i - 1]; + } + if (buf[0] > '9') { + buf[0] = '1'; + if (fixed) + buf[size++] = '0'; + else + ++exp10; + } + return digits::done; + } }; // A 128-bit integer type used internally, struct uint128_wrapper { - uint128_wrapper() = default; + uint128_wrapper() = default; #if FMT_USE_INT128 - uint128_t internal_; - - constexpr uint128_wrapper(uint64_t high, uint64_t low) FMT_NOEXCEPT - : internal_{static_cast(low) | - (static_cast(high) << 64)} {} - - constexpr uint128_wrapper(uint128_t u) : internal_{u} {} - - constexpr uint64_t high() const FMT_NOEXCEPT { - return uint64_t(internal_ >> 64); - } - constexpr uint64_t low() const FMT_NOEXCEPT { return uint64_t(internal_); } - - uint128_wrapper& operator+=(uint64_t n) FMT_NOEXCEPT { - internal_ += n; - return *this; - } + uint128_t internal_; + + constexpr uint128_wrapper(uint64_t high, uint64_t low) FMT_NOEXCEPT + : internal_ {static_cast(low) | (static_cast(high) << 64)} { + } + + constexpr uint128_wrapper(uint128_t u) : internal_ {u} { + } + + constexpr uint64_t high() const FMT_NOEXCEPT { + return uint64_t(internal_ >> 64); + } + constexpr uint64_t low() const FMT_NOEXCEPT { + return uint64_t(internal_); + } + + uint128_wrapper &operator+=(uint64_t n) FMT_NOEXCEPT { + internal_ += n; + return *this; + } +#else + uint64_t high_; + uint64_t low_; + + constexpr uint128_wrapper(uint64_t high, uint64_t low) FMT_NOEXCEPT : high_ {high}, low_ {low} { + } + + constexpr uint64_t high() const FMT_NOEXCEPT { + return high_; + } + constexpr uint64_t low() const FMT_NOEXCEPT { + return low_; + } + + uint128_wrapper &operator+=(uint64_t n) FMT_NOEXCEPT { +#if defined(_MSC_VER) && defined(_M_X64) + unsigned char carry = _addcarry_u64(0, low_, n, &low_); + _addcarry_u64(carry, high_, 0, &high_); + return *this; #else - uint64_t high_; - uint64_t low_; - - constexpr uint128_wrapper(uint64_t high, uint64_t low) 
FMT_NOEXCEPT - : high_{high}, - low_{low} {} - - constexpr uint64_t high() const FMT_NOEXCEPT { return high_; } - constexpr uint64_t low() const FMT_NOEXCEPT { return low_; } - - uint128_wrapper& operator+=(uint64_t n) FMT_NOEXCEPT { -# if defined(_MSC_VER) && defined(_M_X64) - unsigned char carry = _addcarry_u64(0, low_, n, &low_); - _addcarry_u64(carry, high_, 0, &high_); - return *this; -# else - uint64_t sum = low_ + n; - high_ += (sum < low_ ? 1 : 0); - low_ = sum; - return *this; -# endif - } + uint64_t sum = low_ + n; + high_ += (sum < low_ ? 1 : 0); + low_ = sum; + return *this; +#endif + } #endif }; @@ -827,1794 +867,1731 @@ namespace dragonbox { // Computes 128-bit result of multiplication of two 64-bit unsigned integers. inline uint128_wrapper umul128(uint64_t x, uint64_t y) FMT_NOEXCEPT { #if FMT_USE_INT128 - return static_cast(x) * static_cast(y); + return static_cast(x) * static_cast(y); #elif defined(_MSC_VER) && defined(_M_X64) - uint128_wrapper result; - result.low_ = _umul128(x, y, &result.high_); - return result; + uint128_wrapper result; + result.low_ = _umul128(x, y, &result.high_); + return result; #else - const uint64_t mask = (uint64_t(1) << 32) - uint64_t(1); + const uint64_t mask = (uint64_t(1) << 32) - uint64_t(1); - uint64_t a = x >> 32; - uint64_t b = x & mask; - uint64_t c = y >> 32; - uint64_t d = y & mask; + uint64_t a = x >> 32; + uint64_t b = x & mask; + uint64_t c = y >> 32; + uint64_t d = y & mask; - uint64_t ac = a * c; - uint64_t bc = b * c; - uint64_t ad = a * d; - uint64_t bd = b * d; + uint64_t ac = a * c; + uint64_t bc = b * c; + uint64_t ad = a * d; + uint64_t bd = b * d; - uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask); + uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask); - return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32), - (intermediate << 32) + (bd & mask)}; + return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32), (intermediate << 32) + (bd & mask)}; #endif } // Computes 
upper 64 bits of multiplication of two 64-bit unsigned integers. inline uint64_t umul128_upper64(uint64_t x, uint64_t y) FMT_NOEXCEPT { #if FMT_USE_INT128 - auto p = static_cast(x) * static_cast(y); - return static_cast(p >> 64); + auto p = static_cast(x) * static_cast(y); + return static_cast(p >> 64); #elif defined(_MSC_VER) && defined(_M_X64) - return __umulh(x, y); + return __umulh(x, y); #else - return umul128(x, y).high(); + return umul128(x, y).high(); #endif } // Computes upper 64 bits of multiplication of a 64-bit unsigned integer and a // 128-bit unsigned integer. inline uint64_t umul192_upper64(uint64_t x, uint128_wrapper y) FMT_NOEXCEPT { - uint128_wrapper g0 = umul128(x, y.high()); - g0 += umul128_upper64(x, y.low()); - return g0.high(); + uint128_wrapper g0 = umul128(x, y.high()); + g0 += umul128_upper64(x, y.low()); + return g0.high(); } // Computes upper 32 bits of multiplication of a 32-bit unsigned integer and a // 64-bit unsigned integer. inline uint32_t umul96_upper32(uint32_t x, uint64_t y) FMT_NOEXCEPT { - return static_cast(umul128_upper64(x, y)); + return static_cast(umul128_upper64(x, y)); } // Computes middle 64 bits of multiplication of a 64-bit unsigned integer and a // 128-bit unsigned integer. inline uint64_t umul192_middle64(uint64_t x, uint128_wrapper y) FMT_NOEXCEPT { - uint64_t g01 = x * y.high(); - uint64_t g10 = umul128_upper64(x, y.low()); - return g01 + g10; + uint64_t g01 = x * y.high(); + uint64_t g10 = umul128_upper64(x, y.low()); + return g01 + g10; } // Computes lower 64 bits of multiplication of a 32-bit unsigned integer and a // 64-bit unsigned integer. inline uint64_t umul96_lower64(uint32_t x, uint64_t y) FMT_NOEXCEPT { - return x * y; + return x * y; } // Computes floor(log10(pow(2, e))) for e in [-1700, 1700] using the method from // https://fmt.dev/papers/Grisu-Exact.pdf#page=5, section 3.4. 
inline int floor_log10_pow2(int e) FMT_NOEXCEPT { - FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); - const int shift = 22; - return (e * static_cast(data::log10_2_significand >> (64 - shift))) >> - shift; + FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); + const int shift = 22; + return (e * static_cast(data::log10_2_significand >> (64 - shift))) >> shift; } // Various fast log computations. inline int floor_log2_pow10(int e) FMT_NOEXCEPT { - FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent"); - const uint64_t log2_10_integer_part = 3; - const uint64_t log2_10_fractional_digits = 0x5269e12f346e2bf9; - const int shift_amount = 19; - return (e * static_cast( - (log2_10_integer_part << shift_amount) | - (log2_10_fractional_digits >> (64 - shift_amount)))) >> - shift_amount; + FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent"); + const uint64_t log2_10_integer_part = 3; + const uint64_t log2_10_fractional_digits = 0x5269e12f346e2bf9; + const int shift_amount = 19; + return (e * static_cast((log2_10_integer_part << shift_amount) | + (log2_10_fractional_digits >> (64 - shift_amount)))) >> + shift_amount; } inline int floor_log10_pow2_minus_log10_4_over_3(int e) FMT_NOEXCEPT { - FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); - const uint64_t log10_4_over_3_fractional_digits = 0x1ffbfc2bbc780375; - const int shift_amount = 22; - return (e * static_cast(data::log10_2_significand >> - (64 - shift_amount)) - - static_cast(log10_4_over_3_fractional_digits >> - (64 - shift_amount))) >> - shift_amount; + FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); + const uint64_t log10_4_over_3_fractional_digits = 0x1ffbfc2bbc780375; + const int shift_amount = 22; + return (e * static_cast(data::log10_2_significand >> (64 - shift_amount)) - + static_cast(log10_4_over_3_fractional_digits >> (64 - shift_amount))) >> + shift_amount; } // Returns true iff x is divisible by pow(2, exp). 
inline bool divisible_by_power_of_2(uint32_t x, int exp) FMT_NOEXCEPT { - FMT_ASSERT(exp >= 1, ""); - FMT_ASSERT(x != 0, ""); + FMT_ASSERT(exp >= 1, ""); + FMT_ASSERT(x != 0, ""); #ifdef FMT_BUILTIN_CTZ - return FMT_BUILTIN_CTZ(x) >= exp; + return FMT_BUILTIN_CTZ(x) >= exp; #else - return exp < num_bits() && x == ((x >> exp) << exp); + return exp < num_bits() && x == ((x >> exp) << exp); #endif } inline bool divisible_by_power_of_2(uint64_t x, int exp) FMT_NOEXCEPT { - FMT_ASSERT(exp >= 1, ""); - FMT_ASSERT(x != 0, ""); + FMT_ASSERT(exp >= 1, ""); + FMT_ASSERT(x != 0, ""); #ifdef FMT_BUILTIN_CTZLL - return FMT_BUILTIN_CTZLL(x) >= exp; + return FMT_BUILTIN_CTZLL(x) >= exp; #else - return exp < num_bits() && x == ((x >> exp) << exp); + return exp < num_bits() && x == ((x >> exp) << exp); #endif } // Table entry type for divisibility test. -template struct divtest_table_entry { - T mod_inv; - T max_quotient; +template +struct divtest_table_entry { + T mod_inv; + T max_quotient; }; // Returns true iff x is divisible by pow(5, exp). 
inline bool divisible_by_power_of_5(uint32_t x, int exp) FMT_NOEXCEPT { - FMT_ASSERT(exp <= 10, "too large exponent"); - static constexpr const divtest_table_entry divtest_table[] = { - {0x00000001, 0xffffffff}, {0xcccccccd, 0x33333333}, - {0xc28f5c29, 0x0a3d70a3}, {0x26e978d5, 0x020c49ba}, - {0x3afb7e91, 0x0068db8b}, {0x0bcbe61d, 0x0014f8b5}, - {0x68c26139, 0x000431bd}, {0xae8d46a5, 0x0000d6bf}, - {0x22e90e21, 0x00002af3}, {0x3a2e9c6d, 0x00000897}, - {0x3ed61f49, 0x000001b7}}; - return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; + FMT_ASSERT(exp <= 10, "too large exponent"); + static constexpr const divtest_table_entry divtest_table[] = { + {0x00000001, 0xffffffff}, {0xcccccccd, 0x33333333}, {0xc28f5c29, 0x0a3d70a3}, {0x26e978d5, 0x020c49ba}, + {0x3afb7e91, 0x0068db8b}, {0x0bcbe61d, 0x0014f8b5}, {0x68c26139, 0x000431bd}, {0xae8d46a5, 0x0000d6bf}, + {0x22e90e21, 0x00002af3}, {0x3a2e9c6d, 0x00000897}, {0x3ed61f49, 0x000001b7}}; + return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; } inline bool divisible_by_power_of_5(uint64_t x, int exp) FMT_NOEXCEPT { - FMT_ASSERT(exp <= 23, "too large exponent"); - static constexpr const divtest_table_entry divtest_table[] = { - {0x0000000000000001, 0xffffffffffffffff}, - {0xcccccccccccccccd, 0x3333333333333333}, - {0x8f5c28f5c28f5c29, 0x0a3d70a3d70a3d70}, - {0x1cac083126e978d5, 0x020c49ba5e353f7c}, - {0xd288ce703afb7e91, 0x0068db8bac710cb2}, - {0x5d4e8fb00bcbe61d, 0x0014f8b588e368f0}, - {0x790fb65668c26139, 0x000431bde82d7b63}, - {0xe5032477ae8d46a5, 0x0000d6bf94d5e57a}, - {0xc767074b22e90e21, 0x00002af31dc46118}, - {0x8e47ce423a2e9c6d, 0x0000089705f4136b}, - {0x4fa7f60d3ed61f49, 0x000001b7cdfd9d7b}, - {0x0fee64690c913975, 0x00000057f5ff85e5}, - {0x3662e0e1cf503eb1, 0x000000119799812d}, - {0xa47a2cf9f6433fbd, 0x0000000384b84d09}, - {0x54186f653140a659, 0x00000000b424dc35}, - {0x7738164770402145, 0x0000000024075f3d}, - {0xe4a4d1417cd9a041, 0x000000000734aca5}, - {0xc75429d9e5c5200d, 
0x000000000170ef54}, - {0xc1773b91fac10669, 0x000000000049c977}, - {0x26b172506559ce15, 0x00000000000ec1e4}, - {0xd489e3a9addec2d1, 0x000000000002f394}, - {0x90e860bb892c8d5d, 0x000000000000971d}, - {0x502e79bf1b6f4f79, 0x0000000000001e39}, - {0xdcd618596be30fe5, 0x000000000000060b}}; - return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; + FMT_ASSERT(exp <= 23, "too large exponent"); + static constexpr const divtest_table_entry divtest_table[] = { + {0x0000000000000001, 0xffffffffffffffff}, {0xcccccccccccccccd, 0x3333333333333333}, + {0x8f5c28f5c28f5c29, 0x0a3d70a3d70a3d70}, {0x1cac083126e978d5, 0x020c49ba5e353f7c}, + {0xd288ce703afb7e91, 0x0068db8bac710cb2}, {0x5d4e8fb00bcbe61d, 0x0014f8b588e368f0}, + {0x790fb65668c26139, 0x000431bde82d7b63}, {0xe5032477ae8d46a5, 0x0000d6bf94d5e57a}, + {0xc767074b22e90e21, 0x00002af31dc46118}, {0x8e47ce423a2e9c6d, 0x0000089705f4136b}, + {0x4fa7f60d3ed61f49, 0x000001b7cdfd9d7b}, {0x0fee64690c913975, 0x00000057f5ff85e5}, + {0x3662e0e1cf503eb1, 0x000000119799812d}, {0xa47a2cf9f6433fbd, 0x0000000384b84d09}, + {0x54186f653140a659, 0x00000000b424dc35}, {0x7738164770402145, 0x0000000024075f3d}, + {0xe4a4d1417cd9a041, 0x000000000734aca5}, {0xc75429d9e5c5200d, 0x000000000170ef54}, + {0xc1773b91fac10669, 0x000000000049c977}, {0x26b172506559ce15, 0x00000000000ec1e4}, + {0xd489e3a9addec2d1, 0x000000000002f394}, {0x90e860bb892c8d5d, 0x000000000000971d}, + {0x502e79bf1b6f4f79, 0x0000000000001e39}, {0xdcd618596be30fe5, 0x000000000000060b}}; + return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; } // Replaces n by floor(n / pow(5, N)) returning true if and only if n is // divisible by pow(5, N). // Precondition: n <= 2 * pow(5, N + 1). 
template -bool check_divisibility_and_divide_by_pow5(uint32_t& n) FMT_NOEXCEPT { - static constexpr struct { - uint32_t magic_number; - int bits_for_comparison; - uint32_t threshold; - int shift_amount; - } infos[] = {{0xcccd, 16, 0x3333, 18}, {0xa429, 8, 0x0a, 20}}; - constexpr auto info = infos[N - 1]; - n *= info.magic_number; - const uint32_t comparison_mask = (1u << info.bits_for_comparison) - 1; - bool result = (n & comparison_mask) <= info.threshold; - n >>= info.shift_amount; - return result; +bool check_divisibility_and_divide_by_pow5(uint32_t &n) FMT_NOEXCEPT { + static constexpr struct { + uint32_t magic_number; + int bits_for_comparison; + uint32_t threshold; + int shift_amount; + } infos[] = {{0xcccd, 16, 0x3333, 18}, {0xa429, 8, 0x0a, 20}}; + constexpr auto info = infos[N - 1]; + n *= info.magic_number; + const uint32_t comparison_mask = (1u << info.bits_for_comparison) - 1; + bool result = (n & comparison_mask) <= info.threshold; + n >>= info.shift_amount; + return result; } // Computes floor(n / pow(10, N)) for small n and N. // Precondition: n <= pow(10, N + 1). 
-template uint32_t small_division_by_pow10(uint32_t n) FMT_NOEXCEPT { - static constexpr struct { - uint32_t magic_number; - int shift_amount; - uint32_t divisor_times_10; - } infos[] = {{0xcccd, 19, 100}, {0xa3d8, 22, 1000}}; - constexpr auto info = infos[N - 1]; - FMT_ASSERT(n <= info.divisor_times_10, "n is too large"); - return n * info.magic_number >> info.shift_amount; +template +uint32_t small_division_by_pow10(uint32_t n) FMT_NOEXCEPT { + static constexpr struct { + uint32_t magic_number; + int shift_amount; + uint32_t divisor_times_10; + } infos[] = {{0xcccd, 19, 100}, {0xa3d8, 22, 1000}}; + constexpr auto info = infos[N - 1]; + FMT_ASSERT(n <= info.divisor_times_10, "n is too large"); + return n * info.magic_number >> info.shift_amount; } // Computes floor(n / 10^(kappa + 1)) (float) inline uint32_t divide_by_10_to_kappa_plus_1(uint32_t n) FMT_NOEXCEPT { - return n / float_info::big_divisor; + return n / float_info::big_divisor; } // Computes floor(n / 10^(kappa + 1)) (double) inline uint64_t divide_by_10_to_kappa_plus_1(uint64_t n) FMT_NOEXCEPT { - return umul128_upper64(n, 0x83126e978d4fdf3c) >> 9; + return umul128_upper64(n, 0x83126e978d4fdf3c) >> 9; } // Various subroutines using pow10 cache -template struct cache_accessor; - -template <> struct cache_accessor { - using carrier_uint = float_info::carrier_uint; - using cache_entry_type = uint64_t; - - static uint64_t get_cached_power(int k) FMT_NOEXCEPT { - FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, - "k is out of range"); - constexpr const uint64_t pow10_significands[] = { - 0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f, - 0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, 0xc612062576589ddb, - 0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28, - 0xf1c90080baf72cb2, 0x971da05074da7bef, 0xbce5086492111aeb, - 0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a, - 0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810, - 0xe12e13424bb40e14, 0x8cbccc096f5088cc, 
0xafebff0bcb24aaff, - 0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd, - 0xd6bf94d5e57a42bd, 0x8637bd05af6c69b6, 0xa7c5ac471b478424, - 0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b, - 0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000, - 0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000, - 0xc350000000000000, 0xf424000000000000, 0x9896800000000000, - 0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000, - 0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000, - 0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000, - 0xb1a2bc2ec5000000, 0xde0b6b3a76400000, 0x8ac7230489e80000, - 0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000, - 0xa968163f0a57b400, 0xd3c21bcecceda100, 0x84595161401484a0, - 0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940984, - 0xa18f07d736b90be5, 0xc9f2c9cd04674ede, 0xfc6f7c4045812296, - 0x9dc5ada82b70b59d, 0xc5371912364ce305, 0xf684df56c3e01bc6, - 0x9a130b963a6c115c, 0xc097ce7bc90715b3, 0xf0bdc21abb48db20, - 0x96769950b50d88f4, 0xbc143fa4e250eb31, 0xeb194f8e1ae525fd, - 0x92efd1b8d0cf37be, 0xb7abc627050305ad, 0xe596b7b0c643c719, - 0x8f7e32ce7bea5c6f, 0xb35dbf821ae4f38b, 0xe0352f62a19e306e}; - return pow10_significands[k - float_info::min_k]; - } - - static carrier_uint compute_mul(carrier_uint u, - const cache_entry_type& cache) FMT_NOEXCEPT { - return umul96_upper32(u, cache); - } - - static uint32_t compute_delta(const cache_entry_type& cache, - int beta_minus_1) FMT_NOEXCEPT { - return static_cast(cache >> (64 - 1 - beta_minus_1)); - } - - static bool compute_mul_parity(carrier_uint two_f, - const cache_entry_type& cache, - int beta_minus_1) FMT_NOEXCEPT { - FMT_ASSERT(beta_minus_1 >= 1, ""); - FMT_ASSERT(beta_minus_1 < 64, ""); - - return ((umul96_lower64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; - } - - static carrier_uint compute_left_endpoint_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return static_cast( - 
(cache - (cache >> (float_info::significand_bits + 2))) >> - (64 - float_info::significand_bits - 1 - beta_minus_1)); - } - - static carrier_uint compute_right_endpoint_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return static_cast( - (cache + (cache >> (float_info::significand_bits + 1))) >> - (64 - float_info::significand_bits - 1 - beta_minus_1)); - } - - static carrier_uint compute_round_up_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return (static_cast( - cache >> - (64 - float_info::significand_bits - 2 - beta_minus_1)) + - 1) / - 2; - } +template +struct cache_accessor; + +template <> +struct cache_accessor { + using carrier_uint = float_info::carrier_uint; + using cache_entry_type = uint64_t; + + static uint64_t get_cached_power(int k) FMT_NOEXCEPT { + FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, "k is out of range"); + constexpr const uint64_t pow10_significands[] = { + 0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f, 0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, + 0xc612062576589ddb, 0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28, 0xf1c90080baf72cb2, + 0x971da05074da7bef, 0xbce5086492111aeb, 0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a, + 0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810, 0xe12e13424bb40e14, 0x8cbccc096f5088cc, + 0xafebff0bcb24aaff, 0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd, 0xd6bf94d5e57a42bd, + 0x8637bd05af6c69b6, 0xa7c5ac471b478424, 0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b, + 0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000, 0xc800000000000000, 0xfa00000000000000, + 0x9c40000000000000, 0xc350000000000000, 0xf424000000000000, 0x9896800000000000, 0xbebc200000000000, + 0xee6b280000000000, 0x9502f90000000000, 0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000, + 0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000, 
0xb1a2bc2ec5000000, 0xde0b6b3a76400000, + 0x8ac7230489e80000, 0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000, 0xa968163f0a57b400, + 0xd3c21bcecceda100, 0x84595161401484a0, 0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940984, + 0xa18f07d736b90be5, 0xc9f2c9cd04674ede, 0xfc6f7c4045812296, 0x9dc5ada82b70b59d, 0xc5371912364ce305, + 0xf684df56c3e01bc6, 0x9a130b963a6c115c, 0xc097ce7bc90715b3, 0xf0bdc21abb48db20, 0x96769950b50d88f4, + 0xbc143fa4e250eb31, 0xeb194f8e1ae525fd, 0x92efd1b8d0cf37be, 0xb7abc627050305ad, 0xe596b7b0c643c719, + 0x8f7e32ce7bea5c6f, 0xb35dbf821ae4f38b, 0xe0352f62a19e306e}; + return pow10_significands[k - float_info::min_k]; + } + + static carrier_uint compute_mul(carrier_uint u, const cache_entry_type &cache) FMT_NOEXCEPT { + return umul96_upper32(u, cache); + } + + static uint32_t compute_delta(const cache_entry_type &cache, int beta_minus_1) FMT_NOEXCEPT { + return static_cast(cache >> (64 - 1 - beta_minus_1)); + } + + static bool compute_mul_parity(carrier_uint two_f, const cache_entry_type &cache, int beta_minus_1) FMT_NOEXCEPT { + FMT_ASSERT(beta_minus_1 >= 1, ""); + FMT_ASSERT(beta_minus_1 < 64, ""); + + return ((umul96_lower64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; + } + + static carrier_uint compute_left_endpoint_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return static_cast((cache - (cache >> (float_info::significand_bits + 2))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1)); + } + + static carrier_uint compute_right_endpoint_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return static_cast((cache + (cache >> (float_info::significand_bits + 1))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1)); + } + + static carrier_uint compute_round_up_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return (static_cast(cache >> (64 - float_info::significand_bits 
- 2 - beta_minus_1)) + 1) / + 2; + } }; -template <> struct cache_accessor { - using carrier_uint = float_info::carrier_uint; - using cache_entry_type = uint128_wrapper; +template <> +struct cache_accessor { + using carrier_uint = float_info::carrier_uint; + using cache_entry_type = uint128_wrapper; - static uint128_wrapper get_cached_power(int k) FMT_NOEXCEPT { - FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, - "k is out of range"); + static uint128_wrapper get_cached_power(int k) FMT_NOEXCEPT { + FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, "k is out of range"); - static constexpr const uint128_wrapper pow10_significands[] = { + static constexpr const uint128_wrapper pow10_significands[] = { #if FMT_USE_FULL_CACHE_DRAGONBOX - {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, - {0x9faacf3df73609b1, 0x77b191618c54e9ad}, - {0xc795830d75038c1d, 0xd59df5b9ef6a2418}, - {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e}, - {0x9becce62836ac577, 0x4ee367f9430aec33}, - {0xc2e801fb244576d5, 0x229c41f793cda740}, - {0xf3a20279ed56d48a, 0x6b43527578c11110}, - {0x9845418c345644d6, 0x830a13896b78aaaa}, - {0xbe5691ef416bd60c, 0x23cc986bc656d554}, - {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9}, - {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa}, - {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54}, - {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69}, - {0x91376c36d99995be, 0x23100809b9c21fa2}, - {0xb58547448ffffb2d, 0xabd40a0c2832a78b}, - {0xe2e69915b3fff9f9, 0x16c90c8f323f516d}, - {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4}, - {0xb1442798f49ffb4a, 0x99cd11cfdf41779d}, - {0xdd95317f31c7fa1d, 0x40405643d711d584}, - {0x8a7d3eef7f1cfc52, 0x482835ea666b2573}, - {0xad1c8eab5ee43b66, 0xda3243650005eed0}, - {0xd863b256369d4a40, 0x90bed43e40076a83}, - {0x873e4f75e2224e68, 0x5a7744a6e804a292}, - {0xa90de3535aaae202, 0x711515d0a205cb37}, - {0xd3515c2831559a83, 0x0d5a5b44ca873e04}, - {0x8412d9991ed58091, 0xe858790afe9486c3}, - {0xa5178fff668ae0b6, 0x626e974dbe39a873}, - {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, - 
{0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, - {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, - {0xc987434744ac874e, 0xa327ffb266b56221}, - {0xfbe9141915d7a922, 0x4bf1ff9f0062baa9}, - {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, - {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, - {0xf6019da07f549b2b, 0x7e2a53a146606a49}, - {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, - {0xc0314325637a1939, 0xfa911155fefb5309}, - {0xf03d93eebc589f88, 0x793555ab7eba27cb}, - {0x96267c7535b763b5, 0x4bc1558b2f3458df}, - {0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, - {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, - {0x92a1958a7675175f, 0x0bfacd89ec191eca}, - {0xb749faed14125d36, 0xcef980ec671f667c}, - {0xe51c79a85916f484, 0x82b7e12780e7401b}, - {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, - {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, - {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, - {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, - {0xaecc49914078536d, 0x58fae9f773886e19}, - {0xda7f5bf590966848, 0xaf39a475506a899f}, - {0x888f99797a5e012d, 0x6d8406c952429604}, - {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, - {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, - {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, - {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, - {0xd0601d8efc57b08b, 0xf13b94daf124da27}, - {0x823c12795db6ce57, 0x76c53d08d6b70859}, - {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, - {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, - {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, - {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, - {0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, - {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, - {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, - {0xc21094364dfb5636, 0x985915fc12f542e5}, - {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, - {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, - {0xbd8430bd08277231, 0x50c6ff782a838354}, - {0xece53cec4a314ebd, 0xa4f8bf5635246429}, - {0x940f4613ae5ed136, 0x871b7795e136be9a}, - {0xb913179899f68584, 0x28e2557b59846e40}, - {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, - {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, - 
{0xb4bca50b065abe63, 0x0fed077a756b53aa}, - {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, - {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, - {0xb080392cc4349dec, 0xbd8d794d96aacfb4}, - {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, - {0x89e42caaf9491b60, 0xf41686c49db57245}, - {0xac5d37d5b79b6239, 0x311c2875c522ced6}, - {0xd77485cb25823ac7, 0x7d633293366b828c}, - {0x86a8d39ef77164bc, 0xae5dff9c02033198}, - {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, - {0xd267caa862a12d66, 0xd072df63c324fd7c}, - {0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, - {0xa46116538d0deb78, 0x52d9be85f074e609}, - {0xcd795be870516656, 0x67902e276c921f8c}, - {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, - {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, - {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, - {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, - {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, - {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, - {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, - {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, - {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, - {0xef340a98172aace4, 0x86fb897116c87c35}, - {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, - {0xbae0a846d2195712, 0x8974836059cca10a}, - {0xe998d258869facd7, 0x2bd1a438703fc94c}, - {0x91ff83775423cc06, 0x7b6306a34627ddd0}, - {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, - {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, - {0x8e938662882af53e, 0x547eb47b7282ee9d}, - {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, - {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, - {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, - {0xae0b158b4738705e, 0x9624ab50b148d446}, - {0xd98ddaee19068c76, 0x3badd624dd9b0958}, - {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, - {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, - {0xd47487cc8470652b, 0x7647c32000696720}, - {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, - {0xa5fb0a17c777cf09, 0xf468107100525891}, - {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, - {0x81ac1fe293d599bf, 0xc6f14cd848405531}, - {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, - {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, - 
{0xfd442e4688bd304a, 0x908f4a166d1da664}, - {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, - {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, - {0xf7549530e188c128, 0xd12bee59e68ef47d}, - {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, - {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, - {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, - {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, - {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, - {0xebdf661791d60f56, 0x111b495b3464ad22}, - {0x936b9fcebb25c995, 0xcab10dd900beec35}, - {0xb84687c269ef3bfb, 0x3d5d514f40eea743}, - {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, - {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, - {0xb3f4e093db73a093, 0x59ed216765690f57}, - {0xe0f218b8d25088b8, 0x306869c13ec3532d}, - {0x8c974f7383725573, 0x1e414218c73a13fc}, - {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, - {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, - {0x894bc396ce5da772, 0x6b8bba8c328eb784}, - {0xab9eb47c81f5114f, 0x066ea92f3f326565}, - {0xd686619ba27255a2, 0xc80a537b0efefebe}, - {0x8613fd0145877585, 0xbd06742ce95f5f37}, - {0xa798fc4196e952e7, 0x2c48113823b73705}, - {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, - {0x82ef85133de648c4, 0x9a984d73dbe722fc}, - {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, - {0xcc963fee10b7d1b3, 0x318df905079926a9}, - {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, - {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, - {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, - {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, - {0x9c1661a651213e2d, 0x06bea10ca65c084f}, - {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, - {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, - {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, - {0xbe89523386091465, 0xf6bbb397f1135824}, - {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, - {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, - {0xba121a4650e4ddeb, 0x92f34d62616ce414}, - {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, - {0x915e2486ef32cd60, 0x0ace1474dc1d122f}, - {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, - {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, - {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, - 
{0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, - {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, - {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, - {0xad4ab7112eb3929d, 0x86c16c98d2c953c7}, - {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, - {0x87625f056c7c4a8b, 0x11471cd764ad4973}, - {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, - {0xd389b47879823479, 0x4aff1d108d4ec2c4}, - {0x843610cb4bf160cb, 0xcedf722a585139bb}, - {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, - {0xce947a3da6a9273e, 0x733d226229feea33}, - {0x811ccc668829b887, 0x0806357d5a3f5260}, - {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, - {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, - {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, - {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, - {0xc5029163f384a931, 0x0a9e795e65d4df12}, - {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, - {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, - {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, - {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, - {0x964e858c91ba2655, 0x3a6a07f8d510f870}, - {0xbbe226efb628afea, 0x890489f70a55368c}, - {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, - {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, - {0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, - {0xe55990879ddcaabd, 0xcc420a6a101d0516}, - {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, - {0xb32df8e9f3546564, 0x47939822dc96abfa}, - {0xdff9772470297ebd, 0x59787e2b93bc56f8}, - {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, - {0xaefae51477a06b03, 0xede622920b6b23f2}, - {0xdab99e59958885c4, 0xe95fab368e45ecee}, - {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, - {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, - {0xd59944a37c0752a2, 0x4be76d3346f04960}, - {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, - {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, - {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, - {0x825ecc24c873782f, 0x8ed400668c0c28c9}, - {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, - {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, - {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, - {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, - {0xc6ede63fa05d3143, 0x91503d1c79720dbc}, - 
{0xf8a95fcf88747d94, 0x75a44c6397ce912b}, - {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, - {0xc24452da229b021b, 0xfbe85badce996169}, - {0xf2d56790ab41c2a2, 0xfae27299423fb9c4}, - {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, - {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, - {0xed246723473e3813, 0x290123e9aab23b69}, - {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, - {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, - {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, - {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, - {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, - {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, - {0x8d590723948a535f, 0x579c487e5a38ad0f}, - {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, - {0xdcdb1b2798182244, 0xf8e431456cf88e66}, - {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, - {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, - {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, - {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, - {0xa87fea27a539e9a5, 0x3f2398d747b36225}, - {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, - {0x83a3eeeef9153e89, 0x1953cf68300424ad}, - {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, - {0xcdb02555653131b6, 0x3792f412cb06794e}, - {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, - {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, - {0xc8de047564d20a8b, 0xf245825a5a445276}, - {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, - {0x9ced737bb6c4183d, 0x55464dd69685606c}, - {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, - {0xf53304714d9265df, 0xd53dd99f4b3066a9}, - {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, - {0xbf8fdb78849a5f96, 0xde98520472bdd034}, - {0xef73d256a5c0f77c, 0x963e66858f6d4441}, - {0x95a8637627989aad, 0xdde7001379a44aa9}, - {0xbb127c53b17ec159, 0x5560c018580d5d53}, - {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, - {0x9226712162ab070d, 0xcab3961304ca70e9}, - {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, - {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, - {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, - {0xb267ed1940f1c61c, 0x55f038b237591ed4}, - {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, - {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, - 
{0xae397d8aa96c1b77, 0xabec975e0a0d081b}, - {0xd9c7dced53c72255, 0x96e7bd358c904a22}, - {0x881cea14545c7575, 0x7e50d64177da2e55}, - {0xaa242499697392d2, 0xdde50bd1d5d0b9ea}, - {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, - {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, - {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, - {0xcfb11ead453994ba, 0x67de18eda5814af3}, - {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, - {0xa2425ff75e14fc31, 0xa1258379a94d028e}, - {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, - {0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, - {0x9e74d1b791e07e48, 0x775ea264cf55347e}, - {0xc612062576589dda, 0x95364afe032a819e}, - {0xf79687aed3eec551, 0x3a83ddbd83f52205}, - {0x9abe14cd44753b52, 0xc4926a9672793543}, - {0xc16d9a0095928a27, 0x75b7053c0f178294}, - {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, - {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, - {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, - {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, - {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, - {0xb877aa3236a4b449, 0x09befeb9fad487c3}, - {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, - {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, - {0xb424dc35095cd80f, 0x538484c19ef38c95}, - {0xe12e13424bb40e13, 0x2865a5f206b06fba}, - {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, - {0xafebff0bcb24aafe, 0xf78f69a51539d749}, - {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, - {0x89705f4136b4a597, 0x31680a88f8953031}, - {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, - {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, - {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, - {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, - {0xd1b71758e219652b, 0xd3c36113404ea4a9}, - {0x83126e978d4fdf3b, 0x645a1cac083126ea}, - {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, - {0xcccccccccccccccc, 0xcccccccccccccccd}, - {0x8000000000000000, 0x0000000000000000}, - {0xa000000000000000, 0x0000000000000000}, - {0xc800000000000000, 0x0000000000000000}, - {0xfa00000000000000, 0x0000000000000000}, - {0x9c40000000000000, 0x0000000000000000}, - {0xc350000000000000, 0x0000000000000000}, - 
{0xf424000000000000, 0x0000000000000000}, - {0x9896800000000000, 0x0000000000000000}, - {0xbebc200000000000, 0x0000000000000000}, - {0xee6b280000000000, 0x0000000000000000}, - {0x9502f90000000000, 0x0000000000000000}, - {0xba43b74000000000, 0x0000000000000000}, - {0xe8d4a51000000000, 0x0000000000000000}, - {0x9184e72a00000000, 0x0000000000000000}, - {0xb5e620f480000000, 0x0000000000000000}, - {0xe35fa931a0000000, 0x0000000000000000}, - {0x8e1bc9bf04000000, 0x0000000000000000}, - {0xb1a2bc2ec5000000, 0x0000000000000000}, - {0xde0b6b3a76400000, 0x0000000000000000}, - {0x8ac7230489e80000, 0x0000000000000000}, - {0xad78ebc5ac620000, 0x0000000000000000}, - {0xd8d726b7177a8000, 0x0000000000000000}, - {0x878678326eac9000, 0x0000000000000000}, - {0xa968163f0a57b400, 0x0000000000000000}, - {0xd3c21bcecceda100, 0x0000000000000000}, - {0x84595161401484a0, 0x0000000000000000}, - {0xa56fa5b99019a5c8, 0x0000000000000000}, - {0xcecb8f27f4200f3a, 0x0000000000000000}, - {0x813f3978f8940984, 0x4000000000000000}, - {0xa18f07d736b90be5, 0x5000000000000000}, - {0xc9f2c9cd04674ede, 0xa400000000000000}, - {0xfc6f7c4045812296, 0x4d00000000000000}, - {0x9dc5ada82b70b59d, 0xf020000000000000}, - {0xc5371912364ce305, 0x6c28000000000000}, - {0xf684df56c3e01bc6, 0xc732000000000000}, - {0x9a130b963a6c115c, 0x3c7f400000000000}, - {0xc097ce7bc90715b3, 0x4b9f100000000000}, - {0xf0bdc21abb48db20, 0x1e86d40000000000}, - {0x96769950b50d88f4, 0x1314448000000000}, - {0xbc143fa4e250eb31, 0x17d955a000000000}, - {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, - {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, - {0xb7abc627050305ad, 0xf14a3d9e40000000}, - {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, - {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, - {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, - {0xe0352f62a19e306e, 0xd50b2037ad200000}, - {0x8c213d9da502de45, 0x4526f422cc340000}, - {0xaf298d050e4395d6, 0x9670b12b7f410000}, - {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, - {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, - 
{0xab0e93b6efee0053, 0x8eea0d047a457a00}, - {0xd5d238a4abe98068, 0x72a4904598d6d880}, - {0x85a36366eb71f041, 0x47a6da2b7f864750}, - {0xa70c3c40a64e6c51, 0x999090b65f67d924}, - {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, - {0x82818f1281ed449f, 0xbff8f10e7a8921a4}, - {0xa321f2d7226895c7, 0xaff72d52192b6a0d}, - {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490}, - {0xfee50b7025c36a08, 0x02f236d04753d5b4}, - {0x9f4f2726179a2245, 0x01d762422c946590}, - {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5}, - {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2}, - {0x9b934c3b330c8577, 0x63cc55f49f88eb2f}, - {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb}, - {0xf316271c7fc3908a, 0x8bef464e3945ef7a}, - {0x97edd871cfda3a56, 0x97758bf0e3cbb5ac}, - {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317}, - {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd}, - {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a}, - {0xb975d6b6ee39e436, 0xb3e2fd538e122b44}, - {0xe7d34c64a9c85d44, 0x60dbbca87196b616}, - {0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd}, - {0xb51d13aea4a488dd, 0x6babab6398bdbe41}, - {0xe264589a4dcdab14, 0xc696963c7eed2dd1}, - {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2}, - {0xb0de65388cc8ada8, 0x3b25a55f43294bcb}, - {0xdd15fe86affad912, 0x49ef0eb713f39ebe}, - {0x8a2dbf142dfcc7ab, 0x6e3569326c784337}, - {0xacb92ed9397bf996, 0x49c2c37f07965404}, - {0xd7e77a8f87daf7fb, 0xdc33745ec97be906}, - {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3}, - {0xa8acd7c0222311bc, 0xc40832ea0d68ce0c}, - {0xd2d80db02aabd62b, 0xf50a3fa490c30190}, - {0x83c7088e1aab65db, 0x792667c6da79e0fa}, - {0xa4b8cab1a1563f52, 0x577001b891185938}, - {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, - {0x80b05e5ac60b6178, 0x544f8158315b05b4}, - {0xa0dc75f1778e39d6, 0x696361ae3db1c721}, - {0xc913936dd571c84c, 0x03bc3a19cd1e38e9}, - {0xfb5878494ace3a5f, 0x04ab48a04065c723}, - {0x9d174b2dcec0e47b, 0x62eb0d64283f9c76}, - {0xc45d1df942711d9a, 0x3ba5d0bd324f8394}, - {0xf5746577930d6500, 0xca8f44ec7ee36479}, - {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb}, - {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e}, - 
{0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e}, - {0x95d04aee3b80ece5, 0xbba1f1d158724a12}, - {0xbb445da9ca61281f, 0x2a8a6e45ae8edc97}, - {0xea1575143cf97226, 0xf52d09d71a3293bd}, - {0x924d692ca61be758, 0x593c2626705f9c56}, - {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c}, - {0xe498f455c38b997a, 0x0b6dfb9c0f956447}, - {0x8edf98b59a373fec, 0x4724bd4189bd5eac}, - {0xb2977ee300c50fe7, 0x58edec91ec2cb657}, - {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed}, - {0x8b865b215899f46c, 0xbd79e0d20082ee74}, - {0xae67f1e9aec07187, 0xecd8590680a3aa11}, - {0xda01ee641a708de9, 0xe80e6f4820cc9495}, - {0x884134fe908658b2, 0x3109058d147fdcdd}, - {0xaa51823e34a7eede, 0xbd4b46f0599fd415}, - {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a}, - {0x850fadc09923329e, 0x03e2cf6bc604ddb0}, - {0xa6539930bf6bff45, 0x84db8346b786151c}, - {0xcfe87f7cef46ff16, 0xe612641865679a63}, - {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e}, - {0xa26da3999aef7749, 0xe3be5e330f38f09d}, - {0xcb090c8001ab551c, 0x5cadf5bfd3072cc5}, - {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6}, - {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa}, - {0xc646d63501a1511d, 0xb281e1fd541501b8}, - {0xf7d88bc24209a565, 0x1f225a7ca91a4226}, - {0x9ae757596946075f, 0x3375788de9b06958}, - {0xc1a12d2fc3978937, 0x0052d6b1641c83ae}, - {0xf209787bb47d6b84, 0xc0678c5dbd23a49a}, - {0x9745eb4d50ce6332, 0xf840b7ba963646e0}, - {0xbd176620a501fbff, 0xb650e5a93bc3d898}, - {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe}, - {0x93ba47c980e98cdf, 0xc66f336c36b10137}, - {0xb8a8d9bbe123f017, 0xb80b0047445d4184}, - {0xe6d3102ad96cec1d, 0xa60dc059157491e5}, - {0x9043ea1ac7e41392, 0x87c89837ad68db2f}, - {0xb454e4a179dd1877, 0x29babe4598c311fb}, - {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a}, - {0x8ce2529e2734bb1d, 0x1899e4a65f58660c}, - {0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f}, - {0xdc21a1171d42645d, 0x76707543f4fa1f73}, - {0x899504ae72497eba, 0x6a06494a791c53a8}, - {0xabfa45da0edbde69, 0x0487db9d17636892}, - {0xd6f8d7509292d603, 0x45a9d2845d3c42b6}, - {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, - 
{0xa7f26836f282b732, 0x8e6cac7768d7141e}, - {0xd1ef0244af2364ff, 0x3207d795430cd926}, - {0x8335616aed761f1f, 0x7f44e6bd49e807b8}, - {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6}, - {0xcd036837130890a1, 0x36dba887c37a8c0f}, - {0x802221226be55a64, 0xc2494954da2c9789}, - {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c}, - {0xc83553c5c8965d3d, 0x6f92829494e5acc7}, - {0xfa42a8b73abbf48c, 0xcb772339ba1f17f9}, - {0x9c69a97284b578d7, 0xff2a760414536efb}, - {0xc38413cf25e2d70d, 0xfef5138519684aba}, - {0xf46518c2ef5b8cd1, 0x7eb258665fc25d69}, - {0x98bf2f79d5993802, 0xef2f773ffbd97a61}, - {0xbeeefb584aff8603, 0xaafb550ffacfd8fa}, - {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38}, - {0x952ab45cfa97a0b2, 0xdd945a747bf26183}, - {0xba756174393d88df, 0x94f971119aeef9e4}, - {0xe912b9d1478ceb17, 0x7a37cd5601aab85d}, - {0x91abb422ccb812ee, 0xac62e055c10ab33a}, - {0xb616a12b7fe617aa, 0x577b986b314d6009}, - {0xe39c49765fdf9d94, 0xed5a7e85fda0b80b}, - {0x8e41ade9fbebc27d, 0x14588f13be847307}, - {0xb1d219647ae6b31c, 0x596eb2d8ae258fc8}, - {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb}, - {0x8aec23d680043bee, 0x25de7bb9480d5854}, - {0xada72ccc20054ae9, 0xaf561aa79a10ae6a}, - {0xd910f7ff28069da4, 0x1b2ba1518094da04}, - {0x87aa9aff79042286, 0x90fb44d2f05d0842}, - {0xa99541bf57452b28, 0x353a1607ac744a53}, - {0xd3fa922f2d1675f2, 0x42889b8997915ce8}, - {0x847c9b5d7c2e09b7, 0x69956135febada11}, - {0xa59bc234db398c25, 0x43fab9837e699095}, - {0xcf02b2c21207ef2e, 0x94f967e45e03f4bb}, - {0x8161afb94b44f57d, 0x1d1be0eebac278f5}, - {0xa1ba1ba79e1632dc, 0x6462d92a69731732}, - {0xca28a291859bbf93, 0x7d7b8f7503cfdcfe}, - {0xfcb2cb35e702af78, 0x5cda735244c3d43e}, - {0x9defbf01b061adab, 0x3a0888136afa64a7}, - {0xc56baec21c7a1916, 0x088aaa1845b8fdd0}, - {0xf6c69a72a3989f5b, 0x8aad549e57273d45}, - {0x9a3c2087a63f6399, 0x36ac54e2f678864b}, - {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd}, - {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5}, - {0x969eb7c47859e743, 0x9f644ae5a4b1b325}, - {0xbc4665b596706114, 0x873d5d9f0dde1fee}, - 
{0xeb57ff22fc0c7959, 0xa90cb506d155a7ea}, - {0x9316ff75dd87cbd8, 0x09a7f12442d588f2}, - {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb2f}, - {0xe5d3ef282a242e81, 0x8f1668c8a86da5fa}, - {0x8fa475791a569d10, 0xf96e017d694487bc}, - {0xb38d92d760ec4455, 0x37c981dcc395a9ac}, - {0xe070f78d3927556a, 0x85bbe253f47b1417}, - {0x8c469ab843b89562, 0x93956d7478ccec8e}, - {0xaf58416654a6babb, 0x387ac8d1970027b2}, - {0xdb2e51bfe9d0696a, 0x06997b05fcc0319e}, - {0x88fcf317f22241e2, 0x441fece3bdf81f03}, - {0xab3c2fddeeaad25a, 0xd527e81cad7626c3}, - {0xd60b3bd56a5586f1, 0x8a71e223d8d3b074}, - {0x85c7056562757456, 0xf6872d5667844e49}, - {0xa738c6bebb12d16c, 0xb428f8ac016561db}, - {0xd106f86e69d785c7, 0xe13336d701beba52}, - {0x82a45b450226b39c, 0xecc0024661173473}, - {0xa34d721642b06084, 0x27f002d7f95d0190}, - {0xcc20ce9bd35c78a5, 0x31ec038df7b441f4}, - {0xff290242c83396ce, 0x7e67047175a15271}, - {0x9f79a169bd203e41, 0x0f0062c6e984d386}, - {0xc75809c42c684dd1, 0x52c07b78a3e60868}, - {0xf92e0c3537826145, 0xa7709a56ccdf8a82}, - {0x9bbcc7a142b17ccb, 0x88a66076400bb691}, - {0xc2abf989935ddbfe, 0x6acff893d00ea435}, - {0xf356f7ebf83552fe, 0x0583f6b8c4124d43}, - {0x98165af37b2153de, 0xc3727a337a8b704a}, - {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c}, - {0xeda2ee1c7064130c, 0x1162def06f79df73}, - {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8}, - {0xb9a74a0637ce2ee1, 0x6d953e2bd7173692}, - {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437}, - {0x910ab1d4db9914a0, 0x1d9c9892400a22a2}, - {0xb54d5e4a127f59c8, 0x2503beb6d00cab4b}, - {0xe2a0b5dc971f303a, 0x2e44ae64840fd61d}, - {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, - {0xb10d8e1456105dad, 0x7425a83e872c5f47}, - {0xdd50f1996b947518, 0xd12f124e28f77719}, - {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f}, - {0xace73cbfdc0bfb7b, 0x636cc64d1001550b}, - {0xd8210befd30efa5a, 0x3c47f7e05401aa4e}, - {0x8714a775e3e95c78, 0x65acfaec34810a71}, - {0xa8d9d1535ce3b396, 0x7f1839a741a14d0d}, - {0xd31045a8341ca07c, 0x1ede48111209a050}, - {0x83ea2b892091e44d, 0x934aed0aab460432}, - 
{0xa4e4b66b68b65d60, 0xf81da84d5617853f}, - {0xce1de40642e3f4b9, 0x36251260ab9d668e}, - {0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019}, - {0xa1075a24e4421730, 0xb24cf65b8612f81f}, - {0xc94930ae1d529cfc, 0xdee033f26797b627}, - {0xfb9b7cd9a4a7443c, 0x169840ef017da3b1}, - {0x9d412e0806e88aa5, 0x8e1f289560ee864e}, - {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2}, - {0xf5b5d7ec8acb58a2, 0xae10af696774b1db}, - {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29}, - {0xbff610b0cc6edd3f, 0x17fd090a58d32af3}, - {0xeff394dcff8a948e, 0xddfc4b4cef07f5b0}, - {0x95f83d0a1fb69cd9, 0x4abdaf101564f98e}, - {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1}, - {0xea53df5fd18d5513, 0x84c86189216dc5ed}, - {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4}, - {0xb7118682dbb66a77, 0x3fbc8c33221dc2a1}, - {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, - {0x8f05b1163ba6832d, 0x29cb4d87f2a7400e}, - {0xb2c71d5bca9023f8, 0x743e20e9ef511012}, - {0xdf78e4b2bd342cf6, 0x914da9246b255416}, - {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e}, - {0xae9672aba3d0c320, 0xa184ac2473b529b1}, - {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e}, - {0x8865899617fb1871, 0x7e2fa67c7a658892}, - {0xaa7eebfb9df9de8d, 0xddbb901b98feeab7}, - {0xd51ea6fa85785631, 0x552a74227f3ea565}, - {0x8533285c936b35de, 0xd53a88958f87275f}, - {0xa67ff273b8460356, 0x8a892abaf368f137}, - {0xd01fef10a657842c, 0x2d2b7569b0432d85}, - {0x8213f56a67f6b29b, 0x9c3b29620e29fc73}, - {0xa298f2c501f45f42, 0x8349f3ba91b47b8f}, - {0xcb3f2f7642717713, 0x241c70a936219a73}, - {0xfe0efb53d30dd4d7, 0xed238cd383aa0110}, - {0x9ec95d1463e8a506, 0xf4363804324a40aa}, - {0xc67bb4597ce2ce48, 0xb143c6053edcd0d5}, - {0xf81aa16fdc1b81da, 0xdd94b7868e94050a}, - {0x9b10a4e5e9913128, 0xca7cf2b4191c8326}, - {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0}, - {0xf24a01a73cf2dccf, 0xbc633b39673c8cec}, - {0x976e41088617ca01, 0xd5be0503e085d813}, - {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18}, - {0xec9c459d51852ba2, 0xddf8e7d60ed1219e}, - {0x93e1ab8252f33b45, 0xcabb90e5c942b503}, - {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, - 
{0xe7109bfba19c0c9d, 0x0cc512670a783ad4}, - {0x906a617d450187e2, 0x27fb2b80668b24c5}, - {0xb484f9dc9641e9da, 0xb1f9f660802dedf6}, - {0xe1a63853bbd26451, 0x5e7873f8a0396973}, - {0x8d07e33455637eb2, 0xdb0b487b6423e1e8}, - {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62}, - {0xdc5c5301c56b75f7, 0x7641a140cc7810fb}, - {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d}, - {0xac2820d9623bf429, 0x546345fa9fbdcd44}, - {0xd732290fbacaf133, 0xa97c177947ad4095}, - {0x867f59a9d4bed6c0, 0x49ed8eabcccc485d}, - {0xa81f301449ee8c70, 0x5c68f256bfff5a74}, - {0xd226fc195c6a2f8c, 0x73832eec6fff3111}, - {0x83585d8fd9c25db7, 0xc831fd53c5ff7eab}, - {0xa42e74f3d032f525, 0xba3e7ca8b77f5e55}, - {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb}, - {0x80444b5e7aa7cf85, 0x7980d163cf5b81b3}, - {0xa0555e361951c366, 0xd7e105bcc332621f}, - {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7}, - {0xfa856334878fc150, 0xb14f98f6f0feb951}, - {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3}, - {0xc3b8358109e84f07, 0x0a862f80ec4700c8}, - {0xf4a642e14c6262c8, 0xcd27bb612758c0fa}, - {0x98e7e9cccfbd7dbd, 0x8038d51cb897789c}, - {0xbf21e44003acdd2c, 0xe0470a63e6bd56c3}, - {0xeeea5d5004981478, 0x1858ccfce06cac74}, - {0x95527a5202df0ccb, 0x0f37801e0c43ebc8}, - {0xbaa718e68396cffd, 0xd30560258f54e6ba}, - {0xe950df20247c83fd, 0x47c6b82ef32a2069}, - {0x91d28b7416cdd27e, 0x4cdc331d57fa5441}, - {0xb6472e511c81471d, 0xe0133fe4adf8e952}, - {0xe3d8f9e563a198e5, 0x58180fddd97723a6}, - {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648}, - {0xb201833b35d63f73, 0x2cd2cc6551e513da}, - {0xde81e40a034bcf4f, 0xf8077f7ea65e58d1}, - {0x8b112e86420f6191, 0xfb04afaf27faf782}, - {0xadd57a27d29339f6, 0x79c5db9af1f9b563}, - {0xd94ad8b1c7380874, 0x18375281ae7822bc}, - {0x87cec76f1c830548, 0x8f2293910d0b15b5}, - {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb22}, - {0xd433179d9c8cb841, 0x5fa60692a46151eb}, - {0x849feec281d7f328, 0xdbc7c41ba6bcd333}, - {0xa5c7ea73224deff3, 0x12b9b522906c0800}, - {0xcf39e50feae16bef, 0xd768226b34870a00}, - {0x81842f29f2cce375, 0xe6a1158300d46640}, - 
{0xa1e53af46f801c53, 0x60495ae3c1097fd0}, - {0xca5e89b18b602368, 0x385bb19cb14bdfc4}, - {0xfcf62c1dee382c42, 0x46729e03dd9ed7b5}, - {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d1}, - {0xc5a05277621be293, 0xc7098b7305241885}, - { 0xf70867153aa2db38, - 0xb8cbee4fc66d1ea7 } + {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, + {0x9faacf3df73609b1, 0x77b191618c54e9ad}, + {0xc795830d75038c1d, 0xd59df5b9ef6a2418}, + {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e}, + {0x9becce62836ac577, 0x4ee367f9430aec33}, + {0xc2e801fb244576d5, 0x229c41f793cda740}, + {0xf3a20279ed56d48a, 0x6b43527578c11110}, + {0x9845418c345644d6, 0x830a13896b78aaaa}, + {0xbe5691ef416bd60c, 0x23cc986bc656d554}, + {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9}, + {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa}, + {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54}, + {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69}, + {0x91376c36d99995be, 0x23100809b9c21fa2}, + {0xb58547448ffffb2d, 0xabd40a0c2832a78b}, + {0xe2e69915b3fff9f9, 0x16c90c8f323f516d}, + {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4}, + {0xb1442798f49ffb4a, 0x99cd11cfdf41779d}, + {0xdd95317f31c7fa1d, 0x40405643d711d584}, + {0x8a7d3eef7f1cfc52, 0x482835ea666b2573}, + {0xad1c8eab5ee43b66, 0xda3243650005eed0}, + {0xd863b256369d4a40, 0x90bed43e40076a83}, + {0x873e4f75e2224e68, 0x5a7744a6e804a292}, + {0xa90de3535aaae202, 0x711515d0a205cb37}, + {0xd3515c2831559a83, 0x0d5a5b44ca873e04}, + {0x8412d9991ed58091, 0xe858790afe9486c3}, + {0xa5178fff668ae0b6, 0x626e974dbe39a873}, + {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, + {0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, + {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, + {0xc987434744ac874e, 0xa327ffb266b56221}, + {0xfbe9141915d7a922, 0x4bf1ff9f0062baa9}, + {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, + {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, + {0xf6019da07f549b2b, 0x7e2a53a146606a49}, + {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, + {0xc0314325637a1939, 0xfa911155fefb5309}, + {0xf03d93eebc589f88, 0x793555ab7eba27cb}, + {0x96267c7535b763b5, 0x4bc1558b2f3458df}, + 
{0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, + {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, + {0x92a1958a7675175f, 0x0bfacd89ec191eca}, + {0xb749faed14125d36, 0xcef980ec671f667c}, + {0xe51c79a85916f484, 0x82b7e12780e7401b}, + {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, + {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, + {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, + {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, + {0xaecc49914078536d, 0x58fae9f773886e19}, + {0xda7f5bf590966848, 0xaf39a475506a899f}, + {0x888f99797a5e012d, 0x6d8406c952429604}, + {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, + {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, + {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, + {0xd0601d8efc57b08b, 0xf13b94daf124da27}, + {0x823c12795db6ce57, 0x76c53d08d6b70859}, + {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, + {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, + {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, + {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, + {0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, + {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, + {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, + {0xc21094364dfb5636, 0x985915fc12f542e5}, + {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, + {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, + {0xbd8430bd08277231, 0x50c6ff782a838354}, + {0xece53cec4a314ebd, 0xa4f8bf5635246429}, + {0x940f4613ae5ed136, 0x871b7795e136be9a}, + {0xb913179899f68584, 0x28e2557b59846e40}, + {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, + {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, + {0xb4bca50b065abe63, 0x0fed077a756b53aa}, + {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, + {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, + {0xb080392cc4349dec, 0xbd8d794d96aacfb4}, + {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, + {0x89e42caaf9491b60, 0xf41686c49db57245}, + {0xac5d37d5b79b6239, 0x311c2875c522ced6}, + {0xd77485cb25823ac7, 0x7d633293366b828c}, + {0x86a8d39ef77164bc, 0xae5dff9c02033198}, + {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, + {0xd267caa862a12d66, 0xd072df63c324fd7c}, + 
{0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, + {0xa46116538d0deb78, 0x52d9be85f074e609}, + {0xcd795be870516656, 0x67902e276c921f8c}, + {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, + {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, + {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, + {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, + {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, + {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, + {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, + {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, + {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, + {0xef340a98172aace4, 0x86fb897116c87c35}, + {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, + {0xbae0a846d2195712, 0x8974836059cca10a}, + {0xe998d258869facd7, 0x2bd1a438703fc94c}, + {0x91ff83775423cc06, 0x7b6306a34627ddd0}, + {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, + {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, + {0x8e938662882af53e, 0x547eb47b7282ee9d}, + {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, + {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, + {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, + {0xae0b158b4738705e, 0x9624ab50b148d446}, + {0xd98ddaee19068c76, 0x3badd624dd9b0958}, + {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, + {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, + {0xd47487cc8470652b, 0x7647c32000696720}, + {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, + {0xa5fb0a17c777cf09, 0xf468107100525891}, + {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, + {0x81ac1fe293d599bf, 0xc6f14cd848405531}, + {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, + {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, + {0xfd442e4688bd304a, 0x908f4a166d1da664}, + {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, + {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, + {0xf7549530e188c128, 0xd12bee59e68ef47d}, + {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, + {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, + {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, + {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, + {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, + {0xebdf661791d60f56, 0x111b495b3464ad22}, + {0x936b9fcebb25c995, 0xcab10dd900beec35}, + 
{0xb84687c269ef3bfb, 0x3d5d514f40eea743}, + {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, + {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, + {0xb3f4e093db73a093, 0x59ed216765690f57}, + {0xe0f218b8d25088b8, 0x306869c13ec3532d}, + {0x8c974f7383725573, 0x1e414218c73a13fc}, + {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, + {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, + {0x894bc396ce5da772, 0x6b8bba8c328eb784}, + {0xab9eb47c81f5114f, 0x066ea92f3f326565}, + {0xd686619ba27255a2, 0xc80a537b0efefebe}, + {0x8613fd0145877585, 0xbd06742ce95f5f37}, + {0xa798fc4196e952e7, 0x2c48113823b73705}, + {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, + {0x82ef85133de648c4, 0x9a984d73dbe722fc}, + {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, + {0xcc963fee10b7d1b3, 0x318df905079926a9}, + {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, + {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, + {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, + {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, + {0x9c1661a651213e2d, 0x06bea10ca65c084f}, + {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, + {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, + {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, + {0xbe89523386091465, 0xf6bbb397f1135824}, + {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, + {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, + {0xba121a4650e4ddeb, 0x92f34d62616ce414}, + {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, + {0x915e2486ef32cd60, 0x0ace1474dc1d122f}, + {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, + {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, + {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, + {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, + {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, + {0xad4ab7112eb3929d, 0x86c16c98d2c953c7}, + {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, + {0x87625f056c7c4a8b, 0x11471cd764ad4973}, + {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, + {0xd389b47879823479, 0x4aff1d108d4ec2c4}, + {0x843610cb4bf160cb, 0xcedf722a585139bb}, + {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, + {0xce947a3da6a9273e, 0x733d226229feea33}, + 
{0x811ccc668829b887, 0x0806357d5a3f5260}, + {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, + {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, + {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, + {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, + {0xc5029163f384a931, 0x0a9e795e65d4df12}, + {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, + {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, + {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, + {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, + {0x964e858c91ba2655, 0x3a6a07f8d510f870}, + {0xbbe226efb628afea, 0x890489f70a55368c}, + {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, + {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, + {0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, + {0xe55990879ddcaabd, 0xcc420a6a101d0516}, + {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, + {0xb32df8e9f3546564, 0x47939822dc96abfa}, + {0xdff9772470297ebd, 0x59787e2b93bc56f8}, + {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, + {0xaefae51477a06b03, 0xede622920b6b23f2}, + {0xdab99e59958885c4, 0xe95fab368e45ecee}, + {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, + {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, + {0xd59944a37c0752a2, 0x4be76d3346f04960}, + {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, + {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, + {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, + {0x825ecc24c873782f, 0x8ed400668c0c28c9}, + {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, + {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, + {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, + {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, + {0xc6ede63fa05d3143, 0x91503d1c79720dbc}, + {0xf8a95fcf88747d94, 0x75a44c6397ce912b}, + {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, + {0xc24452da229b021b, 0xfbe85badce996169}, + {0xf2d56790ab41c2a2, 0xfae27299423fb9c4}, + {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, + {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, + {0xed246723473e3813, 0x290123e9aab23b69}, + {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, + {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, + {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, + {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, + 
{0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, + {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, + {0x8d590723948a535f, 0x579c487e5a38ad0f}, + {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, + {0xdcdb1b2798182244, 0xf8e431456cf88e66}, + {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, + {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, + {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, + {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, + {0xa87fea27a539e9a5, 0x3f2398d747b36225}, + {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, + {0x83a3eeeef9153e89, 0x1953cf68300424ad}, + {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, + {0xcdb02555653131b6, 0x3792f412cb06794e}, + {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, + {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, + {0xc8de047564d20a8b, 0xf245825a5a445276}, + {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, + {0x9ced737bb6c4183d, 0x55464dd69685606c}, + {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, + {0xf53304714d9265df, 0xd53dd99f4b3066a9}, + {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, + {0xbf8fdb78849a5f96, 0xde98520472bdd034}, + {0xef73d256a5c0f77c, 0x963e66858f6d4441}, + {0x95a8637627989aad, 0xdde7001379a44aa9}, + {0xbb127c53b17ec159, 0x5560c018580d5d53}, + {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, + {0x9226712162ab070d, 0xcab3961304ca70e9}, + {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, + {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, + {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, + {0xb267ed1940f1c61c, 0x55f038b237591ed4}, + {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, + {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, + {0xae397d8aa96c1b77, 0xabec975e0a0d081b}, + {0xd9c7dced53c72255, 0x96e7bd358c904a22}, + {0x881cea14545c7575, 0x7e50d64177da2e55}, + {0xaa242499697392d2, 0xdde50bd1d5d0b9ea}, + {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, + {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, + {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, + {0xcfb11ead453994ba, 0x67de18eda5814af3}, + {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, + {0xa2425ff75e14fc31, 0xa1258379a94d028e}, + {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, + 
{0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, + {0x9e74d1b791e07e48, 0x775ea264cf55347e}, + {0xc612062576589dda, 0x95364afe032a819e}, + {0xf79687aed3eec551, 0x3a83ddbd83f52205}, + {0x9abe14cd44753b52, 0xc4926a9672793543}, + {0xc16d9a0095928a27, 0x75b7053c0f178294}, + {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, + {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, + {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, + {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, + {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, + {0xb877aa3236a4b449, 0x09befeb9fad487c3}, + {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, + {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, + {0xb424dc35095cd80f, 0x538484c19ef38c95}, + {0xe12e13424bb40e13, 0x2865a5f206b06fba}, + {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, + {0xafebff0bcb24aafe, 0xf78f69a51539d749}, + {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, + {0x89705f4136b4a597, 0x31680a88f8953031}, + {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, + {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, + {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, + {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, + {0xd1b71758e219652b, 0xd3c36113404ea4a9}, + {0x83126e978d4fdf3b, 0x645a1cac083126ea}, + {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, + {0xcccccccccccccccc, 0xcccccccccccccccd}, + {0x8000000000000000, 0x0000000000000000}, + {0xa000000000000000, 0x0000000000000000}, + {0xc800000000000000, 0x0000000000000000}, + {0xfa00000000000000, 0x0000000000000000}, + {0x9c40000000000000, 0x0000000000000000}, + {0xc350000000000000, 0x0000000000000000}, + {0xf424000000000000, 0x0000000000000000}, + {0x9896800000000000, 0x0000000000000000}, + {0xbebc200000000000, 0x0000000000000000}, + {0xee6b280000000000, 0x0000000000000000}, + {0x9502f90000000000, 0x0000000000000000}, + {0xba43b74000000000, 0x0000000000000000}, + {0xe8d4a51000000000, 0x0000000000000000}, + {0x9184e72a00000000, 0x0000000000000000}, + {0xb5e620f480000000, 0x0000000000000000}, + {0xe35fa931a0000000, 0x0000000000000000}, + {0x8e1bc9bf04000000, 0x0000000000000000}, + 
{0xb1a2bc2ec5000000, 0x0000000000000000}, + {0xde0b6b3a76400000, 0x0000000000000000}, + {0x8ac7230489e80000, 0x0000000000000000}, + {0xad78ebc5ac620000, 0x0000000000000000}, + {0xd8d726b7177a8000, 0x0000000000000000}, + {0x878678326eac9000, 0x0000000000000000}, + {0xa968163f0a57b400, 0x0000000000000000}, + {0xd3c21bcecceda100, 0x0000000000000000}, + {0x84595161401484a0, 0x0000000000000000}, + {0xa56fa5b99019a5c8, 0x0000000000000000}, + {0xcecb8f27f4200f3a, 0x0000000000000000}, + {0x813f3978f8940984, 0x4000000000000000}, + {0xa18f07d736b90be5, 0x5000000000000000}, + {0xc9f2c9cd04674ede, 0xa400000000000000}, + {0xfc6f7c4045812296, 0x4d00000000000000}, + {0x9dc5ada82b70b59d, 0xf020000000000000}, + {0xc5371912364ce305, 0x6c28000000000000}, + {0xf684df56c3e01bc6, 0xc732000000000000}, + {0x9a130b963a6c115c, 0x3c7f400000000000}, + {0xc097ce7bc90715b3, 0x4b9f100000000000}, + {0xf0bdc21abb48db20, 0x1e86d40000000000}, + {0x96769950b50d88f4, 0x1314448000000000}, + {0xbc143fa4e250eb31, 0x17d955a000000000}, + {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, + {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, + {0xb7abc627050305ad, 0xf14a3d9e40000000}, + {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, + {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, + {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, + {0xe0352f62a19e306e, 0xd50b2037ad200000}, + {0x8c213d9da502de45, 0x4526f422cc340000}, + {0xaf298d050e4395d6, 0x9670b12b7f410000}, + {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, + {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, + {0xab0e93b6efee0053, 0x8eea0d047a457a00}, + {0xd5d238a4abe98068, 0x72a4904598d6d880}, + {0x85a36366eb71f041, 0x47a6da2b7f864750}, + {0xa70c3c40a64e6c51, 0x999090b65f67d924}, + {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, + {0x82818f1281ed449f, 0xbff8f10e7a8921a4}, + {0xa321f2d7226895c7, 0xaff72d52192b6a0d}, + {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490}, + {0xfee50b7025c36a08, 0x02f236d04753d5b4}, + {0x9f4f2726179a2245, 0x01d762422c946590}, + {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5}, + 
{0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2}, + {0x9b934c3b330c8577, 0x63cc55f49f88eb2f}, + {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb}, + {0xf316271c7fc3908a, 0x8bef464e3945ef7a}, + {0x97edd871cfda3a56, 0x97758bf0e3cbb5ac}, + {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317}, + {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd}, + {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a}, + {0xb975d6b6ee39e436, 0xb3e2fd538e122b44}, + {0xe7d34c64a9c85d44, 0x60dbbca87196b616}, + {0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd}, + {0xb51d13aea4a488dd, 0x6babab6398bdbe41}, + {0xe264589a4dcdab14, 0xc696963c7eed2dd1}, + {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2}, + {0xb0de65388cc8ada8, 0x3b25a55f43294bcb}, + {0xdd15fe86affad912, 0x49ef0eb713f39ebe}, + {0x8a2dbf142dfcc7ab, 0x6e3569326c784337}, + {0xacb92ed9397bf996, 0x49c2c37f07965404}, + {0xd7e77a8f87daf7fb, 0xdc33745ec97be906}, + {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3}, + {0xa8acd7c0222311bc, 0xc40832ea0d68ce0c}, + {0xd2d80db02aabd62b, 0xf50a3fa490c30190}, + {0x83c7088e1aab65db, 0x792667c6da79e0fa}, + {0xa4b8cab1a1563f52, 0x577001b891185938}, + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, + {0x80b05e5ac60b6178, 0x544f8158315b05b4}, + {0xa0dc75f1778e39d6, 0x696361ae3db1c721}, + {0xc913936dd571c84c, 0x03bc3a19cd1e38e9}, + {0xfb5878494ace3a5f, 0x04ab48a04065c723}, + {0x9d174b2dcec0e47b, 0x62eb0d64283f9c76}, + {0xc45d1df942711d9a, 0x3ba5d0bd324f8394}, + {0xf5746577930d6500, 0xca8f44ec7ee36479}, + {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb}, + {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e}, + {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e}, + {0x95d04aee3b80ece5, 0xbba1f1d158724a12}, + {0xbb445da9ca61281f, 0x2a8a6e45ae8edc97}, + {0xea1575143cf97226, 0xf52d09d71a3293bd}, + {0x924d692ca61be758, 0x593c2626705f9c56}, + {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c}, + {0xe498f455c38b997a, 0x0b6dfb9c0f956447}, + {0x8edf98b59a373fec, 0x4724bd4189bd5eac}, + {0xb2977ee300c50fe7, 0x58edec91ec2cb657}, + {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed}, + {0x8b865b215899f46c, 0xbd79e0d20082ee74}, + 
{0xae67f1e9aec07187, 0xecd8590680a3aa11}, + {0xda01ee641a708de9, 0xe80e6f4820cc9495}, + {0x884134fe908658b2, 0x3109058d147fdcdd}, + {0xaa51823e34a7eede, 0xbd4b46f0599fd415}, + {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a}, + {0x850fadc09923329e, 0x03e2cf6bc604ddb0}, + {0xa6539930bf6bff45, 0x84db8346b786151c}, + {0xcfe87f7cef46ff16, 0xe612641865679a63}, + {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e}, + {0xa26da3999aef7749, 0xe3be5e330f38f09d}, + {0xcb090c8001ab551c, 0x5cadf5bfd3072cc5}, + {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6}, + {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa}, + {0xc646d63501a1511d, 0xb281e1fd541501b8}, + {0xf7d88bc24209a565, 0x1f225a7ca91a4226}, + {0x9ae757596946075f, 0x3375788de9b06958}, + {0xc1a12d2fc3978937, 0x0052d6b1641c83ae}, + {0xf209787bb47d6b84, 0xc0678c5dbd23a49a}, + {0x9745eb4d50ce6332, 0xf840b7ba963646e0}, + {0xbd176620a501fbff, 0xb650e5a93bc3d898}, + {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe}, + {0x93ba47c980e98cdf, 0xc66f336c36b10137}, + {0xb8a8d9bbe123f017, 0xb80b0047445d4184}, + {0xe6d3102ad96cec1d, 0xa60dc059157491e5}, + {0x9043ea1ac7e41392, 0x87c89837ad68db2f}, + {0xb454e4a179dd1877, 0x29babe4598c311fb}, + {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a}, + {0x8ce2529e2734bb1d, 0x1899e4a65f58660c}, + {0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f}, + {0xdc21a1171d42645d, 0x76707543f4fa1f73}, + {0x899504ae72497eba, 0x6a06494a791c53a8}, + {0xabfa45da0edbde69, 0x0487db9d17636892}, + {0xd6f8d7509292d603, 0x45a9d2845d3c42b6}, + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, + {0xa7f26836f282b732, 0x8e6cac7768d7141e}, + {0xd1ef0244af2364ff, 0x3207d795430cd926}, + {0x8335616aed761f1f, 0x7f44e6bd49e807b8}, + {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6}, + {0xcd036837130890a1, 0x36dba887c37a8c0f}, + {0x802221226be55a64, 0xc2494954da2c9789}, + {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c}, + {0xc83553c5c8965d3d, 0x6f92829494e5acc7}, + {0xfa42a8b73abbf48c, 0xcb772339ba1f17f9}, + {0x9c69a97284b578d7, 0xff2a760414536efb}, + {0xc38413cf25e2d70d, 0xfef5138519684aba}, + 
{0xf46518c2ef5b8cd1, 0x7eb258665fc25d69}, + {0x98bf2f79d5993802, 0xef2f773ffbd97a61}, + {0xbeeefb584aff8603, 0xaafb550ffacfd8fa}, + {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38}, + {0x952ab45cfa97a0b2, 0xdd945a747bf26183}, + {0xba756174393d88df, 0x94f971119aeef9e4}, + {0xe912b9d1478ceb17, 0x7a37cd5601aab85d}, + {0x91abb422ccb812ee, 0xac62e055c10ab33a}, + {0xb616a12b7fe617aa, 0x577b986b314d6009}, + {0xe39c49765fdf9d94, 0xed5a7e85fda0b80b}, + {0x8e41ade9fbebc27d, 0x14588f13be847307}, + {0xb1d219647ae6b31c, 0x596eb2d8ae258fc8}, + {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb}, + {0x8aec23d680043bee, 0x25de7bb9480d5854}, + {0xada72ccc20054ae9, 0xaf561aa79a10ae6a}, + {0xd910f7ff28069da4, 0x1b2ba1518094da04}, + {0x87aa9aff79042286, 0x90fb44d2f05d0842}, + {0xa99541bf57452b28, 0x353a1607ac744a53}, + {0xd3fa922f2d1675f2, 0x42889b8997915ce8}, + {0x847c9b5d7c2e09b7, 0x69956135febada11}, + {0xa59bc234db398c25, 0x43fab9837e699095}, + {0xcf02b2c21207ef2e, 0x94f967e45e03f4bb}, + {0x8161afb94b44f57d, 0x1d1be0eebac278f5}, + {0xa1ba1ba79e1632dc, 0x6462d92a69731732}, + {0xca28a291859bbf93, 0x7d7b8f7503cfdcfe}, + {0xfcb2cb35e702af78, 0x5cda735244c3d43e}, + {0x9defbf01b061adab, 0x3a0888136afa64a7}, + {0xc56baec21c7a1916, 0x088aaa1845b8fdd0}, + {0xf6c69a72a3989f5b, 0x8aad549e57273d45}, + {0x9a3c2087a63f6399, 0x36ac54e2f678864b}, + {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd}, + {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5}, + {0x969eb7c47859e743, 0x9f644ae5a4b1b325}, + {0xbc4665b596706114, 0x873d5d9f0dde1fee}, + {0xeb57ff22fc0c7959, 0xa90cb506d155a7ea}, + {0x9316ff75dd87cbd8, 0x09a7f12442d588f2}, + {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb2f}, + {0xe5d3ef282a242e81, 0x8f1668c8a86da5fa}, + {0x8fa475791a569d10, 0xf96e017d694487bc}, + {0xb38d92d760ec4455, 0x37c981dcc395a9ac}, + {0xe070f78d3927556a, 0x85bbe253f47b1417}, + {0x8c469ab843b89562, 0x93956d7478ccec8e}, + {0xaf58416654a6babb, 0x387ac8d1970027b2}, + {0xdb2e51bfe9d0696a, 0x06997b05fcc0319e}, + {0x88fcf317f22241e2, 0x441fece3bdf81f03}, + 
{0xab3c2fddeeaad25a, 0xd527e81cad7626c3}, + {0xd60b3bd56a5586f1, 0x8a71e223d8d3b074}, + {0x85c7056562757456, 0xf6872d5667844e49}, + {0xa738c6bebb12d16c, 0xb428f8ac016561db}, + {0xd106f86e69d785c7, 0xe13336d701beba52}, + {0x82a45b450226b39c, 0xecc0024661173473}, + {0xa34d721642b06084, 0x27f002d7f95d0190}, + {0xcc20ce9bd35c78a5, 0x31ec038df7b441f4}, + {0xff290242c83396ce, 0x7e67047175a15271}, + {0x9f79a169bd203e41, 0x0f0062c6e984d386}, + {0xc75809c42c684dd1, 0x52c07b78a3e60868}, + {0xf92e0c3537826145, 0xa7709a56ccdf8a82}, + {0x9bbcc7a142b17ccb, 0x88a66076400bb691}, + {0xc2abf989935ddbfe, 0x6acff893d00ea435}, + {0xf356f7ebf83552fe, 0x0583f6b8c4124d43}, + {0x98165af37b2153de, 0xc3727a337a8b704a}, + {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c}, + {0xeda2ee1c7064130c, 0x1162def06f79df73}, + {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8}, + {0xb9a74a0637ce2ee1, 0x6d953e2bd7173692}, + {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437}, + {0x910ab1d4db9914a0, 0x1d9c9892400a22a2}, + {0xb54d5e4a127f59c8, 0x2503beb6d00cab4b}, + {0xe2a0b5dc971f303a, 0x2e44ae64840fd61d}, + {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, + {0xb10d8e1456105dad, 0x7425a83e872c5f47}, + {0xdd50f1996b947518, 0xd12f124e28f77719}, + {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f}, + {0xace73cbfdc0bfb7b, 0x636cc64d1001550b}, + {0xd8210befd30efa5a, 0x3c47f7e05401aa4e}, + {0x8714a775e3e95c78, 0x65acfaec34810a71}, + {0xa8d9d1535ce3b396, 0x7f1839a741a14d0d}, + {0xd31045a8341ca07c, 0x1ede48111209a050}, + {0x83ea2b892091e44d, 0x934aed0aab460432}, + {0xa4e4b66b68b65d60, 0xf81da84d5617853f}, + {0xce1de40642e3f4b9, 0x36251260ab9d668e}, + {0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019}, + {0xa1075a24e4421730, 0xb24cf65b8612f81f}, + {0xc94930ae1d529cfc, 0xdee033f26797b627}, + {0xfb9b7cd9a4a7443c, 0x169840ef017da3b1}, + {0x9d412e0806e88aa5, 0x8e1f289560ee864e}, + {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2}, + {0xf5b5d7ec8acb58a2, 0xae10af696774b1db}, + {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29}, + {0xbff610b0cc6edd3f, 0x17fd090a58d32af3}, + 
{0xeff394dcff8a948e, 0xddfc4b4cef07f5b0}, + {0x95f83d0a1fb69cd9, 0x4abdaf101564f98e}, + {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1}, + {0xea53df5fd18d5513, 0x84c86189216dc5ed}, + {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4}, + {0xb7118682dbb66a77, 0x3fbc8c33221dc2a1}, + {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, + {0x8f05b1163ba6832d, 0x29cb4d87f2a7400e}, + {0xb2c71d5bca9023f8, 0x743e20e9ef511012}, + {0xdf78e4b2bd342cf6, 0x914da9246b255416}, + {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e}, + {0xae9672aba3d0c320, 0xa184ac2473b529b1}, + {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e}, + {0x8865899617fb1871, 0x7e2fa67c7a658892}, + {0xaa7eebfb9df9de8d, 0xddbb901b98feeab7}, + {0xd51ea6fa85785631, 0x552a74227f3ea565}, + {0x8533285c936b35de, 0xd53a88958f87275f}, + {0xa67ff273b8460356, 0x8a892abaf368f137}, + {0xd01fef10a657842c, 0x2d2b7569b0432d85}, + {0x8213f56a67f6b29b, 0x9c3b29620e29fc73}, + {0xa298f2c501f45f42, 0x8349f3ba91b47b8f}, + {0xcb3f2f7642717713, 0x241c70a936219a73}, + {0xfe0efb53d30dd4d7, 0xed238cd383aa0110}, + {0x9ec95d1463e8a506, 0xf4363804324a40aa}, + {0xc67bb4597ce2ce48, 0xb143c6053edcd0d5}, + {0xf81aa16fdc1b81da, 0xdd94b7868e94050a}, + {0x9b10a4e5e9913128, 0xca7cf2b4191c8326}, + {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0}, + {0xf24a01a73cf2dccf, 0xbc633b39673c8cec}, + {0x976e41088617ca01, 0xd5be0503e085d813}, + {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18}, + {0xec9c459d51852ba2, 0xddf8e7d60ed1219e}, + {0x93e1ab8252f33b45, 0xcabb90e5c942b503}, + {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, + {0xe7109bfba19c0c9d, 0x0cc512670a783ad4}, + {0x906a617d450187e2, 0x27fb2b80668b24c5}, + {0xb484f9dc9641e9da, 0xb1f9f660802dedf6}, + {0xe1a63853bbd26451, 0x5e7873f8a0396973}, + {0x8d07e33455637eb2, 0xdb0b487b6423e1e8}, + {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62}, + {0xdc5c5301c56b75f7, 0x7641a140cc7810fb}, + {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d}, + {0xac2820d9623bf429, 0x546345fa9fbdcd44}, + {0xd732290fbacaf133, 0xa97c177947ad4095}, + {0x867f59a9d4bed6c0, 0x49ed8eabcccc485d}, + 
{0xa81f301449ee8c70, 0x5c68f256bfff5a74}, + {0xd226fc195c6a2f8c, 0x73832eec6fff3111}, + {0x83585d8fd9c25db7, 0xc831fd53c5ff7eab}, + {0xa42e74f3d032f525, 0xba3e7ca8b77f5e55}, + {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb}, + {0x80444b5e7aa7cf85, 0x7980d163cf5b81b3}, + {0xa0555e361951c366, 0xd7e105bcc332621f}, + {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7}, + {0xfa856334878fc150, 0xb14f98f6f0feb951}, + {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3}, + {0xc3b8358109e84f07, 0x0a862f80ec4700c8}, + {0xf4a642e14c6262c8, 0xcd27bb612758c0fa}, + {0x98e7e9cccfbd7dbd, 0x8038d51cb897789c}, + {0xbf21e44003acdd2c, 0xe0470a63e6bd56c3}, + {0xeeea5d5004981478, 0x1858ccfce06cac74}, + {0x95527a5202df0ccb, 0x0f37801e0c43ebc8}, + {0xbaa718e68396cffd, 0xd30560258f54e6ba}, + {0xe950df20247c83fd, 0x47c6b82ef32a2069}, + {0x91d28b7416cdd27e, 0x4cdc331d57fa5441}, + {0xb6472e511c81471d, 0xe0133fe4adf8e952}, + {0xe3d8f9e563a198e5, 0x58180fddd97723a6}, + {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648}, + {0xb201833b35d63f73, 0x2cd2cc6551e513da}, + {0xde81e40a034bcf4f, 0xf8077f7ea65e58d1}, + {0x8b112e86420f6191, 0xfb04afaf27faf782}, + {0xadd57a27d29339f6, 0x79c5db9af1f9b563}, + {0xd94ad8b1c7380874, 0x18375281ae7822bc}, + {0x87cec76f1c830548, 0x8f2293910d0b15b5}, + {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb22}, + {0xd433179d9c8cb841, 0x5fa60692a46151eb}, + {0x849feec281d7f328, 0xdbc7c41ba6bcd333}, + {0xa5c7ea73224deff3, 0x12b9b522906c0800}, + {0xcf39e50feae16bef, 0xd768226b34870a00}, + {0x81842f29f2cce375, 0xe6a1158300d46640}, + {0xa1e53af46f801c53, 0x60495ae3c1097fd0}, + {0xca5e89b18b602368, 0x385bb19cb14bdfc4}, + {0xfcf62c1dee382c42, 0x46729e03dd9ed7b5}, + {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d1}, + {0xc5a05277621be293, 0xc7098b7305241885}, + { 0xf70867153aa2db38, + 0xb8cbee4fc66d1ea7 } #else - {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, - {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, - {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, - {0x86a8d39ef77164bc, 0xae5dff9c02033198}, - {0xd98ddaee19068c76, 0x3badd624dd9b0958}, - 
{0xafbd2350644eeacf, 0xe5d1929ef90898fb}, - {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, - {0xe55990879ddcaabd, 0xcc420a6a101d0516}, - {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, - {0x95a8637627989aad, 0xdde7001379a44aa9}, - {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, - {0xc350000000000000, 0x0000000000000000}, - {0x9dc5ada82b70b59d, 0xf020000000000000}, - {0xfee50b7025c36a08, 0x02f236d04753d5b4}, - {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, - {0xa6539930bf6bff45, 0x84db8346b786151c}, - {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, - {0xd910f7ff28069da4, 0x1b2ba1518094da04}, - {0xaf58416654a6babb, 0x387ac8d1970027b2}, - {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, - {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, - {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, - { 0x95527a5202df0ccb, - 0x0f37801e0c43ebc8 } + {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, + {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, + {0x86a8d39ef77164bc, 0xae5dff9c02033198}, + {0xd98ddaee19068c76, 0x3badd624dd9b0958}, + {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, + {0xe55990879ddcaabd, 0xcc420a6a101d0516}, + {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, + {0x95a8637627989aad, 0xdde7001379a44aa9}, + {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, + {0xc350000000000000, 0x0000000000000000}, + {0x9dc5ada82b70b59d, 0xf020000000000000}, + {0xfee50b7025c36a08, 0x02f236d04753d5b4}, + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, + {0xa6539930bf6bff45, 0x84db8346b786151c}, + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, + {0xd910f7ff28069da4, 0x1b2ba1518094da04}, + {0xaf58416654a6babb, 0x387ac8d1970027b2}, + {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, + {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, + {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, + { 0x95527a5202df0ccb, + 0x0f37801e0c43ebc8 } #endif - }; + }; #if FMT_USE_FULL_CACHE_DRAGONBOX - return pow10_significands[k - float_info::min_k]; + return pow10_significands[k - float_info::min_k]; #else - static constexpr const 
uint64_t powers_of_5_64[] = { - 0x0000000000000001, 0x0000000000000005, 0x0000000000000019, - 0x000000000000007d, 0x0000000000000271, 0x0000000000000c35, - 0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1, - 0x00000000001dcd65, 0x00000000009502f9, 0x0000000002e90edd, - 0x000000000e8d4a51, 0x0000000048c27395, 0x000000016bcc41e9, - 0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5, - 0x000003782dace9d9, 0x00001158e460913d, 0x000056bc75e2d631, - 0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed, - 0x00d3c21bcecceda1, 0x0422ca8b0a00a425, 0x14adf4b7320334b9}; - - static constexpr const uint32_t pow10_recovery_errors[] = { - 0x50001400, 0x54044100, 0x54014555, 0x55954415, 0x54115555, 0x00000001, - 0x50000000, 0x00104000, 0x54010004, 0x05004001, 0x55555544, 0x41545555, - 0x54040551, 0x15445545, 0x51555514, 0x10000015, 0x00101100, 0x01100015, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04450514, 0x45414110, - 0x55555145, 0x50544050, 0x15040155, 0x11054140, 0x50111514, 0x11451454, - 0x00400541, 0x00000000, 0x55555450, 0x10056551, 0x10054011, 0x55551014, - 0x69514555, 0x05151109, 0x00155555}; - - static const int compression_ratio = 27; - - // Compute base index. - int cache_index = (k - float_info::min_k) / compression_ratio; - int kb = cache_index * compression_ratio + float_info::min_k; - int offset = k - kb; - - // Get base cache. - uint128_wrapper base_cache = pow10_significands[cache_index]; - if (offset == 0) return base_cache; - - // Compute the required amount of bit-shift. - int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; - FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); - - // Try to recover the real cache. - uint64_t pow5 = powers_of_5_64[offset]; - uint128_wrapper recovered_cache = umul128(base_cache.high(), pow5); - uint128_wrapper middle_low = - umul128(base_cache.low() - (kb < 0 ? 
1u : 0u), pow5); - - recovered_cache += middle_low.high(); - - uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); - uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); - - recovered_cache = - uint128_wrapper{(recovered_cache.low() >> alpha) | high_to_middle, - ((middle_low.low() >> alpha) | middle_to_low)}; - - if (kb < 0) recovered_cache += 1; - - // Get error. - int error_idx = (k - float_info::min_k) / 16; - uint32_t error = (pow10_recovery_errors[error_idx] >> - ((k - float_info::min_k) % 16) * 2) & - 0x3; - - // Add the error back. - FMT_ASSERT(recovered_cache.low() + error >= recovered_cache.low(), ""); - return {recovered_cache.high(), recovered_cache.low() + error}; + static constexpr const uint64_t powers_of_5_64[] = { + 0x0000000000000001, 0x0000000000000005, 0x0000000000000019, 0x000000000000007d, 0x0000000000000271, + 0x0000000000000c35, 0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1, 0x00000000001dcd65, + 0x00000000009502f9, 0x0000000002e90edd, 0x000000000e8d4a51, 0x0000000048c27395, 0x000000016bcc41e9, + 0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5, 0x000003782dace9d9, 0x00001158e460913d, + 0x000056bc75e2d631, 0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed, 0x00d3c21bcecceda1, + 0x0422ca8b0a00a425, 0x14adf4b7320334b9}; + + static constexpr const uint32_t pow10_recovery_errors[] = { + 0x50001400, 0x54044100, 0x54014555, 0x55954415, 0x54115555, 0x00000001, 0x50000000, 0x00104000, + 0x54010004, 0x05004001, 0x55555544, 0x41545555, 0x54040551, 0x15445545, 0x51555514, 0x10000015, + 0x00101100, 0x01100015, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04450514, 0x45414110, + 0x55555145, 0x50544050, 0x15040155, 0x11054140, 0x50111514, 0x11451454, 0x00400541, 0x00000000, + 0x55555450, 0x10056551, 0x10054011, 0x55551014, 0x69514555, 0x05151109, 0x00155555}; + + static const int compression_ratio = 27; + + // Compute base index. 
+ int cache_index = (k - float_info::min_k) / compression_ratio; + int kb = cache_index * compression_ratio + float_info::min_k; + int offset = k - kb; + + // Get base cache. + uint128_wrapper base_cache = pow10_significands[cache_index]; + if (offset == 0) + return base_cache; + + // Compute the required amount of bit-shift. + int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; + FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); + + // Try to recover the real cache. + uint64_t pow5 = powers_of_5_64[offset]; + uint128_wrapper recovered_cache = umul128(base_cache.high(), pow5); + uint128_wrapper middle_low = umul128(base_cache.low() - (kb < 0 ? 1u : 0u), pow5); + + recovered_cache += middle_low.high(); + + uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); + uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); + + recovered_cache = uint128_wrapper {(recovered_cache.low() >> alpha) | high_to_middle, + ((middle_low.low() >> alpha) | middle_to_low)}; + + if (kb < 0) + recovered_cache += 1; + + // Get error. + int error_idx = (k - float_info::min_k) / 16; + uint32_t error = (pow10_recovery_errors[error_idx] >> ((k - float_info::min_k) % 16) * 2) & 0x3; + + // Add the error back. 
+ FMT_ASSERT(recovered_cache.low() + error >= recovered_cache.low(), ""); + return {recovered_cache.high(), recovered_cache.low() + error}; #endif - } - - static carrier_uint compute_mul(carrier_uint u, - const cache_entry_type& cache) FMT_NOEXCEPT { - return umul192_upper64(u, cache); - } - - static uint32_t compute_delta(cache_entry_type const& cache, - int beta_minus_1) FMT_NOEXCEPT { - return static_cast(cache.high() >> (64 - 1 - beta_minus_1)); - } - - static bool compute_mul_parity(carrier_uint two_f, - const cache_entry_type& cache, - int beta_minus_1) FMT_NOEXCEPT { - FMT_ASSERT(beta_minus_1 >= 1, ""); - FMT_ASSERT(beta_minus_1 < 64, ""); - - return ((umul192_middle64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; - } - - static carrier_uint compute_left_endpoint_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return (cache.high() - - (cache.high() >> (float_info::significand_bits + 2))) >> - (64 - float_info::significand_bits - 1 - beta_minus_1); - } - - static carrier_uint compute_right_endpoint_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return (cache.high() + - (cache.high() >> (float_info::significand_bits + 1))) >> - (64 - float_info::significand_bits - 1 - beta_minus_1); - } - - static carrier_uint compute_round_up_for_shorter_interval_case( - const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { - return ((cache.high() >> - (64 - float_info::significand_bits - 2 - beta_minus_1)) + - 1) / - 2; - } + } + + static carrier_uint compute_mul(carrier_uint u, const cache_entry_type &cache) FMT_NOEXCEPT { + return umul192_upper64(u, cache); + } + + static uint32_t compute_delta(cache_entry_type const &cache, int beta_minus_1) FMT_NOEXCEPT { + return static_cast(cache.high() >> (64 - 1 - beta_minus_1)); + } + + static bool compute_mul_parity(carrier_uint two_f, const cache_entry_type &cache, int beta_minus_1) FMT_NOEXCEPT { + FMT_ASSERT(beta_minus_1 
>= 1, ""); + FMT_ASSERT(beta_minus_1 < 64, ""); + + return ((umul192_middle64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; + } + + static carrier_uint compute_left_endpoint_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return (cache.high() - (cache.high() >> (float_info::significand_bits + 2))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1); + } + + static carrier_uint compute_right_endpoint_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return (cache.high() + (cache.high() >> (float_info::significand_bits + 1))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1); + } + + static carrier_uint compute_round_up_for_shorter_interval_case(const cache_entry_type &cache, + int beta_minus_1) FMT_NOEXCEPT { + return ((cache.high() >> (64 - float_info::significand_bits - 2 - beta_minus_1)) + 1) / 2; + } }; // Various integer checks template bool is_left_endpoint_integer_shorter_interval(int exponent) FMT_NOEXCEPT { - return exponent >= - float_info< - T>::case_shorter_interval_left_endpoint_lower_threshold && - exponent <= - float_info::case_shorter_interval_left_endpoint_upper_threshold; + return exponent >= float_info::case_shorter_interval_left_endpoint_lower_threshold && + exponent <= float_info::case_shorter_interval_left_endpoint_upper_threshold; } template -bool is_endpoint_integer(typename float_info::carrier_uint two_f, - int exponent, int minus_k) FMT_NOEXCEPT { - if (exponent < float_info::case_fc_pm_half_lower_threshold) return false; - // For k >= 0. - if (exponent <= float_info::case_fc_pm_half_upper_threshold) return true; - // For k < 0. 
- if (exponent > float_info::divisibility_check_by_5_threshold) return false; - return divisible_by_power_of_5(two_f, minus_k); +bool is_endpoint_integer(typename float_info::carrier_uint two_f, int exponent, int minus_k) FMT_NOEXCEPT { + if (exponent < float_info::case_fc_pm_half_lower_threshold) + return false; + // For k >= 0. + if (exponent <= float_info::case_fc_pm_half_upper_threshold) + return true; + // For k < 0. + if (exponent > float_info::divisibility_check_by_5_threshold) + return false; + return divisible_by_power_of_5(two_f, minus_k); } template -bool is_center_integer(typename float_info::carrier_uint two_f, int exponent, - int minus_k) FMT_NOEXCEPT { - // Exponent for 5 is negative. - if (exponent > float_info::divisibility_check_by_5_threshold) return false; - if (exponent > float_info::case_fc_upper_threshold) - return divisible_by_power_of_5(two_f, minus_k); - // Both exponents are nonnegative. - if (exponent >= float_info::case_fc_lower_threshold) return true; - // Exponent for 2 is negative. - return divisible_by_power_of_2(two_f, minus_k - exponent + 1); +bool is_center_integer(typename float_info::carrier_uint two_f, int exponent, int minus_k) FMT_NOEXCEPT { + // Exponent for 5 is negative. + if (exponent > float_info::divisibility_check_by_5_threshold) + return false; + if (exponent > float_info::case_fc_upper_threshold) + return divisible_by_power_of_5(two_f, minus_k); + // Both exponents are nonnegative. + if (exponent >= float_info::case_fc_lower_threshold) + return true; + // Exponent for 2 is negative. 
+ return divisible_by_power_of_2(two_f, minus_k - exponent + 1); } // Remove trailing zeros from n and return the number of zeros removed (float) -FMT_INLINE int remove_trailing_zeros(uint32_t& n) FMT_NOEXCEPT { +FMT_INLINE int remove_trailing_zeros(uint32_t &n) FMT_NOEXCEPT { #ifdef FMT_BUILTIN_CTZ - int t = FMT_BUILTIN_CTZ(n); + int t = FMT_BUILTIN_CTZ(n); #else - int t = ctz(n); + int t = ctz(n); #endif - if (t > float_info::max_trailing_zeros) - t = float_info::max_trailing_zeros; - - const uint32_t mod_inv1 = 0xcccccccd; - const uint32_t max_quotient1 = 0x33333333; - const uint32_t mod_inv2 = 0xc28f5c29; - const uint32_t max_quotient2 = 0x0a3d70a3; - - int s = 0; - for (; s < t - 1; s += 2) { - if (n * mod_inv2 > max_quotient2) break; - n *= mod_inv2; - } - if (s < t && n * mod_inv1 <= max_quotient1) { - n *= mod_inv1; - ++s; - } - n >>= s; - return s; + if (t > float_info::max_trailing_zeros) + t = float_info::max_trailing_zeros; + + const uint32_t mod_inv1 = 0xcccccccd; + const uint32_t max_quotient1 = 0x33333333; + const uint32_t mod_inv2 = 0xc28f5c29; + const uint32_t max_quotient2 = 0x0a3d70a3; + + int s = 0; + for (; s < t - 1; s += 2) { + if (n * mod_inv2 > max_quotient2) + break; + n *= mod_inv2; + } + if (s < t && n * mod_inv1 <= max_quotient1) { + n *= mod_inv1; + ++s; + } + n >>= s; + return s; } // Removes trailing zeros and returns the number of zeros removed (double) -FMT_INLINE int remove_trailing_zeros(uint64_t& n) FMT_NOEXCEPT { +FMT_INLINE int remove_trailing_zeros(uint64_t &n) FMT_NOEXCEPT { #ifdef FMT_BUILTIN_CTZLL - int t = FMT_BUILTIN_CTZLL(n); + int t = FMT_BUILTIN_CTZLL(n); #else - int t = ctzll(n); + int t = ctzll(n); #endif - if (t > float_info::max_trailing_zeros) - t = float_info::max_trailing_zeros; - // Divide by 10^8 and reduce to 32-bits - // Since ret_value.significand <= (2^64 - 1) / 1000 < 10^17, - // both of the quotient and the r should fit in 32-bits - - const uint32_t mod_inv1 = 0xcccccccd; - const uint32_t max_quotient1 
= 0x33333333; - const uint64_t mod_inv8 = 0xc767074b22e90e21; - const uint64_t max_quotient8 = 0x00002af31dc46118; - - // If the number is divisible by 1'0000'0000, work with the quotient - if (t >= 8) { - auto quotient_candidate = n * mod_inv8; - - if (quotient_candidate <= max_quotient8) { - auto quotient = static_cast(quotient_candidate >> 8); - - int s = 8; - for (; s < t; ++s) { - if (quotient * mod_inv1 > max_quotient1) break; - quotient *= mod_inv1; - } - quotient >>= (s - 8); - n = quotient; - return s; - } - } - - // Otherwise, work with the remainder - auto quotient = static_cast(n / 100000000); - auto remainder = static_cast(n - 100000000 * quotient); - - if (t == 0 || remainder * mod_inv1 > max_quotient1) { - return 0; - } - remainder *= mod_inv1; - - if (t == 1 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 1) + quotient * 10000000ull; - return 1; - } - remainder *= mod_inv1; - - if (t == 2 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 2) + quotient * 1000000ull; - return 2; - } - remainder *= mod_inv1; - - if (t == 3 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 3) + quotient * 100000ull; - return 3; - } - remainder *= mod_inv1; - - if (t == 4 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 4) + quotient * 10000ull; - return 4; - } - remainder *= mod_inv1; - - if (t == 5 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 5) + quotient * 1000ull; - return 5; - } - remainder *= mod_inv1; - - if (t == 6 || remainder * mod_inv1 > max_quotient1) { - n = (remainder >> 6) + quotient * 100ull; - return 6; - } - remainder *= mod_inv1; - - n = (remainder >> 7) + quotient * 10ull; - return 7; + if (t > float_info::max_trailing_zeros) + t = float_info::max_trailing_zeros; + // Divide by 10^8 and reduce to 32-bits + // Since ret_value.significand <= (2^64 - 1) / 1000 < 10^17, + // both of the quotient and the r should fit in 32-bits + + const uint32_t mod_inv1 = 0xcccccccd; + const 
uint32_t max_quotient1 = 0x33333333; + const uint64_t mod_inv8 = 0xc767074b22e90e21; + const uint64_t max_quotient8 = 0x00002af31dc46118; + + // If the number is divisible by 1'0000'0000, work with the quotient + if (t >= 8) { + auto quotient_candidate = n * mod_inv8; + + if (quotient_candidate <= max_quotient8) { + auto quotient = static_cast(quotient_candidate >> 8); + + int s = 8; + for (; s < t; ++s) { + if (quotient * mod_inv1 > max_quotient1) + break; + quotient *= mod_inv1; + } + quotient >>= (s - 8); + n = quotient; + return s; + } + } + + // Otherwise, work with the remainder + auto quotient = static_cast(n / 100000000); + auto remainder = static_cast(n - 100000000 * quotient); + + if (t == 0 || remainder * mod_inv1 > max_quotient1) { + return 0; + } + remainder *= mod_inv1; + + if (t == 1 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 1) + quotient * 10000000ull; + return 1; + } + remainder *= mod_inv1; + + if (t == 2 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 2) + quotient * 1000000ull; + return 2; + } + remainder *= mod_inv1; + + if (t == 3 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 3) + quotient * 100000ull; + return 3; + } + remainder *= mod_inv1; + + if (t == 4 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 4) + quotient * 10000ull; + return 4; + } + remainder *= mod_inv1; + + if (t == 5 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 5) + quotient * 1000ull; + return 5; + } + remainder *= mod_inv1; + + if (t == 6 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 6) + quotient * 100ull; + return 6; + } + remainder *= mod_inv1; + + n = (remainder >> 7) + quotient * 10ull; + return 7; } // The main algorithm for shorter interval case template FMT_INLINE decimal_fp shorter_interval_case(int exponent) FMT_NOEXCEPT { - decimal_fp ret_value; - // Compute k and beta - const int minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); - const int 
beta_minus_1 = exponent + floor_log2_pow10(-minus_k); - - // Compute xi and zi - using cache_entry_type = typename cache_accessor::cache_entry_type; - const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); - - auto xi = cache_accessor::compute_left_endpoint_for_shorter_interval_case( - cache, beta_minus_1); - auto zi = cache_accessor::compute_right_endpoint_for_shorter_interval_case( - cache, beta_minus_1); - - // If the left endpoint is not an integer, increase it - if (!is_left_endpoint_integer_shorter_interval(exponent)) ++xi; - - // Try bigger divisor - ret_value.significand = zi / 10; - - // If succeed, remove trailing zeros if necessary and return - if (ret_value.significand * 10 >= xi) { - ret_value.exponent = minus_k + 1; - ret_value.exponent += remove_trailing_zeros(ret_value.significand); - return ret_value; - } - - // Otherwise, compute the round-up of y - ret_value.significand = - cache_accessor::compute_round_up_for_shorter_interval_case( - cache, beta_minus_1); - ret_value.exponent = minus_k; - - // When tie occurs, choose one of them according to the rule - if (exponent >= float_info::shorter_interval_tie_lower_threshold && - exponent <= float_info::shorter_interval_tie_upper_threshold) { - ret_value.significand = ret_value.significand % 2 == 0 - ? 
ret_value.significand - : ret_value.significand - 1; - } else if (ret_value.significand < xi) { - ++ret_value.significand; - } - return ret_value; + decimal_fp ret_value; + // Compute k and beta + const int minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); + const int beta_minus_1 = exponent + floor_log2_pow10(-minus_k); + + // Compute xi and zi + using cache_entry_type = typename cache_accessor::cache_entry_type; + const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); + + auto xi = cache_accessor::compute_left_endpoint_for_shorter_interval_case(cache, beta_minus_1); + auto zi = cache_accessor::compute_right_endpoint_for_shorter_interval_case(cache, beta_minus_1); + + // If the left endpoint is not an integer, increase it + if (!is_left_endpoint_integer_shorter_interval(exponent)) + ++xi; + + // Try bigger divisor + ret_value.significand = zi / 10; + + // If succeed, remove trailing zeros if necessary and return + if (ret_value.significand * 10 >= xi) { + ret_value.exponent = minus_k + 1; + ret_value.exponent += remove_trailing_zeros(ret_value.significand); + return ret_value; + } + + // Otherwise, compute the round-up of y + ret_value.significand = cache_accessor::compute_round_up_for_shorter_interval_case(cache, beta_minus_1); + ret_value.exponent = minus_k; + + // When tie occurs, choose one of them according to the rule + if (exponent >= float_info::shorter_interval_tie_lower_threshold && + exponent <= float_info::shorter_interval_tie_upper_threshold) { + ret_value.significand = ret_value.significand % 2 == 0 ? ret_value.significand : ret_value.significand - 1; + } else if (ret_value.significand < xi) { + ++ret_value.significand; + } + return ret_value; } -template decimal_fp to_decimal(T x) FMT_NOEXCEPT { - // Step 1: integer promotion & Schubfach multiplier calculation. 
- - using carrier_uint = typename float_info::carrier_uint; - using cache_entry_type = typename cache_accessor::cache_entry_type; - auto br = bit_cast(x); - - // Extract significand bits and exponent bits. - const carrier_uint significand_mask = - (static_cast(1) << float_info::significand_bits) - 1; - carrier_uint significand = (br & significand_mask); - int exponent = static_cast((br & exponent_mask()) >> - float_info::significand_bits); - - if (exponent != 0) { // Check if normal. - exponent += float_info::exponent_bias - float_info::significand_bits; - - // Shorter interval case; proceed like Schubfach. - if (significand == 0) return shorter_interval_case(exponent); - - significand |= - (static_cast(1) << float_info::significand_bits); - } else { - // Subnormal case; the interval is always regular. - if (significand == 0) return {0, 0}; - exponent = float_info::min_exponent - float_info::significand_bits; - } - - const bool include_left_endpoint = (significand % 2 == 0); - const bool include_right_endpoint = include_left_endpoint; - - // Compute k and beta. 
- const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; - const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); - const int beta_minus_1 = exponent + floor_log2_pow10(-minus_k); - - // Compute zi and deltai - // 10^kappa <= deltai < 10^(kappa + 1) - const uint32_t deltai = cache_accessor::compute_delta(cache, beta_minus_1); - const carrier_uint two_fc = significand << 1; - const carrier_uint two_fr = two_fc | 1; - const carrier_uint zi = - cache_accessor::compute_mul(two_fr << beta_minus_1, cache); - - // Step 2: Try larger divisor; remove trailing zeros if necessary - - // Using an upper bound on zi, we might be able to optimize the division - // better than the compiler; we are computing zi / big_divisor here - decimal_fp ret_value; - ret_value.significand = divide_by_10_to_kappa_plus_1(zi); - uint32_t r = static_cast(zi - float_info::big_divisor * - ret_value.significand); - - if (r > deltai) { - goto small_divisor_case_label; - } else if (r < deltai) { - // Exclude the right endpoint if necessary - if (r == 0 && !include_right_endpoint && - is_endpoint_integer(two_fr, exponent, minus_k)) { - --ret_value.significand; - r = float_info::big_divisor; - goto small_divisor_case_label; - } - } else { - // r == deltai; compare fractional parts - // Check conditions in the order different from the paper - // to take advantage of short-circuiting - const carrier_uint two_fl = two_fc - 1; - if ((!include_left_endpoint || - !is_endpoint_integer(two_fl, exponent, minus_k)) && - !cache_accessor::compute_mul_parity(two_fl, cache, beta_minus_1)) { - goto small_divisor_case_label; - } - } - ret_value.exponent = minus_k + float_info::kappa + 1; - - // We may need to remove trailing zeros - ret_value.exponent += remove_trailing_zeros(ret_value.significand); - return ret_value; - - // Step 3: Find the significand with the smaller divisor +template +decimal_fp to_decimal(T x) FMT_NOEXCEPT { + // Step 1: integer promotion & Schubfach multiplier 
calculation. + + using carrier_uint = typename float_info::carrier_uint; + using cache_entry_type = typename cache_accessor::cache_entry_type; + auto br = bit_cast(x); + + // Extract significand bits and exponent bits. + const carrier_uint significand_mask = (static_cast(1) << float_info::significand_bits) - 1; + carrier_uint significand = (br & significand_mask); + int exponent = static_cast((br & exponent_mask()) >> float_info::significand_bits); + + if (exponent != 0) { // Check if normal. + exponent += float_info::exponent_bias - float_info::significand_bits; + + // Shorter interval case; proceed like Schubfach. + if (significand == 0) + return shorter_interval_case(exponent); + + significand |= (static_cast(1) << float_info::significand_bits); + } else { + // Subnormal case; the interval is always regular. + if (significand == 0) + return {0, 0}; + exponent = float_info::min_exponent - float_info::significand_bits; + } + + const bool include_left_endpoint = (significand % 2 == 0); + const bool include_right_endpoint = include_left_endpoint; + + // Compute k and beta. 
+ const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; + const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); + const int beta_minus_1 = exponent + floor_log2_pow10(-minus_k); + + // Compute zi and deltai + // 10^kappa <= deltai < 10^(kappa + 1) + const uint32_t deltai = cache_accessor::compute_delta(cache, beta_minus_1); + const carrier_uint two_fc = significand << 1; + const carrier_uint two_fr = two_fc | 1; + const carrier_uint zi = cache_accessor::compute_mul(two_fr << beta_minus_1, cache); + + // Step 2: Try larger divisor; remove trailing zeros if necessary + + // Using an upper bound on zi, we might be able to optimize the division + // better than the compiler; we are computing zi / big_divisor here + decimal_fp ret_value; + ret_value.significand = divide_by_10_to_kappa_plus_1(zi); + uint32_t r = static_cast(zi - float_info::big_divisor * ret_value.significand); + + if (r > deltai) { + goto small_divisor_case_label; + } else if (r < deltai) { + // Exclude the right endpoint if necessary + if (r == 0 && !include_right_endpoint && is_endpoint_integer(two_fr, exponent, minus_k)) { + --ret_value.significand; + r = float_info::big_divisor; + goto small_divisor_case_label; + } + } else { + // r == deltai; compare fractional parts + // Check conditions in the order different from the paper + // to take advantage of short-circuiting + const carrier_uint two_fl = two_fc - 1; + if ((!include_left_endpoint || !is_endpoint_integer(two_fl, exponent, minus_k)) && + !cache_accessor::compute_mul_parity(two_fl, cache, beta_minus_1)) { + goto small_divisor_case_label; + } + } + ret_value.exponent = minus_k + float_info::kappa + 1; + + // We may need to remove trailing zeros + ret_value.exponent += remove_trailing_zeros(ret_value.significand); + return ret_value; + + // Step 3: Find the significand with the smaller divisor small_divisor_case_label: - ret_value.significand *= 10; - ret_value.exponent = minus_k + float_info::kappa; - - const 
uint32_t mask = (1u << float_info::kappa) - 1; - auto dist = r - (deltai / 2) + (float_info::small_divisor / 2); - - // Is dist divisible by 2^kappa? - if ((dist & mask) == 0) { - const bool approx_y_parity = - ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; - dist >>= float_info::kappa; - - // Is dist divisible by 5^kappa? - if (check_divisibility_and_divide_by_pow5::kappa>(dist)) { - ret_value.significand += dist; - - // Check z^(f) >= epsilon^(f) - // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, - // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f) - // Since there are only 2 possibilities, we only need to care about the - // parity. Also, zi and r should have the same parity since the divisor - // is an even number - if (cache_accessor::compute_mul_parity(two_fc, cache, beta_minus_1) != - approx_y_parity) { - --ret_value.significand; - } else { - // If z^(f) >= epsilon^(f), we might have a tie - // when z^(f) == epsilon^(f), or equivalently, when y is an integer - if (is_center_integer(two_fc, exponent, minus_k)) { - ret_value.significand = ret_value.significand % 2 == 0 - ? ret_value.significand - : ret_value.significand - 1; - } - } - } - // Is dist not divisible by 5^kappa? - else { - ret_value.significand += dist; - } - } - // Is dist not divisible by 2^kappa? - else { - // Since we know dist is small, we might be able to optimize the division - // better than the compiler; we are computing dist / small_divisor here - ret_value.significand += - small_division_by_pow10::kappa>(dist); - } - return ret_value; + ret_value.significand *= 10; + ret_value.exponent = minus_k + float_info::kappa; + + const uint32_t mask = (1u << float_info::kappa) - 1; + auto dist = r - (deltai / 2) + (float_info::small_divisor / 2); + + // Is dist divisible by 2^kappa? + if ((dist & mask) == 0) { + const bool approx_y_parity = ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; + dist >>= float_info::kappa; + + // Is dist divisible by 5^kappa? 
+ if (check_divisibility_and_divide_by_pow5::kappa>(dist)) { + ret_value.significand += dist; + + // Check z^(f) >= epsilon^(f) + // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, + // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f) + // Since there are only 2 possibilities, we only need to care about + // the parity. Also, zi and r should have the same parity since the + // divisor is an even number + if (cache_accessor::compute_mul_parity(two_fc, cache, beta_minus_1) != approx_y_parity) { + --ret_value.significand; + } else { + // If z^(f) >= epsilon^(f), we might have a tie + // when z^(f) == epsilon^(f), or equivalently, when y is an + // integer + if (is_center_integer(two_fc, exponent, minus_k)) { + ret_value.significand = + ret_value.significand % 2 == 0 ? ret_value.significand : ret_value.significand - 1; + } + } + } + // Is dist not divisible by 5^kappa? + else { + ret_value.significand += dist; + } + } + // Is dist not divisible by 2^kappa? + else { + // Since we know dist is small, we might be able to optimize the + // division better than the compiler; we are computing dist / + // small_divisor here + ret_value.significand += small_division_by_pow10::kappa>(dist); + } + return ret_value; } -} // namespace dragonbox +} // namespace dragonbox // Formats value using a variation of the Fixed-Precision Positive // Floating-Point Printout ((FPP)^2) algorithm by Steele & White: // https://fmt.dev/papers/p372-steele.pdf. template -void fallback_format(Double d, int num_digits, bool binary32, buffer& buf, - int& exp10) { - bigint numerator; // 2 * R in (FPP)^2. - bigint denominator; // 2 * S in (FPP)^2. - // lower and upper are differences between value and corresponding boundaries. - bigint lower; // (M^- in (FPP)^2). - bigint upper_store; // upper's value if different from lower. - bigint* upper = nullptr; // (M^+ in (FPP)^2). 
- fp value; - // Shift numerator and denominator by an extra bit or two (if lower boundary - // is closer) to make lower and upper integers. This eliminates multiplication - // by 2 during later computations. - const bool is_predecessor_closer = - binary32 ? value.assign(static_cast(d)) : value.assign(d); - int shift = is_predecessor_closer ? 2 : 1; - uint64_t significand = value.f << shift; - if (value.e >= 0) { - numerator.assign(significand); - numerator <<= value.e; - lower.assign(1); - lower <<= value.e; - if (shift != 1) { - upper_store.assign(1); - upper_store <<= value.e + 1; - upper = &upper_store; - } - denominator.assign_pow10(exp10); - denominator <<= shift; - } else if (exp10 < 0) { - numerator.assign_pow10(-exp10); - lower.assign(numerator); - if (shift != 1) { - upper_store.assign(numerator); - upper_store <<= 1; - upper = &upper_store; - } - numerator *= significand; - denominator.assign(1); - denominator <<= shift - value.e; - } else { - numerator.assign(significand); - denominator.assign_pow10(exp10); - denominator <<= shift - value.e; - lower.assign(1); - if (shift != 1) { - upper_store.assign(1ULL << 1); - upper = &upper_store; - } - } - // Invariant: value == (numerator / denominator) * pow(10, exp10). - if (num_digits < 0) { - // Generate the shortest representation. - if (!upper) upper = &lower; - bool even = (value.f & 1) == 0; - num_digits = 0; - char* data = buf.data(); - for (;;) { - int digit = numerator.divmod_assign(denominator); - bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. - // numerator + upper >[=] pow10: - bool high = add_compare(numerator, *upper, denominator) + even > 0; - data[num_digits++] = static_cast('0' + digit); - if (low || high) { - if (!low) { - ++data[num_digits - 1]; - } else if (high) { - int result = add_compare(numerator, numerator, denominator); - // Round half to even. 
- if (result > 0 || (result == 0 && (digit % 2) != 0)) - ++data[num_digits - 1]; - } - buf.try_resize(to_unsigned(num_digits)); - exp10 -= num_digits - 1; - return; - } - numerator *= 10; - lower *= 10; - if (upper != &lower) *upper *= 10; - } - } - // Generate the given number of digits. - exp10 -= num_digits - 1; - if (num_digits == 0) { - buf.try_resize(1); - denominator *= 10; - buf[0] = add_compare(numerator, numerator, denominator) > 0 ? '1' : '0'; - return; - } - buf.try_resize(to_unsigned(num_digits)); - for (int i = 0; i < num_digits - 1; ++i) { - int digit = numerator.divmod_assign(denominator); - buf[i] = static_cast('0' + digit); - numerator *= 10; - } - int digit = numerator.divmod_assign(denominator); - auto result = add_compare(numerator, numerator, denominator); - if (result > 0 || (result == 0 && (digit % 2) != 0)) { - if (digit == 9) { - const auto overflow = '0' + 10; - buf[num_digits - 1] = overflow; - // Propagate the carry. - for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { - buf[i] = '0'; - ++buf[i - 1]; - } - if (buf[0] == overflow) { - buf[0] = '1'; - ++exp10; - } - return; - } - ++digit; - } - buf[num_digits - 1] = static_cast('0' + digit); +void fallback_format(Double d, int num_digits, bool binary32, buffer &buf, int &exp10) { + bigint numerator; // 2 * R in (FPP)^2. + bigint denominator; // 2 * S in (FPP)^2. + // lower and upper are differences between value and corresponding + // boundaries. + bigint lower; // (M^- in (FPP)^2). + bigint upper_store; // upper's value if different from lower. + bigint *upper = nullptr; // (M^+ in (FPP)^2). + fp value; + // Shift numerator and denominator by an extra bit or two (if lower boundary + // is closer) to make lower and upper integers. This eliminates + // multiplication by 2 during later computations. + const bool is_predecessor_closer = binary32 ? value.assign(static_cast(d)) : value.assign(d); + int shift = is_predecessor_closer ? 
2 : 1; + uint64_t significand = value.f << shift; + if (value.e >= 0) { + numerator.assign(significand); + numerator <<= value.e; + lower.assign(1); + lower <<= value.e; + if (shift != 1) { + upper_store.assign(1); + upper_store <<= value.e + 1; + upper = &upper_store; + } + denominator.assign_pow10(exp10); + denominator <<= shift; + } else if (exp10 < 0) { + numerator.assign_pow10(-exp10); + lower.assign(numerator); + if (shift != 1) { + upper_store.assign(numerator); + upper_store <<= 1; + upper = &upper_store; + } + numerator *= significand; + denominator.assign(1); + denominator <<= shift - value.e; + } else { + numerator.assign(significand); + denominator.assign_pow10(exp10); + denominator <<= shift - value.e; + lower.assign(1); + if (shift != 1) { + upper_store.assign(1ULL << 1); + upper = &upper_store; + } + } + // Invariant: value == (numerator / denominator) * pow(10, exp10). + if (num_digits < 0) { + // Generate the shortest representation. + if (!upper) + upper = &lower; + bool even = (value.f & 1) == 0; + num_digits = 0; + char *data = buf.data(); + for (;;) { + int digit = numerator.divmod_assign(denominator); + bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. + // numerator + upper >[=] pow10: + bool high = add_compare(numerator, *upper, denominator) + even > 0; + data[num_digits++] = static_cast('0' + digit); + if (low || high) { + if (!low) { + ++data[num_digits - 1]; + } else if (high) { + int result = add_compare(numerator, numerator, denominator); + // Round half to even. + if (result > 0 || (result == 0 && (digit % 2) != 0)) + ++data[num_digits - 1]; + } + buf.try_resize(to_unsigned(num_digits)); + exp10 -= num_digits - 1; + return; + } + numerator *= 10; + lower *= 10; + if (upper != &lower) + *upper *= 10; + } + } + // Generate the given number of digits. + exp10 -= num_digits - 1; + if (num_digits == 0) { + buf.try_resize(1); + denominator *= 10; + buf[0] = add_compare(numerator, numerator, denominator) > 0 ? 
'1' : '0'; + return; + } + buf.try_resize(to_unsigned(num_digits)); + for (int i = 0; i < num_digits - 1; ++i) { + int digit = numerator.divmod_assign(denominator); + buf[i] = static_cast('0' + digit); + numerator *= 10; + } + int digit = numerator.divmod_assign(denominator); + auto result = add_compare(numerator, numerator, denominator); + if (result > 0 || (result == 0 && (digit % 2) != 0)) { + if (digit == 9) { + const auto overflow = '0' + 10; + buf[num_digits - 1] = overflow; + // Propagate the carry. + for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { + buf[i] = '0'; + ++buf[i - 1]; + } + if (buf[0] == overflow) { + buf[0] = '1'; + ++exp10; + } + return; + } + ++digit; + } + buf[num_digits - 1] = static_cast('0' + digit); } template -int format_float(T value, int precision, float_specs specs, buffer& buf) { - static_assert(!std::is_same::value, ""); - FMT_ASSERT(value >= 0, "value is negative"); - - const bool fixed = specs.format == float_format::fixed; - if (value <= 0) { // <= instead of == to silence a warning. - if (precision <= 0 || !fixed) { - buf.push_back('0'); - return 0; - } - buf.try_resize(to_unsigned(precision)); - std::uninitialized_fill_n(buf.data(), precision, '0'); - return -precision; - } - - if (!specs.use_grisu) return snprintf_float(value, precision, specs, buf); - - if (precision < 0) { - // Use Dragonbox for the shortest format. - if (specs.binary32) { - auto dec = dragonbox::to_decimal(static_cast(value)); - write(buffer_appender(buf), dec.significand); - return dec.exponent; - } - auto dec = dragonbox::to_decimal(static_cast(value)); - write(buffer_appender(buf), dec.significand); - return dec.exponent; - } - - // Use Grisu + Dragon4 for the given precision: - // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. - int exp = 0; - const int min_exp = -60; // alpha in Grisu. - int cached_exp10 = 0; // K in Grisu. 
- fp normalized = normalize(fp(value)); - const auto cached_pow = get_cached_power( - min_exp - (normalized.e + fp::significand_size), cached_exp10); - normalized = normalized * cached_pow; - // Limit precision to the maximum possible number of significant digits in an - // IEEE754 double because we don't need to generate zeros. - const int max_double_digits = 767; - if (precision > max_double_digits) precision = max_double_digits; - fixed_handler handler{buf.data(), 0, precision, -cached_exp10, fixed}; - if (grisu_gen_digits(normalized, 1, exp, handler) == digits::error) { - exp += handler.size - cached_exp10 - 1; - fallback_format(value, handler.precision, specs.binary32, buf, exp); - } else { - exp += handler.exp10; - buf.try_resize(to_unsigned(handler.size)); - } - if (!fixed && !specs.showpoint) { - // Remove trailing zeros. - auto num_digits = buf.size(); - while (num_digits > 0 && buf[num_digits - 1] == '0') { - --num_digits; - ++exp; - } - buf.try_resize(num_digits); - } - return exp; -} // namespace detail +int format_float(T value, int precision, float_specs specs, buffer &buf) { + static_assert(!std::is_same::value, ""); + FMT_ASSERT(value >= 0, "value is negative"); + + const bool fixed = specs.format == float_format::fixed; + if (value <= 0) { // <= instead of == to silence a warning. + if (precision <= 0 || !fixed) { + buf.push_back('0'); + return 0; + } + buf.try_resize(to_unsigned(precision)); + std::uninitialized_fill_n(buf.data(), precision, '0'); + return -precision; + } + + if (!specs.use_grisu) + return snprintf_float(value, precision, specs, buf); + + if (precision < 0) { + // Use Dragonbox for the shortest format. 
+ if (specs.binary32) { + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } + + // Use Grisu + Dragon4 for the given precision: + // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. + int exp = 0; + const int min_exp = -60; // alpha in Grisu. + int cached_exp10 = 0; // K in Grisu. + fp normalized = normalize(fp(value)); + const auto cached_pow = get_cached_power(min_exp - (normalized.e + fp::significand_size), cached_exp10); + normalized = normalized * cached_pow; + // Limit precision to the maximum possible number of significant digits in + // an IEEE754 double because we don't need to generate zeros. + const int max_double_digits = 767; + if (precision > max_double_digits) + precision = max_double_digits; + fixed_handler handler {buf.data(), 0, precision, -cached_exp10, fixed}; + if (grisu_gen_digits(normalized, 1, exp, handler) == digits::error) { + exp += handler.size - cached_exp10 - 1; + fallback_format(value, handler.precision, specs.binary32, buf, exp); + } else { + exp += handler.exp10; + buf.try_resize(to_unsigned(handler.size)); + } + if (!fixed && !specs.showpoint) { + // Remove trailing zeros. + auto num_digits = buf.size(); + while (num_digits > 0 && buf[num_digits - 1] == '0') { + --num_digits; + ++exp; + } + buf.try_resize(num_digits); + } + return exp; +} // namespace detail template -int snprintf_float(T value, int precision, float_specs specs, - buffer& buf) { - // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. - FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); - static_assert(!std::is_same::value, ""); - - // Subtract 1 to account for the difference in precision since we use %e for - // both general and exponent format. 
- if (specs.format == float_format::general || - specs.format == float_format::exp) - precision = (precision >= 0 ? precision : 6) - 1; - - // Build the format string. - enum { max_format_size = 7 }; // The longest format is "%#.*Le". - char format[max_format_size]; - char* format_ptr = format; - *format_ptr++ = '%'; - if (specs.showpoint && specs.format == float_format::hex) *format_ptr++ = '#'; - if (precision >= 0) { - *format_ptr++ = '.'; - *format_ptr++ = '*'; - } - if (std::is_same()) *format_ptr++ = 'L'; - *format_ptr++ = specs.format != float_format::hex - ? (specs.format == float_format::fixed ? 'f' : 'e') - : (specs.upper ? 'A' : 'a'); - *format_ptr = '\0'; - - // Format using snprintf. - auto offset = buf.size(); - for (;;) { - auto begin = buf.data() + offset; - auto capacity = buf.capacity() - offset; +int snprintf_float(T value, int precision, float_specs specs, buffer &buf) { + // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. + FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); + static_assert(!std::is_same::value, ""); + + // Subtract 1 to account for the difference in precision since we use %e for + // both general and exponent format. + if (specs.format == float_format::general || specs.format == float_format::exp) + precision = (precision >= 0 ? precision : 6) - 1; + + // Build the format string. + enum { max_format_size = 7 }; // The longest format is "%#.*Le". + char format[max_format_size]; + char *format_ptr = format; + *format_ptr++ = '%'; + if (specs.showpoint && specs.format == float_format::hex) + *format_ptr++ = '#'; + if (precision >= 0) { + *format_ptr++ = '.'; + *format_ptr++ = '*'; + } + if (std::is_same()) + *format_ptr++ = 'L'; + *format_ptr++ = specs.format != float_format::hex ? (specs.format == float_format::fixed ? 'f' : 'e') + : (specs.upper ? 'A' : 'a'); + *format_ptr = '\0'; + + // Format using snprintf. 
+ auto offset = buf.size(); + for (;;) { + auto begin = buf.data() + offset; + auto capacity = buf.capacity() - offset; #ifdef FMT_FUZZ - if (precision > 100000) - throw std::runtime_error( - "fuzz mode - avoid large allocation inside snprintf"); + if (precision > 100000) + throw std::runtime_error("fuzz mode - avoid large allocation inside snprintf"); #endif - // Suppress the warning about a nonliteral format string. - // Cannot use auto because of a bug in MinGW (#1532). - int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF; - int result = precision >= 0 - ? snprintf_ptr(begin, capacity, format, precision, value) - : snprintf_ptr(begin, capacity, format, value); - if (result < 0) { - // The buffer will grow exponentially. - buf.try_reserve(buf.capacity() + 1); - continue; - } - auto size = to_unsigned(result); - // Size equal to capacity means that the last character was truncated. - if (size >= capacity) { - buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. - continue; - } - auto is_digit = [](char c) { return c >= '0' && c <= '9'; }; - if (specs.format == float_format::fixed) { - if (precision == 0) { - buf.try_resize(size); - return 0; - } - // Find and remove the decimal point. - auto end = begin + size, p = end; - do { - --p; - } while (is_digit(*p)); - int fraction_size = static_cast(end - p - 1); - std::memmove(p, p + 1, to_unsigned(fraction_size)); - buf.try_resize(size - 1); - return -fraction_size; - } - if (specs.format == float_format::hex) { - buf.try_resize(size + offset); - return 0; - } - // Find and parse the exponent. - auto end = begin + size, exp_pos = end; - do { - --exp_pos; - } while (*exp_pos != 'e'); - char sign = exp_pos[1]; - FMT_ASSERT(sign == '+' || sign == '-', ""); - int exp = 0; - auto p = exp_pos + 2; // Skip 'e' and sign. 
- do { - FMT_ASSERT(is_digit(*p), ""); - exp = exp * 10 + (*p++ - '0'); - } while (p != end); - if (sign == '-') exp = -exp; - int fraction_size = 0; - if (exp_pos != begin + 1) { - // Remove trailing zeros. - auto fraction_end = exp_pos - 1; - while (*fraction_end == '0') --fraction_end; - // Move the fractional part left to get rid of the decimal point. - fraction_size = static_cast(fraction_end - begin - 1); - std::memmove(begin + 1, begin + 2, to_unsigned(fraction_size)); - } - buf.try_resize(to_unsigned(fraction_size) + offset + 1); - return exp - fraction_size; - } + // Suppress the warning about a nonliteral format string. + // Cannot use auto because of a bug in MinGW (#1532). + int (*snprintf_ptr)(char *, size_t, const char *, ...) = FMT_SNPRINTF; + int result = precision >= 0 ? snprintf_ptr(begin, capacity, format, precision, value) + : snprintf_ptr(begin, capacity, format, value); + if (result < 0) { + // The buffer will grow exponentially. + buf.try_reserve(buf.capacity() + 1); + continue; + } + auto size = to_unsigned(result); + // Size equal to capacity means that the last character was truncated. + if (size >= capacity) { + buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. + continue; + } + auto is_digit = [](char c) { + return c >= '0' && c <= '9'; + }; + if (specs.format == float_format::fixed) { + if (precision == 0) { + buf.try_resize(size); + return 0; + } + // Find and remove the decimal point. + auto end = begin + size, p = end; + do { + --p; + } while (is_digit(*p)); + int fraction_size = static_cast(end - p - 1); + std::memmove(p, p + 1, to_unsigned(fraction_size)); + buf.try_resize(size - 1); + return -fraction_size; + } + if (specs.format == float_format::hex) { + buf.try_resize(size + offset); + return 0; + } + // Find and parse the exponent. 
+ auto end = begin + size, exp_pos = end; + do { + --exp_pos; + } while (*exp_pos != 'e'); + char sign = exp_pos[1]; + FMT_ASSERT(sign == '+' || sign == '-', ""); + int exp = 0; + auto p = exp_pos + 2; // Skip 'e' and sign. + do { + FMT_ASSERT(is_digit(*p), ""); + exp = exp * 10 + (*p++ - '0'); + } while (p != end); + if (sign == '-') + exp = -exp; + int fraction_size = 0; + if (exp_pos != begin + 1) { + // Remove trailing zeros. + auto fraction_end = exp_pos - 1; + while (*fraction_end == '0') + --fraction_end; + // Move the fractional part left to get rid of the decimal point. + fraction_size = static_cast(fraction_end - begin - 1); + std::memmove(begin + 1, begin + 2, to_unsigned(fraction_size)); + } + buf.try_resize(to_unsigned(fraction_size) + offset + 1); + return exp - fraction_size; + } } -} // namespace detail - -template <> struct formatter { - FMT_CONSTEXPR format_parse_context::iterator parse( - format_parse_context& ctx) { - return ctx.begin(); - } - - format_context::iterator format(const detail::bigint& n, - format_context& ctx) { - auto out = ctx.out(); - bool first = true; - for (auto i = n.bigits_.size(); i > 0; --i) { - auto value = n.bigits_[i - 1u]; - if (first) { - out = format_to(out, FMT_STRING("{:x}"), value); - first = false; - continue; - } - out = format_to(out, FMT_STRING("{:08x}"), value); - } - if (n.exp_ > 0) - out = format_to(out, FMT_STRING("p{}"), - n.exp_ * detail::bigint::bigit_bits); - return out; - } +} // namespace detail + +template <> +struct formatter { + FMT_CONSTEXPR format_parse_context::iterator parse(format_parse_context &ctx) { + return ctx.begin(); + } + + format_context::iterator format(const detail::bigint &n, format_context &ctx) { + auto out = ctx.out(); + bool first = true; + for (auto i = n.bigits_.size(); i > 0; --i) { + auto value = n.bigits_[i - 1u]; + if (first) { + out = format_to(out, FMT_STRING("{:x}"), value); + first = false; + continue; + } + out = format_to(out, FMT_STRING("{:08x}"), value); + } + 
if (n.exp_ > 0) + out = format_to(out, FMT_STRING("p{}"), n.exp_ * detail::bigint::bigit_bits); + return out; + } }; FMT_FUNC detail::utf8_to_utf16::utf8_to_utf16(string_view s) { - for_each_codepoint(s, [this](uint32_t cp, int error) { - if (error != 0) FMT_THROW(std::runtime_error("invalid utf8")); - if (cp <= 0xFFFF) { - buffer_.push_back(static_cast(cp)); - } else { - cp -= 0x10000; - buffer_.push_back(static_cast(0xD800 + (cp >> 10))); - buffer_.push_back(static_cast(0xDC00 + (cp & 0x3FF))); - } - }); - buffer_.push_back(0); + for_each_codepoint(s, [this](uint32_t cp, int error) { + if (error != 0) + FMT_THROW(std::runtime_error("invalid utf8")); + if (cp <= 0xFFFF) { + buffer_.push_back(static_cast(cp)); + } else { + cp -= 0x10000; + buffer_.push_back(static_cast(0xD800 + (cp >> 10))); + buffer_.push_back(static_cast(0xDC00 + (cp & 0x3FF))); + } + }); + buffer_.push_back(0); } -FMT_FUNC void format_system_error(detail::buffer& out, int error_code, - const char* message) FMT_NOEXCEPT { - FMT_TRY { - auto ec = std::error_code(error_code, std::generic_category()); - write(std::back_inserter(out), std::system_error(ec, message).what()); - return; - } - FMT_CATCH(...) {} - format_error_code(out, error_code, message); +FMT_FUNC void format_system_error(detail::buffer &out, int error_code, const char *message) FMT_NOEXCEPT { + FMT_TRY { + auto ec = std::error_code(error_code, std::generic_category()); + write(std::back_inserter(out), std::system_error(ec, message).what()); + return; + } + FMT_CATCH(...) 
{ + } + format_error_code(out, error_code, message); } -FMT_FUNC void detail::error_handler::on_error(const char* message) { - FMT_THROW(format_error(message)); +FMT_FUNC void detail::error_handler::on_error(const char *message) { + FMT_THROW(format_error(message)); } -FMT_FUNC void report_system_error(int error_code, - const char* message) FMT_NOEXCEPT { - report_error(format_system_error, error_code, message); +FMT_FUNC void report_system_error(int error_code, const char *message) FMT_NOEXCEPT { + report_error(format_system_error, error_code, message); } FMT_FUNC std::string vformat(string_view fmt, format_args args) { - // Don't optimize the "{}" case to keep the binary size small and because it - // can be better optimized in fmt::format anyway. - auto buffer = memory_buffer(); - detail::vformat_to(buffer, fmt, args); - return to_string(buffer); + // Don't optimize the "{}" case to keep the binary size small and because it + // can be better optimized in fmt::format anyway. + auto buffer = memory_buffer(); + detail::vformat_to(buffer, fmt, args); + return to_string(buffer); } #ifdef _WIN32 namespace detail { using dword = conditional_t; -extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( // - void*, const void*, dword, dword*, void*); -} // namespace detail +extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( // + void *, const void *, dword, dword *, void *); +} // namespace detail #endif namespace detail { -FMT_FUNC void print(std::FILE* f, string_view text) { +FMT_FUNC void print(std::FILE *f, string_view text) { #ifdef _WIN32 - auto fd = _fileno(f); - if (_isatty(fd)) { - detail::utf8_to_utf16 u16(string_view(text.data(), text.size())); - auto written = detail::dword(); - if (detail::WriteConsoleW(reinterpret_cast(_get_osfhandle(fd)), - u16.c_str(), static_cast(u16.size()), - &written, nullptr)) { - return; - } - // Fallback to fwrite on failure. It can happen if the output has been - // redirected to NUL. 
- } + auto fd = _fileno(f); + if (_isatty(fd)) { + detail::utf8_to_utf16 u16(string_view(text.data(), text.size())); + auto written = detail::dword(); + if (detail::WriteConsoleW(reinterpret_cast(_get_osfhandle(fd)), u16.c_str(), + static_cast(u16.size()), &written, nullptr)) { + return; + } + // Fallback to fwrite on failure. It can happen if the output has been + // redirected to NUL. + } #endif - detail::fwrite_fully(text.data(), 1, text.size(), f); + detail::fwrite_fully(text.data(), 1, text.size(), f); } -} // namespace detail +} // namespace detail -FMT_FUNC void vprint(std::FILE* f, string_view format_str, format_args args) { - memory_buffer buffer; - detail::vformat_to(buffer, format_str, args); - detail::print(f, {buffer.data(), buffer.size()}); +FMT_FUNC void vprint(std::FILE *f, string_view format_str, format_args args) { + memory_buffer buffer; + detail::vformat_to(buffer, format_str, args); + detail::print(f, {buffer.data(), buffer.size()}); } #ifdef _WIN32 // Print assuming legacy (non-Unicode) encoding. 
-FMT_FUNC void detail::vprint_mojibake(std::FILE* f, string_view format_str, - format_args args) { - memory_buffer buffer; - detail::vformat_to(buffer, format_str, - basic_format_args>(args)); - fwrite_fully(buffer.data(), 1, buffer.size(), f); +FMT_FUNC void detail::vprint_mojibake(std::FILE *f, string_view format_str, format_args args) { + memory_buffer buffer; + detail::vformat_to(buffer, format_str, basic_format_args>(args)); + fwrite_fully(buffer.data(), 1, buffer.size(), f); } #endif FMT_FUNC void vprint(string_view format_str, format_args args) { - vprint(stdout, format_str, args); + vprint(stdout, format_str, args); } FMT_END_NAMESPACE -#endif // FMT_FORMAT_INL_H_ +#endif // FMT_FORMAT_INL_H_ diff --git a/mooncake-store/include/cachelib_memory_allocator/include/fmt/format.h b/mooncake-store/include/cachelib_memory_allocator/include/fmt/format.h index 5398a23a8..fbb85cd36 100644 --- a/mooncake-store/include/cachelib_memory_allocator/include/fmt/format.h +++ b/mooncake-store/include/cachelib_memory_allocator/include/fmt/format.h @@ -33,111 +33,112 @@ #ifndef FMT_FORMAT_H_ #define FMT_FORMAT_H_ -#include // std::signbit -#include // uint32_t -#include // std::numeric_limits -#include // std::uninitialized_copy -#include // std::runtime_error -#include // std::system_error -#include // std::swap +#include // std::signbit +#include // uint32_t +#include // std::numeric_limits +#include // std::uninitialized_copy +#include // std::runtime_error +#include // std::system_error +#include // std::swap #include "core.h" #ifdef __INTEL_COMPILER -# define FMT_ICC_VERSION __INTEL_COMPILER +#define FMT_ICC_VERSION __INTEL_COMPILER #elif defined(__ICL) -# define FMT_ICC_VERSION __ICL +#define FMT_ICC_VERSION __ICL #else -# define FMT_ICC_VERSION 0 +#define FMT_ICC_VERSION 0 #endif #ifdef __NVCC__ -# define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) +#define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) #else -# define 
FMT_CUDA_VERSION 0 +#define FMT_CUDA_VERSION 0 #endif #ifdef __has_builtin -# define FMT_HAS_BUILTIN(x) __has_builtin(x) +#define FMT_HAS_BUILTIN(x) __has_builtin(x) #else -# define FMT_HAS_BUILTIN(x) 0 +#define FMT_HAS_BUILTIN(x) 0 #endif #if FMT_GCC_VERSION || FMT_CLANG_VERSION -# define FMT_NOINLINE __attribute__((noinline)) +#define FMT_NOINLINE __attribute__((noinline)) #else -# define FMT_NOINLINE +#define FMT_NOINLINE #endif #if FMT_MSC_VER -# define FMT_MSC_DEFAULT = default +#define FMT_MSC_DEFAULT = default #else -# define FMT_MSC_DEFAULT +#define FMT_MSC_DEFAULT #endif #ifndef FMT_THROW -# if FMT_EXCEPTIONS -# if FMT_MSC_VER || FMT_NVCC +#if FMT_EXCEPTIONS +#if FMT_MSC_VER || FMT_NVCC FMT_BEGIN_NAMESPACE namespace detail { -template inline void do_throw(const Exception& x) { - // Silence unreachable code warnings in MSVC and NVCC because these - // are nearly impossible to fix in a generic code. - volatile bool b = true; - if (b) throw x; -} -} // namespace detail +template +inline void do_throw(const Exception &x) { + // Silence unreachable code warnings in MSVC and NVCC because these + // are nearly impossible to fix in a generic code. 
+ volatile bool b = true; + if (b) + throw x; +} +} // namespace detail FMT_END_NAMESPACE -# define FMT_THROW(x) detail::do_throw(x) -# else -# define FMT_THROW(x) throw x -# endif -# else -# define FMT_THROW(x) \ - do { \ - FMT_ASSERT(false, (x).what()); \ - } while (false) -# endif +#define FMT_THROW(x) detail::do_throw(x) +#else +#define FMT_THROW(x) throw x +#endif +#else +#define FMT_THROW(x) \ + do { \ + FMT_ASSERT(false, (x).what()); \ + } while (false) +#endif #endif #if FMT_EXCEPTIONS -# define FMT_TRY try -# define FMT_CATCH(x) catch (x) +#define FMT_TRY try +#define FMT_CATCH(x) catch (x) #else -# define FMT_TRY if (true) -# define FMT_CATCH(x) if (false) +#define FMT_TRY if (true) +#define FMT_CATCH(x) if (false) #endif #ifndef FMT_DEPRECATED -# if FMT_HAS_CPP14_ATTRIBUTE(deprecated) || FMT_MSC_VER >= 1900 -# define FMT_DEPRECATED [[deprecated]] -# else -# if (defined(__GNUC__) && !defined(__LCC__)) || defined(__clang__) -# define FMT_DEPRECATED __attribute__((deprecated)) -# elif FMT_MSC_VER -# define FMT_DEPRECATED __declspec(deprecated) -# else -# define FMT_DEPRECATED /* deprecated */ -# endif -# endif +#if FMT_HAS_CPP14_ATTRIBUTE(deprecated) || FMT_MSC_VER >= 1900 +#define FMT_DEPRECATED [[deprecated]] +#else +#if (defined(__GNUC__) && !defined(__LCC__)) || defined(__clang__) +#define FMT_DEPRECATED __attribute__((deprecated)) +#elif FMT_MSC_VER +#define FMT_DEPRECATED __declspec(deprecated) +#else +#define FMT_DEPRECATED /* deprecated */ +#endif +#endif #endif // Workaround broken [[deprecated]] in the Intel, PGI and NVCC compilers. #if FMT_ICC_VERSION || defined(__PGI) || FMT_NVCC -# define FMT_DEPRECATED_ALIAS +#define FMT_DEPRECATED_ALIAS #else -# define FMT_DEPRECATED_ALIAS FMT_DEPRECATED +#define FMT_DEPRECATED_ALIAS FMT_DEPRECATED #endif #ifndef FMT_USE_USER_DEFINED_LITERALS // EDG based compilers (Intel, NVIDIA, Elbrus, etc), GCC and MSVC support UDLs. 
-# if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || \ - FMT_MSC_VER >= 1900) && \ - (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) -# define FMT_USE_USER_DEFINED_LITERALS 1 -# else -# define FMT_USE_USER_DEFINED_LITERALS 0 -# endif +#if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || FMT_MSC_VER >= 1900) && \ + (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) +#define FMT_USE_USER_DEFINED_LITERALS 1 +#else +#define FMT_USE_USER_DEFINED_LITERALS 0 +#endif #endif // Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of @@ -145,26 +146,26 @@ FMT_END_NAMESPACE // largest integer type. This results in a reduction in binary size but will // cause a decrease in integer formatting performance. #if !defined(FMT_REDUCE_INT_INSTANTIATIONS) -# define FMT_REDUCE_INT_INSTANTIATIONS 0 +#define FMT_REDUCE_INT_INSTANTIATIONS 0 #endif // __builtin_clz is broken in clang with Microsoft CodeGen: // https://github.com/fmtlib/fmt/issues/519 #if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clz)) && !FMT_MSC_VER -# define FMT_BUILTIN_CLZ(n) __builtin_clz(n) +#define FMT_BUILTIN_CLZ(n) __builtin_clz(n) #endif #if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clzll)) && !FMT_MSC_VER -# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) +#define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) #endif #if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_ctz)) -# define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) +#define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) #endif #if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_ctzll)) -# define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) +#define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) #endif #if FMT_MSC_VER -# include // _BitScanReverse[64], _BitScanForward[64], _umul128 +#include // _BitScanReverse[64], _BitScanForward[64], _umul128 #endif // Some compilers masquerade as both MSVC and GCC-likes or otherwise support @@ -174,177 +175,193 @@ FMT_END_NAMESPACE FMT_BEGIN_NAMESPACE 
namespace detail { // Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. -# if !defined(__clang__) -# pragma managed(push, off) -# pragma intrinsic(_BitScanForward) -# pragma intrinsic(_BitScanReverse) -# if defined(_WIN64) -# pragma intrinsic(_BitScanForward64) -# pragma intrinsic(_BitScanReverse64) -# endif -# endif +#if !defined(__clang__) +#pragma managed(push, off) +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) +#if defined(_WIN64) +#pragma intrinsic(_BitScanForward64) +#pragma intrinsic(_BitScanReverse64) +#endif +#endif inline auto clz(uint32_t x) -> int { - unsigned long r = 0; - _BitScanReverse(&r, x); - FMT_ASSERT(x != 0, ""); - // Static analysis complains about using uninitialized data - // "r", but the only way that can happen is if "x" is 0, - // which the callers guarantee to not happen. - FMT_MSC_WARNING(suppress : 6102) - return 31 ^ static_cast(r); + unsigned long r = 0; + _BitScanReverse(&r, x); + FMT_ASSERT(x != 0, ""); + // Static analysis complains about using uninitialized data + // "r", but the only way that can happen is if "x" is 0, + // which the callers guarantee to not happen. + FMT_MSC_WARNING(suppress : 6102) + return 31 ^ static_cast(r); } -# define FMT_BUILTIN_CLZ(n) detail::clz(n) +#define FMT_BUILTIN_CLZ(n) detail::clz(n) inline auto clzll(uint64_t x) -> int { - unsigned long r = 0; -# ifdef _WIN64 - _BitScanReverse64(&r, x); -# else - // Scan the high 32 bits. - if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 ^ (r + 32); - // Scan the low 32 bits. - _BitScanReverse(&r, static_cast(x)); -# endif - FMT_ASSERT(x != 0, ""); - FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. - return 63 ^ static_cast(r); -} -# define FMT_BUILTIN_CLZLL(n) detail::clzll(n) + unsigned long r = 0; +#ifdef _WIN64 + _BitScanReverse64(&r, x); +#else + // Scan the high 32 bits. + if (_BitScanReverse(&r, static_cast(x >> 32))) + return 63 ^ (r + 32); + // Scan the low 32 bits. 
+ _BitScanReverse(&r, static_cast(x)); +#endif + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return 63 ^ static_cast(r); +} +#define FMT_BUILTIN_CLZLL(n) detail::clzll(n) inline auto ctz(uint32_t x) -> int { - unsigned long r = 0; - _BitScanForward(&r, x); - FMT_ASSERT(x != 0, ""); - FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. - return static_cast(r); + unsigned long r = 0; + _BitScanForward(&r, x); + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return static_cast(r); } -# define FMT_BUILTIN_CTZ(n) detail::ctz(n) +#define FMT_BUILTIN_CTZ(n) detail::ctz(n) inline auto ctzll(uint64_t x) -> int { - unsigned long r = 0; - FMT_ASSERT(x != 0, ""); - FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. -# ifdef _WIN64 - _BitScanForward64(&r, x); -# else - // Scan the low 32 bits. - if (_BitScanForward(&r, static_cast(x))) return static_cast(r); - // Scan the high 32 bits. - _BitScanForward(&r, static_cast(x >> 32)); - r += 32; -# endif - return static_cast(r); -} -# define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) -# if !defined(__clang__) -# pragma managed(pop) -# endif -} // namespace detail + unsigned long r = 0; + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. +#ifdef _WIN64 + _BitScanForward64(&r, x); +#else + // Scan the low 32 bits. + if (_BitScanForward(&r, static_cast(x))) + return static_cast(r); + // Scan the high 32 bits. 
+ _BitScanForward(&r, static_cast(x >> 32)); + r += 32; +#endif + return static_cast(r); +} +#define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) +#if !defined(__clang__) +#pragma managed(pop) +#endif +} // namespace detail FMT_END_NAMESPACE #endif FMT_BEGIN_NAMESPACE namespace detail { -#if __cplusplus >= 202002L || \ - (__cplusplus >= 201709L && FMT_GCC_VERSION >= 1002) -# define FMT_CONSTEXPR20 constexpr +#if __cplusplus >= 202002L || (__cplusplus >= 201709L && FMT_GCC_VERSION >= 1002) +#define FMT_CONSTEXPR20 constexpr #else -# define FMT_CONSTEXPR20 +#define FMT_CONSTEXPR20 #endif // An equivalent of `*reinterpret_cast(&source)` that doesn't have // undefined behavior (e.g. due to type aliasing). // Example: uint64_t d = bit_cast(2.718); template -inline auto bit_cast(const Source& source) -> Dest { - static_assert(sizeof(Dest) == sizeof(Source), "size mismatch"); - Dest dest; - std::memcpy(&dest, &source, sizeof(dest)); - return dest; +inline auto bit_cast(const Source &source) -> Dest { + static_assert(sizeof(Dest) == sizeof(Source), "size mismatch"); + Dest dest; + std::memcpy(&dest, &source, sizeof(dest)); + return dest; } inline auto is_big_endian() -> bool { - const auto u = 1u; - struct bytes { - char data[sizeof(u)]; - }; - return bit_cast(u).data[0] == 0; + const auto u = 1u; + struct bytes { + char data[sizeof(u)]; + }; + return bit_cast(u).data[0] == 0; } // A fallback implementation of uintptr_t for systems that lack it. 
struct fallback_uintptr { - unsigned char value[sizeof(void*)]; - - fallback_uintptr() = default; - explicit fallback_uintptr(const void* p) { - *this = bit_cast(p); - if (is_big_endian()) { - for (size_t i = 0, j = sizeof(void*) - 1; i < j; ++i, --j) - std::swap(value[i], value[j]); - } - } + unsigned char value[sizeof(void *)]; + + fallback_uintptr() = default; + explicit fallback_uintptr(const void *p) { + *this = bit_cast(p); + if (is_big_endian()) { + for (size_t i = 0, j = sizeof(void *) - 1; i < j; ++i, --j) + std::swap(value[i], value[j]); + } + } }; #ifdef UINTPTR_MAX using uintptr_t = ::uintptr_t; -inline auto to_uintptr(const void* p) -> uintptr_t { - return bit_cast(p); +inline auto to_uintptr(const void *p) -> uintptr_t { + return bit_cast(p); } #else using uintptr_t = fallback_uintptr; -inline auto to_uintptr(const void* p) -> fallback_uintptr { - return fallback_uintptr(p); +inline auto to_uintptr(const void *p) -> fallback_uintptr { + return fallback_uintptr(p); } #endif // Returns the largest possible value for type T. Same as // std::numeric_limits::max() but shorter and not affected by the max macro. -template constexpr auto max_value() -> T { - return (std::numeric_limits::max)(); +template +constexpr auto max_value() -> T { + return (std::numeric_limits::max)(); } -template constexpr auto num_bits() -> int { - return std::numeric_limits::digits; +template +constexpr auto num_bits() -> int { + return std::numeric_limits::digits; } // std::numeric_limits::digits may return 0 for 128-bit ints. 
-template <> constexpr auto num_bits() -> int { return 128; } -template <> constexpr auto num_bits() -> int { return 128; } -template <> constexpr auto num_bits() -> int { - return static_cast(sizeof(void*) * - std::numeric_limits::digits); +template <> +constexpr auto num_bits() -> int { + return 128; +} +template <> +constexpr auto num_bits() -> int { + return 128; +} +template <> +constexpr auto num_bits() -> int { + return static_cast(sizeof(void *) * std::numeric_limits::digits); } FMT_INLINE void assume(bool condition) { - (void)condition; + (void)condition; #if FMT_HAS_BUILTIN(__builtin_assume) - __builtin_assume(condition); + __builtin_assume(condition); #endif } // An approximation of iterator_t for pre-C++20 systems. template -using iterator_t = decltype(std::begin(std::declval())); -template using sentinel_t = decltype(std::end(std::declval())); +using iterator_t = decltype(std::begin(std::declval())); +template +using sentinel_t = decltype(std::end(std::declval())); // A workaround for std::string not having mutable data() until C++17. template -inline auto get_data(std::basic_string& s) -> Char* { - return &s[0]; +inline auto get_data(std::basic_string &s) -> Char * { + return &s[0]; } template -inline auto get_data(Container& c) -> typename Container::value_type* { - return c.data(); +inline auto get_data(Container &c) -> typename Container::value_type * { + return c.data(); } #if defined(_SECURE_SCL) && _SECURE_SCL // Make a checked iterator to avoid MSVC warnings. 
-template using checked_ptr = stdext::checked_array_iterator; -template auto make_checked(T* p, size_t size) -> checked_ptr { - return {p, size}; +template +using checked_ptr = stdext::checked_array_iterator; +template +auto make_checked(T *p, size_t size) -> checked_ptr { + return {p, size}; } #else -template using checked_ptr = T*; -template inline auto make_checked(T* p, size_t) -> T* { return p; } +template +using checked_ptr = T *; +template +inline auto make_checked(T *p, size_t) -> T * { + return p; +} #endif // Attempts to reserve space for n extra characters in the output range. @@ -354,69 +371,68 @@ template ::value)> __attribute__((no_sanitize("undefined"))) #endif inline auto -reserve(std::back_insert_iterator it, size_t n) - -> checked_ptr { - Container& c = get_container(it); - size_t size = c.size(); - c.resize(size + n); - return make_checked(get_data(c) + size, n); +reserve(std::back_insert_iterator it, size_t n) -> checked_ptr { + Container &c = get_container(it); + size_t size = c.size(); + c.resize(size + n); + return make_checked(get_data(c) + size, n); } template inline auto reserve(buffer_appender it, size_t n) -> buffer_appender { - buffer& buf = get_container(it); - buf.try_reserve(buf.size() + n); - return it; + buffer &buf = get_container(it); + buf.try_reserve(buf.size() + n); + return it; } template -constexpr auto reserve(Iterator& it, size_t) -> Iterator& { - return it; +constexpr auto reserve(Iterator &it, size_t) -> Iterator & { + return it; } template -using reserve_iterator = - remove_reference_t(), 0))>; +using reserve_iterator = remove_reference_t(), 0))>; template -constexpr auto to_pointer(OutputIt, size_t) -> T* { - return nullptr; +constexpr auto to_pointer(OutputIt, size_t) -> T * { + return nullptr; } -template auto to_pointer(buffer_appender it, size_t n) -> T* { - buffer& buf = get_container(it); - auto size = buf.size(); - if (buf.capacity() < size + n) return nullptr; - buf.try_resize(size + n); - return buf.data() + 
size; +template +auto to_pointer(buffer_appender it, size_t n) -> T * { + buffer &buf = get_container(it); + auto size = buf.size(); + if (buf.capacity() < size + n) + return nullptr; + buf.try_resize(size + n); + return buf.data() + size; } template ::value)> -inline auto base_iterator(std::back_insert_iterator& it, - checked_ptr) +inline auto base_iterator(std::back_insert_iterator &it, checked_ptr) -> std::back_insert_iterator { - return it; + return it; } template constexpr auto base_iterator(Iterator, Iterator it) -> Iterator { - return it; + return it; } // is spectacularly slow to compile in C++20 so use a simple fill_n // instead (#1998). template -FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value) - -> OutputIt { - for (Size i = 0; i < count; ++i) *out++ = value; - return out; +FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T &value) -> OutputIt { + for (Size i = 0; i < count; ++i) + *out++ = value; + return out; } template -FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* { - if (is_constant_evaluated()) { - return fill_n(out, count, value); - } - std::memset(out, value, to_unsigned(count)); - return out + count; +FMT_CONSTEXPR20 auto fill_n(T *out, Size count, char value) -> T * { + if (is_constant_evaluated()) { + return fill_n(out, count, value); + } + std::memset(out, value, to_unsigned(count)); + return out + count; } #ifdef __cpp_char8_t @@ -426,9 +442,8 @@ enum char8_type : unsigned char {}; #endif template -FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, - OutputIt out) -> OutputIt { - return copy_str(begin, end, out); +FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, OutputIt out) -> OutputIt { + return copy_str(begin, end, out); } // A public domain branchless UTF-8 decoder by Christopher Wellons: @@ -448,149 +463,146 @@ FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, * occurs, this pointer will be a guess that 
depends on the particular * error, but it will always advance at least one byte. */ -FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e) - -> const char* { - constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; - constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; - constexpr const int shiftc[] = {0, 18, 12, 6, 0}; - constexpr const int shifte[] = {0, 6, 4, 2, 0}; - - int len = code_point_length(s); - const char* next = s + len; - - // Assume a four-byte character and load four bytes. Unused bits are - // shifted out. - *c = uint32_t(s[0] & masks[len]) << 18; - *c |= uint32_t(s[1] & 0x3f) << 12; - *c |= uint32_t(s[2] & 0x3f) << 6; - *c |= uint32_t(s[3] & 0x3f) << 0; - *c >>= shiftc[len]; - - // Accumulate the various error conditions. - using uchar = unsigned char; - *e = (*c < mins[len]) << 6; // non-canonical encoding - *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? - *e |= (*c > 0x10FFFF) << 8; // out of range? - *e |= (uchar(s[1]) & 0xc0) >> 2; - *e |= (uchar(s[2]) & 0xc0) >> 4; - *e |= uchar(s[3]) >> 6; - *e ^= 0x2a; // top two bits of each tail byte correct? - *e >>= shifte[len]; - - return next; +FMT_CONSTEXPR inline auto utf8_decode(const char *s, uint32_t *c, int *e) -> const char * { + constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; + constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; + constexpr const int shiftc[] = {0, 18, 12, 6, 0}; + constexpr const int shifte[] = {0, 6, 4, 2, 0}; + + int len = code_point_length(s); + const char *next = s + len; + + // Assume a four-byte character and load four bytes. Unused bits are + // shifted out. + *c = uint32_t(s[0] & masks[len]) << 18; + *c |= uint32_t(s[1] & 0x3f) << 12; + *c |= uint32_t(s[2] & 0x3f) << 6; + *c |= uint32_t(s[3] & 0x3f) << 0; + *c >>= shiftc[len]; + + // Accumulate the various error conditions. 
+ using uchar = unsigned char; + *e = (*c < mins[len]) << 6; // non-canonical encoding + *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? + *e |= (*c > 0x10FFFF) << 8; // out of range? + *e |= (uchar(s[1]) & 0xc0) >> 2; + *e |= (uchar(s[2]) & 0xc0) >> 4; + *e |= uchar(s[3]) >> 6; + *e ^= 0x2a; // top two bits of each tail byte correct? + *e >>= shifte[len]; + + return next; } template FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) { - auto decode = [f](const char* p) { - auto cp = uint32_t(); - auto error = 0; - p = utf8_decode(p, &cp, &error); - f(cp, error); - return p; - }; - auto p = s.data(); - const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. - if (s.size() >= block_size) { - for (auto end = p + s.size() - block_size + 1; p < end;) p = decode(p); - } - if (auto num_chars_left = s.data() + s.size() - p) { - char buf[2 * block_size - 1] = {}; - copy_str(p, p + num_chars_left, buf); - p = buf; - do { - p = decode(p); - } while (p - buf < num_chars_left); - } + auto decode = [f](const char *p) { + auto cp = uint32_t(); + auto error = 0; + p = utf8_decode(p, &cp, &error); + f(cp, error); + return p; + }; + auto p = s.data(); + const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. + if (s.size() >= block_size) { + for (auto end = p + s.size() - block_size + 1; p < end;) + p = decode(p); + } + if (auto num_chars_left = s.data() + s.size() - p) { + char buf[2 * block_size - 1] = {}; + copy_str(p, p + num_chars_left, buf); + p = buf; + do { + p = decode(p); + } while (p - buf < num_chars_left); + } } template inline auto compute_width(basic_string_view s) -> size_t { - return s.size(); + return s.size(); } // Computes approximate display width of a UTF-8 string. FMT_CONSTEXPR inline size_t compute_width(string_view s) { - size_t num_code_points = 0; - // It is not a lambda for compatibility with C++14. 
- struct count_code_points { - size_t* count; - FMT_CONSTEXPR void operator()(uint32_t cp, int error) const { - *count += detail::to_unsigned( - 1 + - (error == 0 && cp >= 0x1100 && - (cp <= 0x115f || // Hangul Jamo init. consonants - cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET - cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET - // CJK ... Yi except IDEOGRAPHIC HALF FILL SPACE: - (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || - (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables - (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs - (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms - (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms - (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms - (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms - (cp >= 0x20000 && cp <= 0x2fffd) || // CJK - (cp >= 0x30000 && cp <= 0x3fffd) || - // Miscellaneous Symbols and Pictographs + Emoticons: - (cp >= 0x1f300 && cp <= 0x1f64f) || - // Supplemental Symbols and Pictographs: - (cp >= 0x1f900 && cp <= 0x1f9ff)))); - } - }; - for_each_codepoint(s, count_code_points{&num_code_points}); - return num_code_points; + size_t num_code_points = 0; + // It is not a lambda for compatibility with C++14. + struct count_code_points { + size_t *count; + FMT_CONSTEXPR void operator()(uint32_t cp, int error) const { + *count += detail::to_unsigned(1 + (error == 0 && cp >= 0x1100 && + (cp <= 0x115f || // Hangul Jamo init. consonants + cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET + cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET + // CJK ... 
Yi except IDEOGRAPHIC HALF FILL SPACE: + (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || + (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables + (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs + (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms + (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms + (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms + (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms + (cp >= 0x20000 && cp <= 0x2fffd) || // CJK + (cp >= 0x30000 && cp <= 0x3fffd) || + // Miscellaneous Symbols and Pictographs + Emoticons: + (cp >= 0x1f300 && cp <= 0x1f64f) || + // Supplemental Symbols and Pictographs: + (cp >= 0x1f900 && cp <= 0x1f9ff)))); + } + }; + for_each_codepoint(s, count_code_points {&num_code_points}); + return num_code_points; } inline auto compute_width(basic_string_view s) -> size_t { - return compute_width(basic_string_view( - reinterpret_cast(s.data()), s.size())); + return compute_width(basic_string_view(reinterpret_cast(s.data()), s.size())); } template inline auto code_point_index(basic_string_view s, size_t n) -> size_t { - size_t size = s.size(); - return n < size ? n : size; + size_t size = s.size(); + return n < size ? n : size; } // Calculates the index of the nth code point in a UTF-8 string. 
-inline auto code_point_index(basic_string_view s, size_t n) - -> size_t { - const char8_type* data = s.data(); - size_t num_code_points = 0; - for (size_t i = 0, size = s.size(); i != size; ++i) { - if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) return i; - } - return s.size(); +inline auto code_point_index(basic_string_view s, size_t n) -> size_t { + const char8_type *data = s.data(); + size_t num_code_points = 0; + for (size_t i = 0, size = s.size(); i != size; ++i) { + if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) + return i; + } + return s.size(); } template -using is_fast_float = bool_constant::is_iec559 && - sizeof(T) <= sizeof(double)>; +using is_fast_float = bool_constant::is_iec559 && sizeof(T) <= sizeof(double)>; #ifndef FMT_USE_FULL_CACHE_DRAGONBOX -# define FMT_USE_FULL_CACHE_DRAGONBOX 0 +#define FMT_USE_FULL_CACHE_DRAGONBOX 0 #endif template template -void buffer::append(const U* begin, const U* end) { - while (begin != end) { - auto count = to_unsigned(end - begin); - try_reserve(size_ + count); - auto free_cap = capacity_ - size_; - if (free_cap < count) count = free_cap; - std::uninitialized_copy_n(begin, count, make_checked(ptr_ + size_, count)); - size_ += count; - begin += count; - } +void buffer::append(const U *begin, const U *end) { + while (begin != end) { + auto count = to_unsigned(end - begin); + try_reserve(size_ + count); + auto free_cap = capacity_ - size_; + if (free_cap < count) + count = free_cap; + std::uninitialized_copy_n(begin, count, make_checked(ptr_ + size_, count)); + size_ += count; + begin += count; + } } template struct is_locale : std::false_type {}; template struct is_locale> : std::true_type {}; -} // namespace detail +} // namespace detail FMT_MODULE_EXPORT_BEGIN @@ -619,142 +631,149 @@ enum { inline_buffer_size = 500 }; The output can be converted to an ``std::string`` with ``to_string(out)``. 
\endrst */ -template > +template > class basic_memory_buffer final : public detail::buffer { - private: - T store_[SIZE]; - - // Don't inherit from Allocator avoid generating type_info for it. - Allocator alloc_; - - // Deallocate memory allocated by the buffer. - void deallocate() { - T* data = this->data(); - if (data != store_) alloc_.deallocate(data, this->capacity()); - } - - protected: - void grow(size_t size) final FMT_OVERRIDE; - - public: - using value_type = T; - using const_reference = const T&; - - explicit basic_memory_buffer(const Allocator& alloc = Allocator()) - : alloc_(alloc) { - this->set(store_, SIZE); - } - ~basic_memory_buffer() { deallocate(); } - - private: - // Move data from other to this buffer. - void move(basic_memory_buffer& other) { - alloc_ = std::move(other.alloc_); - T* data = other.data(); - size_t size = other.size(), capacity = other.capacity(); - if (data == other.store_) { - this->set(store_, capacity); - std::uninitialized_copy(other.store_, other.store_ + size, - detail::make_checked(store_, capacity)); - } else { - this->set(data, capacity); - // Set pointer to the inline array so that delete is not called - // when deallocating. - other.set(other.store_, 0); - } - this->resize(size); - } - - public: - /** - \rst - Constructs a :class:`fmt::basic_memory_buffer` object moving the content - of the other object to it. - \endrst - */ - basic_memory_buffer(basic_memory_buffer&& other) FMT_NOEXCEPT { move(other); } - - /** - \rst - Moves the content of the other ``basic_memory_buffer`` object to this one. - \endrst - */ - auto operator=(basic_memory_buffer&& other) FMT_NOEXCEPT - -> basic_memory_buffer& { - FMT_ASSERT(this != &other, ""); - deallocate(); - move(other); - return *this; - } - - // Returns a copy of the allocator associated with this buffer. - auto get_allocator() const -> Allocator { return alloc_; } - - /** - Resizes the buffer to contain *count* elements. If T is a POD type new - elements may not be initialized. 
- */ - void resize(size_t count) { this->try_resize(count); } - - /** Increases the buffer capacity to *new_capacity*. */ - void reserve(size_t new_capacity) { this->try_reserve(new_capacity); } - - // Directly append data into the buffer - using detail::buffer::append; - template - void append(const ContiguousRange& range) { - append(range.data(), range.data() + range.size()); - } +private: + T store_[SIZE]; + + // Don't inherit from Allocator avoid generating type_info for it. + Allocator alloc_; + + // Deallocate memory allocated by the buffer. + void deallocate() { + T *data = this->data(); + if (data != store_) + alloc_.deallocate(data, this->capacity()); + } + +protected: + void grow(size_t size) final FMT_OVERRIDE; + +public: + using value_type = T; + using const_reference = const T &; + + explicit basic_memory_buffer(const Allocator &alloc = Allocator()) : alloc_(alloc) { + this->set(store_, SIZE); + } + ~basic_memory_buffer() { + deallocate(); + } + +private: + // Move data from other to this buffer. + void move(basic_memory_buffer &other) { + alloc_ = std::move(other.alloc_); + T *data = other.data(); + size_t size = other.size(), capacity = other.capacity(); + if (data == other.store_) { + this->set(store_, capacity); + std::uninitialized_copy(other.store_, other.store_ + size, detail::make_checked(store_, capacity)); + } else { + this->set(data, capacity); + // Set pointer to the inline array so that delete is not called + // when deallocating. + other.set(other.store_, 0); + } + this->resize(size); + } + +public: + /** + \rst + Constructs a :class:`fmt::basic_memory_buffer` object moving the content + of the other object to it. + \endrst + */ + basic_memory_buffer(basic_memory_buffer &&other) FMT_NOEXCEPT { + move(other); + } + + /** + \rst + Moves the content of the other ``basic_memory_buffer`` object to this one. 
+ \endrst + */ + auto operator=(basic_memory_buffer &&other) FMT_NOEXCEPT -> basic_memory_buffer & { + FMT_ASSERT(this != &other, ""); + deallocate(); + move(other); + return *this; + } + + // Returns a copy of the allocator associated with this buffer. + auto get_allocator() const -> Allocator { + return alloc_; + } + + /** + Resizes the buffer to contain *count* elements. If T is a POD type new + elements may not be initialized. + */ + void resize(size_t count) { + this->try_resize(count); + } + + /** Increases the buffer capacity to *new_capacity*. */ + void reserve(size_t new_capacity) { + this->try_reserve(new_capacity); + } + + // Directly append data into the buffer + using detail::buffer::append; + template + void append(const ContiguousRange &range) { + append(range.data(), range.data() + range.size()); + } }; template void basic_memory_buffer::grow(size_t size) { #ifdef FMT_FUZZ - if (size > 5000) throw std::runtime_error("fuzz mode - won't grow that much"); + if (size > 5000) + throw std::runtime_error("fuzz mode - won't grow that much"); #endif - const size_t max_size = std::allocator_traits::max_size(alloc_); - size_t old_capacity = this->capacity(); - size_t new_capacity = old_capacity + old_capacity / 2; - if (size > new_capacity) - new_capacity = size; - else if (new_capacity > max_size) - new_capacity = size > max_size ? size : max_size; - T* old_data = this->data(); - T* new_data = - std::allocator_traits::allocate(alloc_, new_capacity); - // The following code doesn't throw, so the raw pointer above doesn't leak. - std::uninitialized_copy(old_data, old_data + this->size(), - detail::make_checked(new_data, new_capacity)); - this->set(new_data, new_capacity); - // deallocate must not throw according to the standard, but even if it does, - // the buffer already uses the new storage and will deallocate it in - // destructor. 
- if (old_data != store_) alloc_.deallocate(old_data, old_capacity); + const size_t max_size = std::allocator_traits::max_size(alloc_); + size_t old_capacity = this->capacity(); + size_t new_capacity = old_capacity + old_capacity / 2; + if (size > new_capacity) + new_capacity = size; + else if (new_capacity > max_size) + new_capacity = size > max_size ? size : max_size; + T *old_data = this->data(); + T *new_data = std::allocator_traits::allocate(alloc_, new_capacity); + // The following code doesn't throw, so the raw pointer above doesn't leak. + std::uninitialized_copy(old_data, old_data + this->size(), detail::make_checked(new_data, new_capacity)); + this->set(new_data, new_capacity); + // deallocate must not throw according to the standard, but even if it does, + // the buffer already uses the new storage and will deallocate it in + // destructor. + if (old_data != store_) + alloc_.deallocate(old_data, old_capacity); } using memory_buffer = basic_memory_buffer; template -struct is_contiguous> : std::true_type { -}; +struct is_contiguous> : std::true_type {}; namespace detail { -FMT_API void print(std::FILE*, string_view); +FMT_API void print(std::FILE *, string_view); } /** A formatting error such as invalid format string. 
*/ FMT_CLASS_API class FMT_API format_error : public std::runtime_error { - public: - explicit format_error(const char* message) : std::runtime_error(message) {} - explicit format_error(const std::string& message) - : std::runtime_error(message) {} - format_error(const format_error&) = default; - format_error& operator=(const format_error&) = default; - format_error(format_error&&) = default; - format_error& operator=(format_error&&) = default; - ~format_error() FMT_NOEXCEPT FMT_OVERRIDE FMT_MSC_DEFAULT; +public: + explicit format_error(const char *message) : std::runtime_error(message) { + } + explicit format_error(const std::string &message) : std::runtime_error(message) { + } + format_error(const format_error &) = default; + format_error &operator=(const format_error &) = default; + format_error(format_error &&) = default; + format_error &operator=(format_error &&) = default; + ~format_error() FMT_NOEXCEPT FMT_OVERRIDE FMT_MSC_DEFAULT; }; /** @@ -766,126 +785,111 @@ class FMT_API format_error : public std::runtime_error { \endrst */ template > -FMT_INLINE auto make_args_checked(const S& fmt, - const remove_reference_t&... 
args) +FMT_INLINE auto make_args_checked(const S &fmt, const remove_reference_t &...args) -> format_arg_store, remove_reference_t...> { - static_assert( - detail::count<( - std::is_base_of>::value && - std::is_reference::value)...>() == 0, - "passing views as lvalues is disallowed"); - detail::check_format_string(fmt); - return {args...}; + static_assert(detail::count<(std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); + detail::check_format_string(fmt); + return {args...}; } // compile-time support namespace detail_exported { #if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS -template struct fixed_string { - constexpr fixed_string(const Char (&str)[N]) { - detail::copy_str(static_cast(str), - str + N, data); - } - Char data[N]{}; +template +struct fixed_string { + constexpr fixed_string(const Char (&str)[N]) { + detail::copy_str(static_cast(str), str + N, data); + } + Char data[N] {}; }; #endif // Converts a compile-time string to basic_string_view. template -constexpr auto compile_string_to_view(const Char (&s)[N]) - -> basic_string_view { - // Remove trailing NUL character if needed. Won't be present if this is used - // with a raw character array (i.e. not defined as a string). - return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 1 : 0)}; +constexpr auto compile_string_to_view(const Char (&s)[N]) -> basic_string_view { + // Remove trailing NUL character if needed. Won't be present if this is used + // with a raw character array (i.e. not defined as a string). + return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 
1 : 0)}; } template -constexpr auto compile_string_to_view(detail::std_string_view s) - -> basic_string_view { - return {s.data(), s.size()}; +constexpr auto compile_string_to_view(detail::std_string_view s) -> basic_string_view { + return {s.data(), s.size()}; } -} // namespace detail_exported +} // namespace detail_exported FMT_BEGIN_DETAIL_NAMESPACE -inline void throw_format_error(const char* message) { - FMT_THROW(format_error(message)); +inline void throw_format_error(const char *message) { + FMT_THROW(format_error(message)); } -template struct is_integral : std::is_integral {}; -template <> struct is_integral : std::true_type {}; -template <> struct is_integral : std::true_type {}; +template +struct is_integral : std::is_integral {}; +template <> +struct is_integral : std::true_type {}; +template <> +struct is_integral : std::true_type {}; template -using is_signed = - std::integral_constant::is_signed || - std::is_same::value>; +using is_signed = std::integral_constant::is_signed || std::is_same::value>; // Returns true if value is negative, false otherwise. // Same as `value < 0` but doesn't produce warnings if T is an unsigned type. template ::value)> FMT_CONSTEXPR auto is_negative(T value) -> bool { - return value < 0; + return value < 0; } template ::value)> FMT_CONSTEXPR auto is_negative(T) -> bool { - return false; + return false; } template ::value)> FMT_CONSTEXPR auto is_supported_floating_point(T) -> uint16_t { - return (std::is_same::value && FMT_USE_FLOAT) || - (std::is_same::value && FMT_USE_DOUBLE) || - (std::is_same::value && FMT_USE_LONG_DOUBLE); + return (std::is_same::value && FMT_USE_FLOAT) || (std::is_same::value && FMT_USE_DOUBLE) || + (std::is_same::value && FMT_USE_LONG_DOUBLE); } // Smallest of uint32_t, uint64_t, uint128_t that is large enough to // represent all values of an integral type T. 
template -using uint32_or_64_or_128_t = - conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, - uint32_t, - conditional_t() <= 64, uint64_t, uint128_t>>; +using uint32_or_64_or_128_t = conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, uint32_t, + conditional_t() <= 64, uint64_t, uint128_t>>; template using uint64_or_128_t = conditional_t() <= 64, uint64_t, uint128_t>; -#define FMT_POWERS_OF_10(factor) \ - factor * 10, (factor)*100, (factor)*1000, (factor)*10000, (factor)*100000, \ - (factor)*1000000, (factor)*10000000, (factor)*100000000, \ - (factor)*1000000000 +#define FMT_POWERS_OF_10(factor) \ + factor * 10, (factor)*100, (factor)*1000, (factor)*10000, (factor)*100000, (factor)*1000000, (factor)*10000000, \ + (factor)*100000000, (factor)*1000000000 // Static data is placed in this class template for the header-only config. -template struct basic_data { - // log10(2) = 0x0.4d104d427de7fbcc... - static const uint64_t log10_2_significand = 0x4d104d427de7fbcc; - - // GCC generates slightly better code for pairs than chars. 
- FMT_API static constexpr const char digits[100][2] = { - {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'}, - {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'}, - {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'}, - {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'}, - {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'}, - {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'}, - {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'}, - {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'}, - {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'}, - {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'}, - {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'}, - {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'}, - {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'}, - {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'}, - {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'}, - {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'}, - {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}}; - - FMT_API static constexpr const char hex_digits[] = "0123456789abcdef"; - FMT_API static constexpr const char signs[4] = {0, '-', '+', ' '}; - FMT_API static constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', - 0x1000000u | ' '}; - FMT_API static constexpr const char left_padding_shifts[5] = {31, 31, 0, 1, - 0}; - FMT_API static constexpr const char right_padding_shifts[5] = {0, 31, 0, 1, - 0}; +template +struct basic_data { + // log10(2) = 0x0.4d104d427de7fbcc... + static const uint64_t log10_2_significand = 0x4d104d427de7fbcc; + + // GCC generates slightly better code for pairs than chars. 
+ FMT_API static constexpr const char digits[100][2] = { + {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'}, {'0', '6'}, {'0', '7'}, {'0', '8'}, + {'0', '9'}, {'1', '0'}, {'1', '1'}, {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'}, + {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'}, {'2', '4'}, {'2', '5'}, {'2', '6'}, + {'2', '7'}, {'2', '8'}, {'2', '9'}, {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'}, + {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'}, {'4', '2'}, {'4', '3'}, {'4', '4'}, + {'4', '5'}, {'4', '6'}, {'4', '7'}, {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'}, + {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'}, {'6', '0'}, {'6', '1'}, {'6', '2'}, + {'6', '3'}, {'6', '4'}, {'6', '5'}, {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'}, + {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'}, {'7', '8'}, {'7', '9'}, {'8', '0'}, + {'8', '1'}, {'8', '2'}, {'8', '3'}, {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'}, + {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'}, {'9', '6'}, {'9', '7'}, {'9', '8'}, + {'9', '9'}}; + + FMT_API static constexpr const char hex_digits[] = "0123456789abcdef"; + FMT_API static constexpr const char signs[4] = {0, '-', '+', ' '}; + FMT_API static constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', 0x1000000u | ' '}; + FMT_API static constexpr const char left_padding_shifts[5] = {31, 31, 0, 1, 0}; + FMT_API static constexpr const char right_padding_shifts[5] = {0, 31, 0, 1, 0}; }; #ifdef FMT_SHARED @@ -896,23 +900,28 @@ extern template struct basic_data; // This is a struct rather than an alias to avoid shadowing warnings in gcc. 
struct data : basic_data<> {}; -template FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { - int count = 1; - for (;;) { - // Integer division is slow so do it for a group of four digits instead - // of for every digit. The idea comes from the talk by Alexandrescu - // "Three Optimization Tips for C++". See speed-test for a comparison. - if (n < 10) return count; - if (n < 100) return count + 1; - if (n < 1000) return count + 2; - if (n < 10000) return count + 3; - n /= 10000u; - count += 4; - } +template +FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { + int count = 1; + for (;;) { + // Integer division is slow so do it for a group of four digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. + if (n < 10) + return count; + if (n < 100) + return count + 1; + if (n < 1000) + return count + 2; + if (n < 10000) + return count + 3; + n /= 10000u; + count += 4; + } } #if FMT_USE_INT128 FMT_CONSTEXPR inline auto count_digits(uint128_t n) -> int { - return count_digits_fallback(n); + return count_digits_fallback(n); } #endif @@ -920,449 +929,453 @@ FMT_CONSTEXPR inline auto count_digits(uint128_t n) -> int { // except for n == 0 in which case count_digits returns 1. FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int { #ifdef FMT_BUILTIN_CLZLL - if (!is_constant_evaluated()) { - // https://github.com/fmtlib/format-benchmark/blob/master/digits10 - // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). 
- constexpr uint16_t bsr2log10[] = { - 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, - 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, - 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, - 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; - auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; - constexpr const uint64_t zero_or_powers_of_10[] = { - 0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), - 10000000000000000000ULL}; - return t - (n < zero_or_powers_of_10[t]); - } + if (!is_constant_evaluated()) { + // https://github.com/fmtlib/format-benchmark/blob/master/digits10 + // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). + constexpr uint16_t bsr2log10[] = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, + 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, + 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; + auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; + constexpr const uint64_t zero_or_powers_of_10[] = {0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), + 10000000000000000000ULL}; + return t - (n < zero_or_powers_of_10[t]); + } #endif - return count_digits_fallback(n); + return count_digits_fallback(n); } // Counts the number of digits in n. BITS = log2(radix). 
template FMT_CONSTEXPR auto count_digits(UInt n) -> int { #ifdef FMT_BUILTIN_CLZ - if (num_bits() == 32) - return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; + if (num_bits() == 32) + return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; #endif - int num_digits = 0; - do { - ++num_digits; - } while ((n >>= BITS) != 0); - return num_digits; + int num_digits = 0; + do { + ++num_digits; + } while ((n >>= BITS) != 0); + return num_digits; } -template <> auto count_digits<4>(detail::fallback_uintptr n) -> int; +template <> +auto count_digits<4>(detail::fallback_uintptr n) -> int; // It is a separate function rather than a part of count_digits to workaround // the lack of static constexpr in constexpr functions. FMT_INLINE uint64_t count_digits_inc(int n) { - // An optimization by Kendall Willets from https://bit.ly/3uOIQrB. - // This increments the upper 32 bits (log10(T) - 1) when >= T is added. + // An optimization by Kendall Willets from https://bit.ly/3uOIQrB. + // This increments the upper 32 bits (log10(T) - 1) when >= T is added. 
#define FMT_INC(T) (((sizeof(#T) - 1ull) << 32) - T) - static constexpr uint64_t table[] = { - FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 - FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 - FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 - FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 - FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k - FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k - FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k - FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M - FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M - FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M - FMT_INC(1000000000), FMT_INC(1000000000) // 4B - }; - return table[n]; + static constexpr uint64_t table[] = { + FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 + FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 + FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 + FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 + FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k + FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k + FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k + FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M + FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M + FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M + FMT_INC(1000000000), FMT_INC(1000000000) // 4B + }; + return table[n]; } // Optional version of count_digits for better performance on 32-bit platforms. 
FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int { #ifdef FMT_BUILTIN_CLZ - if (!is_constant_evaluated()) { - auto inc = count_digits_inc(FMT_BUILTIN_CLZ(n | 1) ^ 31); - return static_cast((n + inc) >> 32); - } + if (!is_constant_evaluated()) { + auto inc = count_digits_inc(FMT_BUILTIN_CLZ(n | 1) ^ 31); + return static_cast((n + inc) >> 32); + } #endif - return count_digits_fallback(n); + return count_digits_fallback(n); } -template constexpr auto digits10() FMT_NOEXCEPT -> int { - return std::numeric_limits::digits10; +template +constexpr auto digits10() FMT_NOEXCEPT -> int { + return std::numeric_limits::digits10; } -template <> constexpr auto digits10() FMT_NOEXCEPT -> int { - return 38; +template <> +constexpr auto digits10() FMT_NOEXCEPT -> int { + return 38; } -template <> constexpr auto digits10() FMT_NOEXCEPT -> int { - return 38; +template <> +constexpr auto digits10() FMT_NOEXCEPT -> int { + return 38; } -template struct thousands_sep_result { - std::string grouping; - Char thousands_sep; +template +struct thousands_sep_result { + std::string grouping; + Char thousands_sep; }; template FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result; template inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { - auto result = thousands_sep_impl(loc); - return {result.grouping, Char(result.thousands_sep)}; + auto result = thousands_sep_impl(loc); + return {result.grouping, Char(result.thousands_sep)}; } template <> inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { - return thousands_sep_impl(loc); + return thousands_sep_impl(loc); } template FMT_API auto decimal_point_impl(locale_ref loc) -> Char; -template inline auto decimal_point(locale_ref loc) -> Char { - return Char(decimal_point_impl(loc)); +template +inline auto decimal_point(locale_ref loc) -> Char { + return Char(decimal_point_impl(loc)); } -template <> inline auto decimal_point(locale_ref loc) -> wchar_t { - return decimal_point_impl(loc); 
+template <> +inline auto decimal_point(locale_ref loc) -> wchar_t { + return decimal_point_impl(loc); } // Compares two characters for equality. -template auto equal2(const Char* lhs, const char* rhs) -> bool { - return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); +template +auto equal2(const Char *lhs, const char *rhs) -> bool { + return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); } -inline auto equal2(const char* lhs, const char* rhs) -> bool { - return memcmp(lhs, rhs, 2) == 0; +inline auto equal2(const char *lhs, const char *rhs) -> bool { + return memcmp(lhs, rhs, 2) == 0; } // Copies two characters from src to dst. -template void copy2(Char* dst, const char* src) { - *dst++ = static_cast(*src++); - *dst = static_cast(*src); +template +void copy2(Char *dst, const char *src) { + *dst++ = static_cast(*src++); + *dst = static_cast(*src); +} +FMT_INLINE void copy2(char *dst, const char *src) { + memcpy(dst, src, 2); } -FMT_INLINE void copy2(char* dst, const char* src) { memcpy(dst, src, 2); } -template struct format_decimal_result { - Iterator begin; - Iterator end; +template +struct format_decimal_result { + Iterator begin; + Iterator end; }; // Formats a decimal unsigned integer value writing into out pointing to a // buffer of specified size. The caller must ensure that the buffer is large // enough. template -FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size) - -> format_decimal_result { - FMT_ASSERT(size >= count_digits(value), "invalid digit count"); - out += size; - Char* end = out; - if (is_constant_evaluated()) { - while (value >= 10) { - *--out = static_cast('0' + value % 10); - value /= 10; - } - *--out = static_cast('0' + value); - return {out, end}; - } - while (value >= 100) { - // Integer division is slow so do it for a group of two digits instead - // of for every digit. The idea comes from the talk by Alexandrescu - // "Three Optimization Tips for C++". See speed-test for a comparison. 
- out -= 2; - copy2(out, data::digits[value % 100]); - value /= 100; - } - if (value < 10) { - *--out = static_cast('0' + value); - return {out, end}; - } - out -= 2; - copy2(out, data::digits[value]); - return {out, end}; +FMT_CONSTEXPR20 auto format_decimal(Char *out, UInt value, int size) -> format_decimal_result { + FMT_ASSERT(size >= count_digits(value), "invalid digit count"); + out += size; + Char *end = out; + if (is_constant_evaluated()) { + while (value >= 10) { + *--out = static_cast('0' + value % 10); + value /= 10; + } + *--out = static_cast('0' + value); + return {out, end}; + } + while (value >= 100) { + // Integer division is slow so do it for a group of two digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. + out -= 2; + copy2(out, data::digits[value % 100]); + value /= 100; + } + if (value < 10) { + *--out = static_cast('0' + value); + return {out, end}; + } + out -= 2; + copy2(out, data::digits[value]); + return {out, end}; } template >::value)> -inline auto format_decimal(Iterator out, UInt value, int size) - -> format_decimal_result { - // Buffer is large enough to hold all digits (digits10 + 1). - Char buffer[digits10() + 1]; - auto end = format_decimal(buffer, value, size).end; - return {out, detail::copy_str_noinline(buffer, end, out)}; +inline auto format_decimal(Iterator out, UInt value, int size) -> format_decimal_result { + // Buffer is large enough to hold all digits (digits10 + 1). + Char buffer[digits10() + 1]; + auto end = format_decimal(buffer, value, size).end; + return {out, detail::copy_str_noinline(buffer, end, out)}; } template -FMT_CONSTEXPR auto format_uint(Char* buffer, UInt value, int num_digits, - bool upper = false) -> Char* { - buffer += num_digits; - Char* end = buffer; - do { - const char* digits = upper ? 
"0123456789ABCDEF" : data::hex_digits; - unsigned digit = (value & ((1 << BASE_BITS) - 1)); - *--buffer = static_cast(BASE_BITS < 4 ? static_cast('0' + digit) - : digits[digit]); - } while ((value >>= BASE_BITS) != 0); - return end; +FMT_CONSTEXPR auto format_uint(Char *buffer, UInt value, int num_digits, bool upper = false) -> Char * { + buffer += num_digits; + Char *end = buffer; + do { + const char *digits = upper ? "0123456789ABCDEF" : data::hex_digits; + unsigned digit = (value & ((1 << BASE_BITS) - 1)); + *--buffer = static_cast(BASE_BITS < 4 ? static_cast('0' + digit) : digits[digit]); + } while ((value >>= BASE_BITS) != 0); + return end; } template -auto format_uint(Char* buffer, detail::fallback_uintptr n, int num_digits, - bool = false) -> Char* { - auto char_digits = std::numeric_limits::digits / 4; - int start = (num_digits + char_digits - 1) / char_digits - 1; - if (int start_digits = num_digits % char_digits) { - unsigned value = n.value[start--]; - buffer = format_uint(buffer, value, start_digits); - } - for (; start >= 0; --start) { - unsigned value = n.value[start]; - buffer += char_digits; - auto p = buffer; - for (int i = 0; i < char_digits; ++i) { - unsigned digit = (value & ((1 << BASE_BITS) - 1)); - *--p = static_cast(data::hex_digits[digit]); - value >>= BASE_BITS; - } - } - return buffer; +auto format_uint(Char *buffer, detail::fallback_uintptr n, int num_digits, bool = false) -> Char * { + auto char_digits = std::numeric_limits::digits / 4; + int start = (num_digits + char_digits - 1) / char_digits - 1; + if (int start_digits = num_digits % char_digits) { + unsigned value = n.value[start--]; + buffer = format_uint(buffer, value, start_digits); + } + for (; start >= 0; --start) { + unsigned value = n.value[start]; + buffer += char_digits; + auto p = buffer; + for (int i = 0; i < char_digits; ++i) { + unsigned digit = (value & ((1 << BASE_BITS) - 1)); + *--p = static_cast(data::hex_digits[digit]); + value >>= BASE_BITS; + } + } + return 
buffer; } template -inline auto format_uint(It out, UInt value, int num_digits, bool upper = false) - -> It { - if (auto ptr = to_pointer(out, to_unsigned(num_digits))) { - format_uint(ptr, value, num_digits, upper); - return out; - } - // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1). - char buffer[num_bits() / BASE_BITS + 1]; - format_uint(buffer, value, num_digits, upper); - return detail::copy_str_noinline(buffer, buffer + num_digits, out); +inline auto format_uint(It out, UInt value, int num_digits, bool upper = false) -> It { + if (auto ptr = to_pointer(out, to_unsigned(num_digits))) { + format_uint(ptr, value, num_digits, upper); + return out; + } + // Buffer should be large enough to hold all digits (digits / BASE_BITS + + // 1). + char buffer[num_bits() / BASE_BITS + 1]; + format_uint(buffer, value, num_digits, upper); + return detail::copy_str_noinline(buffer, buffer + num_digits, out); } // A converter from UTF-8 to UTF-16. class utf8_to_utf16 { - private: - basic_memory_buffer buffer_; - - public: - FMT_API explicit utf8_to_utf16(string_view s); - operator basic_string_view() const { return {&buffer_[0], size()}; } - auto size() const -> size_t { return buffer_.size() - 1; } - auto c_str() const -> const wchar_t* { return &buffer_[0]; } - auto str() const -> std::wstring { return {&buffer_[0], size()}; } +private: + basic_memory_buffer buffer_; + +public: + FMT_API explicit utf8_to_utf16(string_view s); + operator basic_string_view() const { + return {&buffer_[0], size()}; + } + auto size() const -> size_t { + return buffer_.size() - 1; + } + auto c_str() const -> const wchar_t * { + return &buffer_[0]; + } + auto str() const -> std::wstring { + return {&buffer_[0], size()}; + } }; namespace dragonbox { // Type-specific information that Dragonbox uses. 
-template struct float_info; - -template <> struct float_info { - using carrier_uint = uint32_t; - static const int significand_bits = 23; - static const int exponent_bits = 8; - static const int min_exponent = -126; - static const int max_exponent = 127; - static const int exponent_bias = -127; - static const int decimal_digits = 9; - static const int kappa = 1; - static const int big_divisor = 100; - static const int small_divisor = 10; - static const int min_k = -31; - static const int max_k = 46; - static const int cache_bits = 64; - static const int divisibility_check_by_5_threshold = 39; - static const int case_fc_pm_half_lower_threshold = -1; - static const int case_fc_pm_half_upper_threshold = 6; - static const int case_fc_lower_threshold = -2; - static const int case_fc_upper_threshold = 6; - static const int case_shorter_interval_left_endpoint_lower_threshold = 2; - static const int case_shorter_interval_left_endpoint_upper_threshold = 3; - static const int shorter_interval_tie_lower_threshold = -35; - static const int shorter_interval_tie_upper_threshold = -35; - static const int max_trailing_zeros = 7; +template +struct float_info; + +template <> +struct float_info { + using carrier_uint = uint32_t; + static const int significand_bits = 23; + static const int exponent_bits = 8; + static const int min_exponent = -126; + static const int max_exponent = 127; + static const int exponent_bias = -127; + static const int decimal_digits = 9; + static const int kappa = 1; + static const int big_divisor = 100; + static const int small_divisor = 10; + static const int min_k = -31; + static const int max_k = 46; + static const int cache_bits = 64; + static const int divisibility_check_by_5_threshold = 39; + static const int case_fc_pm_half_lower_threshold = -1; + static const int case_fc_pm_half_upper_threshold = 6; + static const int case_fc_lower_threshold = -2; + static const int case_fc_upper_threshold = 6; + static const int 
case_shorter_interval_left_endpoint_lower_threshold = 2; + static const int case_shorter_interval_left_endpoint_upper_threshold = 3; + static const int shorter_interval_tie_lower_threshold = -35; + static const int shorter_interval_tie_upper_threshold = -35; + static const int max_trailing_zeros = 7; }; -template <> struct float_info { - using carrier_uint = uint64_t; - static const int significand_bits = 52; - static const int exponent_bits = 11; - static const int min_exponent = -1022; - static const int max_exponent = 1023; - static const int exponent_bias = -1023; - static const int decimal_digits = 17; - static const int kappa = 2; - static const int big_divisor = 1000; - static const int small_divisor = 100; - static const int min_k = -292; - static const int max_k = 326; - static const int cache_bits = 128; - static const int divisibility_check_by_5_threshold = 86; - static const int case_fc_pm_half_lower_threshold = -2; - static const int case_fc_pm_half_upper_threshold = 9; - static const int case_fc_lower_threshold = -4; - static const int case_fc_upper_threshold = 9; - static const int case_shorter_interval_left_endpoint_lower_threshold = 2; - static const int case_shorter_interval_left_endpoint_upper_threshold = 3; - static const int shorter_interval_tie_lower_threshold = -77; - static const int shorter_interval_tie_upper_threshold = -77; - static const int max_trailing_zeros = 16; +template <> +struct float_info { + using carrier_uint = uint64_t; + static const int significand_bits = 52; + static const int exponent_bits = 11; + static const int min_exponent = -1022; + static const int max_exponent = 1023; + static const int exponent_bias = -1023; + static const int decimal_digits = 17; + static const int kappa = 2; + static const int big_divisor = 1000; + static const int small_divisor = 100; + static const int min_k = -292; + static const int max_k = 326; + static const int cache_bits = 128; + static const int divisibility_check_by_5_threshold = 86; + 
static const int case_fc_pm_half_lower_threshold = -2; + static const int case_fc_pm_half_upper_threshold = 9; + static const int case_fc_lower_threshold = -4; + static const int case_fc_upper_threshold = 9; + static const int case_shorter_interval_left_endpoint_lower_threshold = 2; + static const int case_shorter_interval_left_endpoint_upper_threshold = 3; + static const int shorter_interval_tie_lower_threshold = -77; + static const int shorter_interval_tie_upper_threshold = -77; + static const int max_trailing_zeros = 16; }; -template struct decimal_fp { - using significand_type = typename float_info::carrier_uint; - significand_type significand; - int exponent; +template +struct decimal_fp { + using significand_type = typename float_info::carrier_uint; + significand_type significand; + int exponent; }; template FMT_API auto to_decimal(T x) FMT_NOEXCEPT -> decimal_fp; -} // namespace dragonbox +} // namespace dragonbox template -constexpr auto exponent_mask() -> - typename dragonbox::float_info::carrier_uint { - using uint = typename dragonbox::float_info::carrier_uint; - return ((uint(1) << dragonbox::float_info::exponent_bits) - 1) - << dragonbox::float_info::significand_bits; +constexpr auto exponent_mask() -> typename dragonbox::float_info::carrier_uint { + using uint = typename dragonbox::float_info::carrier_uint; + return ((uint(1) << dragonbox::float_info::exponent_bits) - 1) << dragonbox::float_info::significand_bits; } // Writes the exponent exp in the form "[+-]d{2,3}" to buffer. 
template auto write_exponent(int exp, It it) -> It { - FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); - if (exp < 0) { - *it++ = static_cast('-'); - exp = -exp; - } else { - *it++ = static_cast('+'); - } - if (exp >= 100) { - const char* top = data::digits[exp / 100]; - if (exp >= 1000) *it++ = static_cast(top[0]); - *it++ = static_cast(top[1]); - exp %= 100; - } - const char* d = data::digits[exp]; - *it++ = static_cast(d[0]); - *it++ = static_cast(d[1]); - return it; + FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); + if (exp < 0) { + *it++ = static_cast('-'); + exp = -exp; + } else { + *it++ = static_cast('+'); + } + if (exp >= 100) { + const char *top = data::digits[exp / 100]; + if (exp >= 1000) + *it++ = static_cast(top[0]); + *it++ = static_cast(top[1]); + exp %= 100; + } + const char *d = data::digits[exp]; + *it++ = static_cast(d[0]); + *it++ = static_cast(d[1]); + return it; } template -auto format_float(T value, int precision, float_specs specs, buffer& buf) - -> int; +auto format_float(T value, int precision, float_specs specs, buffer &buf) -> int; // Formats a floating-point number with snprintf. 
template -auto snprintf_float(T value, int precision, float_specs specs, - buffer& buf) -> int; +auto snprintf_float(T value, int precision, float_specs specs, buffer &buf) -> int; -template auto promote_float(T value) -> T { return value; } +template +auto promote_float(T value) -> T { + return value; +} inline auto promote_float(float value) -> double { - return static_cast(value); + return static_cast(value); } template -FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, - const fill_t& fill) -> OutputIt { - auto fill_size = fill.size(); - if (fill_size == 1) return detail::fill_n(it, n, fill[0]); - auto data = fill.data(); - for (size_t i = 0; i < n; ++i) - it = copy_str(data, data + fill_size, it); - return it; +FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, const fill_t &fill) -> OutputIt { + auto fill_size = fill.size(); + if (fill_size == 1) + return detail::fill_n(it, n, fill[0]); + auto data = fill.data(); + for (size_t i = 0; i < n; ++i) + it = copy_str(data, data + fill_size, it); + return it; } // Writes the output of f, padded according to format specifications in specs. // size: output size in code units. // width: output display width in (terminal) column positions. -template -FMT_CONSTEXPR auto write_padded(OutputIt out, - const basic_format_specs& specs, - size_t size, size_t width, F&& f) -> OutputIt { - static_assert(align == align::left || align == align::right, ""); - unsigned spec_width = to_unsigned(specs.width); - size_t padding = spec_width > width ? spec_width - width : 0; - auto* shifts = align == align::left ? 
data::left_padding_shifts - : data::right_padding_shifts; - size_t left_padding = padding >> shifts[specs.align]; - size_t right_padding = padding - left_padding; - auto it = reserve(out, size + padding * specs.fill.size()); - if (left_padding != 0) it = fill(it, left_padding, specs.fill); - it = f(it); - if (right_padding != 0) it = fill(it, right_padding, specs.fill); - return base_iterator(out, it); -} - -template -constexpr auto write_padded(OutputIt out, const basic_format_specs& specs, - size_t size, F&& f) -> OutputIt { - return write_padded(out, specs, size, size, f); +template +FMT_CONSTEXPR auto write_padded(OutputIt out, const basic_format_specs &specs, size_t size, size_t width, F &&f) + -> OutputIt { + static_assert(align == align::left || align == align::right, ""); + unsigned spec_width = to_unsigned(specs.width); + size_t padding = spec_width > width ? spec_width - width : 0; + auto *shifts = align == align::left ? data::left_padding_shifts : data::right_padding_shifts; + size_t left_padding = padding >> shifts[specs.align]; + size_t right_padding = padding - left_padding; + auto it = reserve(out, size + padding * specs.fill.size()); + if (left_padding != 0) + it = fill(it, left_padding, specs.fill); + it = f(it); + if (right_padding != 0) + it = fill(it, right_padding, specs.fill); + return base_iterator(out, it); +} + +template +constexpr auto write_padded(OutputIt out, const basic_format_specs &specs, size_t size, F &&f) -> OutputIt { + return write_padded(out, specs, size, size, f); } template -FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, - const basic_format_specs& specs) - -> OutputIt { - return write_padded( - out, specs, bytes.size(), [bytes](reserve_iterator it) { - const char* data = bytes.data(); - return copy_str(data, data + bytes.size(), it); - }); +FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, const basic_format_specs &specs) -> OutputIt { + return write_padded(out, specs, bytes.size(), 
[bytes](reserve_iterator it) { + const char *data = bytes.data(); + return copy_str(data, data + bytes.size(), it); + }); } template -auto write_ptr(OutputIt out, UIntPtr value, - const basic_format_specs* specs) -> OutputIt { - int num_digits = count_digits<4>(value); - auto size = to_unsigned(num_digits) + size_t(2); - auto write = [=](reserve_iterator it) { - *it++ = static_cast('0'); - *it++ = static_cast('x'); - return format_uint<4, Char>(it, value, num_digits); - }; - return specs ? write_padded(out, *specs, size, write) - : base_iterator(out, write(reserve(out, size))); +auto write_ptr(OutputIt out, UIntPtr value, const basic_format_specs *specs) -> OutputIt { + int num_digits = count_digits<4>(value); + auto size = to_unsigned(num_digits) + size_t(2); + auto write = [=](reserve_iterator it) { + *it++ = static_cast('0'); + *it++ = static_cast('x'); + return format_uint<4, Char>(it, value, num_digits); + }; + return specs ? write_padded(out, *specs, size, write) : base_iterator(out, write(reserve(out, size))); } template -FMT_CONSTEXPR auto write_char(OutputIt out, Char value, - const basic_format_specs& specs) - -> OutputIt { - return write_padded(out, specs, 1, [=](reserve_iterator it) { - *it++ = value; - return it; - }); +FMT_CONSTEXPR auto write_char(OutputIt out, Char value, const basic_format_specs &specs) -> OutputIt { + return write_padded(out, specs, 1, [=](reserve_iterator it) { + *it++ = value; + return it; + }); } template -FMT_CONSTEXPR auto write(OutputIt out, Char value, - const basic_format_specs& specs, - locale_ref loc = {}) -> OutputIt { - return check_char_specs(specs) - ? write_char(out, value, specs) - : write(out, static_cast(value), specs, loc); +FMT_CONSTEXPR auto write(OutputIt out, Char value, const basic_format_specs &specs, locale_ref loc = {}) + -> OutputIt { + return check_char_specs(specs) ? 
write_char(out, value, specs) : write(out, static_cast(value), specs, loc); } // Data for write_int that doesn't depend on output iterator type. It is used to // avoid template code bloat. -template struct write_int_data { - size_t size; - size_t padding; - - FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, - const basic_format_specs& specs) - : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { - if (specs.align == align::numeric) { - auto width = to_unsigned(specs.width); - if (width > size) { - padding = width - size; - size = width; - } - } else if (specs.precision > num_digits) { - size = (prefix >> 24) + to_unsigned(specs.precision); - padding = to_unsigned(specs.precision - num_digits); - } - } +template +struct write_int_data { + size_t size; + size_t padding; + + FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, const basic_format_specs &specs) + : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { + if (specs.align == align::numeric) { + auto width = to_unsigned(specs.width); + if (width > size) { + padding = width - size; + size = width; + } + } else if (specs.precision > num_digits) { + size = (prefix >> 24) + to_unsigned(specs.precision); + padding = to_unsigned(specs.precision - num_digits); + } + } }; // Writes an integer in the format @@ -1370,777 +1383,743 @@ template struct write_int_data { // where are written by write_digits(it). // prefix contains chars in three lower bytes and the size in the fourth byte. template -FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits, - unsigned prefix, - const basic_format_specs& specs, - W write_digits) -> OutputIt { - // Slightly faster check for specs.width == 0 && specs.precision == -1. 
- if ((specs.width | (specs.precision + 1)) == 0) { - auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24)); - if (prefix != 0) { - for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) - *it++ = static_cast(p & 0xff); - } - return base_iterator(out, write_digits(it)); - } - auto data = write_int_data(num_digits, prefix, specs); - return write_padded( - out, specs, data.size, [=](reserve_iterator it) { - for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) - *it++ = static_cast(p & 0xff); - it = detail::fill_n(it, data.padding, static_cast('0')); - return write_digits(it); - }); +FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits, unsigned prefix, + const basic_format_specs &specs, W write_digits) -> OutputIt { + // Slightly faster check for specs.width == 0 && specs.precision == -1. + if ((specs.width | (specs.precision + 1)) == 0) { + auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24)); + if (prefix != 0) { + for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) + *it++ = static_cast(p & 0xff); + } + return base_iterator(out, write_digits(it)); + } + auto data = write_int_data(num_digits, prefix, specs); + return write_padded(out, specs, data.size, [=](reserve_iterator it) { + for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) + *it++ = static_cast(p & 0xff); + it = detail::fill_n(it, data.padding, static_cast('0')); + return write_digits(it); + }); } template -auto write_int_localized(OutputIt& out, UInt value, unsigned prefix, - const basic_format_specs& specs, locale_ref loc) - -> bool { - static_assert(std::is_same, UInt>::value, ""); - const auto sep_size = 1; - auto ts = thousands_sep(loc); - if (!ts.thousands_sep) return false; - int num_digits = count_digits(value); - int size = num_digits, n = num_digits; - const std::string& groups = ts.grouping; - std::string::const_iterator group = groups.cbegin(); - while (group != groups.cend() && n > *group && *group > 0 && - *group != max_value()) { - size += sep_size; 
- n -= *group; - ++group; - } - if (group == groups.cend()) size += sep_size * ((n - 1) / groups.back()); - char digits[40]; - format_decimal(digits, value, num_digits); - basic_memory_buffer buffer; - if (prefix != 0) ++size; - const auto usize = to_unsigned(size); - buffer.resize(usize); - basic_string_view s(&ts.thousands_sep, sep_size); - // Index of a decimal digit with the least significant digit having index 0. - int digit_index = 0; - group = groups.cbegin(); - auto p = buffer.data() + size - 1; - for (int i = num_digits - 1; i > 0; --i) { - *p-- = static_cast(digits[i]); - if (*group <= 0 || ++digit_index % *group != 0 || - *group == max_value()) - continue; - if (group + 1 != groups.cend()) { - digit_index = 0; - ++group; - } - std::uninitialized_copy(s.data(), s.data() + s.size(), - make_checked(p, s.size())); - p -= s.size(); - } - *p-- = static_cast(*digits); - if (prefix != 0) *p = static_cast(prefix); - auto data = buffer.data(); - out = write_padded( - out, specs, usize, usize, [=](reserve_iterator it) { - return copy_str(data, data + size, it); - }); - return true; -} - -FMT_CONSTEXPR inline void prefix_append(unsigned& prefix, unsigned value) { - prefix |= prefix != 0 ? value << 8 : value; - prefix += (1u + (value > 0xff ? 
1 : 0)) << 24; -} - -template struct write_int_arg { - UInt abs_value; - unsigned prefix; +auto write_int_localized(OutputIt &out, UInt value, unsigned prefix, const basic_format_specs &specs, + locale_ref loc) -> bool { + static_assert(std::is_same, UInt>::value, ""); + const auto sep_size = 1; + auto ts = thousands_sep(loc); + if (!ts.thousands_sep) + return false; + int num_digits = count_digits(value); + int size = num_digits, n = num_digits; + const std::string &groups = ts.grouping; + std::string::const_iterator group = groups.cbegin(); + while (group != groups.cend() && n > *group && *group > 0 && *group != max_value()) { + size += sep_size; + n -= *group; + ++group; + } + if (group == groups.cend()) + size += sep_size * ((n - 1) / groups.back()); + char digits[40]; + format_decimal(digits, value, num_digits); + basic_memory_buffer buffer; + if (prefix != 0) + ++size; + const auto usize = to_unsigned(size); + buffer.resize(usize); + basic_string_view s(&ts.thousands_sep, sep_size); + // Index of a decimal digit with the least significant digit having index 0. + int digit_index = 0; + group = groups.cbegin(); + auto p = buffer.data() + size - 1; + for (int i = num_digits - 1; i > 0; --i) { + *p-- = static_cast(digits[i]); + if (*group <= 0 || ++digit_index % *group != 0 || *group == max_value()) + continue; + if (group + 1 != groups.cend()) { + digit_index = 0; + ++group; + } + std::uninitialized_copy(s.data(), s.data() + s.size(), make_checked(p, s.size())); + p -= s.size(); + } + *p-- = static_cast(*digits); + if (prefix != 0) + *p = static_cast(prefix); + auto data = buffer.data(); + out = write_padded( + out, specs, usize, usize, [=](reserve_iterator it) { return copy_str(data, data + size, it); }); + return true; +} + +FMT_CONSTEXPR inline void prefix_append(unsigned &prefix, unsigned value) { + prefix |= prefix != 0 ? value << 8 : value; + prefix += (1u + (value > 0xff ? 
1 : 0)) << 24; +} + +template +struct write_int_arg { + UInt abs_value; + unsigned prefix; }; template -FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) - -> write_int_arg> { - auto prefix = 0u; - auto abs_value = static_cast>(value); - if (is_negative(value)) { - prefix = 0x01000000 | '-'; - abs_value = 0 - abs_value; - } else { - prefix = data::prefixes[sign]; - } - return {abs_value, prefix}; +FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) -> write_int_arg> { + auto prefix = 0u; + auto abs_value = static_cast>(value); + if (is_negative(value)) { + prefix = 0x01000000 | '-'; + abs_value = 0 - abs_value; + } else { + prefix = data::prefixes[sign]; + } + return {abs_value, prefix}; } template -FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, - const basic_format_specs& specs, +FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, const basic_format_specs &specs, locale_ref loc) -> OutputIt { - static_assert(std::is_same>::value, ""); - auto abs_value = arg.abs_value; - auto prefix = arg.prefix; - auto utype = static_cast(specs.type); - switch (specs.type) { - case 0: - case 'd': { - if (specs.localized && - write_int_localized(out, static_cast>(abs_value), - prefix, specs, loc)) { - return out; - } - auto num_digits = count_digits(abs_value); - return write_int( - out, num_digits, prefix, specs, [=](reserve_iterator it) { - return format_decimal(it, abs_value, num_digits).end; - }); - } - case 'x': - case 'X': { - if (specs.alt) prefix_append(prefix, (utype << 8) | '0'); - bool upper = specs.type != 'x'; - int num_digits = count_digits<4>(abs_value); - return write_int( - out, num_digits, prefix, specs, [=](reserve_iterator it) { - return format_uint<4, Char>(it, abs_value, num_digits, upper); - }); - } - case 'b': - case 'B': { - if (specs.alt) prefix_append(prefix, (utype << 8) | '0'); - int num_digits = count_digits<1>(abs_value); - return write_int(out, num_digits, prefix, specs, - 
[=](reserve_iterator it) { - return format_uint<1, Char>(it, abs_value, num_digits); - }); - } - case 'o': { - int num_digits = count_digits<3>(abs_value); - if (specs.alt && specs.precision <= num_digits && abs_value != 0) { - // Octal prefix '0' is counted as a digit, so only add it if precision - // is not greater than the number of digits. - prefix_append(prefix, '0'); - } - return write_int(out, num_digits, prefix, specs, - [=](reserve_iterator it) { - return format_uint<3, Char>(it, abs_value, num_digits); - }); - } - case 'c': - return write_char(out, static_cast(abs_value), specs); - default: - FMT_THROW(format_error("invalid type specifier")); - } - return out; + static_assert(std::is_same>::value, ""); + auto abs_value = arg.abs_value; + auto prefix = arg.prefix; + auto utype = static_cast(specs.type); + switch (specs.type) { + case 0: + case 'd': { + if (specs.localized && + write_int_localized(out, static_cast>(abs_value), prefix, specs, loc)) { + return out; + } + auto num_digits = count_digits(abs_value); + return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_decimal(it, abs_value, num_digits).end; + }); + } + case 'x': + case 'X': { + if (specs.alt) + prefix_append(prefix, (utype << 8) | '0'); + bool upper = specs.type != 'x'; + int num_digits = count_digits<4>(abs_value); + return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_uint<4, Char>(it, abs_value, num_digits, upper); + }); + } + case 'b': + case 'B': { + if (specs.alt) + prefix_append(prefix, (utype << 8) | '0'); + int num_digits = count_digits<1>(abs_value); + return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_uint<1, Char>(it, abs_value, num_digits); + }); + } + case 'o': { + int num_digits = count_digits<3>(abs_value); + if (specs.alt && specs.precision <= num_digits && abs_value != 0) { + // Octal prefix '0' is counted as a digit, so only add it if + // precision is not 
greater than the number of digits. + prefix_append(prefix, '0'); + } + return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_uint<3, Char>(it, abs_value, num_digits); + }); + } + case 'c': + return write_char(out, static_cast(abs_value), specs); + default: + FMT_THROW(format_error("invalid type specifier")); + } + return out; } template ::value && - !std::is_same::value && + FMT_ENABLE_IF(is_integral::value && !std::is_same::value && std::is_same>::value)> -FMT_CONSTEXPR auto write(OutputIt out, T value, - const basic_format_specs& specs, locale_ref loc) - -> OutputIt { - return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); +FMT_CONSTEXPR auto write(OutputIt out, T value, const basic_format_specs &specs, locale_ref loc) -> OutputIt { + return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); } // An inlined version of write used in format string compilation. template ::value && - !std::is_same::value && + FMT_ENABLE_IF(is_integral::value && !std::is_same::value && !std::is_same>::value)> -FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, - const basic_format_specs& specs, - locale_ref loc) -> OutputIt { - return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); +FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, const basic_format_specs &specs, locale_ref loc) + -> OutputIt { + return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); } template -FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, - const basic_format_specs& specs) -> OutputIt { - auto data = s.data(); - auto size = s.size(); - if (specs.precision >= 0 && to_unsigned(specs.precision) < size) - size = code_point_index(s, to_unsigned(specs.precision)); - auto width = - specs.width != 0 ? 
compute_width(basic_string_view(data, size)) : 0; - return write_padded(out, specs, size, width, - [=](reserve_iterator it) { - return copy_str(data, data + size, it); - }); +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, const basic_format_specs &specs) -> OutputIt { + auto data = s.data(); + auto size = s.size(); + if (specs.precision >= 0 && to_unsigned(specs.precision) < size) + size = code_point_index(s, to_unsigned(specs.precision)); + auto width = specs.width != 0 ? compute_width(basic_string_view(data, size)) : 0; + return write_padded(out, specs, size, width, + [=](reserve_iterator it) { return copy_str(data, data + size, it); }); } template -FMT_CONSTEXPR auto write(OutputIt out, - basic_string_view> s, - const basic_format_specs& specs, locale_ref) - -> OutputIt { - check_string_type_spec(specs.type); - return write(out, s, specs); +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view> s, + const basic_format_specs &specs, locale_ref) -> OutputIt { + check_string_type_spec(specs.type); + return write(out, s, specs); } template -FMT_CONSTEXPR auto write(OutputIt out, const Char* s, - const basic_format_specs& specs, locale_ref) - -> OutputIt { - return check_cstring_type_spec(specs.type) - ? write(out, basic_string_view(s), specs, {}) - : write_ptr(out, to_uintptr(s), &specs); +FMT_CONSTEXPR auto write(OutputIt out, const Char *s, const basic_format_specs &specs, locale_ref) -> OutputIt { + return check_cstring_type_spec(specs.type) ? write(out, basic_string_view(s), specs, {}) + : write_ptr(out, to_uintptr(s), &specs); } template -auto write_nonfinite(OutputIt out, bool isinf, basic_format_specs specs, - const float_specs& fspecs) -> OutputIt { - auto str = - isinf ? (fspecs.upper ? "INF" : "inf") : (fspecs.upper ? "NAN" : "nan"); - constexpr size_t str_size = 3; - auto sign = fspecs.sign; - auto size = str_size + (sign ? 1 : 0); - // Replace '0'-padding with space for non-finite values. 
- const bool is_zero_fill = - specs.fill.size() == 1 && *specs.fill.data() == static_cast('0'); - if (is_zero_fill) specs.fill[0] = static_cast(' '); - return write_padded(out, specs, size, [=](reserve_iterator it) { - if (sign) *it++ = static_cast(data::signs[sign]); - return copy_str(str, str + str_size, it); - }); +auto write_nonfinite(OutputIt out, bool isinf, basic_format_specs specs, const float_specs &fspecs) -> OutputIt { + auto str = isinf ? (fspecs.upper ? "INF" : "inf") : (fspecs.upper ? "NAN" : "nan"); + constexpr size_t str_size = 3; + auto sign = fspecs.sign; + auto size = str_size + (sign ? 1 : 0); + // Replace '0'-padding with space for non-finite values. + const bool is_zero_fill = specs.fill.size() == 1 && *specs.fill.data() == static_cast('0'); + if (is_zero_fill) + specs.fill[0] = static_cast(' '); + return write_padded(out, specs, size, [=](reserve_iterator it) { + if (sign) + *it++ = static_cast(data::signs[sign]); + return copy_str(str, str + str_size, it); + }); } // A decimal floating-point number significand * pow(10, exp). 
struct big_decimal_fp { - const char* significand; - int significand_size; - int exponent; + const char *significand; + int significand_size; + int exponent; }; -inline auto get_significand_size(const big_decimal_fp& fp) -> int { - return fp.significand_size; +inline auto get_significand_size(const big_decimal_fp &fp) -> int { + return fp.significand_size; } template -inline auto get_significand_size(const dragonbox::decimal_fp& fp) -> int { - return count_digits(fp.significand); +inline auto get_significand_size(const dragonbox::decimal_fp &fp) -> int { + return count_digits(fp.significand); } template -inline auto write_significand(OutputIt out, const char* significand, - int& significand_size) -> OutputIt { - return copy_str(significand, significand + significand_size, out); +inline auto write_significand(OutputIt out, const char *significand, int &significand_size) -> OutputIt { + return copy_str(significand, significand + significand_size, out); } template -inline auto write_significand(OutputIt out, UInt significand, - int significand_size) -> OutputIt { - return format_decimal(out, significand, significand_size).end; -} - -template ::value)> -inline auto write_significand(Char* out, UInt significand, int significand_size, - int integral_size, Char decimal_point) -> Char* { - if (!decimal_point) - return format_decimal(out, significand, significand_size).end; - auto end = format_decimal(out + 1, significand, significand_size).end; - if (integral_size == 1) { - out[0] = out[1]; - } else { - std::uninitialized_copy_n(out + 1, integral_size, - make_checked(out, to_unsigned(integral_size))); - } - out[integral_size] = decimal_point; - return end; +inline auto write_significand(OutputIt out, UInt significand, int significand_size) -> OutputIt { + return format_decimal(out, significand, significand_size).end; +} + +template ::value)> +inline auto write_significand(Char *out, UInt significand, int significand_size, int integral_size, Char decimal_point) + -> Char * 
{ + if (!decimal_point) + return format_decimal(out, significand, significand_size).end; + auto end = format_decimal(out + 1, significand, significand_size).end; + if (integral_size == 1) { + out[0] = out[1]; + } else { + std::uninitialized_copy_n(out + 1, integral_size, make_checked(out, to_unsigned(integral_size))); + } + out[integral_size] = decimal_point; + return end; } template >::value)> -inline auto write_significand(OutputIt out, UInt significand, - int significand_size, int integral_size, +inline auto write_significand(OutputIt out, UInt significand, int significand_size, int integral_size, Char decimal_point) -> OutputIt { - // Buffer is large enough to hold digits (digits10 + 1) and a decimal point. - Char buffer[digits10() + 2]; - auto end = write_significand(buffer, significand, significand_size, - integral_size, decimal_point); - return detail::copy_str_noinline(buffer, end, out); + // Buffer is large enough to hold digits (digits10 + 1) and a decimal point. + Char buffer[digits10() + 2]; + auto end = write_significand(buffer, significand, significand_size, integral_size, decimal_point); + return detail::copy_str_noinline(buffer, end, out); } template -inline auto write_significand(OutputIt out, const char* significand, - int significand_size, int integral_size, +inline auto write_significand(OutputIt out, const char *significand, int significand_size, int integral_size, Char decimal_point) -> OutputIt { - out = detail::copy_str_noinline(significand, - significand + integral_size, out); - if (!decimal_point) return out; - *out++ = decimal_point; - return detail::copy_str_noinline(significand + integral_size, - significand + significand_size, out); + out = detail::copy_str_noinline(significand, significand + integral_size, out); + if (!decimal_point) + return out; + *out++ = decimal_point; + return detail::copy_str_noinline(significand + integral_size, significand + significand_size, out); } template -auto write_float(OutputIt out, const DecimalFP& 
fp, - const basic_format_specs& specs, float_specs fspecs, +auto write_float(OutputIt out, const DecimalFP &fp, const basic_format_specs &specs, float_specs fspecs, Char decimal_point) -> OutputIt { - auto significand = fp.significand; - int significand_size = get_significand_size(fp); - static const Char zero = static_cast('0'); - auto sign = fspecs.sign; - size_t size = to_unsigned(significand_size) + (sign ? 1 : 0); - using iterator = reserve_iterator; - - int output_exp = fp.exponent + significand_size - 1; - auto use_exp_format = [=]() { - if (fspecs.format == float_format::exp) return true; - if (fspecs.format != float_format::general) return false; - // Use the fixed notation if the exponent is in [exp_lower, exp_upper), - // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. - const int exp_lower = -4, exp_upper = 16; - return output_exp < exp_lower || - output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper); - }; - if (use_exp_format()) { - int num_zeros = 0; - if (fspecs.showpoint) { - num_zeros = fspecs.precision - significand_size; - if (num_zeros < 0) num_zeros = 0; - size += to_unsigned(num_zeros); - } else if (significand_size == 1) { - decimal_point = Char(); - } - auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; - int exp_digits = 2; - if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3; - - size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); - char exp_char = fspecs.upper ? 'E' : 'e'; - auto write = [=](iterator it) { - if (sign) *it++ = static_cast(data::signs[sign]); - // Insert a decimal point after the first digit and add an exponent. - it = write_significand(it, significand, significand_size, 1, - decimal_point); - if (num_zeros > 0) it = detail::fill_n(it, num_zeros, zero); - *it++ = static_cast(exp_char); - return write_exponent(output_exp, it); - }; - return specs.width > 0 ? 
write_padded(out, specs, size, write) - : base_iterator(out, write(reserve(out, size))); - } - - int exp = fp.exponent + significand_size; - if (fp.exponent >= 0) { - // 1234e5 -> 123400000[.0+] - size += to_unsigned(fp.exponent); - int num_zeros = fspecs.precision - exp; + auto significand = fp.significand; + int significand_size = get_significand_size(fp); + static const Char zero = static_cast('0'); + auto sign = fspecs.sign; + size_t size = to_unsigned(significand_size) + (sign ? 1 : 0); + using iterator = reserve_iterator; + + int output_exp = fp.exponent + significand_size - 1; + auto use_exp_format = [=]() { + if (fspecs.format == float_format::exp) + return true; + if (fspecs.format != float_format::general) + return false; + // Use the fixed notation if the exponent is in [exp_lower, exp_upper), + // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. + const int exp_lower = -4, exp_upper = 16; + return output_exp < exp_lower || output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper); + }; + if (use_exp_format()) { + int num_zeros = 0; + if (fspecs.showpoint) { + num_zeros = fspecs.precision - significand_size; + if (num_zeros < 0) + num_zeros = 0; + size += to_unsigned(num_zeros); + } else if (significand_size == 1) { + decimal_point = Char(); + } + auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; + int exp_digits = 2; + if (abs_output_exp >= 100) + exp_digits = abs_output_exp >= 1000 ? 4 : 3; + + size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); + char exp_char = fspecs.upper ? 'E' : 'e'; + auto write = [=](iterator it) { + if (sign) + *it++ = static_cast(data::signs[sign]); + // Insert a decimal point after the first digit and add an exponent. + it = write_significand(it, significand, significand_size, 1, decimal_point); + if (num_zeros > 0) + it = detail::fill_n(it, num_zeros, zero); + *it++ = static_cast(exp_char); + return write_exponent(output_exp, it); + }; + return specs.width > 0 ? 
write_padded(out, specs, size, write) + : base_iterator(out, write(reserve(out, size))); + } + + int exp = fp.exponent + significand_size; + if (fp.exponent >= 0) { + // 1234e5 -> 123400000[.0+] + size += to_unsigned(fp.exponent); + int num_zeros = fspecs.precision - exp; #ifdef FMT_FUZZ - if (num_zeros > 5000) - throw std::runtime_error("fuzz mode - avoiding excessive cpu use"); + if (num_zeros > 5000) + throw std::runtime_error("fuzz mode - avoiding excessive cpu use"); #endif - if (fspecs.showpoint) { - if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1; - if (num_zeros > 0) size += to_unsigned(num_zeros) + 1; - } - return write_padded(out, specs, size, [&](iterator it) { - if (sign) *it++ = static_cast(data::signs[sign]); - it = write_significand(it, significand, significand_size); - it = detail::fill_n(it, fp.exponent, zero); - if (!fspecs.showpoint) return it; - *it++ = decimal_point; - return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; - }); - } else if (exp > 0) { - // 1234e-2 -> 12.34[0+] - int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0; - size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); - return write_padded(out, specs, size, [&](iterator it) { - if (sign) *it++ = static_cast(data::signs[sign]); - it = write_significand(it, significand, significand_size, exp, - decimal_point); - return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; - }); - } - // 1234e-6 -> 0.001234 - int num_zeros = -exp; - if (significand_size == 0 && fspecs.precision >= 0 && - fspecs.precision < num_zeros) { - num_zeros = fspecs.precision; - } - bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; - size += 1 + (pointy ? 
1 : 0) + to_unsigned(num_zeros); - return write_padded(out, specs, size, [&](iterator it) { - if (sign) *it++ = static_cast(data::signs[sign]); - *it++ = zero; - if (!pointy) return it; - *it++ = decimal_point; - it = detail::fill_n(it, num_zeros, zero); - return write_significand(it, significand, significand_size); - }); -} - -template ::value)> -auto write(OutputIt out, T value, basic_format_specs specs, - locale_ref loc = {}) -> OutputIt { - if (const_check(!is_supported_floating_point(value))) return out; - float_specs fspecs = parse_float_type_spec(specs); - fspecs.sign = specs.sign; - if (std::signbit(value)) { // value < 0 is false for NaN so use signbit. - fspecs.sign = sign::minus; - value = -value; - } else if (fspecs.sign == sign::minus) { - fspecs.sign = sign::none; - } - - if (!std::isfinite(value)) - return write_nonfinite(out, std::isinf(value), specs, fspecs); - - if (specs.align == align::numeric && fspecs.sign) { - auto it = reserve(out, 1); - *it++ = static_cast(data::signs[fspecs.sign]); - out = base_iterator(out, it); - fspecs.sign = sign::none; - if (specs.width != 0) --specs.width; - } - - memory_buffer buffer; - if (fspecs.format == float_format::hex) { - if (fspecs.sign) buffer.push_back(data::signs[fspecs.sign]); - snprintf_float(promote_float(value), specs.precision, fspecs, buffer); - return write_bytes(out, {buffer.data(), buffer.size()}, - specs); - } - int precision = specs.precision >= 0 || !specs.type ? specs.precision : 6; - if (fspecs.format == float_format::exp) { - if (precision == max_value()) - FMT_THROW(format_error("number is too big")); - else - ++precision; - } - if (const_check(std::is_same())) fspecs.binary32 = true; - fspecs.use_grisu = is_fast_float(); - int exp = format_float(promote_float(value), precision, fspecs, buffer); - fspecs.precision = precision; - Char point = - fspecs.locale ? 
decimal_point(loc) : static_cast('.'); - auto fp = big_decimal_fp{buffer.data(), static_cast(buffer.size()), exp}; - return write_float(out, fp, specs, fspecs, point); -} - -template ::value)> + if (fspecs.showpoint) { + if (num_zeros <= 0 && fspecs.format != float_format::fixed) + num_zeros = 1; + if (num_zeros > 0) + size += to_unsigned(num_zeros) + 1; + } + return write_padded(out, specs, size, [&](iterator it) { + if (sign) + *it++ = static_cast(data::signs[sign]); + it = write_significand(it, significand, significand_size); + it = detail::fill_n(it, fp.exponent, zero); + if (!fspecs.showpoint) + return it; + *it++ = decimal_point; + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } else if (exp > 0) { + // 1234e-2 -> 12.34[0+] + int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0; + size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) + *it++ = static_cast(data::signs[sign]); + it = write_significand(it, significand, significand_size, exp, decimal_point); + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } + // 1234e-6 -> 0.001234 + int num_zeros = -exp; + if (significand_size == 0 && fspecs.precision >= 0 && fspecs.precision < num_zeros) { + num_zeros = fspecs.precision; + } + bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; + size += 1 + (pointy ? 
1 : 0) + to_unsigned(num_zeros); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) + *it++ = static_cast(data::signs[sign]); + *it++ = zero; + if (!pointy) + return it; + *it++ = decimal_point; + it = detail::fill_n(it, num_zeros, zero); + return write_significand(it, significand, significand_size); + }); +} + +template ::value)> +auto write(OutputIt out, T value, basic_format_specs specs, locale_ref loc = {}) -> OutputIt { + if (const_check(!is_supported_floating_point(value))) + return out; + float_specs fspecs = parse_float_type_spec(specs); + fspecs.sign = specs.sign; + if (std::signbit(value)) { // value < 0 is false for NaN so use signbit. + fspecs.sign = sign::minus; + value = -value; + } else if (fspecs.sign == sign::minus) { + fspecs.sign = sign::none; + } + + if (!std::isfinite(value)) + return write_nonfinite(out, std::isinf(value), specs, fspecs); + + if (specs.align == align::numeric && fspecs.sign) { + auto it = reserve(out, 1); + *it++ = static_cast(data::signs[fspecs.sign]); + out = base_iterator(out, it); + fspecs.sign = sign::none; + if (specs.width != 0) + --specs.width; + } + + memory_buffer buffer; + if (fspecs.format == float_format::hex) { + if (fspecs.sign) + buffer.push_back(data::signs[fspecs.sign]); + snprintf_float(promote_float(value), specs.precision, fspecs, buffer); + return write_bytes(out, {buffer.data(), buffer.size()}, specs); + } + int precision = specs.precision >= 0 || !specs.type ? specs.precision : 6; + if (fspecs.format == float_format::exp) { + if (precision == max_value()) + FMT_THROW(format_error("number is too big")); + else + ++precision; + } + if (const_check(std::is_same())) + fspecs.binary32 = true; + fspecs.use_grisu = is_fast_float(); + int exp = format_float(promote_float(value), precision, fspecs, buffer); + fspecs.precision = precision; + Char point = fspecs.locale ? 
decimal_point(loc) : static_cast('.'); + auto fp = big_decimal_fp {buffer.data(), static_cast(buffer.size()), exp}; + return write_float(out, fp, specs, fspecs, point); +} + +template ::value)> auto write(OutputIt out, T value) -> OutputIt { - if (const_check(!is_supported_floating_point(value))) return out; + if (const_check(!is_supported_floating_point(value))) + return out; - using floaty = conditional_t::value, double, T>; - using uint = typename dragonbox::float_info::carrier_uint; - auto bits = bit_cast(value); + using floaty = conditional_t::value, double, T>; + using uint = typename dragonbox::float_info::carrier_uint; + auto bits = bit_cast(value); - auto fspecs = float_specs(); - auto sign_bit = bits & (uint(1) << (num_bits() - 1)); - if (sign_bit != 0) { - fspecs.sign = sign::minus; - value = -value; - } + auto fspecs = float_specs(); + auto sign_bit = bits & (uint(1) << (num_bits() - 1)); + if (sign_bit != 0) { + fspecs.sign = sign::minus; + value = -value; + } - static const auto specs = basic_format_specs(); - uint mask = exponent_mask(); - if ((bits & mask) == mask) - return write_nonfinite(out, std::isinf(value), specs, fspecs); + static const auto specs = basic_format_specs(); + uint mask = exponent_mask(); + if ((bits & mask) == mask) + return write_nonfinite(out, std::isinf(value), specs, fspecs); - auto dec = dragonbox::to_decimal(static_cast(value)); - return write_float(out, dec, specs, fspecs, static_cast('.')); + auto dec = dragonbox::to_decimal(static_cast(value)); + return write_float(out, dec, specs, fspecs, static_cast('.')); } template ::value && - !is_fast_float::value)> + FMT_ENABLE_IF(std::is_floating_point::value && !is_fast_float::value)> inline auto write(OutputIt out, T value) -> OutputIt { - return write(out, value, basic_format_specs()); + return write(out, value, basic_format_specs()); } template -auto write(OutputIt out, monostate, basic_format_specs = {}, - locale_ref = {}) -> OutputIt { - FMT_ASSERT(false, ""); - return 
out; +auto write(OutputIt out, monostate, basic_format_specs = {}, locale_ref = {}) -> OutputIt { + FMT_ASSERT(false, ""); + return out; } template -FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) - -> OutputIt { - auto it = reserve(out, value.size()); - it = copy_str_noinline(value.begin(), value.end(), it); - return base_iterator(out, it); +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) -> OutputIt { + auto it = reserve(out, value.size()); + it = copy_str_noinline(value.begin(), value.end(), it); + return base_iterator(out, it); } -template ::value)> -constexpr auto write(OutputIt out, const T& value) -> OutputIt { - return write(out, to_string_view(value)); +template ::value)> +constexpr auto write(OutputIt out, const T &value) -> OutputIt { + return write(out, to_string_view(value)); } template ::value && - !std::is_same::value && - !std::is_same::value)> + FMT_ENABLE_IF(is_integral::value && !std::is_same::value && !std::is_same::value)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { - auto abs_value = static_cast>(value); - bool negative = is_negative(value); - // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. - if (negative) abs_value = ~abs_value + 1; - int num_digits = count_digits(abs_value); - auto size = (negative ? 1 : 0) + static_cast(num_digits); - auto it = reserve(out, size); - if (auto ptr = to_pointer(it, size)) { - if (negative) *ptr++ = static_cast('-'); - format_decimal(ptr, abs_value, num_digits); - return out; - } - if (negative) *it++ = static_cast('-'); - it = format_decimal(it, abs_value, num_digits).end; - return base_iterator(out, it); + auto abs_value = static_cast>(value); + bool negative = is_negative(value); + // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. + if (negative) + abs_value = ~abs_value + 1; + int num_digits = count_digits(abs_value); + auto size = (negative ? 
1 : 0) + static_cast(num_digits); + auto it = reserve(out, size); + if (auto ptr = to_pointer(it, size)) { + if (negative) + *ptr++ = static_cast('-'); + format_decimal(ptr, abs_value, num_digits); + return out; + } + if (negative) + *it++ = static_cast('-'); + it = format_decimal(it, abs_value, num_digits).end; + return base_iterator(out, it); } // FMT_ENABLE_IF() condition separated to workaround MSVC bug -template < - typename Char, typename OutputIt, typename T, - bool check = - std::is_enum::value && !std::is_same::value && - mapped_type_constant>::value != - type::custom_type, - FMT_ENABLE_IF(check)> +template ::value && !std::is_same::value && + mapped_type_constant>::value != type::custom_type, + FMT_ENABLE_IF(check)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { - return write( - out, static_cast::type>(value)); + return write(out, static_cast::type>(value)); } -template ::value)> -FMT_CONSTEXPR auto write(OutputIt out, T value, - const basic_format_specs& specs = {}, - locale_ref = {}) -> OutputIt { - return specs.type && specs.type != 's' - ? write(out, value ? 1 : 0, specs, {}) - : write_bytes(out, value ? "true" : "false", specs); +template ::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value, const basic_format_specs &specs = {}, locale_ref = {}) + -> OutputIt { + return specs.type && specs.type != 's' ? write(out, value ? 1 : 0, specs, {}) + : write_bytes(out, value ? 
"true" : "false", specs); } template FMT_CONSTEXPR auto write(OutputIt out, Char value) -> OutputIt { - auto it = reserve(out, 1); - *it++ = value; - return base_iterator(out, it); + auto it = reserve(out, 1); + *it++ = value; + return base_iterator(out, it); } template -FMT_CONSTEXPR_CHAR_TRAITS auto write(OutputIt out, const Char* value) - -> OutputIt { - if (!value) { - FMT_THROW(format_error("string pointer is null")); - } else { - auto length = std::char_traits::length(value); - out = write(out, basic_string_view(value, length)); - } - return out; +FMT_CONSTEXPR_CHAR_TRAITS auto write(OutputIt out, const Char *value) -> OutputIt { + if (!value) { + FMT_THROW(format_error("string pointer is null")); + } else { + auto length = std::char_traits::length(value); + out = write(out, basic_string_view(value, length)); + } + return out; } -template ::value)> -auto write(OutputIt out, const T* value, - const basic_format_specs& specs = {}, locale_ref = {}) - -> OutputIt { - check_pointer_type_spec(specs.type, error_handler()); - return write_ptr(out, to_uintptr(value), &specs); +template ::value)> +auto write(OutputIt out, const T *value, const basic_format_specs &specs = {}, locale_ref = {}) -> OutputIt { + check_pointer_type_spec(specs.type, error_handler()); + return write_ptr(out, to_uintptr(value), &specs); } template -FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> - typename std::enable_if< - mapped_type_constant>::value == - type::custom_type, - OutputIt>::type { - using context_type = basic_format_context; - using formatter_type = - conditional_t::value, - typename context_type::template formatter_type, - fallback_formatter>; - context_type ctx(out, {}, {}); - return formatter_type().format(value, ctx); +FMT_CONSTEXPR auto write(OutputIt out, const T &value) -> + typename std::enable_if>::value == type::custom_type, + OutputIt>::type { + using context_type = basic_format_context; + using formatter_type = + conditional_t::value, typename 
context_type::template formatter_type, + fallback_formatter>; + context_type ctx(out, {}, {}); + return formatter_type().format(value, ctx); } // An argument visitor that formats the argument and writes it via the output // iterator. It's a class and not a generic lambda for compatibility with C++11. -template struct default_arg_formatter { - using iterator = buffer_appender; - using context = buffer_context; - - iterator out; - basic_format_args args; - locale_ref loc; - - template auto operator()(T value) -> iterator { - return write(out, value); - } - auto operator()(typename basic_format_arg::handle h) -> iterator { - basic_format_parse_context parse_ctx({}); - context format_ctx(out, args, loc); - h.format(parse_ctx, format_ctx); - return format_ctx.out(); - } +template +struct default_arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + basic_format_args args; + locale_ref loc; + + template + auto operator()(T value) -> iterator { + return write(out, value); + } + auto operator()(typename basic_format_arg::handle h) -> iterator { + basic_format_parse_context parse_ctx({}); + context format_ctx(out, args, loc); + h.format(parse_ctx, format_ctx); + return format_ctx.out(); + } }; -template struct arg_formatter { - using iterator = buffer_appender; - using context = buffer_context; - - iterator out; - const basic_format_specs& specs; - locale_ref locale; - - template - FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { - return detail::write(out, value, specs, locale); - } - auto operator()(typename basic_format_arg::handle) -> iterator { - // User-defined types are handled separately because they require access - // to the parse context. 
- return out; - } +template +struct arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + const basic_format_specs &specs; + locale_ref locale; + + template + FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { + return detail::write(out, value, specs, locale); + } + auto operator()(typename basic_format_arg::handle) -> iterator { + // User-defined types are handled separately because they require access + // to the parse context. + return out; + } }; -template struct custom_formatter { - basic_format_parse_context& parse_ctx; - buffer_context& ctx; - - void operator()( - typename basic_format_arg>::handle h) const { - h.format(parse_ctx, ctx); - } - template void operator()(T) const {} +template +struct custom_formatter { + basic_format_parse_context &parse_ctx; + buffer_context &ctx; + + void operator()(typename basic_format_arg>::handle h) const { + h.format(parse_ctx, ctx); + } + template + void operator()(T) const { + } }; template -using is_integer = - bool_constant::value && !std::is_same::value && - !std::is_same::value && - !std::is_same::value>; - -template class width_checker { - public: - explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {} - - template ::value)> - FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { - if (is_negative(value)) handler_.on_error("negative width"); - return static_cast(value); - } - - template ::value)> - FMT_CONSTEXPR auto operator()(T) -> unsigned long long { - handler_.on_error("width is not integer"); - return 0; - } - - private: - ErrorHandler& handler_; +using is_integer = bool_constant::value && !std::is_same::value && + !std::is_same::value && !std::is_same::value>; + +template +class width_checker { +public: + explicit FMT_CONSTEXPR width_checker(ErrorHandler &eh) : handler_(eh) { + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) + 
handler_.on_error("negative width"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("width is not integer"); + return 0; + } + +private: + ErrorHandler &handler_; }; -template class precision_checker { - public: - explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {} - - template ::value)> - FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { - if (is_negative(value)) handler_.on_error("negative precision"); - return static_cast(value); - } - - template ::value)> - FMT_CONSTEXPR auto operator()(T) -> unsigned long long { - handler_.on_error("precision is not integer"); - return 0; - } - - private: - ErrorHandler& handler_; +template +class precision_checker { +public: + explicit FMT_CONSTEXPR precision_checker(ErrorHandler &eh) : handler_(eh) { + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) + handler_.on_error("negative precision"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("precision is not integer"); + return 0; + } + +private: + ErrorHandler &handler_; }; -template