28 changes: 14 additions & 14 deletions locale.c
@@ -1235,7 +1235,7 @@ S_parse_LC_ALL_string(pTHX_ const char * string,

# ifdef PERL_LC_ALL_USES_NAME_VALUE_PAIRS

const char separator[] = ";";
const char * const separator = ";";
const Size_t separator_len = 1;
const bool single_component = (strchr(string, ';') == NULL);

@@ -5277,7 +5277,7 @@ S_my_localeconv(pTHX_ const int item)
#define LCONV_MONETARY_ENTRY(name) LCONV_ENTRY(name)

/* There are just a few fields for NUMERIC strings */
const lconv_offset_t lconv_numeric_strings[] = {
static const lconv_offset_t lconv_numeric_strings[] = {
#ifndef NO_LOCALECONV_GROUPING
LCONV_NUMERIC_ENTRY(grouping),
# endif
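The lookup tables in this file (and in several of the hunks below) gain static for a reason the diff itself does not state: without it, an automatic array of pointers may be rebuilt every time the enclosing function is entered, whereas a static const table is emitted once into read-only storage and merely referenced at run time. A minimal sketch of the distinction, with illustrative names that are not taken from this PR:

    void f_nonstatic(void)
    {
        /* automatic: the initializer can be executed on every call */
        const char *names[] = { "grouping", "thousands_sep", "decimal_point" };
        (void) names;
    }

    void f_static(void)
    {
        /* emitted once into read-only data; each call only references it */
        static const char *const names[] = { "grouping", "thousands_sep", "decimal_point" };
        (void) names;
    }

For read-only tables of string literals the behavior is unchanged; the difference is purely initialization cost and code size.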
@@ -5301,7 +5301,7 @@ S_my_localeconv(pTHX_ const int item)
&lconv_numeric_strings[(C_ARRAY_LENGTH(lconv_numeric_strings) - 2)]

/* And the MONETARY string fields */
const lconv_offset_t lconv_monetary_strings[] = {
static const lconv_offset_t lconv_monetary_strings[] = {
LCONV_MONETARY_ENTRY(int_curr_symbol),
LCONV_MONETARY_ENTRY(mon_decimal_point),
#ifndef NO_LOCALECONV_MON_THOUSANDS_SEP
@@ -5322,7 +5322,7 @@ S_my_localeconv(pTHX_ const int item)
&lconv_monetary_strings[(C_ARRAY_LENGTH(lconv_monetary_strings) - 2)]

/* Finally there are integer fields, all are for monetary purposes */
const lconv_offset_t lconv_integers[] = {
static const lconv_offset_t lconv_integers[] = {
LCONV_ENTRY(int_frac_digits),
LCONV_ENTRY(frac_digits),
LCONV_ENTRY(p_sep_by_space),
@@ -5432,7 +5432,7 @@ S_my_localeconv(pTHX_ const int item)
* the data structure could do double duty. However, both this and
* RADIXCHAR would need to be in the final position of the same full
* structure; an impossibility. So make this into a separate structure */
const lconv_offset_t thousands_sep_string[] = {
static const lconv_offset_t thousands_sep_string[] = {
LCONV_NUMERIC_ENTRY(thousands_sep),
{NULL, 0}
};
@@ -7684,10 +7684,10 @@ S_emulate_langinfo(pTHX_ const PERL_INTMAX_T item,
* holds what field in the 'struct tm' to applies to the corresponding
* format */
int year, min, sec;
const char * fmts[] = {"%Oy", "%OM", "%OS", "%Od", "%OH", "%Om", "%Ow" };
const Size_t maxes[] = { 99, 59, 59, 31, 23, 11, 6 };
const int offsets[] = { 0, 0, 0, 1, 0, 1, 0 };
int * vars[] = {&year, &min, &sec, &mday, &hour, &mon, &mday };
static const char * const fmts[] = {"%Oy", "%OM", "%OS", "%Od", "%OH", "%Om", "%Ow" };
static const Size_t maxes[] = { 99, 59, 59, 31, 23, 11, 6 };
static const int offsets[] = { 0, 0, 0, 1, 0, 1, 0 };
int * vars[] = {&year, &min, &sec, &mday, &hour, &mon, &mday };
Size_t j = 0; /* Current index into the above tables */

orig_TIME_locale = toggle_locale_c_unless_locking(LC_TIME, locale);
@@ -7965,7 +7965,7 @@ S_maybe_override_codeset(pTHX_ const char * codeset,
utf8ness_t strings_utf8ness = UTF8NESS_UNKNOWN;

/* List of strings to look at */
const int trials[] = {
static const int trials[] = {

# if defined(USE_LOCALE_MONETARY) && defined(HAS_LOCALECONV)

@@ -9530,7 +9530,7 @@ S_compute_collxfrm_coefficients(pTHX)
* digits tend to have fewer levels, and some punctuation has more, but
* those are relatively sparse in text, and khw believes this gives a
* reasonable result, but it could be changed if experience so dictates. */
const char longer[] = "ABCDEFGHIJKLMnopqrstuvwxyz";
const char * const longer = "ABCDEFGHIJKLMnopqrstuvwxyz";
char * x_longer; /* Transformed 'longer' */
Size_t x_len_longer; /* Length of 'x_longer' */

@@ -9555,7 +9555,7 @@ S_compute_collxfrm_coefficients(pTHX)

/* Find out how long the transformation really is */
x_longer = mem_collxfrm_(longer,
sizeof(longer) - 1,
strlen(longer),
&x_len_longer,

/* We avoid converting to UTF-8 in the called
@@ -9572,7 +9572,7 @@ S_compute_collxfrm_coefficients(pTHX)
* first character. This minimizes the chances of being swayed by outliers
* */
x_shorter = mem_collxfrm_(longer + 1,
sizeof(longer) - 2,
strlen(longer) - 1,
&x_len_shorter,
PL_in_utf8_COLLATE_locale);
Safefree(x_shorter);
@@ -9616,7 +9616,7 @@ S_compute_collxfrm_coefficients(pTHX)
/* mx + b = len
* so: b = len - mx
* but in case something has gone wrong, make sure it is non-negative */
base = x_len_longer - PL_collxfrm_mult * (sizeof(longer) - 1);
base = x_len_longer - PL_collxfrm_mult * strlen(longer);
if (base < 0) {
base = 0;
}
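The substitutions of sizeof(longer) - 1 with strlen(longer) in the hunks above follow directly from the declaration change: sizeof applied to a char array counts the array's bytes, including the trailing NUL, while sizeof applied to a pointer yields only the pointer's own size, so the string length has to come from strlen instead. A minimal sketch of the difference, not taken from this PR:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        const char arr[] = "ABC";        /* array object: sizeof sees 4 bytes   */
        const char * const ptr = "ABC";  /* pointer: sizeof sees 4 or 8 bytes    */

        assert(sizeof(arr) - 1 == 3);    /* string length, known at compile time */
        assert(strlen(ptr) == 3);        /* string length, computed at run time  */
        return 0;
    }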
2 changes: 1 addition & 1 deletion perl.c
@@ -5172,7 +5172,7 @@ S_incpush(pTHX_ const char *const dir, STRLEN len, U32 flags)
SV *subdir = newSVsv(libdir);
#ifdef PERL_INC_VERSION_LIST
/* Configure terminates PERL_INC_VERSION_LIST with a NULL */
const char * const incverlist[] = { PERL_INC_VERSION_LIST };
static const char * const incverlist[] = { PERL_INC_VERSION_LIST };
const char * const *incver;
#endif

4 changes: 2 additions & 2 deletions pp_ctl.c
@@ -4699,9 +4699,9 @@ S_require_file(pTHX_ SV *sv)
* of checks here at runtime.
*/
const STRLEN package_len = len - 3;
const char slashdot[2] = {'/', '.'};
static const char slashdot[2] = {'/', '.'};
#ifdef DOSISH
const char backslashdot[2] = {'\\', '.'};
static const char backslashdot[2] = {'\\', '.'};
#endif

/* Disallow *purported* barewords that map to absolute
30 changes: 15 additions & 15 deletions regcomp.c
@@ -3484,9 +3484,9 @@ S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp, U32 depth)
bool is_logical = 0, is_optimistic = 0;
const char * const seqstart = RExC_parse;
const char * endptr;
const char non_existent_group_msg[]
const char * const non_existent_group_msg
= "Reference to nonexistent group";
const char impossible_group[] = "Invalid reference to group";
const char * const impossible_group = "Invalid reference to group";

if (has_intervening_patws) {
RExC_parse_inc_by(1);
@@ -4476,7 +4476,7 @@ S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp, U32 depth)
{
const char *p;
/* Even/odd or x = don't care: 010101x10x */
static const char parens[] = "=!aA<,>Bbt";
const char * const parens = "=!aA<,>Bbt";
/* flag below is set to 0 up through 'A'; 1 for larger */

if (paren && (p = strchr(parens, paren))) {
@@ -8350,7 +8350,7 @@ S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state,
* was, but there was a typo. We tease these apart by doing fuzzy
* matching on the name */
if (class_number == OOB_NAMEDCLASS && found_problem) {
const UV posix_names[][6] = {
static const UV posix_names[][6] = {
{ 'a', 'l', 'n', 'u', 'm' },
{ 'a', 'l', 'p', 'h', 'a' },
{ 'a', 's', 'c', 'i', 'i' },
@@ -8368,7 +8368,7 @@ S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state,
};
/* The names of the above all have added NULs to make them the same
* size, so we need to also have the real lengths */
const UV posix_name_lengths[] = {
static const UV posix_name_lengths[] = {
sizeof("alnum") - 1,
sizeof("alpha") - 1,
sizeof("ascii") - 1,
@@ -14247,7 +14247,7 @@ S_handle_user_defined_property(pTHX_

const char * s0 = string; /* Points to first byte in the current line
being parsed in 'string' */
const char overflow_msg[] = "Code point too large in \"";
const char * const overflow_msg = "Code point too large in \"";
SV* running_definition = NULL;

PERL_ARGS_ASSERT_HANDLE_USER_DEFINED_PROPERTY;
@@ -15332,7 +15332,7 @@ S_parse_uniprop_string(pTHX_
/* Drop down to look up in the official properties */
}
else {
const char insecure[] = "Insecure user-defined property";
const char * const insecure = "Insecure user-defined property";

/* Here, there is a sub by the correct name. Normally we call it
* to get the property definition */
@@ -15357,7 +15357,7 @@ S_parse_uniprop_string(pTHX_
* error instead */
if (TAINT_get) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
sv_catpvn(msg, insecure, strlen(insecure));
goto append_name_to_msg;
}

@@ -15545,7 +15545,7 @@ S_parse_uniprop_string(pTHX_
}
if (TAINT_get) {
if (SvTRUE(error)) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
sv_catpvn(msg, insecure, strlen(insecure));
}

if (name_len > 0) {
@@ -16033,8 +16033,8 @@ S_handle_names_wildcard(pTHX_ const char * wname, /* wildcard name to match */
const char * must; /* The PV of 'must' */
STRLEN must_len; /* And its length */
SV * syllable_name = NULL; /* For Hangul syllables */
const char hangul_prefix[] = "HANGUL SYLLABLE ";
const STRLEN hangul_prefix_len = sizeof(hangul_prefix) - 1;
const char * const hangul_prefix = "HANGUL SYLLABLE ";
const STRLEN hangul_prefix_len = strlen(hangul_prefix);

/* By inspection, there are a maximum of 7 bytes in the suffix of a hangul
* syllable name, and these are immutable and guaranteed by the Unicode
@@ -16267,20 +16267,20 @@ S_handle_names_wildcard(pTHX_ const char * wname, /* wildcard name to match */
/* These constants, names, values, and algorithm are adapted from the
* Unicode standard, version 5.1, section 3.12, and should never
* change. */
const char * JamoL[] = {
static const char * const JamoL[] = {
"G", "GG", "N", "D", "DD", "R", "M", "B", "BB",
"S", "SS", "", "J", "JJ", "C", "K", "T", "P", "H"
};
const int LCount = C_ARRAY_LENGTH(JamoL);

const char * JamoV[] = {
static const char * const JamoV[] = {
"A", "AE", "YA", "YAE", "EO", "E", "YEO", "YE", "O", "WA",
"WAE", "OE", "YO", "U", "WEO", "WE", "WI", "YU", "EU", "YI",
"I"
};
const int VCount = C_ARRAY_LENGTH(JamoV);

const char * JamoT[] = {
static const char * const JamoT[] = {
"", "G", "GG", "GS", "N", "NJ", "NH", "D", "L",
"LG", "LM", "LB", "LS", "LT", "LP", "LH", "M", "B",
"BS", "S", "SS", "NG", "J", "C", "K", "T", "P", "H"
@@ -16415,7 +16415,7 @@ S_handle_names_wildcard(pTHX_ const char * wname, /* wildcard name to match */
/* If we ever were to accept aliases for, say private use names, we would
* need to do something fancier to find empty names. The code below works
* (at the time it was written), and is slower than the above */
const char empties_pat[] = "^.";
const char * const empties_pat = "^.";
if (strNE(name, empties_pat)) {
SV * empty = newSVpvs("");
if (execute_wildcard(subpattern_re,
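The JamoL, JamoV and JamoT tables in the hunks above feed the algorithmic Hangul syllable names defined in the Unicode standard, section 3.12, which this code adapts. Roughly, a precomposed syllable is split into leading/vowel/trailing indices and the short Jamo names are concatenated after the HANGUL SYLLABLE prefix. A sketch using the standard's constants (SBase 0xAC00, VCount 21, TCount 28); this is an illustration, not Perl's implementation:

    #include <stdio.h>

    /* Print the algorithmic name of a precomposed Hangul syllable code
     * point, given the three Jamo short-name tables (elided here). */
    static void hangul_name(unsigned cp,
                            const char *const *JamoL,
                            const char *const *JamoV,
                            const char *const *JamoT)
    {
        const unsigned SBase = 0xAC00, VCount = 21, TCount = 28;
        unsigned SIndex = cp - SBase;
        printf("HANGUL SYLLABLE %s%s%s\n",
               JamoL[SIndex / (VCount * TCount)],
               JamoV[(SIndex % (VCount * TCount)) / TCount],
               JamoT[SIndex % TCount]);
    }

With the tables shown above, U+AC00 maps to indices 0/0/0 and yields HANGUL SYLLABLE GA.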
11 changes: 5 additions & 6 deletions toke.c
@@ -4093,9 +4093,9 @@ S_scan_const(pTHX_ char *start)
if (PL_lex_inpat) {

if (! len) { /* The name resolved to an empty string */
const char empty_N[] = "\\N{_}";
Copy(empty_N, d, sizeof(empty_N) - 1, char);
d += sizeof(empty_N) - 1;
const char * const empty_N = "\\N{_}";
Copy(empty_N, d, strlen(empty_N), char);
d += strlen(empty_N);
}
else {
/* In order to not lose information for the regex
@@ -4113,9 +4113,8 @@ S_scan_const(pTHX_ char *start)
* through the string. Each character takes up
* 2 hex digits plus either a trailing dot or
* the "}" */
const char initial_text[] = "\\N{U+";
const STRLEN initial_len = sizeof(initial_text)
- 1;
const char * const initial_text = "\\N{U+";
const STRLEN initial_len = strlen(initial_text);
d = off + SvGROW(sv, off
+ 3 * len

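The 3 * len growth above matches the arithmetic in the comment: in the pattern case, each resolved character is written back as two hex digits followed by either a joining dot or the closing brace. As a worked example (assuming code points that fit in two hex digits, per the comment), a name resolving to the three characters U+61 U+62 U+63 would be emitted as something like

    \N{U+61.62.63}

that is, the \N{U+ prefix of initial_len bytes plus "61.", "62." and "63}" at three bytes per character.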
4 changes: 2 additions & 2 deletions util.c
@@ -6195,11 +6195,11 @@ static void atos_update(atos_context* ctx,
if (ctx->unavail)
return;
if (ctx->tool == NULL) {
const char* tools[] = {
static const char* const tools[] = {
"/usr/bin/xcrun",
"/usr/bin/atos"
};
const char* formats[] = {
static const char* const formats[] = {
"/usr/bin/xcrun atos -o '%s' -l %08x %08x 2>&1",
"/usr/bin/atos -d -o '%s' -l %08x %08x 2>&1"
};