diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..472f47c72a --- /dev/null +++ b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "scylla-jmx"] + path = scylla-jmx + url = git@github.com:scylladb/scylla-jmx.git + branch = master diff --git a/build.xml b/build.xml index e3964e3aee..324633a959 100644 --- a/build.xml +++ b/build.xml @@ -40,6 +40,8 @@ + + @@ -166,6 +168,11 @@ + + + + + @@ -453,6 +460,18 @@ + + + + + + + + + + + + @@ -641,6 +660,18 @@ + + + + + + + + + + + + - @@ -1054,6 +1085,17 @@ + + + + + + + + + + + @@ -1069,6 +1111,7 @@ + @@ -1216,6 +1259,7 @@ + diff --git a/lib/hk2-api-2.4.0-b31.jar b/lib/hk2-api-2.4.0-b31.jar new file mode 100644 index 0000000000..6a70ef3c18 Binary files /dev/null and b/lib/hk2-api-2.4.0-b31.jar differ diff --git a/lib/hk2-locator-2.4.0-b31.jar b/lib/hk2-locator-2.4.0-b31.jar new file mode 100644 index 0000000000..1f38d250d1 Binary files /dev/null and b/lib/hk2-locator-2.4.0-b31.jar differ diff --git a/lib/hk2-utils-2.4.0-b31.jar b/lib/hk2-utils-2.4.0-b31.jar new file mode 100644 index 0000000000..211ce37b11 Binary files /dev/null and b/lib/hk2-utils-2.4.0-b31.jar differ diff --git a/lib/jackson-annotations-2.9.9.jar b/lib/jackson-annotations-2.9.9.jar new file mode 100644 index 0000000000..8e059d9711 Binary files /dev/null and b/lib/jackson-annotations-2.9.9.jar differ diff --git a/lib/jackson-core-2.9.9.jar b/lib/jackson-core-2.9.9.jar new file mode 100644 index 0000000000..02bd446ddf Binary files /dev/null and b/lib/jackson-core-2.9.9.jar differ diff --git a/lib/jackson-databind-2.9.9.jar b/lib/jackson-databind-2.9.9.jar new file mode 100644 index 0000000000..80d4c47a9f Binary files /dev/null and b/lib/jackson-databind-2.9.9.jar differ diff --git a/lib/jackson-jaxrs-base-2.9.9.jar b/lib/jackson-jaxrs-base-2.9.9.jar new file mode 100644 index 0000000000..24c743b498 Binary files /dev/null and b/lib/jackson-jaxrs-base-2.9.9.jar differ diff --git a/lib/jackson-jaxrs-json-provider-2.9.9.jar b/lib/jackson-jaxrs-json-provider-2.9.9.jar new file mode 100644 index 0000000000..8bdc2d4b1f Binary files /dev/null and b/lib/jackson-jaxrs-json-provider-2.9.9.jar differ diff --git a/lib/jackson-module-jaxb-annotations-2.9.9.jar b/lib/jackson-module-jaxb-annotations-2.9.9.jar new file mode 100644 index 0000000000..e431a02e7e Binary files /dev/null and b/lib/jackson-module-jaxb-annotations-2.9.9.jar differ diff --git a/lib/javax.annotation-api-1.2.jar b/lib/javax.annotation-api-1.2.jar new file mode 100644 index 0000000000..9ab39ffa4b Binary files /dev/null and b/lib/javax.annotation-api-1.2.jar differ diff --git a/lib/javax.inject-2.4.0-b31.jar b/lib/javax.inject-2.4.0-b31.jar new file mode 100644 index 0000000000..1f60d98a76 Binary files /dev/null and b/lib/javax.inject-2.4.0-b31.jar differ diff --git a/lib/javax.json-1.0.4.jar b/lib/javax.json-1.0.4.jar new file mode 100644 index 0000000000..09967d8158 Binary files /dev/null and b/lib/javax.json-1.0.4.jar differ diff --git a/lib/javax.json-api-1.0.jar b/lib/javax.json-api-1.0.jar new file mode 100644 index 0000000000..d276c793c7 Binary files /dev/null and b/lib/javax.json-api-1.0.jar differ diff --git a/lib/javax.ws.rs-api-2.0.1.jar b/lib/javax.ws.rs-api-2.0.1.jar new file mode 100644 index 0000000000..7eb68b4a0a Binary files /dev/null and b/lib/javax.ws.rs-api-2.0.1.jar differ diff --git a/lib/jersey-client-2.22.1.jar b/lib/jersey-client-2.22.1.jar new file mode 100644 index 0000000000..687843e168 Binary files /dev/null and b/lib/jersey-client-2.22.1.jar differ diff --git 
a/lib/jersey-common-2.22.1.jar b/lib/jersey-common-2.22.1.jar new file mode 100644 index 0000000000..9ee53873df Binary files /dev/null and b/lib/jersey-common-2.22.1.jar differ diff --git a/lib/jersey-guava-2.22.1.jar b/lib/jersey-guava-2.22.1.jar new file mode 100644 index 0000000000..70f3a0761a Binary files /dev/null and b/lib/jersey-guava-2.22.1.jar differ diff --git a/lib/jsr305-2.0.2.jar b/lib/jsr305-2.0.2.jar new file mode 100644 index 0000000000..43807b02f3 Binary files /dev/null and b/lib/jsr305-2.0.2.jar differ diff --git a/lib/jsr311-api-1.1.1.jar b/lib/jsr311-api-1.1.1.jar new file mode 100644 index 0000000000..ec8bc81854 Binary files /dev/null and b/lib/jsr311-api-1.1.1.jar differ diff --git a/lib/scylla-apiclient-1.0.jar b/lib/scylla-apiclient-1.0.jar new file mode 100644 index 0000000000..4773caebbb Binary files /dev/null and b/lib/scylla-apiclient-1.0.jar differ diff --git a/scylla-jmx b/scylla-jmx new file mode 160000 index 0000000000..04ea3ab7e0 --- /dev/null +++ b/scylla-jmx @@ -0,0 +1 @@ +Subproject commit 04ea3ab7e02dbbb37eeb696f55ca4c5fe2a1ee5b diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java b/src/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java index 485f1a0908..96f9d03ca7 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java @@ -17,6 +17,8 @@ */ package org.apache.cassandra.db.compaction; +import javax.json.JsonArray; +import javax.json.JsonObject; import javax.management.openmbean.*; import java.util.Map; import java.util.UUID; @@ -82,4 +84,35 @@ public static TabularData from(UntypedResultSet resultSet) throws OpenDataExcept } return result; } + + public static TabularData from(JsonArray resultSet) throws OpenDataException { + TabularDataSupport result = new TabularDataSupport(TABULAR_TYPE); + for (int i = 0; i < resultSet.size(); i++) { + JsonObject row = resultSet.getJsonObject(i); + String id = row.getString("id"); + String ksName = row.getString("ks"); + String cfName = row.getString("cf"); + long compactedAt = row.getJsonNumber("compacted_at").longValue(); + long bytesIn = row.getJsonNumber("bytes_in").longValue(); + long bytesOut = row.getJsonNumber("bytes_out").longValue(); + + JsonArray merged = row.getJsonArray("rows_merged"); + StringBuilder sb = new StringBuilder(); + if (merged != null) { + sb.append('{'); + for (int m = 0; m < merged.size(); m++) { + JsonObject entry = merged.getJsonObject(m); + if (m > 0) { + sb.append(','); + } + sb.append(entry.getString("key")).append(':').append(entry.getString("value")); + + } + sb.append('}'); + } + result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES, + new Object[] { id, ksName, cfName, compactedAt, bytesIn, bytesOut, sb.toString() })); + } + return result; + } } diff --git a/src/java/org/apache/cassandra/db/compaction/ScyllaCompactionManager.java b/src/java/org/apache/cassandra/db/compaction/ScyllaCompactionManager.java new file mode 100644 index 0000000000..f8b7e9c840 --- /dev/null +++ b/src/java/org/apache/cassandra/db/compaction/ScyllaCompactionManager.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.cassandra.db.compaction; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; + +import javax.json.JsonArray; +import javax.json.JsonObject; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.TabularData; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; + +import org.apache.cassandra.metrics.CompactionMetrics; + +import com.scylladb.jmx.api.APIClient; + +/** + * A singleton which manages a private executor of ongoing compactions. + *

+ * Scheduling for compaction is accomplished by swapping sstables to be
+ * compacted into a set via DataTracker. New scheduling attempts will ignore
+ * currently compacting sstables.
+ */
+
+/*
+ * Copyright 2015 Cloudius Systems
+ *
+ * Modified by Cloudius Systems
+ */
+public class ScyllaCompactionManager implements CompactionManagerMBean {
+    public static final String MBEAN_OBJECT_NAME = "org.apache.cassandra.db:type=CompactionManager";
+    private static final Logger logger = Logger.getLogger(CompactionManager.class.getName());
+    protected final APIClient client;
+
+    public void log(String str) {
+        logger.finest(str);
+    }
+
+    public ScyllaCompactionManager(APIClient client) {
+        this.client = client;
+        // super(MBEAN_OBJECT_NAME, client, new CompactionMetrics());
+    }
+
+    /** List of running compaction objects. */
+    @Override
+    public List<Map<String, String>> getCompactions() {
+        log(" getCompactions()");
+        List<Map<String, String>> results = new ArrayList<Map<String, String>>();
+        JsonArray compactions = client.getJsonArray("compaction_manager/compactions");
+        for (int i = 0; i < compactions.size(); i++) {
+            JsonObject compaction = compactions.getJsonObject(i);
+            Map<String, String> result = new HashMap<String, String>();
+            result.put("total", Long.toString(compaction.getJsonNumber("total").longValue()));
+            result.put("completed", Long.toString(compaction.getJsonNumber("completed").longValue()));
+            result.put("taskType", compaction.getString("task_type"));
+            result.put("keyspace", compaction.getString("ks"));
+            result.put("columnfamily", compaction.getString("cf"));
+            result.put("unit", compaction.getString("unit"));
+            result.put("compactionId", "");
+            results.add(result);
+        }
+        return results;
+    }
+
+    /** List of running compaction summary strings. */
+    @Override
+    public List<String> getCompactionSummary() {
+        log(" getCompactionSummary()");
+        return client.getListStrValue("compaction_manager/compaction_summary");
+    }
+
+    /** Compaction history. */
+    @Override
+    public TabularData getCompactionHistory() {
+        log(" getCompactionHistory()");
+        try {
+            return CompactionHistoryTabularData.from(client.getJsonArray("/compaction_manager/compaction_history"));
+        } catch (OpenDataException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Triggers the compaction of user-specified sstables. You can specify files
+     * from various keyspaces and columnfamilies; if you do, user-defined
+     * compaction is performed separately for each group of files in the same
+     * keyspace/columnfamily.
+     *
+     * @param dataFiles
+     *            a comma-separated list of sstable files to compact. Must
+     *            contain the keyspace and columnfamily name in the path (for
+     *            2.1+) or in the file name itself.
+     */
+    @Override
+    public void forceUserDefinedCompaction(String dataFiles) {
+        log(" forceUserDefinedCompaction(String dataFiles)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("dataFiles", dataFiles);
+        client.post("compaction_manager/force_user_defined_compaction", queryParams);
+    }
+
+    @Override
+    public void forceUserDefinedCleanup(String dataFiles) {
+        // TODO: implement this
+    }
+
+    /**
+     * Stop all running compaction-like tasks having the provided {@code type}.
+     *
+     * @param type
+     *            the type of compaction to stop. Can be one of: COMPACTION,
+     *            VALIDATION, CLEANUP, SCRUB, INDEX_BUILD.
+     */
+    @Override
+    public void stopCompaction(String type) {
+        log(" stopCompaction(String type)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("type", type);
+        client.post("compaction_manager/stop_compaction", queryParams);
+    }
+
+    /**
+     * Returns core size of compaction thread pool
+     */
+    @Override
+    public int getCoreCompactorThreads() {
+        log(" getCoreCompactorThreads()");
+        return client.getIntValue("");
+    }
+
+    /**
+     * Allows user to resize core size of the compaction thread pool.
+     *
+     * @param number
+     *            New core size of compaction threads
+     */
+    @Override
+    public void setCoreCompactorThreads(int number) {
+        log(" setCoreCompactorThreads(int number)");
+    }
+
+    /**
+     * Returns maximum size of compaction thread pool
+     */
+    @Override
+    public int getMaximumCompactorThreads() {
+        log(" getMaximumCompactorThreads()");
+        return client.getIntValue("");
+    }
+
+    /**
+     * Allows user to resize maximum size of the compaction thread pool.
+     *
+     * @param number
+     *            New maximum of compaction threads
+     */
+    @Override
+    public void setMaximumCompactorThreads(int number) {
+        log(" setMaximumCompactorThreads(int number)");
+    }
+
+    /**
+     * Returns core size of validation thread pool
+     */
+    @Override
+    public int getCoreValidationThreads() {
+        log(" getCoreValidationThreads()");
+        return client.getIntValue("");
+    }
+
+    /**
+     * Allows user to resize core size of the validation thread pool.
+     *
+     * @param number
+     *            New core size of validation threads
+     */
+    @Override
+    public void setCoreValidationThreads(int number) {
+        log(" setCoreValidationThreads(int number)");
+    }
+
+    /**
+     * Returns maximum size of validation thread pool
+     */
+    @Override
+    public int getMaximumValidatorThreads() {
+        log(" getMaximumValidatorThreads()");
+        return client.getIntValue("");
+    }
+
+    /**
+     * Allows user to resize maximum size of the validation thread pool.
+     *
+     * @param number
+     *            New maximum of validation threads
+     */
+    @Override
+    public void setMaximumValidatorThreads(int number) {
+        log(" setMaximumValidatorThreads(int number)");
+    }
+
+    @Override
+    public void stopCompactionById(String compactionId) {
+        // Scylla has neither compaction ids nor the file described in:
+        // "Ids can be found in the transaction log files whose name starts with
+        // compaction_, located in the table transactions folder"
+        // (nodetool)
+        // TODO: throw?
+        log(" stopCompactionById");
+    }
+}
diff --git a/src/java/org/apache/cassandra/locator/ScyllaEndpointSnitchInfo.java b/src/java/org/apache/cassandra/locator/ScyllaEndpointSnitchInfo.java
new file mode 100644
index 0000000000..7acb1e5b63
--- /dev/null
+++ b/src/java/org/apache/cassandra/locator/ScyllaEndpointSnitchInfo.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.cassandra.locator; + +import static java.util.Collections.singletonMap; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.logging.Logger; + +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; + +import com.scylladb.jmx.api.APIClient; + +public class ScyllaEndpointSnitchInfo implements EndpointSnitchInfoMBean { + public static final String MBEAN_NAME = "org.apache.cassandra.db:type=EndpointSnitchInfo"; + private static final Logger logger = Logger.getLogger(EndpointSnitchInfo.class.getName()); + + protected final APIClient client; + + public ScyllaEndpointSnitchInfo(APIClient c) { + this.client = c; + } + + public void log(String str) { + logger.finest(str); + } + + /** + * Provides the Rack name depending on the respective snitch used, given the + * host name/ip + * + * @param host + * @throws UnknownHostException + */ + @Override + public String getRack(String host) throws UnknownHostException { + log("getRack(String host) throws UnknownHostException"); + MultivaluedMap queryParams = host != null ? new MultivaluedHashMap( + singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null; + return client.getStringValue("/snitch/rack", queryParams, 10000); + } + + /** + * Provides the Datacenter name depending on the respective snitch used, + * given the hostname/ip + * + * @param host + * @throws UnknownHostException + */ + @Override + public String getDatacenter(String host) throws UnknownHostException { + log(" getDatacenter(String host) throws UnknownHostException"); + MultivaluedMap queryParams = host != null ? new MultivaluedHashMap( + singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null; + return client.getStringValue("/snitch/datacenter", queryParams, 10000); + } + + /** + * Provides the snitch name of the cluster + * + * @return Snitch name + */ + @Override + public String getSnitchName() { + log(" getSnitchName()"); + return client.getStringValue("/snitch/name"); + } + + @Override + public String getRack() { + return client.getStringValue("/snitch/rack", null, 10000); + } + + @Override + public String getDatacenter() { + return client.getStringValue("/snitch/datacenter", null, 10000); + } +} diff --git a/src/java/org/apache/cassandra/metrics/ScyllaJmxHistogram.java b/src/java/org/apache/cassandra/metrics/ScyllaJmxHistogram.java new file mode 100644 index 0000000000..5c10537f56 --- /dev/null +++ b/src/java/org/apache/cassandra/metrics/ScyllaJmxHistogram.java @@ -0,0 +1,390 @@ +package org.apache.cassandra.metrics; + +import javax.json.JsonArray; +import javax.json.JsonNumber; +import javax.json.JsonObject; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import java.util.Arrays; + +import static java.lang.Math.floor; + +public class ScyllaJmxHistogram implements CassandraMetricsRegistry.JmxHistogramMBean,CassandraMetricsRegistry.MetricMBean + { + + private ObjectName objectName; + + @Override + public ObjectName objectName() + { + return objectName; + } + + public ScyllaJmxHistogram() { + histogram = new Histogram(); + try { + objectName=new ObjectName(""); + } catch (MalformedObjectNameException e) { + e.printStackTrace(); + } + } + + public ScyllaJmxHistogram(JsonObject obj, String metricName) + { + if (obj.containsKey("hist")) { + obj = obj.getJsonObject("hist"); + } + if (obj.containsKey("buckets")) { 
+            histogram = new Histogram(new EstimatedHistogram(obj));
+        } else {
+            histogram = new Histogram(obj);
+        }
+
+        try {
+            objectName = new ObjectName("");
+        } catch (MalformedObjectNameException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private Histogram histogram;
+
+    public void update(JsonObject obj) {
+
+    }
+
+    @Override
+    public long getCount() {
+        return histogram.getCount();
+    }
+
+    @Override
+    public long getMin() {
+        return histogram.getMin();
+    }
+
+    @Override
+    public long getMax() {
+        return histogram.getMax();
+    }
+
+    @Override
+    public double getMean() {
+        return histogram.getMean();
+    }
+
+    @Override
+    public double getStdDev() {
+        return histogram.getStdDev();
+    }
+
+    @Override
+    public double get50thPercentile() {
+        return histogram.getValue(.5);
+    }
+
+    @Override
+    public double get75thPercentile() {
+        return histogram.getValue(.75);
+    }
+
+    @Override
+    public double get95thPercentile() {
+        return histogram.getValue(.95);
+    }
+
+    @Override
+    public double get98thPercentile() {
+        return histogram.getValue(.98);
+    }
+
+    @Override
+    public double get99thPercentile() {
+        return histogram.getValue(.99);
+    }
+
+    @Override
+    public double get999thPercentile() {
+        return histogram.getValue(.999);
+    }
+
+    @Override
+    public long[] values() {
+        return histogram.getValues();
+    }
+
+    public static class Histogram {
+        private final long count;
+        private final long min;
+        private final long max;
+        private final double mean;
+        private final double stdDev;
+
+        private final Samples samples;
+
+        public Histogram(long count, long min, long max, double mean, double stdDev, Samples samples) {
+            this.count = count;
+            this.min = min;
+            this.max = max;
+            this.mean = mean;
+            this.stdDev = stdDev;
+            this.samples = samples;
+        }
+
+        public Histogram() {
+            this(0, 0, 0, 0, 0, new Samples() {
+            });
+        }
+
+        public Histogram(JsonObject obj) {
+            this(obj.getJsonNumber("count").longValue(), obj.getJsonNumber("min").longValue(),
+                    obj.getJsonNumber("max").longValue(), obj.getJsonNumber("mean").doubleValue(),
+                    obj.getJsonNumber("variance").doubleValue(), new BufferSamples(getValues(obj)));
+        }
+
+        public Histogram(EstimatedHistogram h) {
+            this(h.count(), h.min(), h.max(), h.mean(), 0, h);
+        }
+
+        private static long[] getValues(JsonObject obj) {
+            JsonArray arr = obj.getJsonArray("sample");
+            if (arr != null) {
+                return asLongArray(arr);
+            }
+            return new long[0];
+        }
+
+        public long[] getValues() {
+            return samples.getValues();
+        }
+
+        // Origin (and previous iterations of scylla-jmx)
+        // use biased/ExponentiallyDecaying measurements
+        // for the history & quantile resolution.
+        // However, for us that is just gobbledygook, since
+        // we, when asked, and when a certain time has passed,
+        // ask the actual scylla server for a "values" buffer:
+        // a buffer with no information whatsoever on how said
+        // values correlate to actual sampling time.
+        // So, applying time weights at this level is just
+        // wrong. We can just as well treat this as a uniform
+        // distribution.
+        // Obvious improvement: Send time/value tuples instead.
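A worked check of the uniform interpolation that BufferSamples.getValue (further down in this file) performs on the quantile accessor below; the sample values here are hypothetical:

    public class QuantileCheck {
        public static void main(String[] args) {
            // Hypothetical sorted sample buffer as returned by the REST API:
            long[] samples = {10, 20, 30, 40};
            double quantile = 0.5;
            double pos = quantile * (samples.length + 1); // 0.5 * 5 = 2.5
            // pos falls between 1 and samples.length, so interpolate:
            double lower = samples[(int) pos - 1];        // samples[1] = 20
            double upper = samples[(int) pos];            // samples[2] = 30
            // Same arithmetic as BufferSamples.getValue:
            System.out.println(lower + (pos - Math.floor(pos)) * (upper - lower)); // 25.0
        }
    }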
+ public double getValue(double quantile) { + return samples.getValue(quantile); + } + + public long getCount() { + return count; + } + + public long getMin() { + return min; + } + + public long getMax() { + return max; + } + + public double getMean() { + return mean; + } + + public double getStdDev() { + return stdDev; + } + } + + private static long[] asLongArray(JsonArray a) { + return a.getValuesAs(JsonNumber.class).stream().mapToLong(n -> n.longValue()).toArray(); + } + + private static interface Samples { + default double getValue(double quantile) { + return 0; + } + + default long[] getValues() { + return new long[0]; + } + } + + private static class BufferSamples implements Samples { + private final long[] samples; + + public BufferSamples(long[] samples) { + this.samples = samples; + Arrays.sort(this.samples); + } + + @Override + public long[] getValues() { + return samples; + } + + @Override + public double getValue(double quantile) { + if (quantile < 0.0 || quantile > 1.0) { + throw new IllegalArgumentException(quantile + " is not in [0..1]"); + } + + if (samples.length == 0) { + return 0.0; + } + + final double pos = quantile * (samples.length + 1); + + if (pos < 1) { + return samples[0]; + } + + if (pos >= samples.length) { + return samples[samples.length - 1]; + } + + final double lower = samples[(int) pos - 1]; + final double upper = samples[(int) pos]; + return lower + (pos - floor(pos)) * (upper - lower); + } + } + + public static class EstimatedHistogram implements Samples { + /** + * The series of values to which the counts in `buckets` correspond: 1, + * 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of + * [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of + * 4. + * + * The series starts at 1 and grows by 1.2 each time (rounding and + * removing duplicates). It goes from 1 to around 36M by default + * (creating 90+1 buckets), which will give us timing resolution from + * microseconds to 36 seconds, with less precision as the numbers get + * larger. + * + * Each bucket represents values from (previous bucket offset, current + * offset]. + */ + private final long[] bucketOffsets; + // buckets is one element longer than bucketOffsets -- the last element + // is + // values greater than the last offset + private long[] buckets; + + public EstimatedHistogram(JsonObject obj) { + this(asLongArray(obj.getJsonArray("bucket_offsets")), asLongArray(obj.getJsonArray("buckets"))); + } + + public EstimatedHistogram(long[] offsets, long[] bucketData) { + assert bucketData.length == offsets.length + 1; + bucketOffsets = offsets; + buckets = bucketData; + } + + /** + * @return the smallest value that could have been added to this + * histogram + */ + public long min() { + for (int i = 0; i < buckets.length; i++) { + if (buckets[i] > 0) { + return i == 0 ? 0 : 1 + bucketOffsets[i - 1]; + } + } + return 0; + } + + /** + * @return the largest value that could have been added to this + * histogram. If the histogram overflowed, returns + * Long.MAX_VALUE. 
+ */ + public long max() { + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + return Long.MAX_VALUE; + } + + for (int i = lastBucket - 1; i >= 0; i--) { + if (buckets[i] > 0) { + return bucketOffsets[i]; + } + } + return 0; + } + + @Override + public long[] getValues() { + return buckets; + } + + /** + * @param percentile + * @return estimated value at given percentile + */ + @Override + public double getValue(double percentile) { + assert percentile >= 0 && percentile <= 1.0; + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + throw new IllegalStateException("Unable to compute when histogram overflowed"); + } + + long pcount = (long) Math.floor(count() * percentile); + if (pcount == 0) { + return 0; + } + + long elements = 0; + for (int i = 0; i < lastBucket; i++) { + elements += buckets[i]; + if (elements >= pcount) { + return bucketOffsets[i]; + } + } + return 0; + } + + /** + * @return the mean histogram value (average of bucket offsets, weighted + * by count) + * @throws IllegalStateException + * if any values were greater than the largest bucket + * threshold + */ + public long mean() { + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed"); + } + + long elements = 0; + long sum = 0; + for (int i = 0; i < lastBucket; i++) { + long bCount = buckets[i]; + elements += bCount; + sum += bCount * bucketOffsets[i]; + } + + return (long) Math.ceil((double) sum / elements); + } + + /** + * @return the total number of non-zero values + */ + public long count() { + return Arrays.stream(buckets).sum(); + } + + /** + * @return true if this histogram has overflowed -- that is, a value + * larger than our largest bucket could bound was added + */ + @SuppressWarnings("unused") + public boolean isOverflowed() { + return buckets[buckets.length - 1] > 0; + } + + } + + } diff --git a/src/java/org/apache/cassandra/metrics/ScyllaJmxTimer.java b/src/java/org/apache/cassandra/metrics/ScyllaJmxTimer.java new file mode 100644 index 0000000000..30d953a3fc --- /dev/null +++ b/src/java/org/apache/cassandra/metrics/ScyllaJmxTimer.java @@ -0,0 +1,180 @@ +package org.apache.cassandra.metrics; + +import javax.json.JsonArray; +import javax.json.JsonObject; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import java.util.Locale; +import java.util.concurrent.TimeUnit; +import org.apache.cassandra.metrics.ScyllaJmxHistogram.Histogram; + +public class ScyllaJmxTimer implements CassandraMetricsRegistry.JmxTimerMBean,CassandraMetricsRegistry.MetricMBean,CassandraMetricsRegistry.JmxMeterMBean +{ + private ObjectName objectName; + + @Override + public ObjectName objectName() + { + return objectName; + } + + private static final TimeUnit RATE_UNIT = TimeUnit.SECONDS; + private static final TimeUnit DURATION_UNIT = TimeUnit.MICROSECONDS; + private static final TimeUnit API_DURATION_UNIT = TimeUnit.MICROSECONDS; + private static final double DURATION_FACTOR = 1.0 / API_DURATION_UNIT.convert(1, DURATION_UNIT); + + public ScyllaJmxTimer() { + meter = new Meter(); + histogram = new Histogram(); + try { + objectName=new ObjectName(""); + } catch (MalformedObjectNameException e) { + e.printStackTrace(); + } + } + + private Histogram histogram; + + public ScyllaJmxTimer(JsonObject obj, String metricName) { + // TODO: this is not atomic. 
+ meter = new Meter(obj.getJsonObject("meter")); + histogram = new Histogram(obj.getJsonObject("hist")); + try { + objectName=new ObjectName(""); + } catch (MalformedObjectNameException e) { + e.printStackTrace(); + } + } + + @Override + public double getMin() { + return toDuration(histogram.getMin()); + } + + @Override + public double getMax() { + return toDuration(histogram.getMax()); + } + + @Override + public double getMean() { + return toDuration(histogram.getMean()); + } + + @Override + public double getStdDev() { + return toDuration(histogram.getStdDev()); + } + + @Override + public double get50thPercentile() { + return toDuration(histogram.getValue(.5)); + } + + @Override + public double get75thPercentile() { + return toDuration(histogram.getValue(.75)); + } + + @Override + public double get95thPercentile() { + return toDuration(histogram.getValue(.95)); + } + + @Override + public double get98thPercentile() { + return toDuration(histogram.getValue(.98)); + } + + @Override + public double get99thPercentile() { + return toDuration(histogram.getValue(.99)); + } + + @Override + public double get999thPercentile() { + return toDuration(histogram.getValue(.999)); + } + + @Override + public long[] values() { + return histogram.getValues(); + } + + @Override + public String getDurationUnit() { + return DURATION_UNIT.toString().toLowerCase(Locale.US); + } + + @Override + public long getCount() { + return meter.count; + } + + @Override + public double getMeanRate() { + return meter.meanRate; + } + + @Override + public double getOneMinuteRate() { + return meter.oneMinuteRate; + } + + @Override + public double getFiveMinuteRate() { + return meter.fiveMinuteRate; + } + + @Override + public double getFifteenMinuteRate() { + return meter.fifteenMinuteRate; + } + + @Override + public String getRateUnit() { + return "event/" + unitString(RATE_UNIT); + } + + private static double toDuration(double micro) { + return micro * DURATION_FACTOR; + } + + private Meter meter = new Meter(); + + private static class Meter { + public final long count; + public final double oneMinuteRate; + public final double fiveMinuteRate; + public final double fifteenMinuteRate; + public final double meanRate; + + public Meter(long count, double oneMinuteRate, double fiveMinuteRate, double fifteenMinuteRate, + double meanRate) { + this.count = count; + this.oneMinuteRate = oneMinuteRate; + this.fiveMinuteRate = fiveMinuteRate; + this.fifteenMinuteRate = fifteenMinuteRate; + this.meanRate = meanRate; + } + + public Meter() { + this(0, 0, 0, 0, 0); + } + + public Meter(JsonObject obj) { + JsonArray rates = obj.getJsonArray("rates"); + oneMinuteRate = rates.getJsonNumber(0).doubleValue(); + fiveMinuteRate = rates.getJsonNumber(1).doubleValue(); + fifteenMinuteRate = rates.getJsonNumber(2).doubleValue(); + meanRate = obj.getJsonNumber("mean_rate").doubleValue(); + count = obj.getJsonNumber("count").longValue(); + } + } + + private static String unitString(TimeUnit u) { + String s = u.toString().toLowerCase(Locale.US); + return s.substring(0, s.length() - 1); + } + +} diff --git a/src/java/org/apache/cassandra/service/ScyllaCacheService.java b/src/java/org/apache/cassandra/service/ScyllaCacheService.java new file mode 100644 index 0000000000..138f0d366d --- /dev/null +++ b/src/java/org/apache/cassandra/service/ScyllaCacheService.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ + +package org.apache.cassandra.service; + +import java.util.concurrent.ExecutionException; +import java.util.logging.Logger; + +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; + +import com.scylladb.jmx.api.APIClient; + +public class ScyllaCacheService implements CacheServiceMBean { + private static final Logger logger = Logger.getLogger(CacheService.class.getName()); + + protected final APIClient client; + + public void log(String str) { + logger.finest(str); + } + + public static final String MBEAN_NAME = "org.apache.cassandra.db:type=Caches"; + + public ScyllaCacheService(APIClient client) { + this.client=client; +// super(MBEAN_NAME, client, new CacheMetrics("KeyCache", "key"), new CacheMetrics("RowCache", "row"), +// new CacheMetrics("CounterCache", "counter")); + } + + @Override + public int getRowCacheSavePeriodInSeconds() { + log(" getRowCacheSavePeriodInSeconds()"); + return client.getIntValue("cache_service/row_cache_save_period"); + } + + @Override + public void setRowCacheSavePeriodInSeconds(int rcspis) { + log(" setRowCacheSavePeriodInSeconds(int rcspis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("period", Integer.toString(rcspis)); + client.post("cache_service/row_cache_save_period", queryParams); + } + + @Override + public int getKeyCacheSavePeriodInSeconds() { + log(" getKeyCacheSavePeriodInSeconds()"); + return client.getIntValue("cache_service/key_cache_save_period"); + } + + @Override + public void setKeyCacheSavePeriodInSeconds(int kcspis) { + log(" setKeyCacheSavePeriodInSeconds(int kcspis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("period", Integer.toString(kcspis)); + client.post("cache_service/key_cache_save_period", queryParams); + } + + @Override + public int getCounterCacheSavePeriodInSeconds() { + log(" getCounterCacheSavePeriodInSeconds()"); + return client.getIntValue("cache_service/counter_cache_save_period"); + } + + @Override + public void setCounterCacheSavePeriodInSeconds(int ccspis) { + log(" setCounterCacheSavePeriodInSeconds(int ccspis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("ccspis", Integer.toString(ccspis)); + client.post("cache_service/counter_cache_save_period", queryParams); + } + + @Override + public int getRowCacheKeysToSave() { + log(" getRowCacheKeysToSave()"); + return client.getIntValue("cache_service/row_cache_keys_to_save"); + } + + @Override + public void setRowCacheKeysToSave(int rckts) { + log(" setRowCacheKeysToSave(int rckts)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("rckts", Integer.toString(rckts)); + client.post("cache_service/row_cache_keys_to_save", queryParams); + } + + 
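The cache-service setters above all follow one shape: log, put the value into a query-parameter map, POST to the matching Scylla REST endpoint. A minimal usage sketch from the JMX client side; the service URL is an assumption (7199 is only the conventional Cassandra JMX port), and the exact connection details depend on how scylla-jmx is deployed:

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    import org.apache.cassandra.service.CacheServiceMBean;

    public class CacheTuneExample {
        public static void main(String[] args) throws Exception {
            JMXServiceURL url = new JMXServiceURL(
                    "service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
            try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
                MBeanServerConnection conn = jmxc.getMBeanServerConnection();
                CacheServiceMBean cache = JMX.newMBeanProxy(conn,
                        new ObjectName("org.apache.cassandra.db:type=Caches"),
                        CacheServiceMBean.class);
                // Ends up as POST cache_service/key_cache_keys_to_save?kckts=100
                // against the Scylla REST API, per setKeyCacheKeysToSave above.
                cache.setKeyCacheKeysToSave(100);
            }
        }
    }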
@Override + public int getKeyCacheKeysToSave() { + log(" getKeyCacheKeysToSave()"); + return client.getIntValue("cache_service/key_cache_keys_to_save"); + } + + @Override + public void setKeyCacheKeysToSave(int kckts) { + log(" setKeyCacheKeysToSave(int kckts)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("kckts", Integer.toString(kckts)); + client.post("cache_service/key_cache_keys_to_save", queryParams); + } + + @Override + public int getCounterCacheKeysToSave() { + log(" getCounterCacheKeysToSave()"); + return client.getIntValue("cache_service/counter_cache_keys_to_save"); + } + + @Override + public void setCounterCacheKeysToSave(int cckts) { + log(" setCounterCacheKeysToSave(int cckts)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("cckts", Integer.toString(cckts)); + client.post("cache_service/counter_cache_keys_to_save", queryParams); + } + + /** + * invalidate the key cache; for use after invalidating row cache + */ + @Override + public void invalidateKeyCache() { + log(" invalidateKeyCache()"); + client.post("cache_service/invalidate_key_cache"); + } + + /** + * invalidate the row cache; for use after bulk loading via BinaryMemtable + */ + @Override + public void invalidateRowCache() { + log(" invalidateRowCache()"); + client.post("cache_service/invalidate_row_cache"); + } + + @Override + public void invalidateCounterCache() { + log(" invalidateCounterCache()"); + client.post("cache_service/invalidate_counter_cache"); + } + + @Override + public void setRowCacheCapacityInMB(long capacity) { + log(" setRowCacheCapacityInMB(long capacity)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("capacity", Long.toString(capacity)); + client.post("cache_service/row_cache_capacity", queryParams); + } + + @Override + public void setKeyCacheCapacityInMB(long capacity) { + log(" setKeyCacheCapacityInMB(long capacity)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("capacity", Long.toString(capacity)); + client.post("cache_service/key_cache_capacity", queryParams); + } + + @Override + public void setCounterCacheCapacityInMB(long capacity) { + log(" setCounterCacheCapacityInMB(long capacity)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("capacity", Long.toString(capacity)); + client.post("cache_service/counter_cache_capacity_in_mb", queryParams); + } + + /** + * save row and key caches + * + * @throws ExecutionException + * when attempting to retrieve the result of a task that aborted + * by throwing an exception + * @throws InterruptedException + * when a thread is waiting, sleeping, or otherwise occupied, + * and the thread is interrupted, either before or during the + * activity. + */ + @Override + public void saveCaches() throws ExecutionException, InterruptedException { + log(" saveCaches() throws ExecutionException, InterruptedException"); + client.post("cache_service/save_caches"); + } +} diff --git a/src/java/org/apache/cassandra/service/ScyllaStorageProxy.java b/src/java/org/apache/cassandra/service/ScyllaStorageProxy.java new file mode 100644 index 0000000000..92980fa331 --- /dev/null +++ b/src/java/org/apache/cassandra/service/ScyllaStorageProxy.java @@ -0,0 +1,305 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright 2015 Cloudius Systems
+ *
+ * Modified by Cloudius Systems
+ */
+package org.apache.cassandra.service;
+
+import static java.util.Collections.emptySet;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import javax.json.JsonArray;
+import javax.ws.rs.core.MultivaluedHashMap;
+import javax.ws.rs.core.MultivaluedMap;
+
+import com.scylladb.jmx.api.APIClient;
+
+public class ScyllaStorageProxy implements StorageProxyMBean {
+    public static final String MBEAN_NAME = "org.apache.cassandra.db:type=StorageProxy";
+    private static final Logger logger = Logger.getLogger(StorageProxy.class.getName());
+
+    public void log(String str) {
+        logger.finest(str);
+    }
+
+    public static final String UNREACHABLE = "UNREACHABLE";
+    protected final APIClient client;
+
+    public ScyllaStorageProxy(APIClient client) {
+        this.client = client;
+        // super(MBEAN_NAME, client, new ClientRequestMetrics("Read", "storage_proxy/metrics/read"),
+        //         new ClientRequestMetrics("RangeSlice", "/storage_proxy/metrics/range"),
+        //         new ClientRequestMetrics("Write", "storage_proxy/metrics/write"),
+        //         new CASClientRequestMetrics("CASWrite", "storage_proxy/metrics/cas_write"),
+        //         new CASClientRequestMetrics("CASRead", "storage_proxy/metrics/cas_read"));
+    }
+
+    @Override
+    public long getTotalHints() {
+        log(" getTotalHints()");
+        return client.getLongValue("storage_proxy/total_hints");
+    }
+
+    @Override
+    public boolean getHintedHandoffEnabled() {
+        log(" getHintedHandoffEnabled()");
+        return client.getBooleanValue("storage_proxy/hinted_handoff_enabled");
+    }
+
+    // @Override
+    public Set<String> getHintedHandoffEnabledByDC() {
+        log(" getHintedHandoffEnabledByDC()");
+        return client.getSetStringValue("storage_proxy/hinted_handoff_enabled_by_dc");
+    }
+
+    @Override
+    public void setHintedHandoffEnabled(boolean b) {
+        log(" setHintedHandoffEnabled(boolean b)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("enable", Boolean.toString(b));
+        client.post("storage_proxy/hinted_handoff_enabled", queryParams);
+    }
+
+    // @Override
+    public void setHintedHandoffEnabledByDCList(String dcs) {
+        log(" setHintedHandoffEnabledByDCList(String dcs)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("dcs", dcs);
+        client.post("storage_proxy/hinted_handoff_enabled_by_dc_list", queryParams);
+    }
+
+    @Override
+    public int getMaxHintWindow() {
+        log(" getMaxHintWindow()");
+        return client.getIntValue("storage_proxy/max_hint_window");
+    }
+
+    @Override
+    public void setMaxHintWindow(int ms) {
+        log(" setMaxHintWindow(int ms)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("ms", Integer.toString(ms));
+        client.post("storage_proxy/max_hint_window", queryParams);
+    }
+
+    @Override
+    public int getMaxHintsInProgress() {
+        log(" getMaxHintsInProgress()");
+        return client.getIntValue("storage_proxy/max_hints_in_progress");
+    }
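These Scylla* classes are plain MBean implementations over APIClient, and this diff does not show where they get registered. A minimal registration sketch, assuming a no-arg APIClient constructor (the real signature lives in scylla-apiclient and is not shown here); StandardMBean is used because ScyllaStorageProxy implements StorageProxyMBean rather than an interface following the ClassNameMBean convention that direct standard-MBean registration requires:

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import javax.management.StandardMBean;

    import org.apache.cassandra.service.ScyllaStorageProxy;
    import org.apache.cassandra.service.StorageProxyMBean;

    import com.scylladb.jmx.api.APIClient;

    public class RegisterStorageProxyExample {
        public static void main(String[] args) throws Exception {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            APIClient client = new APIClient(); // constructor signature assumed
            // Wrap in StandardMBean so the JMX naming convention check passes:
            mbs.registerMBean(
                    new StandardMBean(new ScyllaStorageProxy(client), StorageProxyMBean.class),
                    new ObjectName(ScyllaStorageProxy.MBEAN_NAME));
        }
    }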
@Override + public void setMaxHintsInProgress(int qs) { + log(" setMaxHintsInProgress(int qs)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("qs", Integer.toString(qs)); + client.post("storage_proxy/max_hints_in_progress", queryParams); + } + + @Override + public int getHintsInProgress() { + log(" getHintsInProgress()"); + return client.getIntValue("storage_proxy/hints_in_progress"); + } + + @Override + public Long getRpcTimeout() { + log(" getRpcTimeout()"); + return client.getLongValue("storage_proxy/rpc_timeout"); + } + + @Override + public void setRpcTimeout(Long timeoutInMillis) { + log(" setRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/rpc_timeout", queryParams); + } + + @Override + public Long getReadRpcTimeout() { + log(" getReadRpcTimeout()"); + return client.getLongValue("storage_proxy/read_rpc_timeout"); + } + + @Override + public void setReadRpcTimeout(Long timeoutInMillis) { + log(" setReadRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/read_rpc_timeout", queryParams); + } + + @Override + public Long getWriteRpcTimeout() { + log(" getWriteRpcTimeout()"); + return client.getLongValue("storage_proxy/write_rpc_timeout"); + } + + @Override + public void setWriteRpcTimeout(Long timeoutInMillis) { + log(" setWriteRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/write_rpc_timeout", queryParams); + } + + @Override + public Long getCounterWriteRpcTimeout() { + log(" getCounterWriteRpcTimeout()"); + return client.getLongValue("storage_proxy/counter_write_rpc_timeout"); + } + + @Override + public void setCounterWriteRpcTimeout(Long timeoutInMillis) { + log(" setCounterWriteRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/counter_write_rpc_timeout", queryParams); + } + + @Override + public Long getCasContentionTimeout() { + log(" getCasContentionTimeout()"); + return client.getLongValue("storage_proxy/cas_contention_timeout"); + } + + @Override + public void setCasContentionTimeout(Long timeoutInMillis) { + log(" setCasContentionTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/cas_contention_timeout", queryParams); + } + + @Override + public Long getRangeRpcTimeout() { + log(" getRangeRpcTimeout()"); + return client.getLongValue("storage_proxy/range_rpc_timeout"); + } + + @Override + public void setRangeRpcTimeout(Long timeoutInMillis) { + log(" setRangeRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/range_rpc_timeout", queryParams); + } + + @Override + public Long getTruncateRpcTimeout() { + log(" getTruncateRpcTimeout()"); + return client.getLongValue("storage_proxy/truncate_rpc_timeout"); + } + + @Override + public void setTruncateRpcTimeout(Long timeoutInMillis) { + log(" setTruncateRpcTimeout(Long timeoutInMillis)"); + MultivaluedMap queryParams = new 
MultivaluedHashMap(); + queryParams.add("timeout", Long.toString(timeoutInMillis)); + client.post("storage_proxy/truncate_rpc_timeout", queryParams); + } + + @Override + public void reloadTriggerClasses() { + log(" reloadTriggerClasses()"); + client.post("storage_proxy/reload_trigger_classes"); + } + + @Override + public long getReadRepairAttempted() { + log(" getReadRepairAttempted()"); + return client.getLongValue("storage_proxy/read_repair_attempted"); + } + + @Override + public long getReadRepairRepairedBlocking() { + log(" getReadRepairRepairedBlocking()"); + return client.getLongValue("storage_proxy/read_repair_repaired_blocking"); + } + + @Override + public long getReadRepairRepairedBackground() { + log(" getReadRepairRepairedBackground()"); + return client.getLongValue("storage_proxy/read_repair_repaired_background"); + } + + @Override + public int getOtcBacklogExpirationInterval() { + return 0; //TODO fix this + } + + @Override + public void setOtcBacklogExpirationInterval(int intervalInMillis) { + //TODO fix this + } + + /** Returns each live node's schema version */ + @Override + public Map> getSchemaVersions() { + log(" getSchemaVersions()"); + return client.getMapStringListStrValue("storage_proxy/schema_versions"); + } + + @Override + public void setNativeTransportMaxConcurrentConnections(Long nativeTransportMaxConcurrentConnections) { + // TODO Auto-generated method stub + log(" setNativeTransportMaxConcurrentConnections()"); + + } + + @Override + public Long getNativeTransportMaxConcurrentConnections() { + // TODO Auto-generated method stub + log(" getNativeTransportMaxConcurrentConnections()"); + return client.getLongValue(""); + } + + @Override + public void enableHintsForDC(String dc) { + // TODO if/when scylla uses hints + log(" enableHintsForDC()"); + } + + @Override + public void disableHintsForDC(String dc) { + // TODO if/when scylla uses hints + log(" disableHintsForDC()"); + } + + @Override + public Set getHintedHandoffDisabledDCs() { + // TODO if/when scylla uses hints + log(" getHintedHandoffDisabledDCs()"); + return emptySet(); + } + + @Override + public int getNumberOfTables() { + // TODO: could be like 1000% more efficient + JsonArray mbeans = client.getJsonArray("/column_family/"); + return mbeans.size(); + } +} diff --git a/src/java/org/apache/cassandra/service/ScyllaStorageService.java b/src/java/org/apache/cassandra/service/ScyllaStorageService.java new file mode 100644 index 0000000000..7f9fe69c0f --- /dev/null +++ b/src/java/org/apache/cassandra/service/ScyllaStorageService.java @@ -0,0 +1,1833 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ +package org.apache.cassandra.service; + +import static java.util.Arrays.asList; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Logger; + +import javax.json.JsonArray; +import javax.json.JsonObject; +import javax.management.ListenerNotFoundException; +import javax.management.MBeanNotificationInfo; +import javax.management.Notification; +import javax.management.NotificationBroadcasterSupport; +import javax.management.NotificationFilter; +import javax.management.NotificationListener; +import javax.management.openmbean.TabularData; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; + +import org.apache.cassandra.repair.RepairParallelism; + +import com.google.common.base.Joiner; +import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.utils.FileUtils; + +/** + * This abstraction contains the token/identifier of this node on the identifier + * space. This token gets gossiped around. This class will also maintain + * histograms of the load information of other nodes in the cluster. + */ +public class ScyllaStorageService implements StorageServiceMBean { + private static final Logger logger = Logger.getLogger(StorageService.class.getName()); + private static final Timer timer = new Timer("Storage Service Repair", true); + + private final NotificationBroadcasterSupport notificationBroadcasterSupport = new NotificationBroadcasterSupport(); + + @Override + public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) { + notificationBroadcasterSupport.addNotificationListener(listener, filter, handback); + } + + @Override + public void removeNotificationListener(NotificationListener listener) throws ListenerNotFoundException { + notificationBroadcasterSupport.removeNotificationListener(listener); + } + + @Override + public void removeNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) + throws ListenerNotFoundException { + notificationBroadcasterSupport.removeNotificationListener(listener, filter, handback); + } + + @Override + public MBeanNotificationInfo[] getNotificationInfo() { + return notificationBroadcasterSupport.getNotificationInfo(); + } + + public void sendNotification(Notification notification) { + notificationBroadcasterSupport.sendNotification(notification); + } + + public static enum RepairStatus { + STARTED, SESSION_SUCCESS, SESSION_FAILED, FINISHED + } + + /* JMX notification serial number counter */ + private final AtomicLong notificationSerialNumber = new AtomicLong(); + protected final APIClient client; + + public ScyllaStorageService(APIClient client) { + this.client=client; +// super("org.apache.cassandra.db:type=StorageService", client, new StorageMetrics()); + + } + + public void log(String str) { + logger.finest(str); + } + + /** + * Retrieve the list of live nodes in the cluster, where "liveness" is + * determined by the failure detector of the 
node being queried. + * + * @return set of IP addresses, as Strings + */ + @Override + public List getLiveNodes() { + log(" getLiveNodes()"); + return client.getListStrValue("/gossiper/endpoint/live"); + } + + /** + * Retrieve the list of unreachable nodes in the cluster, as determined by + * this node's failure detector. + * + * @return set of IP addresses, as Strings + */ + @Override + public List getUnreachableNodes() { + log(" getUnreachableNodes()"); + return client.getListStrValue("/gossiper/endpoint/down"); + } + + /** + * Retrieve the list of nodes currently bootstrapping into the ring. + * + * @return set of IP addresses, as Strings + */ + @Override + public List getJoiningNodes() { + log(" getJoiningNodes()"); + return client.getListStrValue("/storage_service/nodes/joining"); + } + + /** + * Retrieve the list of nodes currently leaving the ring. + * + * @return set of IP addresses, as Strings + */ + @Override + public List getLeavingNodes() { + log(" getLeavingNodes()"); + return client.getListStrValue("/storage_service/nodes/leaving"); + } + + /** + * Retrieve the list of nodes currently moving in the ring. + * + * @return set of IP addresses, as Strings + */ + @Override + public List getMovingNodes() { + log(" getMovingNodes()"); + return client.getListStrValue("/storage_service/nodes/moving"); + } + + /** + * Fetch string representations of the tokens for this node. + * + * @return a collection of tokens formatted as strings + */ + @Override + public List getTokens() { + log(" getTokens()"); + try { + return getTokens(getLocalBroadCastingAddress()); + } catch (UnknownHostException e) { + // We should never reach here, + // but it makes the compiler happy + return null; + } + } + + /** + * Fetch string representations of the tokens for a specified node. + * + * @param endpoint + * string representation of an node + * @return a collection of tokens formatted as strings + */ + @Override + public List getTokens(String endpoint) throws UnknownHostException { + log(" getTokens(String endpoint) throws UnknownHostException"); + return client.getListStrValue("/storage_service/tokens/" + endpoint); + } + + /** + * Fetch a string representation of the Cassandra version. + * + * @return A string representation of the Cassandra version. + */ + @Override + public String getReleaseVersion() { + log(" getReleaseVersion()"); + return client.getStringValue("/storage_service/release_version"); + } + + /** + * Fetch a string representation of the current Schema version. + * + * @return A string representation of the Schema version. 
+ */ + @Override + public String getSchemaVersion() { + log(" getSchemaVersion()"); + return client.getStringValue("/storage_service/schema_version"); + } + + /** + * Get the list of all data file locations from conf + * + * @return String array of all locations + */ + @Override + public String[] getAllDataFileLocations() { + log(" getAllDataFileLocations()"); + return client.getStringArrValue("/storage_service/data_file/locations"); + } + + /** + * Get location of the commit log + * + * @return a string path + */ + @Override + public String getCommitLogLocation() { + log(" getCommitLogLocation()"); + return client.getStringValue("/storage_service/commitlog"); + } + + /** + * Get location of the saved caches dir + * + * @return a string path + */ + @Override + public String getSavedCachesLocation() { + log(" getSavedCachesLocation()"); + return client.getStringValue("/storage_service/saved_caches/location"); + } + + /** + * Retrieve a map of range to end points that describe the ring topology of + * a Cassandra cluster. + * + * @return mapping of ranges to end points + */ + @Override + public Map, List> getRangeToEndpointMap(String keyspace) { + log(" getRangeToEndpointMap(String keyspace)"); + return client.getMapListStrValue("/storage_service/range/" + keyspace); + } + + /** + * Retrieve a map of range to rpc addresses that describe the ring topology + * of a Cassandra cluster. + * + * @return mapping of ranges to rpc addresses + */ + @Override + public Map, List> getRangeToRpcaddressMap(String keyspace) { + log(" getRangeToRpcaddressMap(String keyspace)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("rpc", "true"); + return client.getMapListStrValue("/storage_service/range/" + keyspace, queryParams); + } + + /** + * The same as {@code describeRing(String)} but converts TokenRange to the + * String for JMX compatibility + * + * @param keyspace + * The keyspace to fetch information about + * + * @return a List of TokenRange(s) converted to String for the given + * keyspace + */ + @Override + public List describeRingJMX(String keyspace) throws IOException { + log(" describeRingJMX(String keyspace) throws IOException"); + JsonArray arr = client.getJsonArray("/storage_service/describe_ring/" + keyspace); + List res = new ArrayList(); + + for (int i = 0; i < arr.size(); i++) { + JsonObject obj = arr.getJsonObject(i); + StringBuilder sb = new StringBuilder(); + sb.append("TokenRange("); + sb.append("start_token:"); + sb.append(obj.getString("start_token")); + sb.append(", end_token:"); + sb.append(obj.getString("end_token")); + sb.append(", endpoints:["); + JsonArray endpoints = obj.getJsonArray("endpoints"); + for (int j = 0; j < endpoints.size(); j++) { + if (j > 0) { + sb.append(", "); + } + sb.append(endpoints.getString(j)); + } + sb.append("], rpc_endpoints:["); + JsonArray rpc_endpoints = obj.getJsonArray("rpc_endpoints"); + for (int j = 0; j < rpc_endpoints.size(); j++) { + if (j > 0) { + sb.append(", "); + } + sb.append(rpc_endpoints.getString(j)); + } + + sb.append("], endpoint_details:["); + JsonArray endpoint_details = obj.getJsonArray("endpoint_details"); + for (int j = 0; j < endpoint_details.size(); j++) { + JsonObject detail = endpoint_details.getJsonObject(j); + if (j > 0) { + sb.append(", "); + } + sb.append("EndpointDetails("); + sb.append("host:"); + sb.append(detail.getString("host")); + sb.append(", datacenter:"); + sb.append(detail.getString("datacenter")); + sb.append(", rack:"); + sb.append(detail.getString("rack")); + sb.append(')'); + } 
+ sb.append("])"); + res.add(sb.toString()); + } + return res; + } + + /** + * Retrieve a map of pending ranges to endpoints that describe the ring + * topology + * + * @param keyspace + * the keyspace to get the pending range map for. + * @return a map of pending ranges to endpoints + */ + @Override + public Map, List> getPendingRangeToEndpointMap(String keyspace) { + log(" getPendingRangeToEndpointMap(String keyspace)"); + return client.getMapListStrValue("/storage_service/pending_range/" + keyspace); + } + + /** + * Retrieve a map of tokens to endpoints, including the bootstrapping ones. + * + * @return a map of tokens to endpoints in ascending order + */ + @Override + public Map getTokenToEndpointMap() { + log(" getTokenToEndpointMap()"); + return client.getMapStrValue("/storage_service/tokens_endpoint"); + } + + /** Retrieve this hosts unique ID */ + @Override + public String getLocalHostId() { + log(" getLocalHostId()"); + return client.getStringValue("/storage_service/hostid/local"); + } + + public String getLocalBroadCastingAddress() { + // FIXME: + // There is no straight API to get the broadcasting + // address, instead of trying to figure it out from the configuration + // we will use the getHostIdToAddressMap with the hostid + return getHostIdToAddressMap().get(getLocalHostId()); + } + + /** Retrieve the mapping of endpoint to host ID */ + @Override + public Map getHostIdMap() { + log(" getHostIdMap()"); + return client.getMapStrValue("/storage_service/host_id"); + } + + /** Retrieve the mapping of endpoint to host ID */ + public Map getHostIdToAddressMap() { + log(" getHostIdToAddressMap()"); + return client.getReverseMapStrValue("/storage_service/host_id"); + } + + /** + * Numeric load value. + * + * @see org.apache.cassandra.metrics.StorageMetrics#load + */ + @Deprecated + public double getLoad() { + log(" getLoad()"); + return client.getDoubleValue("/storage_service/load"); + } + + /** Human-readable load value */ + @Override + public String getLoadString() { + log(" getLoadString()"); + return FileUtils.stringifyFileSize(getLoad()); + } + + /** Human-readable load value. Keys are IP addresses. */ + @Override + public Map getLoadMap() { + log(" getLoadMap()"); + Map load = getLoadMapAsDouble(); + Map map = new HashMap<>(); + for (Map.Entry entry : load.entrySet()) { + map.put(entry.getKey(), FileUtils.stringifyFileSize(entry.getValue())); + } + return map; + } + + public Map getLoadMapAsDouble() { + log(" getLoadMapAsDouble()"); + return client.getMapStringDouble("/storage_service/load_map"); + } + + /** + * Return the generation value for this node. + * + * @return generation number + */ + @Override + public int getCurrentGenerationNumber() { + log(" getCurrentGenerationNumber()"); + return client.getIntValue("/storage_service/generation_number"); + } + + /** + * This method returns the N endpoints that are responsible for storing the + * specified key i.e for replication. 
+ * + * @param keyspaceName + * keyspace name + * @param cf + * Column family name + * @param key + * - key for which we need to find the endpoint return value - + * the endpoint responsible for this key + */ + @Override + public List getNaturalEndpoints(String keyspaceName, String cf, String key) { + log(" getNaturalEndpoints(String keyspaceName, String cf, String key)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("cf", cf); + queryParams.add("key", key); + return client.getListInetAddressValue("/storage_service/natural_endpoints/" + keyspaceName, queryParams); + } + + @Override + public List getNaturalEndpoints(String keyspaceName, ByteBuffer key) { + log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)"); + return client.getListInetAddressValue(""); + } + + /** + * Takes the snapshot for the given keyspaces. A snapshot name must be + * specified. + * + * @param tag + * the tag given to the snapshot; may not be null or empty + * @param keyspaceNames + * the name of the keyspaces to snapshot; empty means "all." + */ + @Override + public void takeSnapshot(String tag, String... keyspaceNames) throws IOException { + takeSnapshot(tag, null, keyspaceNames); + } + + @Override + public void takeTableSnapshot(String keyspaceName, String tableName, String tag) throws IOException { + + } + + @Override + public void takeMultipleTableSnapshot(String tag, String... tableList) throws IOException { + + } + + @Override + public void takeSnapshot(String tag, Map options, String... keyspaceNames) throws IOException { + log(" takeSnapshot(String tag, String... keyspaceNames) throws IOException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "tag", tag); + + if (keyspaceNames.length == 1 && keyspaceNames[0].indexOf('.') != -1) { + String[] parts = keyspaceNames[0].split("\\."); + keyspaceNames = new String[] { parts[0] }; + APIClient.set_query_param(queryParams, "cf", parts[1]); + } + APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames)); + // TODO: origin has one recognized option: skip flush. We don't. + client.post("/storage_service/snapshots", queryParams); + } + + /** + * Takes the snapshot of a specific column family. A snapshot name must be + * specified. + * + * @param keyspaceName + * the keyspace which holds the specified column family + * @param columnFamilyName + * the column family to snapshot + * @param tag + * the tag given to the snapshot; may not be null or empty + */ +// @Override + public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException { + log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + if (keyspaceName == null) { + throw new IOException("You must supply a keyspace name"); + } + if (columnFamilyName == null) { + throw new IOException("You must supply a table name"); + } + if (tag == null || tag.equals("")) { + throw new IOException("You must supply a snapshot name."); + } + queryParams.add("tag", tag); + queryParams.add("kn", keyspaceName); + queryParams.add("cf", columnFamilyName); + client.post("/storage_service/snapshots", queryParams); + } + + /** + * Remove the snapshot with the given name from the given keyspaces. If no + * tag is specified we will remove all snapshots. + */ + @Override + public void clearSnapshot(String tag, String... 
keyspaceNames) throws IOException { + log(" clearSnapshot(String tag, String... keyspaceNames) throws IOException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "tag", tag); + APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames)); + client.delete("/storage_service/snapshots", queryParams); + } + + /** + * Get the details of all the snapshot + * + * @return A map of snapshotName to all its details in Tabular form. + */ + @Override + public Map getSnapshotDetails() { + log(" getSnapshotDetails()"); + return client.getMapStringSnapshotTabularDataValue("/storage_service/snapshots", null); + } + + public Map>> getSnapshotKeyspaceColumnFamily() { + JsonArray arr = client.getJsonArray("/storage_service/snapshots"); + Map>> res = new HashMap>>(); + for (int i = 0; i < arr.size(); i++) { + JsonObject obj = arr.getJsonObject(i); + Map> kscf = new HashMap>(); + JsonArray snapshots = obj.getJsonArray("value"); + for (int j = 0; j < snapshots.size(); j++) { + JsonObject s = snapshots.getJsonObject(j); + String ks = s.getString("ks"); + String cf = s.getString("cf"); + if (!kscf.containsKey(ks)) { + kscf.put(ks, new HashSet()); + } + kscf.get(ks).add(cf); + } + res.put(obj.getString("key"), kscf); + } + return res; + } + + /** + * Get the true size taken by all snapshots across all keyspaces. + * + * @return True size taken by all the snapshots. + */ + @Override + public long trueSnapshotsSize() { + log(" trueSnapshotsSize()"); + return client.getLongValue("/storage_service/snapshots/size/true"); + } + + /** + * Forces major compaction of a single keyspace + */ +// @Override + public void forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { + log(" forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.post("/storage_service/keyspace_compaction/" + keyspaceName, queryParams); + } + + @Override + public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException { + // TODO: actually handle token ranges. + forceKeyspaceCompaction(keyspaceName, tableNames); + } + + /** + * Trigger a cleanup of keys on a single keyspace + */ + @Override + public int forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { + log(" forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.postInt("/storage_service/keyspace_cleanup/" + keyspaceName, queryParams); + } + + /** + * Scrub (deserialize + reserialize at the latest version, skipping bad rows + * if any) the given keyspace. If columnFamilies array is empty, all CFs are + * scrubbed. + * + * Scrubbed CFs will be snapshotted first, if disableSnapshot is false + */ + @Override + public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... 
columnFamilies) + throws IOException, ExecutionException, InterruptedException { + log(" scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); + return scrub(disableSnapshot, skipCorrupted, true, keyspaceName, columnFamilies); + } + + @Override + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, + String... columnFamilies) throws IOException, ExecutionException, InterruptedException { + log(" scrub(boolean disableSnapshot, boolean skipCorrupted, bool checkData, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_bool_query_param(queryParams, "disable_snapshot", disableSnapshot); + APIClient.set_bool_query_param(queryParams, "skip_corrupted", skipCorrupted); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName); + } + + /** + * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip + * bad rows and do not snapshot sstables first. + */ + @Override + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { + log(" upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_bool_query_param(queryParams, "exclude_current_version", excludeCurrentVersion); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.getIntValue("/storage_service/keyspace_upgrade_sstables/" + keyspaceName, queryParams); + } + + /** + * Flush all memtables for the given column families, or all columnfamilies + * for the given keyspace if none are explicitly listed. + * + * @param keyspaceName + * @param columnFamilies + * @throws IOException + */ + @Override + public void forceKeyspaceFlush(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { + log(" forceKeyspaceFlush(String keyspaceName, String... 
columnFamilies) throws IOException, ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.post("/storage_service/keyspace_flush/" + keyspaceName, queryParams); + } + + private class CheckRepair extends TimerTask { + @SuppressWarnings("unused") + private int id; + private String keyspace; + private String message; + private MultivaluedMap queryParams = new MultivaluedHashMap(); + private int cmd; + private final boolean legacy; + + public CheckRepair(int id, String keyspace, boolean legacy) { + this.id = id; + this.keyspace = keyspace; + this.legacy = legacy; + APIClient.set_query_param(queryParams, "id", Integer.toString(id)); + message = String.format("Repair session %d ", id); + // The returned id is the command number + this.cmd = id; + } + + @Override + public void run() { + String status = client.getStringValue("/storage_service/repair_async/" + keyspace, queryParams); + if (!status.equals("RUNNING")) { + cancel(); + if (status.equals("SUCCESSFUL")) { + sendMessage(cmd, RepairStatus.SESSION_SUCCESS, message, legacy); + } else { + sendMessage(cmd, RepairStatus.SESSION_FAILED, message + "failed", legacy); + } + sendMessage(cmd, RepairStatus.FINISHED, message + "finished", legacy); + } + } + + } + + + public String getRepairMessage(final int cmd, final String keyspace, final int ranges_size, + final RepairParallelism parallelismDegree, final boolean fullRepair) { + return String.format( + "Starting repair command #%d, repairing %d ranges for keyspace %s (parallelism=%s, full=%b)", cmd, + ranges_size, keyspace, parallelismDegree, fullRepair); + } + + /** + * + */ + private int waitAndNotifyRepair(int cmd, String keyspace, String message, boolean legacy) { + logger.finest(message); + + sendMessage(cmd, RepairStatus.STARTED, message, legacy); + + TimerTask taskToExecute = new CheckRepair(cmd, keyspace, legacy); + timer.schedule(taskToExecute, 100, 1000); + return cmd; + } + + // See org.apache.cassandra.utils.progress.ProgressEventType + private static enum ProgressEventType { + START, PROGRESS, ERROR, ABORT, SUCCESS, COMPLETE, NOTIFICATION + } + + private void sendMessage(int cmd, RepairStatus status, String message, boolean legacy) { + String tag = "repair:" + cmd; + + ProgressEventType type = ProgressEventType.ERROR; + int total = 100; + int count = 0; + switch (status) { + case STARTED: + type = ProgressEventType.START; + break; + case FINISHED: + type = ProgressEventType.COMPLETE; + count = 100; + break; + case SESSION_SUCCESS: + type = ProgressEventType.SUCCESS; + count = 100; + break; + default: + break; + } + + Notification jmxNotification = new Notification("progress", tag, notificationSerialNumber.incrementAndGet(), + message); + Map userData = new HashMap<>(); + userData.put("type", type.ordinal()); + userData.put("progressCount", count); + userData.put("total", total); + jmxNotification.setUserData(userData); + sendNotification(jmxNotification); + + if (legacy) { +// sendNotification("repair", message, new int[] { cmd, status.ordinal() }); + } + } + + /** + * Invoke repair asynchronously. You can track repair progress by + * subscribing JMX notification sent from this StorageServiceMBean. + * Notification format is: type: "repair" userObject: int array of length 2, + * [0]=command number, [1]=ordinal of AntiEntropyService.Status + * + * @param keyspace + * Keyspace name to repair. Should not be null. + * @param options + * repair option. 
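+ * Recognized keys include the constants defined below ("parallelism",
+ * "primaryRange", "ranges", "columnFamilies", "dataCenters", "hosts");
+ * a sketch with illustrative values:
+ * <pre>{@code
+ * Map<String, String> options = new HashMap<>();
+ * options.put("primaryRange", "true");
+ * options.put("dataCenters", "dc1");
+ * int cmd = storageService.repairAsync("my_ks", options);
+ * }</pre>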
+ * @return Repair command number, or 0 if nothing to repair + */ + @Override + public int repairAsync(String keyspace, Map options) { + return repairAsync(keyspace, options, false); + } + + @SuppressWarnings("unused") + private static final String PARALLELISM_KEY = "parallelism"; + private static final String PRIMARY_RANGE_KEY = "primaryRange"; + @SuppressWarnings("unused") + private static final String INCREMENTAL_KEY = "incremental"; + @SuppressWarnings("unused") + private static final String JOB_THREADS_KEY = "jobThreads"; + private static final String RANGES_KEY = "ranges"; + private static final String COLUMNFAMILIES_KEY = "columnFamilies"; + private static final String DATACENTERS_KEY = "dataCenters"; + private static final String HOSTS_KEY = "hosts"; + @SuppressWarnings("unused") + private static final String TRACE_KEY = "trace"; + + private int repairAsync(String keyspace, Map options, boolean legacy) { + log(" repairAsync(String keyspace, Map options)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + for (String op : options.keySet()) { + APIClient.set_query_param(queryParams, op, options.get(op)); + } + + int cmd = client.postInt("/storage_service/repair_async/" + keyspace, queryParams); + waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true), + legacy); + return cmd; + } + + private static String commaSeparated(Collection c) { + String s = c.toString(); + return s.substring(1, s.length() - 1); + } + + private int repairRangeAsync(String beginToken, String endToken, String keyspaceName, Boolean isSequential, + Collection dataCenters, Collection hosts, Boolean primaryRange, Boolean repairedAt, + String... columnFamilies) { + log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) throws IOException"); + + Map options = new HashMap(); + if (beginToken != null && endToken != null) { + options.put(RANGES_KEY, beginToken + ":" + endToken); + } + if (dataCenters != null) { + options.put(DATACENTERS_KEY, commaSeparated(dataCenters)); + } + if (hosts != null) { + options.put(HOSTS_KEY, commaSeparated(hosts)); + } + if (columnFamilies != null && columnFamilies.length != 0) { + options.put(COLUMNFAMILIES_KEY, commaSeparated(asList(columnFamilies))); + } + if (primaryRange != null) { + options.put(PRIMARY_RANGE_KEY, primaryRange.toString()); + } + + return repairAsync(keyspaceName, options, true); + } + + @Override + @Deprecated + public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) + throws IOException { + log(" forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException"); + return repairRangeAsync(null, null, keyspace, isSequential, dataCenters, hosts, primaryRange, repairedAt, + columnFamilies); + } + + @Override + @Deprecated + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) { + log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... 
columnFamilies) throws IOException"); + return repairRangeAsync(beginToken, endToken, keyspaceName, isSequential, dataCenters, hosts, null, repairedAt, + columnFamilies); + } + + @Override + @Deprecated + public int forceRepairAsync(String keyspaceName, boolean isSequential, boolean isLocal, boolean primaryRange, + boolean fullRepair, String... columnFamilies) { + log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)"); + return repairRangeAsync(null, null, keyspaceName, isSequential, null, null, primaryRange, null, columnFamilies); + } + + @Override + @Deprecated + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + boolean isLocal, boolean repairedAt, String... columnFamilies) { + log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... columnFamilies)"); + return forceRepairRangeAsync(beginToken, endToken, keyspaceName, isSequential, null, null, repairedAt, + columnFamilies); + } + + @Override + public void forceTerminateAllRepairSessions() { + log(" forceTerminateAllRepairSessions()"); + client.post("/storage_service/force_terminate"); + } + + /** + * transfer this node's data to other machines and remove it from service. + */ + @Override + public void decommission() throws InterruptedException { + log(" decommission() throws InterruptedException"); + client.post("/storage_service/decommission"); + } + + /** + * @param newToken + * token to move this node to. This node will unload its data + * onto its neighbors, and bootstrap to the new token. + */ + @Override + public void move(String newToken) throws IOException { + log(" move(String newToken) throws IOException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "new_token", newToken); + client.post("/storage_service/move", queryParams); + } + + /** + * removeToken removes token (and all data associated with enpoint that had + * it) from the ring + * + * @param hostIdString + * the host id to remove + */ + @Override + public void removeNode(String hostIdString) { + log(" removeNode(String token)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "host_id", hostIdString); + client.post("/storage_service/remove_node", queryParams); + } + + /** + * Get the status of a token removal. + */ + @Override + public String getRemovalStatus() { + log(" getRemovalStatus()"); + return client.getStringValue("/storage_service/removal_status"); + } + + /** + * Force a remove operation to finish. + */ + @Override + public void forceRemoveCompletion() { + log(" forceRemoveCompletion()"); + client.post("/storage_service/force_remove_completion"); + } + + /** + * set the logging level at runtime
+ *
+ * If both classQualifier and level are empty/null, it will reload the
+ * configuration to reset.
+ * If classQualifier is not empty but level is empty/null, it will set the
+ * level to null for the defined classQualifier.
+ * If level cannot be parsed, then the level will be defaulted to DEBUG
+ *
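+ * Example (the logger name is illustrative), equivalent to
+ * {@code nodetool setlogginglevel org.apache.cassandra.transport DEBUG}:
+ * <pre>{@code
+ * storageService.setLoggingLevel("org.apache.cassandra.transport", "DEBUG");
+ * }</pre>
+ *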
+ * The logback configuration should have < jmxConfigurator /> set + * + * @param classQualifier + * The logger's classQualifer + * @param level + * The log level + * @throws Exception + * + * @see ch.qos.logback.classic.Level#toLevel(String) + */ + @Override + public void setLoggingLevel(String classQualifier, String level) throws Exception { + log(" setLoggingLevel(String classQualifier, String level) throws Exception"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "level", level); + client.post("/system/logger/" + classQualifier, queryParams); + } + + /** get the runtime logging levels */ + @Override + public Map getLoggingLevels() { + log(" getLoggingLevels()"); + return client.getMapStrValue("/storage_service/logging_level"); + } + + /** + * get the operational mode (leaving, joining, normal, decommissioned, + * client) + **/ + @Override + public String getOperationMode() { + log(" getOperationMode()"); + return client.getStringValue("/storage_service/operation_mode"); + } + + /** Returns whether the storage service is starting or not */ + @Override + public boolean isStarting() { + log(" isStarting()"); + return client.getBooleanValue("/storage_service/is_starting"); + } + + /** get the progress of a drain operation */ + @Override + public String getDrainProgress() { + log(" getDrainProgress()"); + // FIXME + // This is a workaround so the nodetool would work + // it should be revert when the drain progress will be implemented + // return c.getStringValue("/storage_service/drain"); + return String.format("Drained %s/%s ColumnFamilies", 0, 0); + } + + /** + * makes node unavailable for writes, flushes memtables and replays + * commitlog. + */ + @Override + public void drain() throws IOException, InterruptedException, ExecutionException { + log(" drain() throws IOException, InterruptedException, ExecutionException"); + client.post("/storage_service/drain"); + } + + /** + * Truncates (deletes) the given columnFamily from the provided keyspace. + * Calling truncate results in actual deletion of all data in the cluster + * under the given columnFamily and it will fail unless all hosts are up. + * All data in the given column family will be deleted, but its definition + * will not be affected. + * + * @param keyspace + * The keyspace to delete from + * @param columnFamily + * The column family to delete data from. + */ + @Override + public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException { + log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", columnFamily); + client.post("/storage_service/truncate/" + keyspace, queryParams); + } + + /** + * given a list of tokens (representing the nodes in the cluster), returns a + * mapping from "token -> %age of cluster owned by that token" + */ + @Override + public Map getOwnership() { + log(" getOwnership()"); + return client.getMapInetAddressFloatValue("/storage_service/ownership/"); + } + + /** + * Effective ownership is % of the data each node owns given the keyspace we + * calculate the percentage using replication factor. If Keyspace == null, + * this method will try to verify if all the keyspaces in the cluster have + * the same replication strategies and if yes then we will use the first + * else a empty Map is returned. 
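+ * <p>
+ * Reading the result (a sketch; the keyspace name is hypothetical):
+ * <pre>{@code
+ * Map<InetAddress, Float> ownership = storageService.effectiveOwnership("ks1");
+ * for (Map.Entry<InetAddress, Float> e : ownership.entrySet()) {
+ *     System.out.printf("%s owns %.1f%%%n", e.getKey(), e.getValue() * 100);
+ * }
+ * }</pre>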
+ */ + @Override + public Map effectiveOwnership(String keyspace) throws IllegalStateException { + log(" effectiveOwnership(String keyspace) throws IllegalStateException"); + try { + return client.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace); + } catch (Exception e) { + throw new IllegalStateException( + "Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless"); + } + } + + @Override + public List getKeyspaces() { + log(" getKeyspaces()"); + return client.getListStrValue("/storage_service/keyspaces"); + } + + public Map> getColumnFamilyPerKeyspace() { + Map> res = new HashMap>(); + + JsonArray mbeans = client.getJsonArray("/column_family/"); + + for (int i = 0; i < mbeans.size(); i++) { + JsonObject mbean = mbeans.getJsonObject(i); + String ks = mbean.getString("ks"); + String cf = mbean.getString("cf"); + if (!res.containsKey(ks)) { + res.put(ks, new HashSet()); + } + res.get(ks).add(cf); + } + return res; + } + + @Override + public List getNonSystemKeyspaces() { + log(" getNonSystemKeyspaces()"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("type", "user"); + return client.getListStrValue("/storage_service/keyspaces", queryParams); + } + + @Override + public Map getViewBuildStatuses(String keyspace, String view) { + log(" getViewBuildStatuses()"); + return client.getMapStrValue("storage_service/view_build_statuses/" + keyspace + "/" + view); + } + + /** + * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at + * runtime + * + * @param epSnitchClassName + * the canonical path name for a class implementing + * IEndpointSnitch + * @param dynamic + * boolean that decides whether dynamicsnitch is used or not + * @param dynamicUpdateInterval + * integer, in ms (default 100) + * @param dynamicResetInterval + * integer, in ms (default 600,000) + * @param dynamicBadnessThreshold + * double, (default 0.0) + */ + @Override + public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, + Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException { + log(" updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_bool_query_param(queryParams, "dynamic", dynamic); + APIClient.set_query_param(queryParams, "epSnitchClassName", epSnitchClassName); + if (dynamicUpdateInterval != null) { + queryParams.add("dynamic_update_interval", dynamicUpdateInterval.toString()); + } + if (dynamicResetInterval != null) { + queryParams.add("dynamic_reset_interval", dynamicResetInterval.toString()); + } + if (dynamicBadnessThreshold != null) { + queryParams.add("dynamic_badness_threshold", dynamicBadnessThreshold.toString()); + } + client.post("/storage_service/update_snitch", queryParams); + } + + @Override + public void setDynamicUpdateInterval(int dynamicUpdateInterval) { + + } + + @Override + public int getDynamicUpdateInterval() { + return 0; + } + + // allows a user to forcibly 'kill' a sick node + @Override + public void stopGossiping() { + log(" stopGossiping()"); + client.delete("/storage_service/gossiping"); + } + + // allows a user to recover a forcibly 'killed' node + @Override + public void startGossiping() { + log(" startGossiping()"); + client.post("/storage_service/gossiping"); + } + + // allows a user to see 
whether gossip is running or not + @Override + public boolean isGossipRunning() { + log(" isGossipRunning()"); + return client.getBooleanValue("/storage_service/gossiping"); + } + + // allows a user to forcibly completely stop cassandra + @Override + public void stopDaemon() { + log(" stopDaemon()"); + client.post("/storage_service/stop_daemon"); + } + + // to determine if gossip is disabled + @Override + public boolean isInitialized() { + log(" isInitialized()"); + return client.getBooleanValue("/storage_service/is_initialized"); + } + + // allows a user to disable thrift + @Override + public void stopRPCServer() { + log(" stopRPCServer()"); + client.delete("/storage_service/rpc_server"); + } + + // allows a user to reenable thrift + @Override + public void startRPCServer() { + log(" startRPCServer()"); + client.post("/storage_service/rpc_server"); + } + + // to determine if thrift is running + @Override + public boolean isRPCServerRunning() { + log(" isRPCServerRunning()"); + return client.getBooleanValue("/storage_service/rpc_server"); + } + + @Override + public void stopNativeTransport() { + log(" stopNativeTransport()"); + client.delete("/storage_service/native_transport"); + } + + @Override + public void startNativeTransport() { + log(" startNativeTransport()"); + client.post("/storage_service/native_transport"); + } + + @Override + public boolean isNativeTransportRunning() { + log(" isNativeTransportRunning()"); + return client.getBooleanValue("/storage_service/native_transport"); + } + + // allows a node that have been started without joining the ring to join it + @Override + public void joinRing() throws IOException { + log(" joinRing() throws IOException"); + client.post("/storage_service/join_ring"); + } + + @Override + public boolean isJoined() { + log(" isJoined()"); + return client.getBooleanValue("/storage_service/join_ring"); + } + + @Override + public boolean isDrained() { + return false; + } + + @Override + public boolean isDraining() { + return false; + } + + @Override + public void setRpcTimeout(long value) { + + } + + @Override + public long getRpcTimeout() { + return 0; + } + + @Override + public void setReadRpcTimeout(long value) { + + } + + @Override + public long getReadRpcTimeout() { + return 0; + } + + @Override + public void setRangeRpcTimeout(long value) { + + } + + @Override + public long getRangeRpcTimeout() { + return 0; + } + + @Override + public void setWriteRpcTimeout(long value) { + + } + + @Override + public long getWriteRpcTimeout() { + return 0; + } + + @Override + public void setCounterWriteRpcTimeout(long value) { + + } + + @Override + public long getCounterWriteRpcTimeout() { + return 0; + } + + @Override + public void setCasContentionTimeout(long value) { + + } + + @Override + public long getCasContentionTimeout() { + return 0; + } + + @Override + public void setTruncateRpcTimeout(long value) { + + } + + @Override + public long getTruncateRpcTimeout() { + return 0; + } + + @Override + public void setStreamingSocketTimeout(int value) { + + } + + @Override + public int getStreamingSocketTimeout() { + return 0; + } + + @Override + public void setStreamThroughputMbPerSec(int value) { + log(" setStreamThroughputMbPerSec(int value)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("value", Integer.toString(value)); + client.post("/storage_service/stream_throughput", queryParams); + } + + @Override + public int getStreamThroughputMbPerSec() { + log(" getStreamThroughputMbPerSec()"); + return 
client.getIntValue("/storage_service/stream_throughput"); + } + + public int getCompactionThroughputMbPerSec() { + log(" getCompactionThroughputMbPerSec()"); + return client.getIntValue("/storage_service/compaction_throughput"); + } + + @Override + public void setCompactionThroughputMbPerSec(int value) { + log(" setCompactionThroughputMbPerSec(int value)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("value", Integer.toString(value)); + client.post("/storage_service/compaction_throughput", queryParams); + } + + @Override + public int getConcurrentCompactors() { + return 0; + } + + @Override + public void setConcurrentCompactors(int value) { + + } + + @Override + public boolean isIncrementalBackupsEnabled() { + log(" isIncrementalBackupsEnabled()"); + return client.getBooleanValue("/storage_service/incremental_backups"); + } + + @Override + public void setIncrementalBackupsEnabled(boolean value) { + log(" setIncrementalBackupsEnabled(boolean value)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("value", Boolean.toString(value)); + client.post("/storage_service/incremental_backups", queryParams); + } + + /** + * Initiate a process of streaming data for which we are responsible from + * other nodes. It is similar to bootstrap except meant to be used on a node + * which is already in the cluster (typically containing no data) as an + * alternative to running repair. + * + * @param sourceDc + * Name of DC from which to select sources for streaming or null + * to pick any node + */ + @Override + public void rebuild(String sourceDc) { + rebuild(sourceDc, null, null, null); + } + + /** + * Same as {@link #rebuild(String)}, but only for specified keyspace and ranges. + * + * @param sourceDc Name of DC from which to select sources for streaming or null to pick any node + * @param keyspace Name of the keyspace which to rebuild or null to rebuild all keyspaces. + * @param tokens Range of tokens to rebuild or null to rebuild all token ranges. In the format of: + * "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]" + */ + @Override + public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources) { + log(" rebuild(String sourceDc, String keyspace, String tokens, String specificSources)"); + if (keyspace != null) { + throw new UnsupportedOperationException("Rebuild: 'keyspace' not yet supported"); + } + if (tokens != null) { + throw new UnsupportedOperationException("Rebuild: 'token range' not yet supported"); + } + if (specificSources != null) { + throw new UnsupportedOperationException("Rebuild: 'specific sources' not yet supported"); + } + if (sourceDc != null) { + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "source_dc", sourceDc); + client.post("/storage_service/rebuild", queryParams); + } else { + client.post("/storage_service/rebuild"); + } + } + + /** Starts a bulk load and blocks until it completes. */ + @Override + public void bulkLoad(String directory) { + log(" bulkLoad(String directory)"); + client.post("/storage_service/bulk_load/" + directory); + } + + /** + * Starts a bulk load asynchronously and returns the String representation + * of the planID for the new streaming session. 
+ */ + @Override + public String bulkLoadAsync(String directory) { + log(" bulkLoadAsync(String directory)"); + return client.getStringValue("/storage_service/bulk_load_async/" + directory); + } + + @Override + public void rescheduleFailedDeletions() { + log(" rescheduleFailedDeletions()"); + client.post("/storage_service/reschedule_failed_deletions"); + } + + /** + * Load new SSTables to the given keyspace/columnFamily + * + * @param ksName + * The parent keyspace name + * @param cfName + * The ColumnFamily name where SSTables belong + */ + @Override + public void loadNewSSTables(String ksName, String cfName) { + log(" loadNewSSTables(String ksName, String cfName)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("cf", cfName); + client.post("/storage_service/sstables/" + ksName, queryParams); + } + + /** + * Return a List of Tokens representing a sample of keys across all + * ColumnFamilyStores. + * + * Note: this should be left as an operation, not an attribute (methods + * starting with "get") to avoid sending potentially multiple MB of data + * when accessing this mbean by default. See CASSANDRA-4452. + * + * @return set of Tokens as Strings + */ + @Override + public List sampleKeyRange() { + log(" sampleKeyRange()"); + return client.getListStrValue("/storage_service/sample_key_range"); + } + + /** + * rebuild the specified indexes + */ + @Override + public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames) { + log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)"); + } + + @Override + public void resetLocalSchema() throws IOException { + log(" resetLocalSchema() throws IOException"); + client.post("/storage_service/relocal_schema"); + } + + @Override + public void reloadLocalSchema() { + + } + + /** + * Enables/Disables tracing for the whole system. Only thrift requests can + * start tracing currently. + * + * @param probability + * ]0,1[ will enable tracing on a partial number of requests with + * the provided probability. 0 will disable tracing and 1 will + * enable tracing for all requests (which mich severely cripple + * the system) + */ + @Override + public void setTraceProbability(double probability) { + log(" setTraceProbability(double probability)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("probability", Double.toString(probability)); + client.post("/storage_service/trace_probability", queryParams); + } + + /** + * Returns the configured tracing probability. + */ + @Override + public double getTraceProbability() { + log(" getTraceProbability()"); + return client.getDoubleValue("/storage_service/trace_probability"); + } + + @Override + public void disableAutoCompaction(String ks, String... columnFamilies) throws IOException { + log("disableAutoCompaction(String ks, String... columnFamilies)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.delete("/storage_service/auto_compaction/" + ks, queryParams); + } + + @Override + public void enableAutoCompaction(String ks, String... columnFamilies) throws IOException { + log("enableAutoCompaction(String ks, String... 
columnFamilies)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + try { + client.post("/storage_service/auto_compaction/" + ks, queryParams); + } catch (RuntimeException e) { + // FIXME should throw the right exception + throw new IOException(e.getMessage()); + } + + } + + @Override + public void deliverHints(String host) throws UnknownHostException { + log(" deliverHints(String host) throws UnknownHostException"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("host", host); + client.post("/storage_service/deliver_hints", queryParams); + } + + /** Returns the name of the cluster */ + @Override + public String getClusterName() { + log(" getClusterName()"); + return client.getStringValue("/storage_service/cluster_name"); + } + + /** Returns the cluster partitioner */ + @Override + public String getPartitionerName() { + log(" getPartitionerName()"); + return client.getStringValue("/storage_service/partitioner_name"); + } + + /** Returns the threshold for warning of queries with many tombstones */ + @Override + public int getTombstoneWarnThreshold() { + log(" getTombstoneWarnThreshold()"); + return client.getIntValue("/storage_service/tombstone_warn_threshold"); + } + + /** Sets the threshold for warning queries with many tombstones */ + @Override + public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) { + log(" setTombstoneWarnThreshold(int tombstoneDebugThreshold)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold)); + client.post("/storage_service/tombstone_warn_threshold", queryParams); + } + + /** Returns the threshold for abandoning queries with many tombstones */ + @Override + public int getTombstoneFailureThreshold() { + log(" getTombstoneFailureThreshold()"); + return client.getIntValue("/storage_service/tombstone_failure_threshold"); + } + + /** Sets the threshold for abandoning queries with many tombstones */ + @Override + public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) { + log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold)); + client.post("/storage_service/tombstone_failure_threshold", queryParams); + } + + /** Returns the threshold for rejecting queries due to a large batch size */ + @Override + public int getBatchSizeFailureThreshold() { + log(" getBatchSizeFailureThreshold()"); + return client.getIntValue("/storage_service/batch_size_failure_threshold"); + } + + /** Sets the threshold for rejecting queries due to a large batch size */ + @Override + public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold) { + log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("threshold", Integer.toString(batchSizeDebugThreshold)); + client.post("/storage_service/batch_size_failure_threshold", queryParams); + } + + /** + * Sets the hinted handoff throttle in kb per second, per delivery thread. 
+ */ + @Override + public void setHintedHandoffThrottleInKB(int throttleInKB) { + log(" setHintedHandoffThrottleInKB(int throttleInKB)"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("throttle", Integer.toString(throttleInKB)); + client.post("/storage_service/hinted_handoff", queryParams); + + } + +// @Override + public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException { + log(" takeMultipleColumnFamilySnapshot"); + Map> keyspaceColumnfamily = new HashMap>(); + Map> kss = getColumnFamilyPerKeyspace(); + Map>> snapshots = getSnapshotKeyspaceColumnFamily(); + for (String columnFamily : columnFamilyList) { + String splittedString[] = columnFamily.split("\\."); + if (splittedString.length == 2) { + String keyspaceName = splittedString[0]; + String columnFamilyName = splittedString[1]; + + if (keyspaceName == null) { + throw new IOException("You must supply a keyspace name"); + } + if (columnFamilyName == null) { + throw new IOException("You must supply a column family name"); + } + if (tag == null || tag.equals("")) { + throw new IOException("You must supply a snapshot name."); + } + if (!kss.containsKey(keyspaceName)) { + throw new IOException("Keyspace " + keyspaceName + " does not exist"); + } + if (!kss.get(keyspaceName).contains(columnFamilyName)) { + throw new IllegalArgumentException( + String.format("Unknown keyspace/cf pair (%s.%s)", keyspaceName, columnFamilyName)); + } + // As there can be multiple column family from same keyspace + // check if snapshot exist for that specific + // columnfamily and not for whole keyspace + + if (snapshots.containsKey(tag) && snapshots.get(tag).containsKey(keyspaceName) + && snapshots.get(tag).get(keyspaceName).contains(columnFamilyName)) { + throw new IOException("Snapshot " + tag + " already exists."); + } + + if (!keyspaceColumnfamily.containsKey(keyspaceName)) { + keyspaceColumnfamily.put(keyspaceName, new ArrayList()); + } + + // Add Keyspace columnfamily to map in order to support + // atomicity for snapshot process. + // So no snapshot should happen if any one of the above + // conditions fail for any keyspace or columnfamily + keyspaceColumnfamily.get(keyspaceName).add(columnFamilyName); + + } else { + throw new IllegalArgumentException( + "Cannot take a snapshot on secondary index or invalid column family name. You must supply a column family name in the form of keyspace.columnfamily"); + } + } + + for (Entry> entry : keyspaceColumnfamily.entrySet()) { + for (String columnFamily : entry.getValue()) { + takeColumnFamilySnapshot(entry.getKey(), columnFamily, tag); + } + } + } + + @Override + public int forceRepairAsync(String keyspace, int parallelismDegree, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean fullRepair, String... 
columnFamilies) { + log(" forceRepairAsync(keyspace, parallelismDegree, dataCenters, hosts, primaryRange, fullRepair, columnFamilies)"); + Map options = new HashMap(); + Joiner commas = Joiner.on(","); + options.put("parallelism", Integer.toString(parallelismDegree)); + if (dataCenters != null) { + options.put("dataCenters", commas.join(dataCenters)); + } + if (hosts != null) { + options.put("hosts", commas.join(hosts)); + } + options.put("primaryRange", Boolean.toString(primaryRange)); + options.put("incremental", Boolean.toString(!fullRepair)); + if (columnFamilies != null && columnFamilies.length > 0) { + options.put("columnFamilies", commas.join(columnFamilies)); + } + return repairAsync(keyspace, options); + } + + @Override + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree, + Collection dataCenters, Collection hosts, boolean fullRepair, String... columnFamilies) { + log(" forceRepairRangeAsync(beginToken, endToken, keyspaceName, parallelismDegree, dataCenters, hosts, fullRepair, columnFamilies)"); + Map options = new HashMap(); + Joiner commas = Joiner.on(","); + options.put("parallelism", Integer.toString(parallelismDegree)); + if (dataCenters != null) { + options.put("dataCenters", commas.join(dataCenters)); + } + if (hosts != null) { + options.put("hosts", commas.join(hosts)); + } + options.put("incremental", Boolean.toString(!fullRepair)); + options.put("startToken", beginToken); + options.put("endToken", endToken); + return repairAsync(keyspaceName, options); + } + + @Override + public Map getEndpointToHostId() { + return getHostIdMap(); + } + + @Override + public Map getHostIdToEndpoint() { + return getHostIdToAddressMap(); + } + + @Override + public void refreshSizeEstimates() throws ExecutionException { + // TODO Auto-generated method stub + log(" refreshSizeEstimates"); + } + + @Override + public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) + throws IOException, ExecutionException, InterruptedException { + // "splitOutput" afaik not relevant for scylla (yet?...) + forceKeyspaceCompaction(keyspaceName, tableNames); + } + + @Override + public int relocateSSTables(String keyspace, String... cfnames) throws IOException, ExecutionException, InterruptedException { + return 0; + } + + @Override + public int relocateSSTables(int jobs, String keyspace, String... cfnames) throws IOException, ExecutionException, InterruptedException { + return 0; + } + + @Override + public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) + throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return forceKeyspaceCleanup(keyspaceName, tables); + } + + @Override + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, + String... columnFamilies) throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return scrub(disableSnapshot, skipCorrupted, checkData, 0, keyspaceName, columnFamilies); + } + + @Override + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException { + return 0; + } + + @Override + public int verify(boolean extendedVerify, String keyspaceName, String... 
tableNames) + throws IOException, ExecutionException, InterruptedException { + // TODO Auto-generated method stub + log(" verify"); + return 0; + } + + @Override + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) + throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return upgradeSSTables(keyspaceName, excludeCurrentVersion, tableNames); + } + + @Override + public int garbageCollect(String tombstoneOption, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException { + return 0; + } + + @Override + public List getNonLocalStrategyKeyspaces() { + log(" getNonLocalStrategyKeyspaces"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("type", "non_local_strategy"); + return client.getListStrValue("/storage_service/keyspaces", queryParams); + } + + @Override + public void setInterDCStreamThroughputMbPerSec(int value) { + // TODO Auto-generated method stub + log(" setInterDCStreamThroughputMbPerSec"); + } + + @Override + public int getInterDCStreamThroughputMbPerSec() { + // TODO Auto-generated method stub + log(" getInterDCStreamThroughputMbPerSec"); + return 0; + } + + @Override + public boolean resumeBootstrap() { + log(" resumeBootstrap"); + return false; + } + +} diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java index c86f4c12a2..bea57053c0 100644 --- a/src/java/org/apache/cassandra/tools/NodeTool.java +++ b/src/java/org/apache/cassandra/tools/NodeTool.java @@ -49,9 +49,19 @@ public class NodeTool public static void main(String... args) { - List> commands = asList( + boolean REST=true; + for (String arg:args) { + if (arg.startsWith("-r") || arg.startsWith("--protocol")) { + String[] param=arg.split("="); + if (param.length>1) { + if (!"rest".equals(param[1].toLowerCase())) { + System.out.println("Other than REST detected, falling back to jmx"); + REST = false; } + } + } + } + List> commands = new ArrayList<>(asList( Help.class, - Info.class, Ring.class, NetStats.class, CfStats.class, @@ -147,9 +157,10 @@ public static void main(String... 
args) // Remove until proven otherwise: RefreshSizeEstimates.class // Remove until proven otherwise: RelocateSSTables.class, ViewBuildStatus.class, - SSTableInfo.class - ); + )); + + if (REST) { commands.add(RESTInfo.class); } else { commands.add(Info.class); } Cli.CliBuilder builder = Cli.builder("nodetool"); @@ -232,12 +243,18 @@ public static class CommandFailedButNeedNoMoreOutput extends Error {}; public static abstract class NodeToolCmd implements Runnable { + private final String REST = "rest"; + private final String JMX = "jmx"; + @Option(type = OptionType.GLOBAL, name = {"-h", "--host"}, description = "Node hostname or ip address") private String host = "127.0.0.1"; @Option(type = OptionType.GLOBAL, name = {"-p", "--port"}, description = "Remote jmx agent port number") private String port = "7199"; + @Option(type = OptionType.GLOBAL, name = {"-o", "--restport"}, description = "Remote Scylla REST port number") + private String rport = "10000"; + @Option(type = OptionType.GLOBAL, name = {"-u", "--username"}, description = "Remote jmx agent username") private String username = EMPTY; @@ -247,6 +264,9 @@ public static abstract class NodeToolCmd implements Runnable @Option(type = OptionType.GLOBAL, name = {"-pwf", "--password-file"}, description = "Path to the JMX password file") private String passwordFilePath = EMPTY; + @Option(type = OptionType.GLOBAL, name = {"-r", "--protocol"}, description = "Use rest(default, only for what is ported) or jmx(legacy) protocol") + private String protocol = REST; + @Override public void run() { @@ -317,10 +337,19 @@ private NodeProbe connect() try { - if (username.isEmpty()) - nodeClient = new NodeProbe(host, parseInt(port)); - else - nodeClient = new NodeProbe(host, parseInt(port), username, password); + if (username.isEmpty()) { + if (REST.equals(protocol)) { + nodeClient = new RESTNodeProbe(host, parseInt(port), parseInt(rport)); + } else { + nodeClient = new NodeProbe(host, parseInt(port)); + } + } else { + if (REST.equals(protocol)) { + nodeClient = new RESTNodeProbe(host, parseInt(port), parseInt(rport), username, password); + } else { + nodeClient = new NodeProbe(host, parseInt(port), username, password); + } + } } catch (IOException | SecurityException e) { Throwable rootCause = Throwables.getRootCause(e); diff --git a/src/java/org/apache/cassandra/tools/RESTNodeProbe.java b/src/java/org/apache/cassandra/tools/RESTNodeProbe.java new file mode 100644 index 0000000000..dc8f5e2356 --- /dev/null +++ b/src/java/org/apache/cassandra/tools/RESTNodeProbe.java @@ -0,0 +1,1430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.cassandra.tools; + +import com.google.common.collect.Multimap; +import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.api.APIConfig; +import com.scylladb.jmx.utils.FileUtils; +import org.apache.cassandra.config.CFMetaData; +import org.apache.cassandra.db.ColumnFamilyStore; +import org.apache.cassandra.db.ColumnFamilyStoreMBean; +import org.apache.cassandra.db.Directories; +import org.apache.cassandra.db.Keyspace; +import org.apache.cassandra.db.compaction.CompactionManagerMBean; +import org.apache.cassandra.db.compaction.ScyllaCompactionManager; +import org.apache.cassandra.db.marshal.AsciiType; +import org.apache.cassandra.locator.DynamicEndpointSnitchMBean; +import org.apache.cassandra.locator.EndpointSnitchInfoMBean; +import org.apache.cassandra.locator.ScyllaEndpointSnitchInfo; +import org.apache.cassandra.metrics.CassandraMetricsRegistry; +import org.apache.cassandra.metrics.ScyllaJmxHistogram; +import org.apache.cassandra.metrics.ScyllaJmxTimer; +import org.apache.cassandra.metrics.StorageMetrics; +import org.apache.cassandra.metrics.TableMetrics; +import org.apache.cassandra.metrics.TableMetrics.Sampler; +import org.apache.cassandra.service.CacheServiceMBean; +import org.apache.cassandra.service.ScyllaCacheService; +import org.apache.cassandra.service.ScyllaStorageProxy; +import org.apache.cassandra.service.ScyllaStorageService; +import org.apache.cassandra.service.StorageProxyMBean; +import org.apache.cassandra.service.StorageServiceMBean; +import org.apache.cassandra.streaming.StreamState; +import org.apache.cassandra.tools.nodetool.GetTimeout; + +import javax.json.JsonArray; +import javax.json.JsonObject; +import javax.json.JsonString; +import javax.management.JMX; +import javax.management.MBeanServerConnection; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.TabularData; +import java.io.IOException; +import java.io.PrintStream; +import java.lang.management.MemoryUsage; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.logging.Logger; + +/** + * REST client operations for Scylla. + */ +public class RESTNodeProbe extends NodeProbe { + + private static APIConfig config; + protected final APIClient client; + + /** + * Creates a NodeProbe using the specified JMX host, port, username, and password. + * + * @param host hostname or IP address of the JMX agent + * @param port TCP port of the remote JMX agent + * @throws IOException on connection failures + */ + public RESTNodeProbe(String host, int port, int rport, String username, String password) throws IOException { + super(host, port, username, password); + System.setProperty("apiaddress", host); + System.getProperty("apiport", String.valueOf(rport)); + //TODO add username and password support - first in scylla-apiclient, then here + config = new APIConfig(); + client = new APIClient(config); + } + + /** + * Creates a NodeProbe using the specified JMX host and port. 
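+ * <p>
+ * A minimal sketch (assumes a local node with the default JMX and REST
+ * ports, 7199 and 10000, as used by NodeTool above):
+ * <pre>{@code
+ * RESTNodeProbe probe = new RESTNodeProbe("127.0.0.1", 7199, 10000);
+ * System.out.println(probe.getLoadMap());
+ * }</pre>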
+     *
+     * @param host hostname or IP address of the JMX agent
+     * @param port TCP port of the remote JMX agent
+     * @param rport TCP port of the Scylla REST API
+     * @throws IOException on connection failures
+     */
+    public RESTNodeProbe(String host, int port, int rport) throws IOException {
+        super(host, port);
+        System.setProperty("apiaddress", host);
+        System.setProperty("apiport", String.valueOf(rport));
+        config = new APIConfig();
+        client = new APIClient(config);
+    }
+
+    /**
+     * Creates a RESTNodeProbe using the specified JMX host and the default JMX and REST ports.
+     *
+     * @param host hostname or IP address of the JMX agent
+     * @throws IOException on connection failures
+     */
+    public RESTNodeProbe(String host) throws IOException {
+        super(host);
+        System.setProperty("apiaddress", host);
+        // "apiport" is left to APIConfig's built-in default here
+        config = new APIConfig();
+        client = new APIClient(config);
+    }
+
+    public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException {
+        return super.forceKeyspaceCleanup(jobs, keyspaceName, tables);
+    }
+
+    public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException {
+        return super.scrub(disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTL, jobs, keyspaceName, tables);
+    }
+
+    public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        return super.verify(extendedVerify, keyspaceName, tableNames);
+    }
+
+    public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        return super.upgradeSSTables(keyspaceName, excludeCurrentVersion, jobs, tableNames);
+    }
+
+    public int garbageCollect(String tombstoneOption, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        return super.garbageCollect(tombstoneOption, jobs, keyspaceName, tableNames);
+    }
+
+    public void forceKeyspaceCleanup(PrintStream out, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.forceKeyspaceCleanup(out, jobs, keyspaceName, tableNames);
+    }
+
+    public void scrub(PrintStream out, boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException {
+        super.scrub(out, disableSnapshot, skipCorrupted, checkData, reinsertOverflowedTTL, jobs, keyspaceName, tables);
+    }
+
+    public void verify(PrintStream out, boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.verify(out, extendedVerify, keyspaceName, tableNames);
+    }
+
+    public void upgradeSSTables(PrintStream out, String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.upgradeSSTables(out, keyspaceName, excludeCurrentVersion, jobs, tableNames);
+    }
+
+    public void garbageCollect(PrintStream out, String tombstoneOption, int jobs, String keyspaceName, String...
tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.garbageCollect(out, tombstoneOption, jobs, keyspaceName, tableNames);
+    }
+
+    public void forceUserDefinedCompaction(String datafiles) throws IOException, ExecutionException, InterruptedException {
+        super.forceUserDefinedCompaction(datafiles);
+    }
+
+    public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.forceKeyspaceCompaction(splitOutput, keyspaceName, tableNames);
+    }
+
+    public void relocateSSTables(int jobs, String keyspace, String[] cfnames) throws IOException, ExecutionException, InterruptedException {
+        super.relocateSSTables(jobs, keyspace, cfnames);
+    }
+
+    public void forceKeyspaceCompactionForTokenRange(String keyspaceName, final String startToken, final String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.forceKeyspaceCompactionForTokenRange(keyspaceName, startToken, endToken, tableNames);
+    }
+
+    public void forceKeyspaceFlush(String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
+        super.forceKeyspaceFlush(keyspaceName, tableNames);
+    }
+
+    public void repairAsync(final PrintStream out, final String keyspace, Map<String, String> options) throws IOException {
+        super.repairAsync(out, keyspace, options);
+    }
+
+    public Map<Sampler, CompositeData> getPartitionSample(String ks, String cf, int capacity, int duration, int count, List<Sampler> samplers) throws OpenDataException {
+        return super.getPartitionSample(ks, cf, capacity, duration, count, samplers);
+    }
+
+    public void invalidateCounterCache() {
+        super.invalidateCounterCache();
+    }
+
+    public void invalidateKeyCache() {
+        super.invalidateKeyCache();
+    }
+
+    public void invalidateRowCache() {
+        super.invalidateRowCache();
+    }
+
+    public void drain() throws IOException, InterruptedException, ExecutionException {
+        super.drain();
+    }
+
+    public Map<String, String> getTokenToEndpointMap() {
+        log(" getTokenToEndpointMap()");
+        return client.getMapStrValue("/storage_service/tokens_endpoint");
+    }
+
+    public List<String> getLiveNodes() {
+        log(" getLiveNodes()");
+        return client.getListStrValue("/gossiper/endpoint/live");
+    }
+
+    public List<String> getJoiningNodes() {
+        log(" getJoiningNodes()");
+        return client.getListStrValue("/storage_service/nodes/joining");
+    }
+
+    public List<String> getLeavingNodes() {
+        log(" getLeavingNodes()");
+        return client.getListStrValue("/storage_service/nodes/leaving");
+    }
+
+    public List<String> getMovingNodes() {
+        log(" getMovingNodes()");
+        return client.getListStrValue("/storage_service/nodes/moving");
+    }
+
+    public List<String> getUnreachableNodes() {
+        log(" getUnreachableNodes()");
+        return client.getListStrValue("/gossiper/endpoint/down");
+    }
+
+    public Map<String, String> getLoadMap() {
+        log(" getLoadMap()");
+        Map<String, Double> load = getLoadMapAsDouble();
+        Map<String, String> map = new HashMap<>();
+        for (Map.Entry<String, Double> entry : load.entrySet()) {
+            map.put(entry.getKey(), FileUtils.stringifyFileSize(entry.getValue()));
+        }
+        return map;
+    }
+
+    public Map<String, Double> getLoadMapAsDouble() {
+        log(" getLoadMapAsDouble()");
+        return client.getMapStringDouble("/storage_service/load_map");
+    }
+
+    public Map<InetAddress, Float> getOwnership() {
+        log(" getOwnership()");
+        return client.getMapInetAddressFloatValue("/storage_service/ownership/");
+    }
+
+    public Map<InetAddress, Float> effectiveOwnership(String keyspace) throws IllegalStateException {
+        log(" effectiveOwnership(String keyspace)");
+        try {
+            return
client.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace);
+        } catch (Exception e) {
+            throw new IllegalStateException(
+                    "Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless");
+        }
+    }
+
+    CacheServiceMBean cacheService = null;
+
+    public CacheServiceMBean getCacheServiceMBean() {
+        if (cacheService == null) {
+            cacheService = new ScyllaCacheService(client);
+        }
+        return cacheService;
+    }
+
+    public double[] getAndResetGCStats() {
+        return super.getAndResetGCStats();
+    }
+
+    public Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> getColumnFamilyStoreMBeanProxies() {
+        return super.getColumnFamilyStoreMBeanProxies();
+    }
+
+    public Iterator<Map.Entry<String, String>> getColumnFamilyStoreMap() {
+        JsonArray tables = client.getJsonArray("/column_family/name"); // format keyspace:table
+
+        List<Map.Entry<String, String>> cfMbeans = new ArrayList<>(tables.size());
+        for (JsonString record : tables.getValuesAs(JsonString.class)) {
+            String srecord = record.getString();
+            String[] sarray = srecord.split(":");
+            String keyspaceName = sarray[0];
+            String tableName = null;
+            if (sarray.length > 1) {
+                tableName = sarray[1];
+            }
+            cfMbeans.add(new AbstractMap.SimpleImmutableEntry<>(keyspaceName, tableName));
+        }
+        return cfMbeans.iterator();
+    }
+
+    CompactionManagerMBean compactionManager = null;
+
+    public CompactionManagerMBean getCompactionManagerProxy() {
+        if (compactionManager == null) {
+            compactionManager = new ScyllaCompactionManager(client);
+        }
+        return compactionManager;
+    }
+
+    @Override
+    public List<String> getTokens() {
+//        return super.getTokens();
+        log(" getTokens()");
+        return getTokens(getLocalBroadCastingAddress());
+    }
+
+    public String getLocalBroadCastingAddress() {
+        // FIXME: there is no direct API for the broadcast address; rather than
+        // deriving it from configuration, look it up in getHostIdToAddressMap
+        // using the local host id
+        return getHostIdToAddressMap().get(getLocalHostId());
+    }
+
+    /**
+     * Retrieve the mapping of host ID to endpoint address
+     */
+    public Map<String, String> getHostIdToAddressMap() {
+        log(" getHostIdToAddressMap()");
+        return client.getReverseMapStrValue("/storage_service/host_id");
+    }
+
+    @Override
+    public List<String> getTokens(String endpoint) {
+//        return super.getTokens(endpoint);
+        log(" getTokens(String endpoint)");
+        return client.getListStrValue("/storage_service/tokens/" + endpoint);
+    }
+
+    @Override
+    public String getLocalHostId() {
+        log(" getLocalHostId()");
+        return client.getStringValue("/storage_service/hostid/local");
+    }
+
+    public Map<String, String> getHostIdMap() {
+        log(" getHostIdMap()");
+        return client.getMapStrValue("/storage_service/host_id");
+    }
+
+    public String getLoadString() {
+        log(" getLoadString()");
+        return FileUtils.stringifyFileSize(getLoad());
+    }
+
+    /**
+     * Numeric load value.
+     *
+     * @see org.apache.cassandra.metrics.StorageMetrics#load
+     */
+    @Deprecated
+    private double getLoad() {
+        log(" getLoad()");
+        return client.getDoubleValue("/storage_service/load");
+    }
+
+    public String getReleaseVersion() {
+        return super.getReleaseVersion();
+    }
+
+    @Override
+    public int getCurrentGenerationNumber() {
+        log(" getCurrentGenerationNumber()");
+        return client.getIntValue("/storage_service/generation_number");
+    }
+
+    @Override
+    public long getUptime() {
+        return super.getUptime();
+//        log(" getUptime()");
+//        return client.getLongValue("/system/uptime_ms");
+    }
+
+    @Override
+    public MemoryUsage getHeapMemoryUsage() {
+        //TODO: report heap usage from the Scylla server, NOT from the local JVM MXBean
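+        // NOTE: an all-zero MemoryUsage is safe for now: callers such as RESTInfo
+        // only format used/max into MB. Real values would need a server-side
+        // heap-metrics endpoint, which this patch does not assume to exist.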
+ return new MemoryUsage(0, 0, 0, 0); + } + + /** + * Take a snapshot of all the keyspaces, optionally specifying only a specific column family. + * + * @param snapshotName the name of the snapshot. + * @param table the table to snapshot or all on null + * @param options Options (skipFlush for now) + * @param keyspaces the keyspaces to snapshot + */ + public void takeSnapshot(String snapshotName, String table, Map options, String... keyspaces) throws IOException { + super.takeSnapshot(snapshotName, table, options, keyspaces); + } + + /** + * Take a snapshot of all column family from different keyspaces. + * + * @param snapshotName the name of the snapshot. + * @param options Options (skipFlush for now) + * @param tableList list of columnfamily from different keyspace in the form of ks1.cf1 ks2.cf2 + */ + public void takeMultipleTableSnapshot(String snapshotName, Map options, String... tableList) + throws IOException { + super.takeMultipleTableSnapshot(snapshotName, options, tableList); + } + + /** + * Remove all the existing snapshots. + */ + public void clearSnapshot(String tag, String... keyspaces) throws IOException { + super.clearSnapshot(tag, keyspaces); + } + + public Map getSnapshotDetails() { + return super.getSnapshotDetails(); + } + + public long trueSnapshotsSize() { + return super.trueSnapshotsSize(); + } + + @Override + public boolean isJoined() { + log(" isJoined()"); + return client.getBooleanValue("/storage_service/join_ring"); + } + + public boolean isDrained() { + return super.isDrained(); + } + + public boolean isDraining() { + return super.isDraining(); + } + + public void joinRing() throws IOException { + super.joinRing(); + } + + public void decommission() throws InterruptedException { + super.decommission(); + } + + public void move(String newToken) throws IOException { + super.move(newToken); + } + + public void removeNode(String token) { + super.removeNode(token); + } + + public String getRemovalStatus() { + return super.getRemovalStatus(); + } + + public void forceRemoveCompletion() { + super.forceRemoveCompletion(); + } + + public void assassinateEndpoint(String address) throws UnknownHostException { + super.assassinateEndpoint(address); + } + + /** + * Set the compaction threshold + * + * @param minimumCompactionThreshold minimum compaction threshold + * @param maximumCompactionThreshold maximum compaction threshold + */ + public void setCompactionThreshold(String ks, String cf, int minimumCompactionThreshold, int maximumCompactionThreshold) { + super.setCompactionThreshold(ks, cf, minimumCompactionThreshold, maximumCompactionThreshold); + } + + public void disableAutoCompaction(String ks, String... tables) throws IOException { + super.disableAutoCompaction(ks, tables); + } + + public void enableAutoCompaction(String ks, String... 
tableNames) throws IOException { + super.enableAutoCompaction(ks, tableNames); + } + + public void setIncrementalBackupsEnabled(boolean enabled) { + super.setIncrementalBackupsEnabled(enabled); + } + + public boolean isIncrementalBackupsEnabled() { + return super.isIncrementalBackupsEnabled(); + } + + public void setCacheCapacities(int keyCacheCapacity, int rowCacheCapacity, int counterCacheCapacity) { + super.setCacheCapacities(keyCacheCapacity, rowCacheCapacity, counterCacheCapacity); + } + + public void setCacheKeysToSave(int keyCacheKeysToSave, int rowCacheKeysToSave, int counterCacheKeysToSave) { + super.setCacheKeysToSave(keyCacheKeysToSave, rowCacheKeysToSave, counterCacheKeysToSave); + } + + public void setHintedHandoffThrottleInKB(int throttleInKB) { + super.setHintedHandoffThrottleInKB(throttleInKB); + } + + public List getEndpoints(String keyspace, String cf, String key) { + return super.getEndpoints(keyspace, cf, key); + } + + public List getSSTables(String keyspace, String cf, String key, boolean hexFormat) { + return super.getSSTables(keyspace, cf, key, hexFormat); + } + + public Set getStreamStatus() { + return super.getStreamStatus(); + } + + public String getOperationMode() { + return super.getOperationMode(); + } + + public boolean isStarting() { + return super.isStarting(); + } + + public void truncate(String keyspaceName, String tableName) { + super.truncate(keyspaceName, tableName); + } + + EndpointSnitchInfoMBean endpointSnitchInfo = null; + + public EndpointSnitchInfoMBean getEndpointSnitchInfoProxy() { + if (endpointSnitchInfo == null) { + endpointSnitchInfo = new ScyllaEndpointSnitchInfo(client); + } + return endpointSnitchInfo; + } + + public DynamicEndpointSnitchMBean getDynamicEndpointSnitchInfoProxy() { + return super.getDynamicEndpointSnitchInfoProxy(); + } + + public ColumnFamilyStoreMBean getCfsProxy(String ks, String cf) { + return super.getCfsProxy(ks, cf); + } + + StorageProxyMBean storageProxy = null; + + public StorageProxyMBean getSpProxy() { + if (storageProxy == null) { + storageProxy = new ScyllaStorageProxy(client); + } + return storageProxy; + } + + public String getEndpoint() { + return super.getEndpoint(); + } + + @Override + public String getDataCenter() { + return client.getStringValue("/snitch/datacenter", null, 10000); +// log(" getDatacenter(String host) throws UnknownHostException"); +// MultivaluedMap queryParams = null; +// try { +// queryParams = host != null ? 
+//                new MultivaluedHashMap(
+//                        singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null;
+//        } catch (UnknownHostException e) {
+//            e.printStackTrace(); //TODO fix DNS name lookup error
+//        }
+//        return client.getStringValue("/snitch/datacenter", queryParams, 10000);
+    }
+
+    @Override
+    public String getRack() {
+        return client.getStringValue("/snitch/rack", null, 10000);
+    }
+
+    public List<String> getKeyspaces() {
+        return super.getKeyspaces();
+    }
+
+    public List<String> getNonSystemKeyspaces() {
+        return super.getNonSystemKeyspaces();
+    }
+
+    public List<String> getNonLocalStrategyKeyspaces() {
+        return super.getNonLocalStrategyKeyspaces();
+    }
+
+    public String getClusterName() {
+        log(" getClusterName()");
+        return client.getStringValue("/storage_service/cluster_name");
+    }
+
+    public String getPartitioner() {
+        log(" getPartitioner()");
+        return client.getStringValue("/storage_service/partitioner_name");
+    }
+
+    public void disableHintedHandoff() {
+        getSpProxy().setHintedHandoffEnabled(false);
+    }
+
+    //TODO: the hint operations below still delegate to JMX; port them to REST like disableHintedHandoff() above
+    public void enableHintedHandoff() {
+        super.enableHintedHandoff();
+    }
+
+    public boolean isHandoffEnabled() {
+        return super.isHandoffEnabled();
+    }
+
+    public void enableHintsForDC(String dc) {
+        super.enableHintsForDC(dc);
+    }
+
+    public void disableHintsForDC(String dc) {
+        super.disableHintsForDC(dc);
+    }
+
+    public Set<String> getHintedHandoffDisabledDCs() {
+        return super.getHintedHandoffDisabledDCs();
+    }
+
+    public Map<String, String> getViewBuildStatuses(String keyspace, String view) {
+        return super.getViewBuildStatuses(keyspace, view);
+    }
+
+    public void pauseHintsDelivery() {
+        super.pauseHintsDelivery();
+    }
+
+    public void resumeHintsDelivery() {
+        super.resumeHintsDelivery();
+    }
+
+    public void truncateHints(final String host) {
+        super.truncateHints(host);
+    }
+
+    public void truncateHints() {
+        super.truncateHints();
+    }
+
+    public void refreshSizeEstimates() {
+        super.refreshSizeEstimates();
+    }
+
+    public void stopNativeTransport() {
+        super.stopNativeTransport();
+    }
+
+    public void startNativeTransport() {
+        super.startNativeTransport();
+    }
+
+    public boolean isNativeTransportRunning() {
+        return super.isNativeTransportRunning();
+    }
+
+    public void stopGossiping() {
+        super.stopGossiping();
+    }
+
+    public void startGossiping() {
+        super.startGossiping();
+    }
+
+    @Override
+    public boolean isGossipRunning() {
+        log(" isGossipRunning()");
+        return client.getBooleanValue("/storage_service/gossiping");
+    }
+
+    public void stopThriftServer() {
+        super.stopThriftServer();
+    }
+
+    public void startThriftServer() {
+        super.startThriftServer();
+    }
+
+    @Override
+    public boolean isThriftServerRunning() {
+        log(" isThriftServerRunning()");
+        return client.getBooleanValue("/storage_service/rpc_server");
+    }
+
+    public void stopCassandraDaemon() {
+        super.stopCassandraDaemon();
+    }
+
+    public boolean isInitialized() {
+        return super.isInitialized();
+    }
+
+    // NodeProbe's ssProxy is shadowed here by ssProxyV; always access it via getSsProxy()
+    private StorageServiceMBean ssProxyV = null;
+
+    public StorageServiceMBean getSsProxy() {
+        if (ssProxyV == null) {
+            ssProxyV = new ScyllaStorageService(client);
+        }
+        return ssProxyV;
+    }
+
+    public void setCompactionThroughput(int value) {
+        getSsProxy().setCompactionThroughputMbPerSec(value);
+    }
+
+    public int getCompactionThroughput() {
+        return getSsProxy().getCompactionThroughputMbPerSec();
+    }
+
+    public void setConcurrentCompactors(int value) {
+        getSsProxy().setConcurrentCompactors(value);
+    }
+
+    public int getConcurrentCompactors() {
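+        // Reads through the REST-backed StorageServiceMBean wrapper
+        // (ScyllaStorageService), like the other compaction-tuning accessors above.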
+        return getSsProxy().getConcurrentCompactors();
+    }
+
+    public long getTimeout(String type) {
+        switch (type)
+        {
+            case "misc":
+                return getSsProxy().getRpcTimeout();
+            case "read":
+                return getSsProxy().getReadRpcTimeout();
+            case "range":
+                return getSsProxy().getRangeRpcTimeout();
+            case "write":
+                return getSsProxy().getWriteRpcTimeout();
+            case "counterwrite":
+                return getSsProxy().getCounterWriteRpcTimeout();
+            case "cascontention":
+                return getSsProxy().getCasContentionTimeout();
+            case "truncate":
+                return getSsProxy().getTruncateRpcTimeout();
+            case "streamingsocket":
+                return (long) getSsProxy().getStreamingSocketTimeout();
+            default:
+                throw new RuntimeException("Timeout type requires one of (" + GetTimeout.TIMEOUT_TYPES + ")");
+        }
+    }
+
+    public int getStreamThroughput() {
+        return getSsProxy().getStreamThroughputMbPerSec();
+    }
+
+    public int getInterDCStreamThroughput() {
+        return getSsProxy().getInterDCStreamThroughputMbPerSec();
+    }
+
+    public double getTraceProbability() {
+        return getSsProxy().getTraceProbability();
+    }
+
+    public int getExceptionCount() {
+        return (int) StorageMetrics.exceptions.getCount();
+    }
+
+    public Map<String, Integer> getDroppedMessages() {
+        return msProxy.getDroppedMessages();
+    }
+
+    public void loadNewSSTables(String ksName, String cfName) {
+        getSsProxy().loadNewSSTables(ksName, cfName);
+    }
+
+    public void rebuildIndex(String ksName, String cfName, String... idxNames) {
+        getSsProxy().rebuildSecondaryIndex(ksName, cfName, idxNames);
+    }
+
+    public String getGossipInfo() {
+        return super.getGossipInfo();
+    }
+
+    public void stop(String string) {
+        super.stop(string);
+    }
+
+    public void setTimeout(String type, long value) {
+        super.setTimeout(type, value);
+    }
+
+    public void stopById(String compactionId) {
+        super.stopById(compactionId);
+    }
+
+    public void setStreamThroughput(int value) {
+        super.setStreamThroughput(value);
+    }
+
+    public void setInterDCStreamThroughput(int value) {
+        super.setInterDCStreamThroughput(value);
+    }
+
+    public void setTraceProbability(double value) {
+        super.setTraceProbability(value);
+    }
+
+    public String getSchemaVersion() {
+        return super.getSchemaVersion();
+    }
+
+    public List<String> describeRing(String keyspaceName) throws IOException {
+        return super.describeRing(keyspaceName);
+    }
+
+    public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources) {
+        super.rebuild(sourceDc, keyspace, tokens, specificSources);
+    }
+
+    public List<String> sampleKeyRange() {
+        return super.sampleKeyRange();
+    }
+
+    public void resetLocalSchema() throws IOException {
+        super.resetLocalSchema();
+    }
+
+    public void reloadLocalSchema() {
+        super.reloadLocalSchema();
+    }
+
+    public boolean isFailed() {
+        return super.isFailed();
+    }
+
+    public long getReadRepairAttempted() {
+        return super.getReadRepairAttempted();
+    }
+
+    public long getReadRepairRepairedBlocking() {
+        return super.getReadRepairRepairedBlocking();
+    }
+
+    public long getReadRepairRepairedBackground() {
+        return super.getReadRepairRepairedBackground();
+    }
+
+    static Map<String, String> uriCacheRegistry = new HashMap<>();
+
+    static {
+        uriCacheRegistry.put("Capacity", "capacity"); //Long.class
+        uriCacheRegistry.put("Hits", "hits_moving_avrage");
+        uriCacheRegistry.put("Requests", "requests_moving_avrage");
+        uriCacheRegistry.put("HitRate", "hit_rate"); //Double.class
+        uriCacheRegistry.put("Size", "size");
+        uriCacheRegistry.put("Entries", "entries"); //Integer.class
+    }
+
+    static Map<String, String> uriCacheTypeRegistry = new HashMap<>();
+
+    static {
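+        // nodetool cache-type names mapped to the {type} segment of the
+        // /cache_service/metrics/{type}/{metric} endpoints; ChunkCache maps to ""
+        // because Scylla has no chunk cache and getCacheMetric() short-circuits it.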
uriCacheTypeRegistry.put("RowCache", "row"); + uriCacheTypeRegistry.put("KeyCache", "key"); + uriCacheTypeRegistry.put("CounterCache", "counter"); + uriCacheTypeRegistry.put("ChunkCache", ""); + + } + + // JMX getters for the o.a.c.metrics API below. + + /** + * Retrieve cache metrics based on the cache type (KeyCache, RowCache, or CounterCache) + * + * @param cacheType KeyCach, RowCache, or CounterCache + * @param metricName Capacity, Entries, HitRate, Size, Requests or Hits. + */ + public Object getCacheMetric(String cacheType, String metricName) { + + if (cacheType == "ChunkCache") { + if (metricName == "MissLatencyUnit") { + return TimeUnit.MICROSECONDS; + } else { + if (metricName == "Entries") { + return 0; + } else if (metricName == "HitRate") { + return 0D; + } else { + return 0L; + } + } + } + if (!uriCacheTypeRegistry.containsKey(cacheType)) { + throw new RuntimeException("Cache type: " + cacheType + " lacks its type REST mapping for: " + metricName); + } + if (!uriCacheRegistry.containsKey(metricName)) { + throw new RuntimeException("Cache type: " + cacheType + " lacks its REST mapping for: " + metricName); + } + String url = "/cache_service/metrics/" + uriCacheTypeRegistry.get(cacheType) + "/" + uriCacheRegistry.get(metricName); + + switch (metricName) { + case "Capacity": + case "Size": + return client.getLongValue(url); //TODO fix for proper types using getReader(xxx) + case "Entries": + return client.getIntValue(url); + case "HitRate": + return client.getDoubleValue(url); +// return JMX.newMBeanProxy(mbeanServerConn, +// new ObjectName("org.apache.cassandra.metrics:type=Cache,scope=" + cacheType + ",name=" + metricName), +// CassandraMetricsRegistry.JmxGaugeMBean.class).getValue(); + case "Requests": + case "Hits": + case "Misses": { + JsonObject obj = client.getJsonObj(url, null); +// JsonArray rates = obj.getJsonArray("rates"); +// Double oneMinuteRate = rates.getJsonNumber(0).doubleValue(); +// Double fiveMinuteRate = rates.getJsonNumber(1).doubleValue(); +// Double fifteenMinuteRate = rates.getJsonNumber(2).doubleValue(); +// Double meanRate = obj.getJsonNumber("mean_rate").doubleValue(); + Long count = obj.getJsonNumber("count").longValue(); + return count; + } +// return JMX.newMBeanProxy(mbeanServerConn, +// new ObjectName("org.apache.cassandra.metrics:type=Cache,scope=" + cacheType + ",name=" + metricName), +// CassandraMetricsRegistry.JmxMeterMBean.class).getCount(); + case "MissLatency": + return 0D; //TODO implement call on server side? 
+// return JMX.newMBeanProxy(mbeanServerConn, +// new ObjectName("org.apache.cassandra.metrics:type=Cache,scope=" + cacheType + ",name=" + metricName), +// CassandraMetricsRegistry.JmxTimerMBean.class).getMean(); + case "MissLatencyUnit": + return TimeUnit.MICROSECONDS.toString(); +// return JMX.newMBeanProxy(mbeanServerConn, +// new ObjectName("org.apache.cassandra.metrics:type=Cache,scope=" + cacheType + ",name=MissLatency"), +// CassandraMetricsRegistry.JmxTimerMBean.class).getDurationUnit(); + default: + throw new RuntimeException("Unknown cache metric name."); + + } +// } +// catch (MalformedObjectNameException e) +// { +// throw new RuntimeException(e); +// } + } + + public static BiFunction getReader(Class type) { + if (type == String.class) { + return (c, s) -> type.cast(c.getRawValue(s)); + } else if (type == Integer.class) { + return (c, s) -> type.cast(c.getIntValue(s)); + } else if (type == Double.class) { + return (c, s) -> type.cast(c.getDoubleValue(s)); + } else if (type == Long.class) { + return (c, s) -> type.cast(c.getLongValue(s)); + } + throw new IllegalArgumentException(type.getName()); + } + + public Object getThreadPoolMetric(String pathName, String poolName, String metricName) { + return super.getThreadPoolMetric(pathName, poolName, metricName); + } + + /** + * Retrieve threadpool paths and names for threadpools with metrics. + * + * @return Multimap from path (internal, request, etc.) to name + */ + public Multimap getThreadPools() { + return super.getThreadPools(); + } + + public int getNumberOfTables() { + return super.getNumberOfTables(); + } + + static Map uriCFMetricRegistry = new HashMap<>(); + + static { + //registerCommon + uriCFMetricRegistry.put("MemtableColumnsCount", "memtable_columns_count"); + uriCFMetricRegistry.put("MemtableOnHeapSize", "memtable_on_heap_size"); + uriCFMetricRegistry.put("MemtableOffHeapSize", "memtable_off_heap_size"); + uriCFMetricRegistry.put("MemtableLiveDataSize", "memtable_live_data_size"); + uriCFMetricRegistry.put("AllMemtablesHeapSize", "all_memtables_on_heap_size"); + uriCFMetricRegistry.put("AllMemtablesOffHeapSize", "all_memtables_off_heap_size"); + uriCFMetricRegistry.put("AllMemtablesLiveDataSize", "all_memtables_live_data_size"); + + uriCFMetricRegistry.put("MemtableSwitchCount", "memtable_switch_count"); + + uriCFMetricRegistry.put("SSTablesPerReadHistogram", "sstables_per_read_histogram"); + uriCFMetricRegistry.put("CompressionRatio", "compression_ratio"); + + uriCFMetricRegistry.put("PendingFlushes", "pending_flushes"); + + uriCFMetricRegistry.put("PendingCompactions", "pending_compactions"); + uriCFMetricRegistry.put("LiveSSTableCount", "live_ss_table_count"); + + uriCFMetricRegistry.put("LiveDiskSpaceUsed", "live_disk_space_used"); + uriCFMetricRegistry.put("TotalDiskSpaceUsed", "total_disk_space_used"); + uriCFMetricRegistry.put("MinPartitionSize", "min_row_size"); + uriCFMetricRegistry.put("MaxPartitionSize", "max_row_size"); + uriCFMetricRegistry.put("MeanPartitionSize", "mean_row_size"); + + uriCFMetricRegistry.put("BloomFilterFalsePositives", "bloom_filter_false_positives"); + uriCFMetricRegistry.put("RecentBloomFilterFalsePositives", "recent_bloom_filter_false_positives"); + uriCFMetricRegistry.put("BloomFilterFalseRatio", "bloom_filter_false_ratio"); + uriCFMetricRegistry.put("RecentBloomFilterFalseRatio", "recent_bloom_filter_false_ratio"); + + uriCFMetricRegistry.put("BloomFilterDiskSpaceUsed", "bloom_filter_disk_space_used"); + uriCFMetricRegistry.put("BloomFilterOffHeapMemoryUsed", 
"bloom_filter_off_heap_memory_used"); + uriCFMetricRegistry.put("IndexSummaryOffHeapMemoryUsed", "index_summary_off_heap_memory_used"); + uriCFMetricRegistry.put("CompressionMetadataOffHeapMemoryUsed", "compression_metadata_off_heap_memory_used"); + uriCFMetricRegistry.put("SpeculativeRetries", "speculative_retries"); + + uriCFMetricRegistry.put("TombstoneScannedHistogram", "tombstone_scanned_histogram"); + uriCFMetricRegistry.put("LiveScannedHistogram", "live_scanned_histogram"); + uriCFMetricRegistry.put("ColUpdateTimeDeltaHistogram", "col_update_time_delta_histogram"); + + // We do not want to capture view mutation specific metrics for a view + // They only makes sense to capture on the base table + // TODO: views + // if (!cfs.metadata.isView()) + // { + // viewLockAcquireTime = createTableTimer("ViewLockAcquireTime", + // cfs.keyspace.metric.viewLockAcquireTime); + // viewReadTime = createTableTimer("ViewReadTime", + // cfs.keyspace.metric.viewReadTime); + // } + + uriCFMetricRegistry.put("SnapshotsSize", "snapshots_size"); + uriCFMetricRegistry.put("RowCacheHitOutOfRange", "row_cache_hit_out_of_range"); + uriCFMetricRegistry.put("RowCacheHit", "row_cache_hit"); + uriCFMetricRegistry.put("RowCacheMiss", "row_cache_miss"); + // TODO: implement + uriCFMetricRegistry.put("PercentRepaired", ""); + + //registerLocal + uriCFMetricRegistry.put("EstimatedPartitionSizeHistogram", "estimated_row_size_histogram"); //"EstimatedRowSizeHistogram" + uriCFMetricRegistry.put("EstimatedPartitionCount", "estimated_row_count"); //"EstimatedRowCount" + uriCFMetricRegistry.put("EstimatedColumnCountHistogram", "estimated_column_count_histogram"); + uriCFMetricRegistry.put("KeyCacheHitRate", "key_cache_hit_rate"); + + uriCFMetricRegistry.put("CoordinatorReadLatency", "coordinator/read"); + uriCFMetricRegistry.put("CoordinatorScanLatency", "coordinator/scan"); + uriCFMetricRegistry.put("WaitingOnFreeMemtableSpace", "waiting_on_free_memtable"); + + //TODO verify latencyMetrics fromTableMetrics + uriCFMetricRegistry.put("WriteLatency", "write_latency/moving_average_histogram"); + uriCFMetricRegistry.put("ReadLatency", "read_latency/moving_average_histogram"); + + uriCFMetricRegistry.put("WriteTotalLatency", "write_latency"); + uriCFMetricRegistry.put("ReadTotalLatency", "read_latency"); + + uriCFMetricRegistry.put("CasPrepare", "cas_prepare"); + uriCFMetricRegistry.put("CasPropose", "cas_propose"); + uriCFMetricRegistry.put("CasCommit", "cas_commit"); + + // TODO: implement + uriCFMetricRegistry.put("DroppedMutations", ""); + } + + //custom for RESTInfo class to avoid counting metrics together + public Long getAggrColumnFamilyMetric(String metricName) { + return client.getLongValue("/column_family/metrics/" + uriCFMetricRegistry.get(metricName)); + } + + public static String CF_M_URL = "/column_family/metrics/"; + + /** + * Retrieve ColumnFamily metrics + * + * @param ks Keyspace for which stats are to be displayed or null for the global value + * @param cf ColumnFamily for which stats are to be displayed or null for the keyspace value (if ks supplied) + * @param metricName View {@link TableMetrics}. 
+ */ + @Override + public Object getColumnFamilyMetric(String ks, String cf, String metricName) { + String post = ""; + if (ks != null && cf != null) { + post = "/" + ks + ":" + cf; + } + if (!uriCFMetricRegistry.containsKey(metricName)) { + throw new RuntimeException("Table metric lacks its REST mapping: " + metricName); + } + switch (metricName) { + case "BloomFilterDiskSpaceUsed": + case "BloomFilterFalsePositives": + case "BloomFilterOffHeapMemoryUsed": + case "IndexSummaryOffHeapMemoryUsed": + case "CompressionMetadataOffHeapMemoryUsed": + case "EstimatedPartitionCount": + case "MaxPartitionSize": + case "MeanPartitionSize": + case "MemtableColumnsCount": + case "MemtableLiveDataSize": + case "MemtableOffHeapSize": + case "MinPartitionSize": + case "RecentBloomFilterFalsePositives": + case "SnapshotsSize": { + return client.getLongValue(CF_M_URL + uriCFMetricRegistry.get(metricName) + post); + } + case "LiveSSTableCount": //Integer + case "PendingCompactions": { + if (cf == null) { + post = "/" + ks; + return client.getLongValue(CF_M_URL + uriCFMetricRegistry.get(metricName) + post); + } + return client.getIntValue(CF_M_URL + uriCFMetricRegistry.get(metricName) + post); + } + case "KeyCacheHitRate": + case "BloomFilterFalseRatio": //Double + case "CompressionRatio": + case "RecentBloomFilterFalseRatio": { + return client.getDoubleValue(CF_M_URL + uriCFMetricRegistry.get(metricName) + post); + } + case "PercentRepaired": { //TODO - this needs server implementation !!!! + return 0D; + } + case "LiveDiskSpaceUsed": + case "MemtableSwitchCount": + case "SpeculativeRetries": + case "TotalDiskSpaceUsed": + case "WriteTotalLatency": + case "ReadTotalLatency": + case "PendingFlushes": + return client.getLongValue(CF_M_URL + uriCFMetricRegistry.get(metricName) + post); + case "DroppedMutations": + return 0L; + case "CasPrepare": + case "CasPropose": + case "CasCommit": + case "CoordinatorReadLatency": + case "CoordinatorScanLatency": + case "ReadLatency": + case "WriteLatency": + { + String alias = uriCFMetricRegistry.get(metricName); + try { + JsonObject obj = client.getJsonObj(CF_M_URL + alias + post, null); + return new ScyllaJmxTimer(obj, metricName); + } catch (IllegalStateException e) { + return new ScyllaJmxTimer(); + } + } + case "EstimatedPartitionSizeHistogram": + case "EstimatedColumnCountHistogram": + JsonObject obj = client.getJsonObj(CF_M_URL + uriCFMetricRegistry.get(metricName) + post, null); + JsonArray arr = obj.getJsonArray("buckets"); + if (arr == null) { + return new long[0]; + } + long res[] = new long[arr.size()]; + for (int i = 0; i < arr.size(); i++) { + res[i] = arr.getJsonNumber(i).longValue(); + } + return res; + case "LiveScannedHistogram": + case "SSTablesPerReadHistogram": + case "TombstoneScannedHistogram": + case "ColUpdateTimeDeltaHistogram": + JsonObject objH = client.getJsonObj(CF_M_URL + uriCFMetricRegistry.get(metricName) + post, null); + return new ScyllaJmxHistogram(objH, metricName); + default: + throw new RuntimeException("Unknown table metric " + metricName); + } + } + + private static BiFunction getDummy(Class type) { + if (type == String.class) { + return (c, s) -> type.cast(""); + } else if (type == Integer.class) { + return (c, s) -> type.cast(0); + } else if (type == Double.class) { + return (c, s) -> type.cast(0.0); + } else if (type == Long.class) { + return (c, s) -> type.cast(0L); + } + throw new IllegalArgumentException(type.getName()); + } + + /** + * Retrieve Proxy metrics + * + * @param scope RangeSlice, Read or Write + */ + public 
CassandraMetricsRegistry.JmxTimerMBean getProxyMetric(String scope) {
+        return super.getProxyMetric(scope);
+    }
+
+    static Map<String, String> uriCompactionMetricRegistry = new HashMap<>();
+
+    static {
+        uriCompactionMetricRegistry.put("PendingTasks", "pending_tasks");
+        uriCompactionMetricRegistry.put("CompletedTasks", "completed_tasks");
+        uriCompactionMetricRegistry.put("TotalCompactionsCompleted", "total_compactions_completed");
+        uriCompactionMetricRegistry.put("BytesCompacted", "bytes_compacted");
+        uriCompactionMetricRegistry.put("PendingTasksByTableName", "");
+    }
+
+    public static String COMPACTION_M_URL = "/compaction_manager/metrics/";
+
+    /**
+     * Retrieve Compaction metrics
+     *
+     * @param metricName CompletedTasks, PendingTasks, BytesCompacted or TotalCompactionsCompleted.
+     */
+    public Object getCompactionMetric(String metricName) {
+        if (!uriCompactionMetricRegistry.containsKey(metricName)) {
+            throw new RuntimeException("Compaction metric lacks its REST mapping: " + metricName);
+        }
+        switch (metricName) {
+            case "BytesCompacted":
+//                /** Total number of bytes compacted since server [re]start */
+//                registry.register(() -> registry.meter("/compaction_manager/metrics/bytes_compacted"),
+//                        factory.createMetricName("BytesCompacted"));
+
+//                return JMX.newMBeanProxy(mbeanServerConn,
+//                        new ObjectName("org.apache.cassandra.metrics:type=Compaction,name=" + metricName),
+//                        CassandraMetricsRegistry.JmxCounterMBean.class);
+                throw new RuntimeException("Not implemented metric " + metricName);
+            case "CompletedTasks":
+//                /** Number of completed compactions since server [re]start */
+//                registry.register(() -> registry.gauge("/compaction_manager/metrics/completed_tasks"),
+//                        factory.createMetricName("CompletedTasks"));
+//                return JMX.newMBeanProxy(mbeanServerConn,
+//                        new ObjectName("org.apache.cassandra.metrics:type=Compaction,name=" + metricName),
+//                        CassandraMetricsRegistry.JmxGaugeMBean.class).getValue();
+                throw new RuntimeException("Not implemented metric " + metricName);
+            case "PendingTasks":
+                return client.getIntValue(COMPACTION_M_URL + uriCompactionMetricRegistry.get(metricName));
+
+            case "PendingTasksByTableName":
+                Map<String, Map<String, Integer>> result = new HashMap<>();
+                JsonArray compactions = client.getJsonArray("compaction_manager/compactions");
+
+                for (int i = 0; i < compactions.size(); i++) {
+                    JsonObject c = compactions.getJsonObject(i);
+
+                    String ks = c.getString("ks");
+                    String cf = c.getString("cf");
+
+                    if (!result.containsKey(ks)) {
+                        result.put(ks, new HashMap<>());
+                    }
+
+                    Map<String, Integer> map = result.get(ks);
+                    map.put(cf, (int) (c.getJsonNumber("total").longValue() - c.getJsonNumber("completed").longValue()));
+                }
+                return result;
+            case "TotalCompactionsCompleted":
+                throw new RuntimeException("Not implemented metric " + metricName);
+//                /** Total number of compactions since server [re]start */
+//                registry.register(() -> registry.meter("/compaction_manager/metrics/total_compactions_completed"),
+//                        factory.createMetricName("TotalCompactionsCompleted"));
+
+//                return JMX.newMBeanProxy(mbeanServerConn,
+//                        new ObjectName("org.apache.cassandra.metrics:type=Compaction,name=" + metricName),
+//                        CassandraMetricsRegistry.JmxMeterMBean.class);
+            default:
+                throw new RuntimeException("Unknown compaction metric.");
+        }
+    }
+
+    static Map<String, String> uriStorageRegistry = new HashMap<>();
+
+    static {
+        uriStorageRegistry.put("Load", "/storage_service/metrics/load");
+        uriStorageRegistry.put("Exceptions", "/storage_service/metrics/exceptions");
+        uriStorageRegistry.put("TotalHintsInProgress", "/storage_service/metrics/hints_in_progress");
"/storage_service/metrics/hints_in_progress"); + uriStorageRegistry.put("TotalHints", "/storage_service/metrics/total_hints"); + } + + /** + * Retrieve Proxy metrics + * + * @param metricName Exceptions, Load, TotalHints or TotalHintsInProgress. + */ + @Override + public long getStorageMetric(String metricName) { + return client.getLongValue(uriStorageRegistry.get(metricName)); + } + + public double[] metricPercentilesAsArray(CassandraMetricsRegistry.JmxHistogramMBean metric) { + return new double[]{metric.get50thPercentile(), + metric.get75thPercentile(), + metric.get95thPercentile(), + metric.get98thPercentile(), + metric.get99thPercentile(), + metric.getMin(), + metric.getMax()}; + } + + public double[] metricPercentilesAsArray(CassandraMetricsRegistry.JmxTimerMBean metric) { + return new double[]{metric.get50thPercentile(), + metric.get75thPercentile(), + metric.get95thPercentile(), + metric.get98thPercentile(), + metric.get99thPercentile(), + metric.getMin(), + metric.getMax()}; + } + + public TabularData getCompactionHistory() { + return super.getCompactionHistory(); + } + + public void reloadTriggers() { + super.reloadTriggers(); + } + + public void setLoggingLevel(String classQualifier, String level) { + super.setLoggingLevel(classQualifier, level); + } + + public Map getLoggingLevels() { + return super.getLoggingLevels(); + } + + public void resumeBootstrap(PrintStream out) throws IOException { + super.resumeBootstrap(out); + } + + public void replayBatchlog() throws IOException { + super.replayBatchlog(); + } + + public TabularData getFailureDetectorPhilValues() { + return super.getFailureDetectorPhilValues(); + } + + + private static final Logger logger = Logger.getLogger(RESTNodeProbe.class.getName()); + + public void log(String str) { + logger.finest(str); + } + +} + +//TODO below is unused, was a PoC for mocking mbeans to avoid changing Info.class (but then I went for RESTInfo anyways +class RESTColumnFamilyStoreMBeanIterator implements Iterator> { + private MBeanServerConnection mbeanServerConn; + Iterator> mbeans; + + public RESTColumnFamilyStoreMBeanIterator(APIClient client) + throws MalformedObjectNameException, NullPointerException, IOException { + + JsonArray tables = client.getJsonArray("/column_family/name"); // format keyspace:table + + List> cfMbeans = new ArrayList>(tables.size()); + for (JsonString record : tables.getValuesAs(JsonString.class)) { + String srecord = record.getString(); + String[] sarray = srecord.split(":"); + String keyspaceName = sarray[0]; + String tableName = null; + if (sarray.length > 1) { + tableName = sarray[1]; + } + CFMetaData cfmd = CFMetaData.Builder.create(keyspaceName, tableName, false, false, false) + .addPartitionKey("pkey", AsciiType.instance) + .addClusteringColumn("name", AsciiType.instance) + .addRegularColumn("val", AsciiType.instance) + .build(); + ColumnFamilyStoreMBean cfs = new ColumnFamilyStore(Keyspace.openWithoutSSTables(keyspaceName), srecord.replaceFirst(":", "."), 0, cfmd, new Directories(cfmd), false, false, false); + cfMbeans.add(new AbstractMap.SimpleImmutableEntry(keyspaceName, cfs)); + } +// getCFSMBeans(mbeanServerConn, "ColumnFamilies"); +// cfMbeans.addAll(getCFSMBeans(mbeanServerConn, "IndexColumnFamilies")); + Collections.sort(cfMbeans, new Comparator>() { + public int compare(Map.Entry e1, Map.Entry e2) { + //compare keyspace, then CF name, then normal vs. 
index + int keyspaceNameCmp = e1.getKey().compareTo(e2.getKey()); + if (keyspaceNameCmp != 0) + return keyspaceNameCmp; + + // get CF name and split it for index name + String e1CF[] = e1.getValue().getColumnFamilyName().split("\\."); + String e2CF[] = e2.getValue().getColumnFamilyName().split("\\."); + assert e1CF.length <= 2 && e2CF.length <= 2 : "unexpected split count for table name"; + + //if neither are indexes, just compare CF names + if (e1CF.length == 1 && e2CF.length == 1) + return e1CF[0].compareTo(e2CF[0]); + + //check if it's the same CF + int cfNameCmp = e1CF[0].compareTo(e2CF[0]); + if (cfNameCmp != 0) + return cfNameCmp; + + // if both are indexes (for the same CF), compare them + if (e1CF.length == 2 && e2CF.length == 2) + return e1CF[1].compareTo(e2CF[1]); + + //if length of e1CF is 1, it's not an index, so sort it higher + return e1CF.length == 1 ? 1 : -1; + } + }); + mbeans = cfMbeans.iterator(); + } + + //TODO delete if you are sure we get both IndexColumnFamilies and ColumnFamilies + private List> getCFSMBeans(MBeanServerConnection mbeanServerConn, String type) + throws MalformedObjectNameException, IOException { + ObjectName query = new ObjectName("org.apache.cassandra.db:type=" + type + ",*"); + Set cfObjects = mbeanServerConn.queryNames(query, null); + List> mbeans = new ArrayList>(cfObjects.size()); + for (ObjectName n : cfObjects) { + String keyspaceName = n.getKeyProperty("keyspace"); + ColumnFamilyStoreMBean cfsProxy = JMX.newMBeanProxy(mbeanServerConn, n, ColumnFamilyStoreMBean.class); + mbeans.add(new AbstractMap.SimpleImmutableEntry(keyspaceName, cfsProxy)); + } + return mbeans; + } + + public boolean hasNext() { + return mbeans.hasNext(); + } + + public Map.Entry next() { + return mbeans.next(); + } + + public void remove() { + throw new UnsupportedOperationException(); + } +} \ No newline at end of file diff --git a/src/java/org/apache/cassandra/tools/nodetool/RESTInfo.java b/src/java/org/apache/cassandra/tools/nodetool/RESTInfo.java new file mode 100644 index 0000000000..adff88bdf7 --- /dev/null +++ b/src/java/org/apache/cassandra/tools/nodetool/RESTInfo.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.cassandra.tools.nodetool; + +import io.airlift.command.Command; +import io.airlift.command.Option; + +import java.lang.management.MemoryUsage; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.management.InstanceNotFoundException; + +import org.apache.cassandra.db.ColumnFamilyStoreMBean; +import org.apache.cassandra.io.util.FileUtils; +import org.apache.cassandra.service.CacheServiceMBean; +import org.apache.cassandra.tools.NodeProbe; +import org.apache.cassandra.tools.RESTNodeProbe; +import org.apache.cassandra.tools.NodeTool.NodeToolCmd; + +@Command(name = "info", description = "Print node information (uptime, load, ...)") +public class RESTInfo extends NodeToolCmd +{ + @Option(name = {"-T", "--tokens"}, description = "Display all tokens") + private boolean tokens = false; + + @Override + public void execute(NodeProbe probe) { + this.execute((RESTNodeProbe)probe); + } + + public void execute(RESTNodeProbe probe) + { + boolean gossipInitialized = probe.isGossipRunning(); + + System.out.printf("%-23s: %s%n", "ID", probe.getLocalHostId()); + System.out.printf("%-23s: %s%n", "Gossip active", gossipInitialized); + System.out.printf("%-23s: %s%n", "Thrift active", probe.isThriftServerRunning()); + System.out.printf("%-23s: %s%n", "Native Transport active", probe.isNativeTransportRunning()); + System.out.printf("%-23s: %s%n", "Load", probe.getLoadString()); + if (gossipInitialized) + System.out.printf("%-23s: %s%n", "Generation No", probe.getCurrentGenerationNumber()); + else + System.out.printf("%-23s: %s%n", "Generation No", 0); + + // Uptime + long secondsUp = probe.getUptime() / 1000; + System.out.printf("%-23s: %d%n", "Uptime (seconds)", secondsUp); + + // Memory usage + MemoryUsage heapUsage = probe.getHeapMemoryUsage(); + double memUsed = (double) heapUsage.getUsed() / (1024 * 1024); + double memMax = (double) heapUsage.getMax() / (1024 * 1024); + System.out.printf("%-23s: %.2f / %.2f%n", "Heap Memory (MB)", memUsed, memMax); + System.out.printf("%-23s: %.2f%n", "Off Heap Memory (MB)", getOffHeapMemoryUsed(probe)); + + // Data Center/Rack + System.out.printf("%-23s: %s%n", "Data Center", probe.getDataCenter()); + System.out.printf("%-23s: %s%n", "Rack", probe.getRack()); + + // Exceptions + System.out.printf("%-23s: %s%n", "Exceptions", probe.getStorageMetric("Exceptions")); + + CacheServiceMBean cacheService = probe.getCacheServiceMBean(); + + // Key Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds + System.out.printf("%-23s: entries %d, size %s, capacity %s, %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n", + "Key Cache", + probe.getCacheMetric("KeyCache", "Entries"), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("KeyCache", "Size")), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("KeyCache", "Capacity")), + probe.getCacheMetric("KeyCache", "Hits"), + probe.getCacheMetric("KeyCache", "Requests"), + probe.getCacheMetric("KeyCache", "HitRate"), + cacheService.getKeyCacheSavePeriodInSeconds()); + + // Row Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds + System.out.printf("%-23s: entries %d, size %s, capacity %s, %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n", + "Row Cache", + probe.getCacheMetric("RowCache", "Entries"), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("RowCache", "Size")), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("RowCache", "Capacity")), + 
probe.getCacheMetric("RowCache", "Hits"), + probe.getCacheMetric("RowCache", "Requests"), + probe.getCacheMetric("RowCache", "HitRate"), + cacheService.getRowCacheSavePeriodInSeconds()); + + // Counter Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds + System.out.printf("%-23s: entries %d, size %s, capacity %s, %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n", + "Counter Cache", + probe.getCacheMetric("CounterCache", "Entries"), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("CounterCache", "Size")), + FileUtils.stringifyFileSize((long) probe.getCacheMetric("CounterCache", "Capacity")), + probe.getCacheMetric("CounterCache", "Hits"), + probe.getCacheMetric("CounterCache", "Requests"), + probe.getCacheMetric("CounterCache", "HitRate"), + cacheService.getCounterCacheSavePeriodInSeconds()); + + // Chunk Cache: Hits, Requests, RecentHitRate, SavePeriodInSeconds +// try +// { +// TODO this is disabled, since Scylla doesn't support it anyways +// System.out.printf("%-23s: entries %d, size %s, capacity %s, %d misses, %d requests, %.3f recent hit rate, %.3f %s miss latency%n", +// "Chunk Cache", +// probe.getCacheMetric("ChunkCache", "Entries"), +// FileUtils.stringifyFileSize((long) probe.getCacheMetric("ChunkCache", "Size")), +// FileUtils.stringifyFileSize((long) probe.getCacheMetric("ChunkCache", "Capacity")), +// probe.getCacheMetric("ChunkCache", "Misses"), +// probe.getCacheMetric("ChunkCache", "Requests"), +// probe.getCacheMetric("ChunkCache", "HitRate"), +// probe.getCacheMetric("ChunkCache", "MissLatency"), +// probe.getCacheMetric("ChunkCache", "MissLatencyUnit")); +// } +// catch (RuntimeException e) +// { +// if (!(e.getCause() instanceof InstanceNotFoundException)) +// throw e; +// +// // Chunk cache is not on. +// } + + // Global table stats + System.out.printf("%-23s: %s%%%n", "Percent Repaired", probe.getColumnFamilyMetric(null, null, "PercentRepaired")); + + // check if node is already joined, before getting tokens, since it throws exception if not. + if (probe.isJoined()) + { + // Tokens + List tokens = probe.getTokens(); + if (tokens.size() == 1 || this.tokens) + for (String token : tokens) + System.out.printf("%-23s: %s%n", "Token", token); + else + System.out.printf("%-23s: (invoke with -T/--tokens to see all %d tokens)%n", "Token", + tokens.size()); + } + else + { + System.out.printf("%-23s: (node is not joined to the cluster)%n", "Token"); + } + } + + /** + * Returns the total off heap memory used in MB. + * @return the total off heap memory used in MB. + */ + private static double getOffHeapMemoryUsed(RESTNodeProbe probe) + { + long offHeapMemUsedInBytes = 0; + // get a list of column family stores + offHeapMemUsedInBytes+=probe.getAggrColumnFamilyMetric("MemtableOffHeapSize"); + offHeapMemUsedInBytes+=probe.getAggrColumnFamilyMetric("BloomFilterOffHeapMemoryUsed"); + offHeapMemUsedInBytes+=probe.getAggrColumnFamilyMetric("IndexSummaryOffHeapMemoryUsed"); +// offHeapMemUsedInBytes+=probe.getAggrColumnFamilyMetric("CompressionMetadataOffHeapMemoryUsed"); //TODO not implemented on server !!!!!! 
+
+        Iterator<Entry<String, String>> cfamilies = probe.getColumnFamilyStoreMap();
+
+        while (cfamilies.hasNext())
+        {
+            Entry<String, String> entry = cfamilies.next();
+            String keyspaceName = entry.getKey();
+            String cfName = entry.getValue();
+
+//            offHeapMemUsedInBytes += (Long) probe.getColumnFamilyMetric(keyspaceName, cfName, "MemtableOffHeapSize");
+//            offHeapMemUsedInBytes += (Long) probe.getColumnFamilyMetric(keyspaceName, cfName, "BloomFilterOffHeapMemoryUsed");
+//            offHeapMemUsedInBytes += (Long) probe.getColumnFamilyMetric(keyspaceName, cfName, "IndexSummaryOffHeapMemoryUsed");
+            offHeapMemUsedInBytes += (Long) probe.getColumnFamilyMetric(keyspaceName, cfName, "CompressionMetadataOffHeapMemoryUsed");
+        }
+
+        return offHeapMemUsedInBytes / (1024d * 1024);
+    }
+}
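A minimal sketch of exercising the new probe directly, assuming a local node with the default ports the new options fall back to (JMX 7199, REST 10000); the class name RestProbeSmoke is invented for illustration. Note that every RESTNodeProbe constructor still dials JMX first through the NodeProbe super-constructor, so a reachable JMX agent remains a prerequisite even on the REST path:

    import org.apache.cassandra.tools.RESTNodeProbe;

    public class RestProbeSmoke {
        public static void main(String[] args) throws Exception {
            // Same construction connect() performs for --protocol rest without credentials.
            RESTNodeProbe probe = new RESTNodeProbe("127.0.0.1", 7199, 10000);
            // Each getter below is served by the Scylla REST API rather than JMX:
            System.out.println("cluster : " + probe.getClusterName());   // /storage_service/cluster_name
            System.out.println("host id : " + probe.getLocalHostId());   // /storage_service/hostid/local
            System.out.println("load    : " + probe.getLoadString());    // /storage_service/load
            System.out.println("gossip  : " + probe.isGossipRunning());  // /storage_service/gossiping
        }
    }

This is the same path `nodetool info` now takes by default; `nodetool --protocol jmx info` falls back to the legacy JMX-only probe.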