|
1 | | -// *** Auto-generated from librdkafka branch v1.0.0-RC7 *** - do not modify manually. |
| 1 | +// *** Auto-generated from librdkafka branch v1.0.0 *** - do not modify manually. |
2 | 2 | // |
3 | 3 | // Copyright 2018 Confluent Inc. |
4 | 4 | // |
|
24 | 24 |
|
25 | 25 | namespace Confluent.Kafka |
26 | 26 | { |
| 27 | + /// <summary> |
| 28 | + /// Partitioner enum values |
| 29 | + /// </summary> |
| 30 | + public enum Partitioner |
| 31 | + { |
| 32 | + /// <summary> |
| 33 | + /// Random |
| 34 | + /// </summary> |
| 35 | + Random, |
| 36 | + |
| 37 | + /// <summary> |
| 38 | + /// Consistent |
| 39 | + /// </summary> |
| 40 | + Consistent, |
| 41 | + |
| 42 | + /// <summary> |
| 43 | + /// ConsistentRandom |
| 44 | + /// </summary> |
| 45 | + ConsistentRandom, |
| 46 | + |
| 47 | + /// <summary> |
| 48 | + /// Murmur2 |
| 49 | + /// </summary> |
| 50 | + Murmur2, |
| 51 | + |
| 52 | + /// <summary> |
| 53 | + /// Murmur2Random |
| 54 | + /// </summary> |
| 55 | + Murmur2Random |
| 56 | + } |
| 57 | + |
| 58 | + /// <summary> |
| 59 | + /// AutoOffsetReset enum values |
| 60 | + /// </summary> |
| 61 | + public enum AutoOffsetReset |
| 62 | + { |
| 63 | + /// <summary> |
| 64 | + /// Latest |
| 65 | + /// </summary> |
| 66 | + Latest, |
| 67 | + |
| 68 | + /// <summary> |
| 69 | + /// Earliest |
| 70 | + /// </summary> |
| 71 | + Earliest, |
| 72 | + |
| 73 | + /// <summary> |
| 74 | + /// Error |
| 75 | + /// </summary> |
| 76 | + Error |
| 77 | + } |
| 78 | + |
27 | 79 | /// <summary> |
28 | 80 | /// BrokerAddressFamily enum values |
29 | 81 | /// </summary> |
@@ -88,55 +140,34 @@ public enum PartitionAssignmentStrategy |
88 | 140 | } |
89 | 141 |
|
90 | 142 | /// <summary> |
91 | | - /// Partitioner enum values |
| 143 | + /// CompressionType enum values |
92 | 144 | /// </summary> |
93 | | - public enum Partitioner |
| 145 | + public enum CompressionType |
94 | 146 | { |
95 | 147 | /// <summary> |
96 | | - /// Random |
97 | | - /// </summary> |
98 | | - Random, |
99 | | - |
100 | | - /// <summary> |
101 | | - /// Consistent |
102 | | - /// </summary> |
103 | | - Consistent, |
104 | | - |
105 | | - /// <summary> |
106 | | - /// ConsistentRandom |
107 | | - /// </summary> |
108 | | - ConsistentRandom, |
109 | | - |
110 | | - /// <summary> |
111 | | - /// Murmur2 |
| 148 | + /// None |
112 | 149 | /// </summary> |
113 | | - Murmur2, |
| 150 | + None, |
114 | 151 |
|
115 | 152 | /// <summary> |
116 | | - /// Murmur2Random |
| 153 | + /// Gzip |
117 | 154 | /// </summary> |
118 | | - Murmur2Random |
119 | | - } |
| 155 | + Gzip, |
120 | 156 |
|
121 | | - /// <summary> |
122 | | - /// AutoOffsetReset enum values |
123 | | - /// </summary> |
124 | | - public enum AutoOffsetReset |
125 | | - { |
126 | 157 | /// <summary> |
127 | | - /// Latest |
| 158 | + /// Snappy |
128 | 159 | /// </summary> |
129 | | - Latest, |
| 160 | + Snappy, |
130 | 161 |
|
131 | 162 | /// <summary> |
132 | | - /// Earliest |
| 163 | + /// Lz4 |
133 | 164 | /// </summary> |
134 | | - Earliest, |
| 165 | + Lz4, |
135 | 166 |
|
136 | 167 | /// <summary> |
137 | | - /// Error |
| 168 | + /// Zstd |
138 | 169 | /// </summary> |
139 | | - Error |
| 170 | + Zstd |
140 | 171 | } |
141 | 172 |
|
142 | 173 | /// <summary> |
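The relocated Partitioner, AutoOffsetReset and CompressionType enums above map one-to-one onto the librdkafka string values documented on the matching config properties further down in this diff (`consistent_random`, `murmur2_random`, `lz4`, and so on); the conversion itself is presumably handled by the SetObject/GetEnum helpers used by the generated properties. As a hypothetical illustration of that mapping only — this helper is not part of the generated API:

    using System;
    using Confluent.Kafka;

    static class PartitionerNames
    {
        // Hypothetical helper: the librdkafka string for each Partitioner value,
        // as listed in the `partitioner` property documentation in this diff.
        public static string ToConfigString(Partitioner p)
        {
            switch (p)
            {
                case Partitioner.Random:           return "random";
                case Partitioner.Consistent:       return "consistent";
                case Partitioner.ConsistentRandom: return "consistent_random";
                case Partitioner.Murmur2:          return "murmur2";
                case Partitioner.Murmur2Random:    return "murmur2_random";
                default: throw new ArgumentOutOfRangeException(nameof(p));
            }
        }
    }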
@@ -416,14 +447,6 @@ public Acks? Acks |
416 | 447 | /// </summary> |
417 | 448 | public BrokerAddressFamily? BrokerAddressFamily { get { return (BrokerAddressFamily?)GetEnum(typeof(BrokerAddressFamily), "broker.address.family"); } set { this.SetObject("broker.address.family", value); } } |
418 | 449 |
|
419 | | - /// <summary> |
420 | | - /// When enabled the client will only connect to brokers it needs to communicate with. When disabled the client will maintain connections to all brokers in the cluster. |
421 | | - /// |
422 | | - /// default: true |
423 | | - /// importance: medium |
424 | | - /// </summary> |
425 | | - public bool? EnableSparseConnections { get { return GetBool("enable.sparse.connections"); } set { this.SetObject("enable.sparse.connections", value); } } |
426 | | - |
427 | 450 | /// <summary> |
428 | 451 | /// The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately. |
429 | 452 | /// |
@@ -745,6 +768,38 @@ public ProducerConfig() {} |
745 | 768 | /// </summary> |
746 | 769 | public string DeliveryReportFields { get { return Get("dotnet.producer.delivery.report.fields"); } set { this.SetObject("dotnet.producer.delivery.report.fields", value.ToString()); } } |
747 | 770 |
|
| 771 | + /// <summary> |
| 772 | + /// The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. |
| 773 | + /// |
| 774 | + /// default: 5000 |
| 775 | + /// importance: medium |
| 776 | + /// </summary> |
| 777 | + public int? RequestTimeoutMs { get { return GetInt("request.timeout.ms"); } set { this.SetObject("request.timeout.ms", value); } } |
| 778 | + |
| 779 | + /// <summary> |
| 780 | + /// Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. |
| 781 | + /// |
| 782 | + /// default: 300000 |
| 783 | + /// importance: high |
| 784 | + /// </summary> |
| 785 | + public int? MessageTimeoutMs { get { return GetInt("message.timeout.ms"); } set { this.SetObject("message.timeout.ms", value); } } |
| 786 | + |
| 787 | + /// <summary> |
| 788 | + /// Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.). |
| 789 | + /// |
| 790 | + /// default: consistent_random |
| 791 | + /// importance: high |
| 792 | + /// </summary> |
| 793 | + public Partitioner? Partitioner { get { return (Partitioner?)GetEnum(typeof(Partitioner), "partitioner"); } set { this.SetObject("partitioner", value); } } |
| 794 | + |
| 795 | + /// <summary> |
| 796 | + /// Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. |
| 797 | + /// |
| 798 | + /// default: -1 |
| 799 | + /// importance: medium |
| 800 | + /// </summary> |
| 801 | + public int? CompressionLevel { get { return GetInt("compression.level"); } set { this.SetObject("compression.level", value); } } |
| 802 | + |
748 | 803 | /// <summary> |
749 | 804 | /// When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
750 | 805 | /// |
@@ -810,44 +865,20 @@ public ProducerConfig() {} |
810 | 865 | public int? QueueBufferingBackpressureThreshold { get { return GetInt("queue.buffering.backpressure.threshold"); } set { this.SetObject("queue.buffering.backpressure.threshold", value); } } |
811 | 866 |
|
812 | 867 | /// <summary> |
813 | | - /// Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. |
814 | | - /// |
815 | | - /// default: 10000 |
816 | | - /// importance: medium |
817 | | - /// </summary> |
818 | | - public int? BatchNumMessages { get { return GetInt("batch.num.messages"); } set { this.SetObject("batch.num.messages", value); } } |
819 | | - |
820 | | - /// <summary> |
821 | | - /// The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. |
| 868 | + /// compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. |
822 | 869 | /// |
823 | | - /// default: 5000 |
| 870 | + /// default: none |
824 | 871 | /// importance: medium |
825 | 872 | /// </summary> |
826 | | - public int? RequestTimeoutMs { get { return GetInt("request.timeout.ms"); } set { this.SetObject("request.timeout.ms", value); } } |
827 | | - |
828 | | - /// <summary> |
829 | | - /// Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. |
830 | | - /// |
831 | | - /// default: 300000 |
832 | | - /// importance: high |
833 | | - /// </summary> |
834 | | - public int? MessageTimeoutMs { get { return GetInt("message.timeout.ms"); } set { this.SetObject("message.timeout.ms", value); } } |
| 873 | + public CompressionType? CompressionType { get { return (CompressionType?)GetEnum(typeof(CompressionType), "compression.type"); } set { this.SetObject("compression.type", value); } } |
835 | 874 |
|
836 | 875 | /// <summary> |
837 | | - /// Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.). |
838 | | - /// |
839 | | - /// default: consistent_random |
840 | | - /// importance: high |
841 | | - /// </summary> |
842 | | - public Partitioner? Partitioner { get { return (Partitioner?)GetEnum(typeof(Partitioner), "partitioner"); } set { this.SetObject("partitioner", value); } } |
843 | | - |
844 | | - /// <summary> |
845 | | - /// Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. |
| 876 | + /// Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes. |
846 | 877 | /// |
847 | | - /// default: -1 |
| 878 | + /// default: 10000 |
848 | 879 | /// importance: medium |
849 | 880 | /// </summary> |
850 | | - public int? CompressionLevel { get { return GetInt("compression.level"); } set { this.SetObject("compression.level", value); } } |
| 881 | + public int? BatchNumMessages { get { return GetInt("batch.num.messages"); } set { this.SetObject("batch.num.messages", value); } } |
851 | 882 |
|
852 | 883 | } |
853 | 884 |
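With these changes, the producer-side settings added or relocated above can be set through the strongly typed ProducerConfig rather than raw string keys. A minimal usage sketch, assuming the generated class shown in this diff; the broker address is a placeholder and BootstrapServers is the standard ClientConfig property, which is not part of this hunk:

    using Confluent.Kafka;

    class ProducerConfigExample
    {
        static ProducerConfig Build() => new ProducerConfig
        {
            BootstrapServers = "localhost:9092",      // assumed ClientConfig property; placeholder address
            Partitioner = Partitioner.Murmur2Random,  // Java-producer-compatible key hashing
            CompressionType = CompressionType.Lz4,    // `compression.type`
            CompressionLevel = 6,                     // within the lz4 range [0-12]
            MessageTimeoutMs = 60000,                 // `message.timeout.ms`: local delivery limit
            RequestTimeoutMs = 5000                   // `request.timeout.ms`: broker-enforced ack timeout
        };
    }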
|
@@ -888,6 +919,14 @@ public ConsumerConfig() {} |
888 | 919 | /// </summary> |
889 | 920 | public string ConsumeResultFields { set { this.SetObject("dotnet.consumer.consume.result.fields", value); } } |
890 | 921 |
|
| 922 | + /// <summary> |
| 923 | + /// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. |
| 924 | + /// |
| 925 | + /// default: largest |
| 926 | + /// importance: high |
| 927 | + /// </summary> |
| 928 | + public AutoOffsetReset? AutoOffsetReset { get { return (AutoOffsetReset?)GetEnum(typeof(AutoOffsetReset), "auto.offset.reset"); } set { this.SetObject("auto.offset.reset", value); } } |
| 929 | + |
891 | 930 | /// <summary> |
892 | 931 | /// Client group id string. All clients sharing the same group.id belong to the same group. |
893 | 932 | /// |
@@ -1040,14 +1079,6 @@ public ConsumerConfig() {} |
1040 | 1079 | /// </summary> |
1041 | 1080 | public bool? CheckCrcs { get { return GetBool("check.crcs"); } set { this.SetObject("check.crcs", value); } } |
1042 | 1081 |
|
1043 | | - /// <summary> |
1044 | | - /// Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'. |
1045 | | - /// |
1046 | | - /// default: largest |
1047 | | - /// importance: high |
1048 | | - /// </summary> |
1049 | | - public AutoOffsetReset? AutoOffsetReset { get { return (AutoOffsetReset?)GetEnum(typeof(AutoOffsetReset), "auto.offset.reset"); } set { this.SetObject("auto.offset.reset", value); } } |
1050 | | - |
1051 | 1082 | } |
1052 | 1083 |
|
1053 | 1084 | } |
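On the consumer side, the relocated AutoOffsetReset property gives `auto.offset.reset` the same strongly typed treatment. A minimal sketch under the same assumptions (placeholder broker address; GroupId corresponds to the `group.id` setting documented above, with the property name assumed):

    using Confluent.Kafka;

    class ConsumerConfigExample
    {
        static ConsumerConfig Build() => new ConsumerConfig
        {
            BootstrapServers = "localhost:9092",        // assumed ClientConfig property; placeholder address
            GroupId = "example-group",                  // `group.id`: clients sharing it form one group
            AutoOffsetReset = AutoOffsetReset.Earliest  // reset to the smallest offset when none is stored
        };
    }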