forked from googleapis/google-cloudevents
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdata.proto
More file actions
1394 lines (1107 loc) · 45.1 KB
/
data.proto
File metadata and controls
1394 lines (1107 loc) · 45.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.events.cloud.visionai.v1;
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Events.Protobuf.Cloud.VisionAI.V1";
option php_namespace = "Google\\Events\\Cloud\\VisionAI\\V1";
option ruby_package = "Google::Events::Cloud::VisionAI::V1";
// Message about annotations on a Vision AI stream resource.
message StreamAnnotation {
  // The geometry payload of the annotation; which member is set is
  // determined by `type`.
  oneof annotation_payload {
    // Annotation for type ACTIVE_ZONE.
    NormalizedPolygon active_zone = 5;
    // Annotation for type CROSSING_LINE.
    NormalizedPolyline crossing_line = 6;
  }

  // ID of the annotation. It must be unique when used in a certain context.
  // For example, among all the annotations of one input stream of a Vision AI
  // application.
  string id = 1;
  // User-friendly name for the annotation.
  string display_name = 2;
  // The Vision AI stream resource name this annotation applies to.
  string source_stream = 3;
  // The actual type of the Annotation.
  StreamAnnotationType type = 4;
}
// Normalized Polygon.
message NormalizedPolygon {
  // The bounding polygon normalized vertices. The top left corner of the
  // image will be [0, 0].
  repeated NormalizedVertex normalized_vertices = 1;
}
// Normalized Polyline, which represents a curve consisting of connected
// straight-line segments.
message NormalizedPolyline {
  // A sequence of vertices connected by straight lines.
  repeated NormalizedVertex normalized_vertices = 1;
}
// A vertex represents a 2D point in the image.
// NOTE: the normalized vertex coordinates are relative to the original image
// and range from 0 to 1.
message NormalizedVertex {
  // X coordinate (normalized, 0 to 1).
  float x = 1;
  // Y coordinate (normalized, 0 to 1).
  float y = 2;
}
// Enum describing all possible types of a stream annotation.
enum StreamAnnotationType {
  // Type UNSPECIFIED.
  STREAM_ANNOTATION_TYPE_UNSPECIFIED = 0;
  // An active_zone annotation defines a polygon on top of the content from an
  // image/video based stream; subsequent processing will only focus on the
  // content inside the active zone.
  STREAM_ANNOTATION_TYPE_ACTIVE_ZONE = 1;
  // A crossing_line annotation defines a polyline on top of the content from
  // an image/video based Vision AI stream; events happening across the line
  // will be captured. For example, the counts of people who go across the
  // line in the Occupancy Analytics Processor.
  STREAM_ANNOTATION_TYPE_CROSSING_LINE = 2;
}
// Message describing the Cluster object.
message Cluster {
  // The current state of the cluster.
  enum State {
    // Not set.
    STATE_UNSPECIFIED = 0;
    // The PROVISIONING state indicates the cluster is being created.
    PROVISIONING = 1;
    // The RUNNING state indicates the cluster has been created and is fully
    // usable.
    RUNNING = 2;
    // The STOPPING state indicates the cluster is being deleted.
    STOPPING = 3;
    // The ERROR state indicates the cluster is unusable. It will be
    // automatically deleted.
    ERROR = 4;
  }

  // Output only. Name of the resource.
  string name = 1;
  // Output only. The create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. The update timestamp.
  google.protobuf.Timestamp update_time = 3;
  // Labels as key value pairs.
  map<string, string> labels = 4;
  // Annotations to allow clients to store small amounts of arbitrary data.
  map<string, string> annotations = 5;
  // Output only. The DNS name of the data plane service.
  string dataplane_service_endpoint = 6;
  // Output only. The current state of the cluster.
  State state = 7;
  // Output only. The private service connection service target name.
  string psc_target = 8;
}
// The Google Cloud Storage location for the input content.
message GcsSource {
  // Required. References to Google Cloud Storage paths.
  repeated string uris = 1;
}
// Represents an actual value of an operator attribute.
message AttributeValue {
  // Attribute value. Exactly one member is set, matching the declared type
  // of the attribute.
  oneof value {
    // int.
    int64 i = 1;
    // float.
    float f = 2;
    // bool.
    bool b = 3;
    // string.
    // NOTE(review): declared as `bytes` on the wire even though the comment
    // says "string" — presumably UTF-8 text; confirm with producers before
    // relying on valid-UTF-8 contents.
    bytes s = 4;
  }
}
// Defines an Analyzer.
//
// An analyzer processes data from its input streams using the logic defined
// in the Operator that it represents. Of course, it produces data for the
// output streams declared in the Operator.
message AnalyzerDefinition {
  // The inputs to this analyzer.
  //
  // We accept input name references of the following form:
  // <analyzer-name>:<output-argument-name>
  //
  // Example:
  //
  // Suppose you had an operator named "SomeOp" that has 2 output
  // arguments, the first of which is named "foo" and the second of which is
  // named "bar", and an operator named "MyOp" that accepts 2 inputs.
  //
  // Also suppose that there is an analyzer named "some-analyzer" that is
  // running "SomeOp" and another analyzer named "my-analyzer" running "MyOp".
  //
  // To indicate that "my-analyzer" is to consume "some-analyzer"'s "foo"
  // output as its first input and "some-analyzer"'s "bar" output as its
  // second input, you can set this field to the following:
  // input = ["some-analyzer:foo", "some-analyzer:bar"]
  message StreamInput {
    // The name of the stream input (as discussed above).
    string input = 1;
  }

  // Options available for debugging purposes only.
  message DebugOptions {
    // Environment variables.
    map<string, string> environment_variables = 1;
  }

  // The name of this analyzer.
  //
  // Tentatively [a-z][a-z0-9]*(_[a-z0-9]+)*.
  string analyzer = 1;
  // The name of the operator that this analyzer runs.
  //
  // Must match the name of a supported operator.
  string operator = 2;
  // Input streams.
  repeated StreamInput inputs = 3;
  // The attribute values that this analyzer applies to the operator.
  //
  // Supply a mapping between the attribute names and the actual value you
  // wish to apply. If an attribute name is omitted, then it will take a
  // preconfigured default value.
  map<string, AttributeValue> attrs = 4;
  // Debug options.
  DebugOptions debug_options = 5;
}
// Defines a full analysis.
//
// This is a description of the overall live analytics pipeline.
// You may think of this as an edge list representation of a multigraph.
//
// This may be directly authored by a human in protobuf textformat, or it may
// be generated by a programming API (perhaps Python or JavaScript depending
// on context).
message AnalysisDefinition {
  // Analyzer definitions.
  repeated AnalyzerDefinition analyzers = 1;
}
// Message describing the status of the Process.
message RunStatus {
  // State represents the running status of the Process.
  enum State {
    // State is unspecified.
    STATE_UNSPECIFIED = 0;
    // INITIALIZING means the Process is scheduled but not yet ready to
    // handle real traffic.
    INITIALIZING = 1;
    // RUNNING means the Process is up and running and handling traffic.
    RUNNING = 2;
    // COMPLETED means the Process has completed the processing, especially
    // for the non-streaming use case.
    COMPLETED = 3;
    // FAILED means the Process failed to complete the processing.
    FAILED = 4;
    // PENDING means the Process is created but yet to be scheduled.
    PENDING = 5;
  }

  // The state of the Process.
  State state = 1;
  // The reason for entering the current state.
  string reason = 2;
}
// RunMode represents the mode to launch the Process on.
enum RunMode {
  // Mode is unspecified.
  RUN_MODE_UNSPECIFIED = 0;
  // Live mode. Meaning the Process is launched to handle a live video
  // source, and possible packet drops are expected.
  LIVE = 1;
  // Submission mode. Meaning the Process is launched to handle bounded video
  // files, with no packet drop. Completion status is tracked.
  SUBMISSION = 2;
}
// Message describing the Analysis object.
message Analysis {
  // The name of the resource.
  string name = 1;
  // Output only. The create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. The update timestamp.
  google.protobuf.Timestamp update_time = 3;
  // Labels as key value pairs.
  map<string, string> labels = 4;
  // The definition of the analysis.
  AnalysisDefinition analysis_definition = 5;
  // Map from the input parameter in the definition to the real stream.
  // E.g., suppose you had a stream source operator named "input-0" and you
  // try to receive from the real stream "stream-0". You can add the following
  // mapping: [input-0: stream-0].
  map<string, string> input_streams_mapping = 6;
  // Map from the output parameter in the definition to the real stream.
  // E.g., suppose you had a stream sink operator named "output-0" and you try
  // to send to the real stream "stream-0". You can add the following
  // mapping: [output-0: stream-0].
  map<string, string> output_streams_mapping = 7;
  // Boolean flag to indicate whether you would like to disable the ability
  // to automatically start a Process when a new event happens in the input
  // Stream. If you would like to start a Process manually, the field needs
  // to be set to true.
  bool disable_event_watch = 8;
}
// Message describing the Process object.
message Process {
  // The name of the resource.
  string name = 1;
  // Output only. The create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. The update timestamp.
  google.protobuf.Timestamp update_time = 3;
  // Required. Reference to an existing Analysis resource.
  string analysis = 4;
  // Optional. Attribute overrides of the Analyzers.
  // Format for each single override item:
  // "{analyzer_name}:{attribute_key}={value}"
  repeated string attribute_overrides = 5;
  // Optional. Status of the Process.
  RunStatus run_status = 6;
  // Optional. Run mode of the Process.
  RunMode run_mode = 7;
  // Optional. Event ID of the input/output streams.
  // This is useful when you have a StreamSource/StreamSink operator in the
  // Analysis, and you want to manually specify the Event to read from/write
  // to.
  string event_id = 8;
  // Optional. Batch ID of the Process.
  string batch_id = 9;
  // Optional. The number of retries for a process in submission mode
  // the system should try before declaring failure. By default, no retry
  // will be performed.
  int32 retry_count = 10;
}
// Message describing the Application object.
message Application {
  // Message storing the runtime information of the application.
  message ApplicationRuntimeInfo {
    // Message about output resources from the application.
    message GlobalOutputResource {
      // The full resource name of the outputted resources.
      string output_resource = 1;
      // The name of the graph node that produces the output resource name.
      // For example:
      // output_resource:
      // /projects/123/locations/us-central1/corpora/my-corpus/dataSchemas/my-schema
      // producer_node: occupancy-count
      string producer_node = 2;
      // The key of the output resource; it has to be unique within the same
      // producer node. One producer node can output several output resources;
      // the key can be used to match corresponding output resources.
      string key = 3;
    }

    // Monitoring-related configuration for an application.
    message MonitoringConfig {
      // Whether this application has monitoring enabled.
      bool enabled = 1;
    }

    // Timestamp when the engine was deployed.
    google.protobuf.Timestamp deploy_time = 1;
    // Globally created resources like warehouse dataschemas.
    repeated GlobalOutputResource global_output_resources = 3;
    // Monitoring-related configuration for this application.
    MonitoringConfig monitoring_config = 4;
  }

  // State of the Application.
  enum State {
    // The default value. This value is used if the state is omitted.
    STATE_UNSPECIFIED = 0;
    // State CREATED.
    CREATED = 1;
    // State DEPLOYING.
    DEPLOYING = 2;
    // State DEPLOYED.
    DEPLOYED = 3;
    // State UNDEPLOYING.
    UNDEPLOYING = 4;
    // State DELETED.
    DELETED = 5;
    // State ERROR.
    ERROR = 6;
    // State CREATING.
    CREATING = 7;
    // State UPDATING.
    UPDATING = 8;
    // State DELETING.
    DELETING = 9;
    // State FIXING.
    FIXING = 10;
  }

  // Billing mode of the Application.
  enum BillingMode {
    // The default value.
    BILLING_MODE_UNSPECIFIED = 0;
    // Pay-as-you-go billing mode.
    PAYG = 1;
    // Monthly billing mode.
    MONTHLY = 2;
  }

  // Name of the resource.
  string name = 1;
  // Output only. [Output only] Create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. [Output only] Update timestamp.
  google.protobuf.Timestamp update_time = 3;
  // Labels as key value pairs.
  map<string, string> labels = 4;
  // Required. A user friendly display name for the solution.
  string display_name = 5;
  // A description for this application.
  string description = 6;
  // Application graph configuration.
  ApplicationConfigs application_configs = 7;
  // Output only. Application graph runtime info. Only exists when the
  // application state equals DEPLOYED.
  ApplicationRuntimeInfo runtime_info = 8;
  // Output only. State of the application.
  State state = 9;
  // Billing mode of the application.
  BillingMode billing_mode = 12;
}
// Message storing the graph of the application.
message ApplicationConfigs {
  // A list of nodes in the application graph.
  repeated Node nodes = 1;
}
// Message describing a node object.
message Node {
  // Message describing one edge pointing into a node.
  message InputEdge {
    // The name of the parent node.
    string parent_node = 1;
    // The connected output artifact of the parent node.
    // It can be omitted if the target processor only has 1 output artifact.
    string parent_output_channel = 2;
    // The connected input channel of the current node's processor.
    // It can be omitted if the target processor only has 1 input channel.
    string connected_input_channel = 3;
  }

  // Configuration for routing node output to Vision AI Streams.
  oneof stream_output_config {
    // By default, the output of the node will only be available to
    // downstream nodes. To consume the direct output from the application
    // node, the output must be sent to Vision AI Streams at first.
    //
    // By setting output_all_output_channels_to_stream to true, App Platform
    // will automatically send all the outputs of the current node to Vision
    // AI Stream resources (one stream per output channel). The output stream
    // resource will be created by App Platform automatically during
    // deployment and deleted after application un-deployment.
    // Note that this config applies to all the Application Instances.
    //
    // The output stream can be overridden at the instance level by
    // configuring the `output_resources` section of the Instance resource.
    // `producer_node` should be the current node, `output_resource_binding`
    // should be the output channel name (or leave it blank if there is only
    // 1 output channel of the processor) and `output_resource` should be the
    // target output stream.
    bool output_all_output_channels_to_stream = 6;
  }

  // Required. A unique name for the node.
  string name = 1;
  // A user friendly display name for the node.
  string display_name = 2;
  // Node config.
  ProcessorConfig node_config = 3;
  // Processor name referring to the chosen processor resource.
  string processor = 4;
  // Parent node. An input node should not have a parent node. For V1
  // Alpha1/Beta only the media warehouse node can have multiple parents;
  // other types of nodes will only have one parent.
  repeated InputEdge parents = 5;
}
// Message describing the Draft object.
message Draft {
  // Name of the resource.
  string name = 1;
  // Output only. [Output only] Create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. [Output only] Update timestamp.
  google.protobuf.Timestamp update_time = 7;
  // Labels as key value pairs.
  map<string, string> labels = 3;
  // Required. A user friendly display name for the solution.
  string display_name = 4;
  // A description for this application.
  string description = 5;
  // The draft application configs which haven't been updated to an
  // application.
  ApplicationConfigs draft_application_configs = 6;
}
// Message describing the Processor object.
// Next ID: 19
message Processor {
  // Type of the Processor.
  enum ProcessorType {
    // Processor Type UNSPECIFIED.
    PROCESSOR_TYPE_UNSPECIFIED = 0;
    // Processor Type PRETRAINED.
    // A pretrained processor is developed by Vision AI App Platform with
    // state-of-the-art vision data processing functionality, like occupancy
    // counting or person blur. A pretrained processor is usually publicly
    // available.
    PRETRAINED = 1;
    // Processor Type CUSTOM.
    // Custom processors are specialized processors which are either uploaded
    // by customers or imported from other GCP platforms (for example Vertex
    // AI). A custom processor is only visible to its creator.
    CUSTOM = 2;
    // Processor Type CONNECTOR.
    // Connector processors are special processors which perform I/O for the
    // application; they do not process the data but either deliver the data
    // to other processors or receive data from other processors.
    CONNECTOR = 3;
  }

  // State of the Processor.
  enum ProcessorState {
    // Unspecified Processor state.
    PROCESSOR_STATE_UNSPECIFIED = 0;
    // Processor is being created (not ready for use).
    CREATING = 1;
    // Processor is created and ready for use.
    ACTIVE = 2;
    // Processor is being deleted (not ready for use).
    DELETING = 3;
    // Processor deleted or creation failed.
    FAILED = 4;
  }

  // Name of the resource.
  string name = 1;
  // Output only. [Output only] Create timestamp.
  google.protobuf.Timestamp create_time = 2;
  // Output only. [Output only] Update timestamp.
  google.protobuf.Timestamp update_time = 3;
  // Labels as key value pairs.
  map<string, string> labels = 4;
  // Required. A user friendly display name for the processor.
  string display_name = 5;
  // Illustrative sentences describing the functionality of the processor.
  string description = 10;
  // Output only. Processor Type.
  ProcessorType processor_type = 6;
  // Model Type.
  ModelType model_type = 13;
  // Source info for a customer created processor.
  CustomProcessorSourceInfo custom_processor_source_info = 7;
  // Output only. State of the Processor.
  ProcessorState state = 8;
  // Output only. [Output only] The input / output specifications of a
  // processor; each type of processor has fixed input / output specs which
  // cannot be altered by the customer.
  ProcessorIOSpec processor_io_spec = 11;
  // Output only. The corresponding configuration can be used in the
  // Application to customize the behavior of the processor.
  string configuration_typeurl = 14;
  // The StreamAnnotationType values this processor supports.
  // NOTE(review): no upstream comment on this field; description inferred
  // from the field name and type — confirm against the API reference.
  repeated StreamAnnotationType supported_annotation_types = 15;
  // Indicates if the processor supports post processing.
  bool supports_post_processing = 17;
}
// Message describing the input / output specifications of a processor.
message ProcessorIOSpec {
  // Message for input channel specification.
  message GraphInputChannelSpec {
    // The name of the current input channel.
    string name = 1;
    // The data types of the current input channel.
    // When this field has more than 1 value, it means this input channel can
    // be connected to either of these different data types.
    DataType data_type = 2;
    // If specified, only those detailed data types can be connected to the
    // processor. For example, a jpeg stream for MEDIA, or a PredictionResult
    // proto for the PROTO type. If unspecified, then any proto is accepted.
    repeated string accepted_data_type_uris = 5;
    // Whether the current input channel is required by the processor.
    // For example, for a processor with required video input and optional
    // audio input, if the video input is missing, the application will be
    // rejected, while the audio input can be missing as long as the video
    // input exists.
    bool required = 3;
    // How many input edges can be connected to this input channel. 0 means
    // unlimited.
    int64 max_connection_allowed = 4;
  }

  // Message for output channel specification.
  message GraphOutputChannelSpec {
    // The name of the current output channel.
    string name = 1;
    // The data type of the current output channel.
    DataType data_type = 2;
    // The detailed data type URI of the current output channel.
    // NOTE(review): no upstream comment on this field; description inferred
    // from the sibling `accepted_data_type_uris` field — confirm.
    string data_type_uri = 3;
  }

  // Message for instance resource channel specification.
  // External resources are virtual nodes which are not expressed in the
  // application graph. Each processor expresses its out-graph spec, so the
  // customer is able to override the external source or destinations to the
  // out-of-graph resources.
  // NOTE(review): the last sentence above was truncated in the original
  // source; the intended ending should be confirmed against the upstream API
  // definition.
  message InstanceResourceInputBindingSpec {
    // How the acceptable resource is specified.
    oneof resource_type {
      // The configuration proto that includes the Googleapis resources. I.e.
      // type.googleapis.com/google.cloud.vision.v1.StreamWithAnnotation
      string config_type_uri = 2;
      // The direct type url of the Googleapis resource. i.e.
      // type.googleapis.com/google.cloud.vision.v1.Asset
      string resource_type_uri = 3;
    }

    // Name of the input binding, unique within the processor.
    string name = 1;
  }

  // Message for instance resource output binding specification.
  message InstanceResourceOutputBindingSpec {
    // Name of the output binding, unique within the processor.
    string name = 1;
    // The resource type uri of the acceptable output resource.
    string resource_type_uri = 2;
    // Whether the output resource needs to be explicitly set in the
    // instance. If it is false, the processor will automatically generate it
    // if required.
    bool explicit = 3;
  }

  // For processors with input_channel_specs, the processor must be
  // explicitly connected to another processor.
  repeated GraphInputChannelSpec graph_input_channel_specs = 3;
  // The output artifact specifications for the current processor.
  repeated GraphOutputChannelSpec graph_output_channel_specs = 4;
  // The input resource that needs to be fed from the application instance.
  repeated InstanceResourceInputBindingSpec
      instance_resource_input_binding_specs = 5;
  // The output resource that the processor will generate per instance.
  // Other than the explicitly listed output bindings here, all the
  // processors' GraphOutputChannels can be bound to a stream resource. The
  // bind name then is the same as the GraphOutputChannel's name.
  repeated InstanceResourceOutputBindingSpec
      instance_resource_output_binding_specs = 6;
}
// Describes the source info for a custom processor.
message CustomProcessorSourceInfo {
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  message ModelSchema {
    // Cloud Storage location to a YAML file that defines the format of a
    // single instance used in prediction and explanation requests.
    GcsSource instances_schema = 1;
    // Cloud Storage location to a YAML file that defines the prediction and
    // explanation parameters.
    GcsSource parameters_schema = 2;
    // Cloud Storage location to a YAML file that defines the format of a
    // single prediction or explanation.
    GcsSource predictions_schema = 3;
  }

  // Source type of the imported custom processor.
  enum SourceType {
    // Source type unspecified.
    SOURCE_TYPE_UNSPECIFIED = 0;
    // Custom processors coming from the Vertex AutoML product.
    VERTEX_AUTOML = 1;
    // Custom processors coming from general custom models from Vertex.
    VERTEX_CUSTOM = 2;
    // Source for the Product Recognizer.
    PRODUCT_RECOGNIZER = 3;
  }

  // The path where App Platform loads the artifacts for the custom
  // processor.
  oneof artifact_path {
    // The resource name of the original model hosted in the Vertex AI
    // platform.
    string vertex_model = 2;
  }

  // The original product which holds the custom processor's functionality.
  SourceType source_type = 1;
  // Output only. Additional info related to the imported custom processor.
  // Data is filled in by App Platform during the processor creation.
  map<string, string> additional_info = 4;
  // Model schema files which specify the signature of the model.
  // For VERTEX_CUSTOM models, the instances schema is required.
  // If the instances schema is not specified during the processor creation,
  // VisionAI Platform will try to get it from Vertex; if it doesn't exist,
  // the creation will fail.
  ModelSchema model_schema = 5;
}
// Configuration for a processor node in the application graph.
// Next ID: 29
message ProcessorConfig {
  // Exactly one config is set, matching the processor type of the node.
  oneof processor_config {
    // Configs of stream input processor.
    VideoStreamInputConfig video_stream_input_config = 9;
    // Config of AI-enabled input devices.
    AIEnabledDevicesInputConfig ai_enabled_devices_input_config = 20;
    // Configs of media warehouse processor.
    MediaWarehouseConfig media_warehouse_config = 10;
    // Configs of person blur processor.
    PersonBlurConfig person_blur_config = 11;
    // Configs of occupancy count processor.
    OccupancyCountConfig occupancy_count_config = 12;
    // Configs of Person Vehicle Detection processor.
    PersonVehicleDetectionConfig person_vehicle_detection_config = 15;
    // Configs of Vertex AutoML vision processor.
    VertexAutoMLVisionConfig vertex_automl_vision_config = 13;
    // Configs of Vertex AutoML video processor.
    VertexAutoMLVideoConfig vertex_automl_video_config = 14;
    // Configs of Vertex Custom processor.
    VertexCustomConfig vertex_custom_config = 17;
    // Configs of General Object Detection processor.
    GeneralObjectDetectionConfig general_object_detection_config = 18;
    // Configs of BigQuery processor.
    BigQueryConfig big_query_config = 19;
    // Configs of the Personal Protective Equipment Detection processor.
    PersonalProtectiveEquipmentDetectionConfig
        personal_protective_equipment_detection_config = 22;
  }
}
// Message describing a Vision AI stream with application specific
// annotations. All the StreamAnnotation objects inside this message MUST
// have unique ids.
message StreamWithAnnotation {
  // Message describing annotations specific to an application node.
  message NodeAnnotation {
    // The node name of the application graph.
    string node = 1;
    // The node specific stream annotations.
    repeated StreamAnnotation annotations = 2;
  }

  // Vision AI Stream resource name.
  string stream = 1;
  // Annotations that will be applied to the whole application.
  repeated StreamAnnotation application_annotations = 2;
  // Annotations that will be applied to the specific node of the
  // application. If the same type of annotation is applied to both the
  // application and a node, the node annotation will be added in addition to
  // the global application one.
  // For example, if there is one active zone annotation for the whole
  // application and one active zone annotation for the Occupancy Analytics
  // processor, then the Occupancy Analytics processor will have two active
  // zones defined.
  repeated NodeAnnotation node_annotations = 3;
}
// Message describing Video Stream Input Config.
// This message should only be used as a placeholder for the
// builtin:stream-input processor; actual stream binding should be specified
// using the corresponding API.
message VideoStreamInputConfig {
  // Vision AI Stream resource names used as input.
  // NOTE(review): no upstream comment on this field; description inferred
  // from the message name — confirm against the API reference.
  repeated string streams = 1;
  // Input streams together with their application-specific annotations.
  // NOTE(review): no upstream comment on this field; description inferred
  // from the StreamWithAnnotation type — confirm against the API reference.
  repeated StreamWithAnnotation streams_with_annotation = 2;
}
// Message describing AI-enabled Devices Input Config.
// Currently carries no fields; it acts as a marker configuration.
message AIEnabledDevicesInputConfig {}
// Message describing MediaWarehouseConfig.
message MediaWarehouseConfig {
  // Resource name of the Media Warehouse corpus.
  // Format:
  // projects/${project_id}/locations/${location_id}/corpora/${corpus_id}
  string corpus = 1;
  // Deprecated.
  string region = 2;
  // The duration for which all media assets, associated metadata, and search
  // documents can exist.
  google.protobuf.Duration ttl = 3;
}
// Message describing PersonBlurConfig.
message PersonBlurConfig {
  // Type of Person Blur.
  enum PersonBlurType {
    // PersonBlur Type UNSPECIFIED.
    PERSON_BLUR_TYPE_UNSPECIFIED = 0;
    // Blur type: full occlusion.
    // NOTE(review): the value name misspells "occlusion"; it cannot be
    // renamed without breaking generated code and JSON encoding.
    FULL_OCCULUSION = 1;
    // Blur type: blur filter.
    BLUR_FILTER = 2;
  }

  // Person blur type.
  PersonBlurType person_blur_type = 1;
  // Whether to blur only faces rather than the whole object in the
  // processor.
  bool faces_only = 2;
}
// Message describing OccupancyCountConfig.
message OccupancyCountConfig {
  // Whether to count the appearances of people; output counts have 'people'
  // as the key.
  bool enable_people_counting = 1;
  // Whether to count the appearances of vehicles; output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;
  // Whether to track each individual object's loitering time inside the
  // scene or a specific zone.
  bool enable_dwelling_time_tracking = 3;
}
// Message describing PersonVehicleDetectionConfig.
message PersonVehicleDetectionConfig {
  // At least one of the enable_people_counting and enable_vehicle_counting
  // fields must be set to true.
  // Whether to count the appearances of people; output counts have 'people'
  // as the key.
  bool enable_people_counting = 1;
  // Whether to count the appearances of vehicles; output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;
}
// Message describing PersonalProtectiveEquipmentDetectionConfig.
message PersonalProtectiveEquipmentDetectionConfig {
  // Whether to enable face coverage detection.
  bool enable_face_coverage_detection = 1;
  // Whether to enable head coverage detection.
  bool enable_head_coverage_detection = 2;
  // Whether to enable hands coverage detection.
  bool enable_hands_coverage_detection = 3;
}
// Message of configurations for the General Object Detection processor.
// Currently carries no fields; it acts as a marker configuration.
message GeneralObjectDetectionConfig {}
// Message of configurations for BigQuery processor.
message BigQueryConfig {
// BigQuery table resource for Vision AI Platform to ingest annotations to.
string table = 1;
// Data Schema
// By default, Vision AI Application will try to write annotations to the
// target BigQuery table using the following schema:
//
// ingestion_time: TIMESTAMP, the ingestion time of the original data.
//
// application: STRING, name of the application which produces the annotation.
//
// instance: STRING, Id of the instance which produces the annotation.
//
// node: STRING, name of the application graph node which produces the
// annotation.
//
// annotation: STRING or JSON, the actual annotation protobuf will be
// converted to json string with bytes field as 64 encoded string. It can be
// written to both String or Json type column.
//
// To forward annotation data to an existing BigQuery table, customer needs to
// make sure the compatibility of the schema.
// The map maps application node name to its corresponding cloud function
// endpoint to transform the annotations directly to the
// google.cloud.bigquery.storage.v1.AppendRowsRequest (only avro_rows or
// proto_rows should be set). If configured, annotations produced by
// corresponding application node will sent to the Cloud Function at first
// before be forwarded to BigQuery.
//
// If the default table schema doesn't fit, customer is able to transform the
// annotation output from Vision AI Application to arbitrary BigQuery table
// schema with CloudFunction.
// * The cloud function will receive AppPlatformCloudFunctionRequest where
// the annotations field will be the json format of Vision AI annotation.
// * The cloud function should return AppPlatformCloudFunctionResponse with
// AppendRowsRequest stored in the annotations field.
// * To drop the annotation, simply clear the annotations field in the
// returned AppPlatformCloudFunctionResponse.
map<string, string> cloud_function_mapping = 2;
// If true, App Platform will create the BigQuery DataSet and the
// BigQuery Table with default schema if the specified table doesn't exist.