// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";

package google.privacy.dlp.v2;

import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Dlp.V2";
option go_package = "google.golang.org/genproto/googleapis/privacy/dlp/v2;dlp";
option java_multiple_files = true;
option java_outer_classname = "DlpStorage";
option java_package = "com.google.privacy.dlp.v2";
option php_namespace = "Google\\Cloud\\Dlp\\V2";
// Type of information detected by the API.
message InfoType {
  // Name of the information type. Either a name of your choosing when
  // creating a CustomInfoType, or one of the names listed
  // at https://cloud.google.com/dlp/docs/infotypes-reference when specifying
  // a built-in type. InfoType names should conform to the pattern
  // [a-zA-Z0-9_]{1,64}.
  string name = 1;
}
// A reference to a StoredInfoType to use with scanning.
message StoredType {
  // Resource name of the requested `StoredInfoType`, for example
  // `organizations/433245324/storedInfoTypes/432452342` or
  // `projects/project-id/storedInfoTypes/432452342`.
  string name = 1;

  // Timestamp indicating when the version of the `StoredInfoType` used for
  // inspection was created. Output-only field, populated by the system.
  google.protobuf.Timestamp create_time = 2;
}
// Categorization of results based on how likely they are to represent a match,
// based on the number of elements they contain which imply a match.
enum Likelihood {
  // Default value; same as POSSIBLE.
  LIKELIHOOD_UNSPECIFIED = 0;

  // Few matching elements.
  VERY_UNLIKELY = 1;

  UNLIKELY = 2;

  // Some matching elements.
  POSSIBLE = 3;

  LIKELY = 4;

  // Many matching elements.
  VERY_LIKELY = 5;
}
// Custom information type provided by the user. Used to find domain-specific
// sensitive information configurable to the data in question.
message CustomInfoType {
  // Custom information type based on a dictionary of words or phrases. This can
  // be used to match sensitive information specific to the data, such as a list
  // of employee IDs or job titles.
  //
  // Dictionary words are case-insensitive and all characters other than letters
  // and digits in the unicode [Basic Multilingual
  // Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane)
  // will be replaced with whitespace when scanning for matches, so the
  // dictionary phrase "Sam Johnson" will match all three phrases "sam johnson",
  // "Sam, Johnson", and "Sam (Johnson)". Additionally, the characters
  // surrounding any match must be of a different type than the adjacent
  // characters within the word, so letters must be next to non-letters and
  // digits next to non-digits. For example, the dictionary word "jen" will
  // match the first three letters of the text "jen123" but will return no
  // matches for "jennifer".
  //
  // Dictionary words containing a large number of characters that are not
  // letters or digits may result in unexpected findings because such characters
  // are treated as whitespace. The
  // [limits](https://cloud.google.com/dlp/limits) page contains details about
  // the size limits of dictionaries. For dictionaries that do not fit within
  // these constraints, consider using `LargeCustomDictionaryConfig` in the
  // `StoredInfoType` API.
  message Dictionary {
    // Message defining a list of words or phrases to search for in the data.
    message WordList {
      // Words or phrases defining the dictionary. The dictionary must contain
      // at least one phrase and every phrase must contain at least 2 characters
      // that are letters or digits. [required]
      repeated string words = 1;
    }

    oneof source {
      // List of words or phrases to search for.
      WordList word_list = 1;

      // Newline-delimited file of words in Cloud Storage. Only a single file
      // is accepted.
      CloudStoragePath cloud_storage_path = 3;
    }
  }

  // Message defining a custom regular expression.
  message Regex {
    // Pattern defining the regular expression. Its syntax
    // (https://github.com/google/re2/wiki/Syntax) can be found under the
    // google/re2 repository on GitHub.
    string pattern = 1;

    // The index of the submatch to extract as findings. When not
    // specified, the entire match is returned. No more than 3 may be included.
    repeated int32 group_indexes = 2;
  }

  // Message for detecting output from deidentification transformations
  // such as
  // [`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2/organizations.deidentifyTemplates#cryptoreplaceffxfpeconfig).
  // These types of transformations are
  // those that perform pseudonymization, thereby producing a "surrogate" as
  // output. This should be used in conjunction with a field on the
  // transformation such as `surrogate_info_type`. This CustomInfoType does
  // not support the use of `detection_rules`.
  message SurrogateType {}

  // Rule for modifying a CustomInfoType to alter behavior under certain
  // circumstances, depending on the specific details of the rule. Not supported
  // for the `surrogate_type` custom info type.
  message DetectionRule {
    // Message for specifying a window around a finding to apply a detection
    // rule.
    message Proximity {
      // Number of characters before the finding to consider.
      int32 window_before = 1;

      // Number of characters after the finding to consider.
      int32 window_after = 2;
    }

    // Message for specifying an adjustment to the likelihood of a finding as
    // part of a detection rule.
    message LikelihoodAdjustment {
      oneof adjustment {
        // Set the likelihood of a finding to a fixed value.
        Likelihood fixed_likelihood = 1;

        // Increase or decrease the likelihood by the specified number of
        // levels. For example, if a finding would be `POSSIBLE` without the
        // detection rule and `relative_likelihood` is 1, then it is upgraded to
        // `LIKELY`, while a value of -1 would downgrade it to `UNLIKELY`.
        // Likelihood may never drop below `VERY_UNLIKELY` or exceed
        // `VERY_LIKELY`, so applying an adjustment of 1 followed by an
        // adjustment of -1 when base likelihood is `VERY_LIKELY` will result in
        // a final likelihood of `LIKELY`.
        int32 relative_likelihood = 2;
      }
    }

    // The rule that adjusts the likelihood of findings within a certain
    // proximity of hotwords.
    message HotwordRule {
      // Regular expression pattern defining what qualifies as a hotword.
      Regex hotword_regex = 1;

      // Proximity of the finding within which the entire hotword must reside.
      // The total length of the window cannot exceed 1000 characters. Note that
      // the finding itself will be included in the window, so that hotwords may
      // be used to match substrings of the finding itself. For example, the
      // certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be
      // adjusted upwards if the area code is known to be the local area code of
      // a company office using the hotword regex "\(xxx\)", where "xxx"
      // is the area code in question.
      Proximity proximity = 2;

      // Likelihood adjustment to apply to all matching findings.
      LikelihoodAdjustment likelihood_adjustment = 3;
    }

    oneof type {
      // Hotword-based detection rule.
      HotwordRule hotword_rule = 1;
    }
  }

  // Type of exclusion to apply to findings of this CustomInfoType.
  enum ExclusionType {
    // A finding of this custom info type will not be excluded from results.
    EXCLUSION_TYPE_UNSPECIFIED = 0;

    // A finding of this custom info type will be excluded from final results,
    // but can still affect rule execution.
    EXCLUSION_TYPE_EXCLUDE = 1;
  }

  // CustomInfoType can either be a new infoType, or an extension of built-in
  // infoType, when the name matches one of existing infoTypes and that infoType
  // is specified in `InspectContent.info_types` field. Specifying the latter
  // adds findings to the one detected by the system. If built-in info type is
  // not specified in `InspectContent.info_types` list then the name is treated
  // as a custom info type.
  InfoType info_type = 1;

  // Likelihood to return for this CustomInfoType. This base value can be
  // altered by a detection rule if the finding meets the criteria specified by
  // the rule. Defaults to `VERY_LIKELY` if not specified.
  Likelihood likelihood = 6;

  oneof type {
    // A list of phrases to detect as a CustomInfoType.
    Dictionary dictionary = 2;

    // Regular expression based CustomInfoType.
    Regex regex = 3;

    // Message for detecting output from deidentification transformations that
    // support reversing.
    SurrogateType surrogate_type = 4;

    // Load an existing `StoredInfoType` resource for use in
    // `InspectDataSource`. Not currently supported in `InspectContent`.
    StoredType stored_type = 5;
  }

  // Set of detection rules to apply to all findings of this CustomInfoType.
  // Rules are applied in order that they are specified. Not supported for the
  // `surrogate_type` CustomInfoType.
  repeated DetectionRule detection_rules = 7;

  // If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding
  // to be returned. It still can be used for rules matching.
  ExclusionType exclusion_type = 8;
}
// General identifier of a data field in a storage service.
message FieldId {
  // Name describing the field.
  string name = 1;
}
// Datastore partition ID.
// A partition ID identifies a grouping of entities. The grouping is always
// by project and namespace, however the namespace ID may be empty.
//
// A partition ID contains several dimensions:
// project ID and namespace ID.
message PartitionId {
  // The ID of the project to which the entities belong.
  string project_id = 2;

  // If not empty, the ID of the namespace to which the entities belong.
  string namespace_id = 4;
}
// A representation of a Datastore kind.
message KindExpression {
  // The name of the kind.
  string name = 1;
}
// Options defining a data set within Google Cloud Datastore.
message DatastoreOptions {
  // A partition ID identifies a grouping of entities. The grouping is always
  // by project and namespace, however the namespace ID may be empty.
  PartitionId partition_id = 1;

  // The kind to process.
  KindExpression kind = 2;
}
// Message representing a set of files in a Cloud Storage bucket. Regular
// expressions are used to allow fine-grained control over which files in the
// bucket to include.
//
// Included files are those that match at least one item in `include_regex` and
// do not match any items in `exclude_regex`. Note that a file that matches
// items from both lists will _not_ be included. For a match to occur, the
// entire file path (i.e., everything in the url after the bucket name) must
// match the regular expression.
//
// For example, given the input `{bucket_name: "mybucket", include_regex:
// ["directory1/.*"], exclude_regex:
// ["directory1/excluded.*"]}`:
//
// * `gs://mybucket/directory1/myfile` will be included
// * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches
// across `/`)
// * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the
// full path doesn't match any items in `include_regex`)
// * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path
// matches an item in `exclude_regex`)
//
// If `include_regex` is left empty, it will match all files by default
// (this is equivalent to setting `include_regex: [".*"]`).
//
// Some other common use cases:
//
// * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all
// files in `mybucket` except for .pdf files
// * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will
// include all files directly under `gs://mybucket/directory/`, without matching
// across `/`
message CloudStorageRegexFileSet {
  // The name of a Cloud Storage bucket. Required.
  string bucket_name = 1;

  // A list of regular expressions matching file paths to include. All files in
  // the bucket that match at least one of these regular expressions will be
  // included in the set of files, except for those that also match an item in
  // `exclude_regex`. Leaving this field empty will match all files by default
  // (this is equivalent to including `.*` in the list).
  //
  // Regular expressions use RE2
  // [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found
  // under the google/re2 repository on GitHub.
  repeated string include_regex = 2;

  // A list of regular expressions matching file paths to exclude. All files in
  // the bucket that match at least one of these regular expressions will be
  // excluded from the scan.
  //
  // Regular expressions use RE2
  // [syntax](https://github.com/google/re2/wiki/Syntax); a guide can be found
  // under the google/re2 repository on GitHub.
  repeated string exclude_regex = 3;
}
// Options defining a file or a set of files within a Google Cloud Storage
// bucket.
message CloudStorageOptions {
  // Set of files to scan.
  message FileSet {
    // The Cloud Storage url of the file(s) to scan, in the format
    // `gs://<bucket>/<path>`. Trailing wildcard in the path is allowed.
    //
    // If the url ends in a trailing slash, the bucket or directory represented
    // by the url will be scanned non-recursively (content in sub-directories
    // will not be scanned). This means that `gs://mybucket/` is equivalent to
    // `gs://mybucket/*`, and `gs://mybucket/directory/` is equivalent to
    // `gs://mybucket/directory/*`.
    //
    // Exactly one of `url` or `regex_file_set` must be set.
    string url = 1;

    // The regex-filtered set of files to scan. Exactly one of `url` or
    // `regex_file_set` must be set.
    CloudStorageRegexFileSet regex_file_set = 2;
  }

  // How to sample bytes if not all bytes are scanned. Meaningful only when used
  // in conjunction with bytes_limit_per_file. If not specified, scanning would
  // start from the top.
  enum SampleMethod {
    SAMPLE_METHOD_UNSPECIFIED = 0;

    // Scan from the top (default).
    TOP = 1;

    // For each file larger than bytes_limit_per_file, randomly pick the offset
    // to start scanning. The scanned bytes are contiguous.
    RANDOM_START = 2;
  }

  // The set of one or more files to scan.
  FileSet file_set = 1;

  // Max number of bytes to scan from a file. If a scanned file's size is bigger
  // than this value then the rest of the bytes are omitted. Only one
  // of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
  int64 bytes_limit_per_file = 4;

  // Max percentage of bytes to scan from a file. The rest are omitted. The
  // number of bytes scanned is rounded down. Must be between 0 and 100,
  // inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one
  // of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
  int32 bytes_limit_per_file_percent = 8;

  // List of file type groups to include in the scan.
  // If empty, all files are scanned and available data format processors
  // are applied. In addition, the binary content of the selected files
  // is always scanned as well.
  repeated FileType file_types = 5;

  SampleMethod sample_method = 6;

  // Limits the number of files to scan to this percentage of the input FileSet.
  // Number of files scanned is rounded down. Must be between 0 and 100,
  // inclusively. Both 0 and 100 means no limit. Defaults to 0.
  int32 files_limit_percent = 7;
}
// Message representing a set of files in Cloud Storage.
message CloudStorageFileSet {
  // The url, in the format `gs://<bucket>/<path>`. Trailing wildcard in the
  // path is allowed.
  string url = 1;
}
// Message representing a single file or path in Cloud Storage.
message CloudStoragePath {
  // A url representing a file or path (no wildcards) in Cloud Storage.
  // Example: gs://[BUCKET_NAME]/dictionary.txt
  string path = 1;
}
// Options defining BigQuery table and row identifiers.
message BigQueryOptions {
  // How to sample rows if not all rows are scanned. Meaningful only when used
  // in conjunction with either rows_limit or rows_limit_percent. If not
  // specified, scanning would start from the top.
  enum SampleMethod {
    SAMPLE_METHOD_UNSPECIFIED = 0;

    // Scan from the top (default).
    TOP = 1;

    // Randomly pick the row to start scanning. The scanned rows are contiguous.
    RANDOM_START = 2;
  }

  // Complete BigQuery table reference.
  BigQueryTable table_reference = 1;

  // References to fields uniquely identifying rows within the table.
  // Nested fields in the format, like `person.birthdate.year`, are allowed.
  repeated FieldId identifying_fields = 2;

  // Max number of rows to scan. If the table has more rows than this value, the
  // rest of the rows are omitted. If not set, or if set to 0, all rows will be
  // scanned. Only one of rows_limit and rows_limit_percent can be specified.
  // Cannot be used in conjunction with TimespanConfig.
  int64 rows_limit = 3;

  // Max percentage of rows to scan. The rest are omitted. The number of rows
  // scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and
  // 100 means no limit. Defaults to 0. Only one of rows_limit and
  // rows_limit_percent can be specified. Cannot be used in conjunction with
  // TimespanConfig.
  int32 rows_limit_percent = 6;

  SampleMethod sample_method = 4;

  // References to fields excluded from scanning. This allows you to skip
  // inspection of entire columns which you know have no findings.
  repeated FieldId excluded_fields = 5;
}
// Shared message indicating Cloud storage type.
message StorageConfig {
  // Configuration of the timespan of the items to include in scanning.
  // Currently only supported when inspecting Google Cloud Storage and BigQuery.
  message TimespanConfig {
    // Exclude files or rows older than this value.
    google.protobuf.Timestamp start_time = 1;

    // Exclude files or rows newer than this value.
    // If set to zero, no upper time limit is applied.
    google.protobuf.Timestamp end_time = 2;

    // Specification of the field containing the timestamp of scanned items.
    // Used for data sources like Datastore or BigQuery.
    // If not specified for BigQuery, table last modification timestamp
    // is checked against given time span.
    // The valid data types of the timestamp field are:
    // for BigQuery - timestamp, date, datetime;
    // for Datastore - timestamp.
    // Datastore entity will be scanned if the timestamp property does not exist
    // or its value is empty or invalid.
    FieldId timestamp_field = 3;

    // When the job is started by a JobTrigger we will automatically figure out
    // a valid start_time to avoid scanning files that have not been modified
    // since the last time the JobTrigger executed. This will be based on the
    // time of the execution of the last run of the JobTrigger.
    bool enable_auto_population_of_timespan_config = 4;
  }

  oneof type {
    // Google Cloud Datastore options specification.
    DatastoreOptions datastore_options = 2;

    // Google Cloud Storage options specification.
    CloudStorageOptions cloud_storage_options = 3;

    // BigQuery options specification.
    BigQueryOptions big_query_options = 4;
  }

  TimespanConfig timespan_config = 6;
}
// Definitions of file type groups to scan.
enum FileType {
  // Includes all files.
  FILE_TYPE_UNSPECIFIED = 0;

  // Includes all file extensions not covered by text file types.
  BINARY_FILE = 1;

  // Included file extensions:
  // asc, brf, c, cc, cpp, csv, cxx, c++, cs, css, dart, eml, go, h, hh, hpp,
  // hxx, h++, hs, html, htm, shtml, shtm, xhtml, lhs, ini, java, js, json,
  // ocaml, md, mkd, markdown, m, ml, mli, pl, pm, php, phtml, pht, py, pyw,
  // rb, rbw, rs, rc, scala, sh, sql, tex, txt, text, tsv, vcard, vcs, wml,
  // xml, xsl, xsd, yml, yaml.
  TEXT_FILE = 2;

  // Included file extensions:
  // bmp, gif, jpg, jpeg, jpe, png.
  // bytes_limit_per_file has no effect on image files.
  IMAGE = 3;
}
// Row key for identifying a record in BigQuery table.
message BigQueryKey {
  // Complete BigQuery table reference.
  BigQueryTable table_reference = 1;

  // Absolute number of the row from the beginning of the table at the time
  // of scanning.
  int64 row_number = 2;
}
// Record key for a finding in Cloud Datastore.
message DatastoreKey {
  // Datastore entity key.
  Key entity_key = 1;
}
// A unique identifier for a Datastore entity.
// If a key's partition ID or any of its path kinds or names are
// reserved/read-only, the key is reserved/read-only.
// A reserved/read-only key is forbidden in certain documented contexts.
message Key {
  // A (kind, ID/name) pair used to construct a key path.
  //
  // If either name or ID is set, the element is complete.
  // If neither is set, the element is incomplete.
  message PathElement {
    // The kind of the entity.
    // A kind matching regex `__.*__` is reserved/read-only.
    // A kind must not contain more than 1500 bytes when UTF-8 encoded.
    string kind = 1;

    // The type of ID.
    oneof id_type {
      // The auto-allocated ID of the entity.
      // Never equal to zero. Values less than zero are discouraged and may not
      // be supported in the future.
      int64 id = 2;

      // The name of the entity.
      // A name matching regex `__.*__` is reserved/read-only.
      // A name must not be more than 1500 bytes when UTF-8 encoded.
      string name = 3;
    }
  }

  // Entities are partitioned into subsets, currently identified by a project
  // ID and namespace ID.
  // Queries are scoped to a single partition.
  PartitionId partition_id = 1;

  // The entity path.
  // An entity path consists of one or more elements composed of a kind and a
  // string or numerical identifier, which identify entities. The first
  // element identifies a _root entity_, the second element identifies
  // a _child_ of the root entity, the third element identifies a child of the
  // second entity, and so forth. The entities identified by all prefixes of
  // the path are called the element's _ancestors_.
  //
  // A path can never be empty, and a path can have at most 100 elements.
  repeated PathElement path = 2;
}
// Message for a unique key indicating a record that contains a finding.
message RecordKey {
  oneof type {
    DatastoreKey datastore_key = 2;

    BigQueryKey big_query_key = 3;
  }

  // Values of identifying columns in the given row. Order of values matches
  // the order of field identifiers specified in the scanning request.
  repeated string id_values = 5;
}
// Message defining the location of a BigQuery table. A table is uniquely
// identified by its project_id, dataset_id, and table_name. Within a query
// a table is often referenced with a string in the format of:
// `<project_id>:<dataset_id>.<table_id>` or
// `<project_id>.<dataset_id>.<table_id>`.
message BigQueryTable {
  // The Google Cloud Platform project ID of the project containing the table.
  // If omitted, project ID is inferred from the API call.
  string project_id = 1;

  // Dataset ID of the table.
  string dataset_id = 2;

  // Name of the table.
  string table_id = 3;
}
// Message defining a field of a BigQuery table.
message BigQueryField {
  // Source table of the field.
  BigQueryTable table = 1;

  // Designated field in the BigQuery table.
  FieldId field = 2;
}
628 // An entity in a dataset is a field or set of fields that correspond to a
629 // single person. For example, in medical records the `EntityId` might be a
630 // patient identifier, or for financial records it might be an account
631 // identifier. This message is used when generalizations or analysis must take
632 // into account that multiple rows correspond to the same entity.
634 // Composite key indicating which field contains the entity identifier.