diff --git a/src/sqlfluff/dialects/dialect_snowflake.py b/src/sqlfluff/dialects/dialect_snowflake.py
index 416b51a4e6c..e636771493a 100644
--- a/src/sqlfluff/dialects/dialect_snowflake.py
+++ b/src/sqlfluff/dialects/dialect_snowflake.py
@@ -3076,6 +3076,26 @@ class CreateStatementSegment(BaseSegment):
                 Ref("EqualsSegment"),
                 Ref("QuotedLiteralSegment"),
             ),
+            # For network policy
+            Sequence(
+                "ALLOWED_IP_LIST",
+                Ref("EqualsSegment"),
+                Bracketed(
+                    Delimited(
+                        Ref("QuotedLiteralSegment"),
+                    ),
+                ),
+            ),
+            # For network policy
+            Sequence(
+                "BLOCKED_IP_LIST",
+                Ref("EqualsSegment"),
+                Bracketed(
+                    Delimited(
+                        Ref("QuotedLiteralSegment"),
+                    ),
+                ),
+            ),
         ),
         # Next set are Storage Integration statements
         # https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html
diff --git a/src/sqlfluff/dialects/dialect_snowflake_keywords.py b/src/sqlfluff/dialects/dialect_snowflake_keywords.py
index 7afd3454cb2..f388ff39c09 100644
--- a/src/sqlfluff/dialects/dialect_snowflake_keywords.py
+++ b/src/sqlfluff/dialects/dialect_snowflake_keywords.py
@@ -102,6 +102,7 @@
 ADMIN
 AFTER
 ALLOW_DUPLICATE
+ALLOWED_IP_LIST
 ALLOW_OVERLAPPING_EXECUTION
 API
 API_INTEGRATION
@@ -145,6 +146,7 @@
 BINARY_FORMAT
 BINDING
 BLOCK
+BLOCKED_IP_LIST
 BROTLI
 BZ2
 CACHE
diff --git a/src/sqlfluff/rules/L008.py b/src/sqlfluff/rules/L008.py
index ee949240a4e..10c4b5466de 100644
--- a/src/sqlfluff/rules/L008.py
+++ b/src/sqlfluff/rules/L008.py
@@ -1,14 +1,11 @@
 """Implementation of Rule L008."""
-from typing import Optional, Tuple
+from typing import Optional
 
-from sqlfluff.core.parser import WhitespaceSegment
-
-from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
+from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.utils.functional import sp, FunctionalContext
-from sqlfluff.core.parser.segments.base import BaseSegment
+from sqlfluff.utils.reflow.sequence import ReflowSequence
 
 
 @document_groups
 @document_fix_compatible
@@ -43,71 +40,19 @@ class Rule_L008(BaseRule):
     groups = ("all", "core")
     crawl_behaviour = SegmentSeekerCrawler({"comma"})
 
-    def _get_subsequent_whitespace(
-        self,
-        context,
-    ) -> Tuple[Optional[BaseSegment], Optional[BaseSegment]]:
-        """Search forwards through the raw segments for subsequent whitespace.
-
-        Return a tuple of both the trailing whitespace segment and the
-        first non-whitespace segment discovered.
-        """
-        # Get all raw segments. "raw_segments" is appropriate as the
-        # only segments we can care about are comma, whitespace,
-        # newline, and comment, which are all raw. Using the
-        # raw_segments allows us to account for possible unexpected
-        # parse tree structures resulting from other rule fixes.
-        raw_segments = FunctionalContext(context).raw_segments
-        # Start after the current comma within the list. Get all the
-        # following whitespace.
-        following_segments = raw_segments.select(
-            loop_while=sp.or_(sp.is_meta(), sp.is_type("whitespace")),
-            start_seg=context.segment,
-        )
-        subsequent_whitespace = following_segments.last(sp.is_type("whitespace"))
-        try:
-            return (
-                subsequent_whitespace[0] if subsequent_whitespace else None,
-                raw_segments[
-                    raw_segments.index(context.segment) + len(following_segments) + 1
-                ],
-            )
-        except IndexError:
-            # If we find ourselves here it's all whitespace (or nothing) to the
-            # end of the file. This can only happen in bigquery (see
-            # test_pass_bigquery_trailing_comma).
-            return subsequent_whitespace, None
-
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        # We only care about commas.
-        assert context.segment.is_type("comma")
-
-        # Get subsequent whitespace segment and the first non-whitespace segment.
-        subsequent_whitespace, first_non_whitespace = self._get_subsequent_whitespace(
-            context
-        )
-
-        if (
-            not subsequent_whitespace
-            and (first_non_whitespace is not None)
-            and (not first_non_whitespace.is_type("newline"))
-        ):
-            # No trailing whitespace and not followed by a newline,
-            # therefore create a whitespace after the comma.
-            return LintResult(
-                anchor=first_non_whitespace,
-                fixes=[LintFix.create_after(context.segment, [WhitespaceSegment()])],
-            )
-        elif (
-            subsequent_whitespace
-            and (subsequent_whitespace.raw != " ")
-            and (first_non_whitespace is not None)
-            and (not first_non_whitespace.is_comment)
-        ):
-            # Excess trailing whitespace therefore edit to only be one space long.
-            return LintResult(
-                anchor=subsequent_whitespace,
-                fixes=[LintFix.replace(subsequent_whitespace, [WhitespaceSegment()])],
+        """Commas should be followed by a single whitespace unless followed by a comment."""
+        fixes = (
+            ReflowSequence.from_around_target(
+                context.segment,
+                context.parent_stack[0],
+                config=context.config,
+                sides="after",
             )
-
+            .respace()
+            .get_fixes()
+        )
+        if fixes:
+            # There should just be one, so just take the first.
+            return LintResult(anchor=fixes[0].anchor, fixes=fixes[:1])
         return None
diff --git a/src/sqlfluff/utils/functional/context.py b/src/sqlfluff/utils/functional/context.py
index 257d55171b3..b684e728b44 100644
--- a/src/sqlfluff/utils/functional/context.py
+++ b/src/sqlfluff/utils/functional/context.py
@@ -46,7 +46,7 @@ def raw_stack(self) -> "Segments":  # pragma: no cover
         )
 
     @property
-    def raw_segments(self):
+    def raw_segments(self):  # pragma: no cover
         """Returns a Segments object for all the raw segments in the file."""
         file_segment = self.context.parent_stack[0]
         return Segments(
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_network_policy.sql b/test/fixtures/dialects/snowflake/snowflake_create_network_policy.sql
new file mode 100644
index 00000000000..cb2cf2e57a5
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_create_network_policy.sql
@@ -0,0 +1,5 @@
+create network policy mypolicy1 allowed_ip_list=('192.168.1.0/24')
+    blocked_ip_list=('192.168.1.99');
+
+CREATE OR REPLACE NETWORK POLICY TEST_NW_POLICY
+ALLOWED_IP_LIST=('xx.xxx.xxx.xx/xx','xx.xxx.xxx.xx/xx') COMMENT='NW Policy' ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_network_policy.yml b/test/fixtures/dialects/snowflake/snowflake_create_network_policy.yml
new file mode 100644
index 00000000000..a3b8e70c18e
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_create_network_policy.yml
@@ -0,0 +1,52 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py` to generate them after adding or
+# altering SQL files.
+_hash: 9315b4c172e0bf1ae67125034ad97c68730a35c856061ebdd6fe8a0fe9aaaaf7
+file:
+- statement:
+    create_statement:
+    - keyword: create
+    - keyword: network
+    - keyword: policy
+    - object_reference:
+        naked_identifier: mypolicy1
+    - keyword: allowed_ip_list
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_literal: "'192.168.1.0/24'"
+        end_bracket: )
+    - keyword: blocked_ip_list
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_literal: "'192.168.1.99'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_statement:
+    - keyword: CREATE
+    - keyword: OR
+    - keyword: REPLACE
+    - keyword: NETWORK
+    - keyword: POLICY
+    - object_reference:
+        naked_identifier: TEST_NW_POLICY
+    - keyword: ALLOWED_IP_LIST
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - quoted_literal: "'xx.xxx.xxx.xx/xx'"
+      - comma: ','
+      - quoted_literal: "'xx.xxx.xxx.xx/xx'"
+      - end_bracket: )
+    - keyword: COMMENT
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'NW Policy'"
+- statement_terminator: ;
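
Usage sketch appended for review (not part of the patch above): with Rule L008 now delegating respacing to the reflow utilities, its behaviour can be exercised end-to-end through sqlfluff's public linting API. The SQL string and variable names below are illustrative assumptions, not taken from this diff.

    from sqlfluff.core import FluffConfig, Linter

    # A comma with no trailing whitespace: L008 should insert a single space after it.
    sql = "SELECT a,b FROM tbl\n"

    # Restrict linting to L008 so only the comma-spacing fix is applied.
    linter = Linter(config=FluffConfig(overrides={"dialect": "ansi", "rules": "L008"}))
    linted = linter.lint_string(sql, fix=True)

    fixed_sql, success = linted.fix_string()
    print(fixed_sql)  # expected: "SELECT a, b FROM tbl"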