From 3aeeceab983f5d98fea13cf6cc0d2d0d807419ba Mon Sep 17 00:00:00 2001
From: Thanabodee Charoenpiriyakij
Date: Thu, 30 Nov 2017 21:40:31 +0700
Subject: [PATCH] Support more C string literals

---
 grammars/c.cson    |  2 +-
 spec/c-spec.coffee | 16 ++++++++++++++++
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/grammars/c.cson b/grammars/c.cson
index c04ce5e..38e7cf3 100644
--- a/grammars/c.cson
+++ b/grammars/c.cson
@@ -642,7 +642,7 @@
   'strings':
     'patterns': [
       {
-        'begin': '"'
+        'begin': '(?:u8|u|U|L)?"'
         'beginCaptures':
           '0':
             'name': 'punctuation.definition.string.begin.c'
diff --git a/spec/c-spec.coffee b/spec/c-spec.coffee
index 7a1ff5e..056f237 100644
--- a/spec/c-spec.coffee
+++ b/spec/c-spec.coffee
@@ -114,6 +114,22 @@ describe "Language-C", ->
     expect(tokens[1]).toEqual value: '%', scopes: ['source.c', 'string.quoted.double.c']
     expect(tokens[2]).toEqual value: '"', scopes: ['source.c', 'string.quoted.double.c', 'punctuation.definition.string.end.c']
 
+  it "tokenizes utf-8 string literal", ->
+    {tokens} = grammar.tokenizeLine 'char[] s = u8"this is utf-8 string literal"'
+    expect(tokens[6]).toEqual value: 'u8"', scopes: ['source.c', 'string.quoted.double.c', 'punctuation.definition.string.begin.c']
+
+  it "tokenizes 16-bit string literal", ->
+    {tokens} = grammar.tokenizeLine 'char16_t[] s = u"this is 16-bit string literal"'
+    expect(tokens[6]).toEqual value: 'u"', scopes: ['source.c', 'string.quoted.double.c', 'punctuation.definition.string.begin.c']
+
+  it "tokenizes 32-bit string literal", ->
+    {tokens} = grammar.tokenizeLine 'char32_t[] s = U"this is 32-bit string literal"'
+    expect(tokens[6]).toEqual value: 'U"', scopes: ['source.c', 'string.quoted.double.c', 'punctuation.definition.string.begin.c']
+
+  it "tokenizes wide string literal", ->
+    {tokens} = grammar.tokenizeLine 'wchar_t[] s = L"this is wide string literal"'
+    expect(tokens[6]).toEqual value: 'L"', scopes: ['source.c', 'string.quoted.double.c', 'punctuation.definition.string.begin.c']
+
   describe "comments", ->
     it "tokenizes them", ->
       {tokens} = grammar.tokenizeLine '/**/'