Merged
2 changes: 1 addition & 1 deletion lib/coffeescript/coffeescript.js

(Generated file; diff not rendered by default.)

59 changes: 38 additions & 21 deletions lib/coffeescript/helpers.js

(Generated file; diff not rendered by default.)

19 changes: 10 additions & 9 deletions lib/coffeescript/lexer.js

(Generated file; diff not rendered by default.)

2 changes: 1 addition & 1 deletion src/coffeescript.coffee
@@ -193,7 +193,7 @@ lexer = new Lexer

# The real Lexer produces a generic stream of tokens. This object provides a
# thin wrapper around it, compatible with the Jison API. We can then pass it
# directly as a "Jison lexer".
# directly as a “Jison lexer.”
parser.lexer =
  lex: ->
    token = parser.tokens[@pos++]
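The hunk above shows only the start of the wrapper. For background on the comment being edited (this sketch is not part of the diff, and everything beyond the lex, setInput and upcomingInput names is an assumption): Jison drives a lexer by calling setInput once and then lex() repeatedly, expecting each call to return the next token's tag and to expose its value and location as side effects. A minimal wrapper over an already-produced token array might look roughly like this:

# Hypothetical, simplified sketch; not the actual wrapper in this file.
parser.lexer =
  setInput: (tokens) ->
    @tokens = tokens
    @pos = 0
  lex: ->
    token = @tokens[@pos++]
    if token
      [tag, @yytext, @yylloc] = token
      tag
    else
      ''
  upcomingInput: -> ''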
38 changes: 24 additions & 14 deletions src/helpers.coffee
@@ -111,30 +111,40 @@ buildLocationData = (first, last) ->
buildLocationHash = (loc) ->
  "#{loc.first_line}x#{loc.first_column}-#{loc.last_line}x#{loc.last_column}"

# Build a dictionary of extra token properties organized by tokens’ locations
# used as lookup hashes.
buildTokenDataDictionary = (parserState) ->
  tokenData = {}
  for token in parserState.parser.tokens when token.comments
    tokenHash = buildLocationHash token[2]
    # Multiple tokens might have the same location hash, such as the generated
    # `JS` tokens added at the start or end of the token stream to hold
    # comments that start or end a file.
    tokenData[tokenHash] ?= {}
    if token.comments # `comments` is always an array.
      # For “overlapping” tokens, that is tokens with the same location data
      # and therefore matching `tokenHash`es, merge the comments from both/all
      # tokens together into one array, even if there are duplicate comments;
      # they will get sorted out later.
      (tokenData[tokenHash].comments ?= []).push token.comments...
  tokenData

# This returns a function which takes an object as a parameter, and if that
# object is an AST node, updates that object's locationData.
# The object is returned either way.
exports.addDataToNode = (parserState, first, last) ->
  (obj) ->
    # Add location data
    # Add location data.
    if obj?.updateLocationDataIfMissing? and first?
      obj.updateLocationDataIfMissing buildLocationData(first, last)

    # Add comments data
    unless parserState.tokenComments
      parserState.tokenComments = {}
      for token in parserState.parser.tokens when token.comments
        tokenHash = buildLocationHash token[2]
        unless parserState.tokenComments[tokenHash]?
          parserState.tokenComments[tokenHash] = token.comments
        else
          parserState.tokenComments[tokenHash].push token.comments...

    # Add comments, building the dictionary of token data if it hasn’t been
    # built yet.
    parserState.tokenData ?= buildTokenDataDictionary parserState
    if obj.locationData?
      objHash = buildLocationHash obj.locationData
      if parserState.tokenComments[objHash]?
        attachCommentsToNode parserState.tokenComments[objHash], obj

      if parserState.tokenData[objHash]?.comments?
        attachCommentsToNode parserState.tokenData[objHash].comments, obj
    obj

exports.attachCommentsToNode = attachCommentsToNode = (comments, node) ->
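To illustrate the new lookup flow (a minimal sketch under assumptions: buildTokenDataDictionary and buildLocationHash are module-internal helpers rather than exports, and the token and comment shapes are simplified): a token is an array of tag, value and location data, any comments hanging off it are merged into a dictionary entry keyed by its location hash, and addDataToNode later looks nodes up by the same hash.

# Hypothetical data, for illustration only.
loc = first_line: 0, first_column: 0, last_line: 0, last_column: 0
parserState = parser: tokens: [['JS', '', loc]]
parserState.parser.tokens[0].comments = [content: 'A leading comment']

hash = buildLocationHash loc                 # => '0x0-0x0'
dictionary = buildTokenDataDictionary parserState
dictionary[hash].comments                    # => [{content: 'A leading comment'}]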
16 changes: 8 additions & 8 deletions src/lexer.coffee
@@ -305,6 +305,7 @@ exports.Lexer = class Lexer
      else
        @mergeInterpolationTokens tokens, {delimiter}, (value, i) =>
          value = @formatString value, delimiter: quote
          # Remove indentation from multiline single-quoted strings.
          value = value.replace SIMPLE_STRING_OMIT, (match, offset) ->
            if (i is 0 and offset is 0) or
                (i is $ and offset + match.length is value.length)
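The added comment documents existing behavior rather than changing it. For orientation (this example is not part of the diff), a single-quoted string broken across lines has each line break and the indentation that follows it collapsed into a single space:

# Not part of the diff; illustrates what SIMPLE_STRING_OMIT strips.
greeting = 'Hello there,
  and welcome aboard.'
# greeting is 'Hello there, and welcome aboard.'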
@@ -780,7 +781,7 @@ exports.Lexer = class Lexer
      rest = str[interpolationOffset..]
      {tokens: nested, index} =
        new Lexer().tokenize rest, line: line, column: column, untilBalanced: on
      # Account for the `#` in `#{`
      # Account for the `#` in `#{`.
      index += interpolationOffset

      braceInterpolator = str[index - 1] is '}'
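The reworded comment refers to offset bookkeeping around the nested Lexer: it only ever sees rest, which begins partway into str, so the index it reports has to be shifted by interpolationOffset before it can be used against str. A purely illustrative walkthrough, with invented values:

# Hypothetical values, for illustration only.
str = 'hello #{name}!'                  # single quotes, so this is just raw text
interpolationOffset = 7                 # where the `{` of `#{` sits inside `str`
rest = str[interpolationOffset..]       # '{name}!', what the nested Lexer sees
indexInRest = 6                         # just past the balanced `}` within `rest`
indexInStr = indexInRest + interpolationOffset   # 13, just past the `}` within `str`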
@@ -877,7 +878,7 @@ exports.Lexer = class Lexer
          locationToken = token
          tokensToPush = [token]
      if @tokens.length > firstIndex
        # Create a 0-length "+" token.
        # Create a 0-length `+` token.
        plusToken = @token '+', '+'
        plusToken[2] =
          first_line: locationToken[2].first_line
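For orientation (not part of the diff, and the breakdown is simplified): the lexer represents an interpolated string as its pieces joined by these synthetic + tokens, which occupy no source characters of their own, which is why the lines above copy location data from a neighboring token.

# Not part of the diff; a simplified view of why the zero-width `+` tokens exist.
name = 'world'
greeting = "Hello, #{name}!"
# Conceptually lexed as string pieces and the interpolated expression,
# joined by `+` tokens that cover zero characters of source:
#   "Hello, "   +   name   +   "!"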
@@ -946,19 +947,19 @@ exports.Lexer = class Lexer

  # Same as `token`, except this just returns the token without adding it
  # to the results.
  makeToken: (tag, value, offsetInChunk = 0, length = value.length) ->
  makeToken: (tag, value, offsetInChunk = 0, length = value.length, origin) ->
Collaborator comment: @GeoffreyBooth ah, I see. Once this is merged I can merge it into preserve-string-literal and convert this to an option arg that gets passed along by token().

    locationData = {}
    [locationData.first_line, locationData.first_column] =
      @getLineAndColumnFromChunk offsetInChunk

    # Use length - 1 for the final offset - we're supplying the last_line and the last_column,
    # so if last_column == first_column, then we're looking at a character of length 1.
    # Use length - 1 for the final offset - we’re supplying the last_line and the last_column,
    # so if last_column == first_column, then we’re looking at a character of length 1.
    lastCharacter = if length > 0 then (length - 1) else 0
    [locationData.last_line, locationData.last_column] =
      @getLineAndColumnFromChunk offsetInChunk + lastCharacter

    token = [tag, value, locationData]

    token.origin = origin if origin
    token
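To make the length - 1 arithmetic above concrete (the chunk, offsets and tokens below are invented for illustration): the last offset handed to getLineAndColumnFromChunk points at the token's final character, so a one-character token ends on the same column it starts on.

# Hypothetical chunk, all on line 0:   foo = bar
# Token `=` (length 1) starting at offset 4:
#   first_line: 0, first_column: 4
#   lastCharacter = 1 - 1 = 0
#   last_line: 0, last_column: 4      # same column, a character of length 1
# Token `foo` (length 3) starting at offset 0:
#   lastCharacter = 3 - 1 = 2
#   last_line: 0, last_column: 2      # columns 0 through 2 cover three characters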

  # Add a token to the results.
@@ -968,8 +969,7 @@ exports.Lexer = class Lexer
  #
  # Returns the new token.
  token: (tag, value, offsetInChunk, length, origin) ->
    token = @makeToken tag, value, offsetInChunk, length
    token.origin = origin if origin
    token = @makeToken tag, value, offsetInChunk, length, origin
    @tokens.push token
    token
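A sketch of how the threaded origin parameter changes call sites (the tag, variable names and origin shape are illustrative assumptions, not lines from this diff): callers that used to set origin on the returned token can now hand it to token, which forwards it to makeToken.

# Hypothetical call site, for illustration only.
origin = ['STRING', rawValue, locationData]

# Before: attach `origin` after the fact.
#   stringToken = @token 'STRING', formattedValue, offsetInChunk, literalLength
#   stringToken.origin = origin

# After: pass it through, and `makeToken` sets `token.origin` when present.
@token 'STRING', formattedValue, offsetInChunk, literalLength, origin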
