diff --git a/compiler/src/dotty/tools/dotc/ast/NavigateAST.scala b/compiler/src/dotty/tools/dotc/ast/NavigateAST.scala index 33aa87d8e8e5..ccf02ddd88da 100644 --- a/compiler/src/dotty/tools/dotc/ast/NavigateAST.scala +++ b/compiler/src/dotty/tools/dotc/ast/NavigateAST.scala @@ -6,7 +6,9 @@ import core.Decorators._ import util.Positions._ import Trees.{MemberDef, DefTree} -/** Utility functions to go from typed to untyped ASTs */ +/** Utility functions to go from typed to untyped ASTs + * and to navigate in untyped ASTs + */ object NavigateAST { /** The untyped tree corresponding to typed tree `tree` in the compilation @@ -57,7 +59,6 @@ object NavigateAST { def untypedPath(pos: Position)(implicit ctx: Context): List[Positioned] = pathTo(pos, ctx.compilationUnit.untpdTree) - /** The reverse path from node `from` to the node that closest encloses position `pos`, * or `Nil` if no such path exists. If a non-empty path is returned it starts with * the node closest enclosing `pos` and ends with `from`. @@ -79,4 +80,28 @@ object NavigateAST { else path singlePath(from, Nil) } + + /** The subtrees of `roots` that immediately precede `offset`, from outer to inner. + * Every returned subtree `t` satisfies the following condition + * + * - t's position is a non-empty, non-synthetic interval + * - t's end position <= offset` and + * - there is no other eligible tree t' which lies entirely between t's end position and `offset`. + */ + def precedingTrees(offset: Int, roots: List[Positioned])(implicit ctx: Context): List[Positioned] = { + def precedingPath(xs: List[Any], path: List[Positioned]): List[Positioned] = xs match { + case (p: Positioned) :: xs1 + if p.pos.exists && p.pos.start < p.pos.end && p.pos.start <= offset => + val children = p.productIterator.toList.reverse + if (!p.pos.isSynthetic && p.pos.end <= offset) precedingPath(children, p :: path) + else precedingPath(children ::: xs1, path) + case (ys: List[_]) :: xs1 => + precedingPath(ys.reverse ::: xs1, path) + case _ :: xs1 => + precedingPath(xs1, path) + case _ => + path + } + precedingPath(roots.reverse, Nil) + } } \ No newline at end of file diff --git a/compiler/src/dotty/tools/dotc/core/Comments.scala b/compiler/src/dotty/tools/dotc/core/Comments.scala index 2559209c3b97..9b85b73c64b0 100644 --- a/compiler/src/dotty/tools/dotc/core/Comments.scala +++ b/compiler/src/dotty/tools/dotc/core/Comments.scala @@ -8,9 +8,11 @@ import util.SourceFile import util.Positions._ import util.CommentParsing._ import util.Property.Key +import util.Chars.{isIdentifierPart, isOperatorPart} import parsing.Parsers.Parser import reporting.diagnostic.messages.ProperDefinitionNotFound +// TODO Document. Also, this should not be in core. util or ast is better. object Comments { val ContextDoc = new Key[ContextDocstrings] @@ -19,6 +21,8 @@ object Comments { def docCtx: Option[ContextDocstrings] = ctx.property(ContextDoc) } + private final val endCommentPrefix = "// end " + /** Context for Docstrings, contains basic functionality for getting * docstrings via `Symbol` and expanding templates */ @@ -50,6 +54,19 @@ object Comments { val isDocComment = raw.startsWith("/**") + /** If comment starts with `// end `, the identifier immediately following it. + * The identifier counts if the comment ends with it, or if it followed by + * a punctuation character in '.', ';', ','. 
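+     *  For example, a comment reading `// end while` or `// end while.`
+     *  yields `while`, whereas `// end while loop` yields the empty string.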
+ */ + val endCommentString: String = + if (raw.startsWith(endCommentPrefix)) { + val contents = raw.drop(endCommentPrefix.length) + val str = contents.takeWhile(c => isIdentifierPart(c) || isOperatorPart(c)) + val follow = contents.drop(str.length) + if (follow.isEmpty || ".;,".contains(follow.head)) str else "" + } + else "" + def expand(f: String => String): Comment = new Comment(pos, f(raw)) { val isExpanded = true val usecases = self.usecases diff --git a/compiler/src/dotty/tools/dotc/parsing/CheckAlignments.scala b/compiler/src/dotty/tools/dotc/parsing/CheckAlignments.scala new file mode 100644 index 000000000000..96cd14ca18f9 --- /dev/null +++ b/compiler/src/dotty/tools/dotc/parsing/CheckAlignments.scala @@ -0,0 +1,76 @@ +package dotty.tools.dotc +package parsing + +import ast.NavigateAST.precedingTrees +import core.Comments._ +import core.Flags +import core.Decorators._ +import core.Contexts._ +import ast.{Positioned, Trees, untpd} +import util.SourceFile + +object CheckAlignments { + import untpd._ + + private def kindString(t: Positioned)(implicit ctx: Context) = t match { + case t: ValDef => if (t.mods.is(Flags.Mutable)) "var" else "val" + case _: DefDef => "def" + case t: TypeDef => if (t.isClassDef) "class" else "type" + case _: ModuleDef => "object" + case _: PackageDef => "package" + case _: If => "if" + case _: Try => "try" + case _: Match => "match" + case _: WhileDo => "while" + case _: DoWhile => "do" + case _: ForDo | _: ForYield => "for" + case _ => "" + } + + private def definedString(t: Positioned) = t match { + case t: MemberDef => t.name.toString + case _ => "" + } + + private def isEnum(t: Positioned)(implicit ctx: Context) = t match { + case t: MemberDef => t.mods.hasMod[Mod.Enum] + case _ => false + } + + def checkEndComments(source: SourceFile, endComments: List[Comment], roots: List[Tree])(implicit ctx: Context) = { + for (ec <- endComments) { + val endStr = ec.endCommentString + val column = source.column(ec.pos.start) + def misAligned(other: String): String = + i"misaligned '// end', corresponds to $other" + var warning: String = misAligned("nothing") + def combine(s1: String, s2: String) = + if (s1.isEmpty) + if (s2.isEmpty) "nothing" else s"'$s2'" + else + if (s2.isEmpty) s"'$s1'" else s"'$s1 $s2'" + def isMatch(t: Positioned) = { + endStr == kindString(t) || + endStr == definedString(t) || + endStr == "enum" && isEnum(t) + // FIXME: enum does not work yet because the preceding object + // - does not carry the enum modifier + // - has the wrong column if cases are indented, since it starts with the first case + } + def checkMatch(ts: List[Positioned]): Unit = ts match { + case t :: ts1 => + if (source.column(t.pos.start) == column) { + if (isMatch(t)) warning = "" + else { + if (kindString(t).nonEmpty) + warning = misAligned(combine(kindString(t), definedString(t))) + checkMatch(ts1) + } + } else checkMatch(ts1) + case Nil => + } + checkMatch(precedingTrees(ec.pos.start, roots)) + if (warning.nonEmpty) ctx.warning(warning, ec.pos) + } + } +} \ No newline at end of file diff --git a/compiler/src/dotty/tools/dotc/parsing/MarkupParsers.scala b/compiler/src/dotty/tools/dotc/parsing/MarkupParsers.scala index 3b091d542fba..d3ca43d97680 100644 --- a/compiler/src/dotty/tools/dotc/parsing/MarkupParsers.scala +++ b/compiler/src/dotty/tools/dotc/parsing/MarkupParsers.scala @@ -389,7 +389,7 @@ object MarkupParsers { def escapeToScala[A](op: => A, kind: String) = { xEmbeddedBlock = false - val res = saving[List[Int], A](parser.in.sepRegions, parser.in.sepRegions = _) { 
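+      // the scanner's bracketing state is now a stack of LexicalRegions
+      // (regionStack) rather than the token-based sepRegions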
+ val res = saving[List[Tokens.LexicalRegion], A](parser.in.regionStack, parser.in.regionStack = _) { parser.in resume LBRACE op } diff --git a/compiler/src/dotty/tools/dotc/parsing/Parsers.scala b/compiler/src/dotty/tools/dotc/parsing/Parsers.scala index d5e37be8676c..14c394b59aec 100644 --- a/compiler/src/dotty/tools/dotc/parsing/Parsers.scala +++ b/compiler/src/dotty/tools/dotc/parsing/Parsers.scala @@ -24,6 +24,7 @@ import Comments._ import scala.annotation.{tailrec, switch} import util.DotClass import rewrite.Rewrites.patch +import CheckAlignments.checkEndComments object Parsers { @@ -158,7 +159,8 @@ object Parsers { def isBindingIntro = canStartBindingTokens contains in.token def isTemplateIntro = templateIntroTokens contains in.token def isDclIntro = dclIntroTokens contains in.token - def isStatSeqEnd = in.token == RBRACE || in.token == EOF + def isStatSeqEnd = in.token == RBRACE || in.token == UNDENT || in.token == EOF + def isBlockStart = in.token == LBRACE || in.token == INDENT def mustStartStat = mustStartStatTokens contains in.token def isDefIntro(allowedMods: BitSet) = @@ -386,7 +388,8 @@ object Parsers { } def inParens[T](body: => T): T = enclosed(LPAREN, body) - def inBraces[T](body: => T): T = enclosed(LBRACE, body) + def inBraces[T](body: => T): T = + enclosed(if (in.token == INDENT) INDENT else LBRACE, body) def inBrackets[T](body: => T): T = enclosed(LBRACKET, body) def inDefScopeBraces[T](body: => T): T = { @@ -395,19 +398,17 @@ object Parsers { finally lastStatOffset = saved } - /** part { `separator` part } + /** part { `,' part } */ - def tokenSeparated[T](separator: Int, part: () => T): List[T] = { + def commaSeparated[T](part: () => T): List[T] = { val ts = new ListBuffer[T] += part() - while (in.token == separator) { + while (in.token == COMMA) { in.nextToken() ts += part() } ts.toList } - def commaSeparated[T](part: () => T): List[T] = tokenSeparated(COMMA, part) - /* --------- OPERAND/OPERATOR STACK --------------------------------------- */ var opStack: List[OpInfo] = Nil @@ -748,8 +749,14 @@ object Parsers { infixOps(t, canStartTypeTokens, refinedType, isType = true, notAnOperator = nme.raw.STAR) /** RefinedType ::= WithType {Annotation | [nl] Refinement} + * | Refinement */ - val refinedType: () => Tree = () => refinedTypeRest(withType()) + val refinedType: () => Tree = () => { + if (in.token == LBRACE) + atPos(in.offset) { RefinedTypeTree(EmptyTree, refinement()) } + else + refinedTypeRest(withType()) + } def refinedTypeRest(t: Tree): Tree = { newLineOptWhenFollowedBy(LBRACE) @@ -784,7 +791,6 @@ object Parsers { * | Path `.' type * | `(' ArgTypes `)' * | `_' TypeBounds - * | Refinement * | Literal */ def simpleType(): Tree = simpleTypeRest { @@ -792,8 +798,6 @@ object Parsers { atPos(in.offset) { makeTupleOrParens(inParens(argTypes(namedOK = false, wildOK = true))) } - else if (in.token == LBRACE) - atPos(in.offset) { RefinedTypeTree(EmptyTree, refinement()) } else if (isSimpleLiteral) { SingletonTypeTree(literal()) } else if (in.token == USCORE) { val start = in.skipToken() @@ -957,6 +961,7 @@ object Parsers { t } else { val t = expr() + newLineOptWhenFollowedBy(altToken) accept(altToken) t } @@ -1214,6 +1219,7 @@ object Parsers { * | `(' [ExprsInParens] `)' * | SimpleExpr `.' 
id * | SimpleExpr (TypeArgs | NamedTypeArgs) + * | SimpleExpr1 `with' BlockExpr * | SimpleExpr1 ArgumentExprs */ def simpleExpr(): Tree = { @@ -1232,7 +1238,7 @@ object Parsers { atPos(start) { Ident(pname) } case LPAREN => atPos(in.offset) { makeTupleOrParens(inParens(exprsInParensOpt())) } - case LBRACE => + case LBRACE | INDENT => canApply = false blockExpr() case NEW => @@ -1264,9 +1270,12 @@ object Parsers { case LBRACKET => val tapp = atPos(startOffset(t), in.offset) { TypeApply(t, typeArgs(namedOK = true, wildOK = false)) } simpleExprRest(tapp, canApply = true) - case LPAREN | LBRACE if canApply => + case LPAREN | LBRACE | INDENT if canApply => val app = atPos(startOffset(t), in.offset) { Apply(t, argumentExprs()) } simpleExprRest(app, canApply = true) + case WITH => + in.nextToken() + simpleExprRest(Apply(t, Block(Nil, blockExpr())), canApply = true) case USCORE => atPos(startOffset(t), in.skipToken()) { PostfixOp(t, Ident(nme.WILDCARD)) } case _ => @@ -1289,7 +1298,7 @@ object Parsers { * | [nl] BlockExpr */ def argumentExprs(): List[Tree] = - if (in.token == LBRACE) blockExpr() :: Nil else parArgumentExprs() + if (isBlockStart) blockExpr() :: Nil else parArgumentExprs() val argumentExpr = () => exprInParens() match { case a @ Assign(Ident(id), rhs) => cpy.NamedArg(a)(id, rhs) @@ -1300,7 +1309,7 @@ object Parsers { */ def argumentExprss(fn: Tree): Tree = { newLineOptWhenFollowedBy(LBRACE) - if (in.token == LPAREN || in.token == LBRACE) argumentExprss(Apply(fn, argumentExprs())) + if (in.token == LPAREN || isBlockStart) argumentExprss(Apply(fn, argumentExprs())) else fn } @@ -1370,7 +1379,7 @@ object Parsers { def forExpr(): Tree = atPos(in.skipToken()) { var wrappedEnums = true val enums = - if (in.token == LBRACE) inBraces(enumerators()) + if (isBlockStart) inBraces(enumerators()) else if (in.token == LPAREN) { val lparenOffset = in.skipToken() openParens.change(LPAREN, 1) @@ -1423,10 +1432,14 @@ object Parsers { /** Pattern ::= Pattern1 { `|' Pattern1 } */ val pattern = () => { - val pat = pattern1() - if (isIdent(nme.raw.BAR)) - atPos(startOffset(pat)) { Alternative(pat :: patternAlts()) } - else pat + in.startPattern() + try { + val pat = pattern1() + if (isIdent(nme.raw.BAR)) + atPos(startOffset(pat)) { Alternative(pat :: patternAlts()) } + else pat + } + finally in.endPattern() } def patternAlts(): List[Tree] = @@ -1992,7 +2005,7 @@ object Parsers { EmptyTree else if (scala2ProcedureSyntax(": Unit")) { tpt = scalaUnit - if (in.token == LBRACE) expr() + if (isBlockStart) expr() else EmptyTree } else { @@ -2008,7 +2021,7 @@ object Parsers { * | ConstrBlock */ def constrExpr(): Tree = - if (in.token == LBRACE) constrBlock() + if (isBlockStart) constrBlock() else Block(selfInvocation() :: Nil, Literal(Constant(()))) /** SelfInvocation ::= this ArgumentExprs {ArgumentExprs} @@ -2022,12 +2035,14 @@ object Parsers { /** ConstrBlock ::= `{' SelfInvocation {semi BlockStat} `}' */ def constrBlock(): Tree = - atPos(in.skipToken()) { - val stats = selfInvocation() :: { - if (isStatSep) { in.nextToken(); blockStatSeq() } - else Nil - } - accept(RBRACE) + atPos(in.offset) { + val stats = + inBraces { + selfInvocation() :: { + if (isStatSep) { in.nextToken(); blockStatSeq() } + else Nil + } + } Block(stats, Literal(Constant(()))) } @@ -2043,7 +2058,7 @@ object Parsers { case EQUALS => in.nextToken() TypeDef(name, lambdaAbstract(tparams, typ())).withMods(mods).setComment(in.getDocComment(start)) - case SUPERTYPE | SUBTYPE | SEMI | NEWLINE | NEWLINES | COMMA | RBRACE | EOF => + case 
SUPERTYPE | SUBTYPE | SEMI | NEWLINE | NEWLINES | COMMA | RBRACE | UNDENT | EOF => TypeDef(name, lambdaAbstract(tparams, typeBounds())).withMods(mods).setComment(in.getDocComment(start)) case _ => syntaxErrorOrIncomplete("`=', `>:', or `<:' expected") @@ -2121,8 +2136,13 @@ object Parsers { ModuleDef(name, template).withMods(mods).setComment(in.getDocComment(start)) } + def skipToBrace(): Unit = { + if (in.token == WITH) in.nextToken() + newLineOptWhenFollowedBy(LBRACE) + } + /** id ClassConstr [`extends' [ConstrApps]] - * [nl] ‘{’ EnumCaseStats ‘}’ + * [`with'] [nl] ‘{’ EnumCaseStats ‘}’ */ def enumDef(start: Offset, mods: Modifiers, enumMod: Mod): Thicket = { val point = nameStart @@ -2132,15 +2152,14 @@ object Parsers { val parents = if (in.token == EXTENDS) { in.nextToken(); - newLineOptWhenFollowedBy(LBRACE) - if (in.token == LBRACE) Nil else tokenSeparated(WITH, constrApp) + constrAppsOpt() } else Nil val clsDef = atPos(start, point) { TypeDef(clsName, Template(constr, parents, EmptyValDef, Nil)) .withMods(addMod(mods, enumMod)).setComment(in.getDocComment(start)) } - newLineOptWhenFollowedBy(LBRACE) + skipToBrace() val modDef = atPos(in.offset) { val body = inBraces(enumCaseStats) ModuleDef(modName, Template(emptyConstructor, Nil, EmptyValDef, body)) @@ -2152,7 +2171,7 @@ object Parsers { /** EnumCaseStats = EnumCaseStat {semi EnumCaseStat */ def enumCaseStats(): List[DefTree] = { val cases = new ListBuffer[DefTree] += enumCaseStat() - while (in.token != RBRACE && in.token != EOF) { + while (!isStatSeqEnd) { acceptStatSep() cases += enumCaseStat() } @@ -2191,38 +2210,54 @@ object Parsers { else t } + /** ConstrApps ::= ConstrApp {`with' ConstrApp} + * + * Accepts empty list if followed by `{`. + */ + def constrAppsOpt(): List[Tree] = { + newLineOptWhenFollowedBy(LBRACE) + if (isBlockStart) Nil + else + constrApp() :: { + if (in.token == WITH) { + in.nextToken() + constrAppsOpt() + } + else Nil + } + } + /** Template ::= ConstrApps [TemplateBody] | TemplateBody - * ConstrApps ::= ConstrApp {`with' ConstrApp} * * @return a pair consisting of the template, and a boolean which indicates * whether the template misses a body (i.e. no {...} part). 
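+   *           (An indented block also counts as a body here, since
+   *           `isBlockStart` accepts INDENT as well as `{`.)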
*/ def template(constr: DefDef): (Template, Boolean) = { + val parents = constrAppsOpt() newLineOptWhenFollowedBy(LBRACE) - if (in.token == LBRACE) (templateBodyOpt(constr, Nil), false) - else { - val parents = tokenSeparated(WITH, constrApp) - newLineOptWhenFollowedBy(LBRACE) - val missingBody = in.token != LBRACE - (templateBodyOpt(constr, parents), missingBody) - } + val missingBody = !isBlockStart + (templateBodyOpt(constr, parents), missingBody) } /** TemplateOpt = [`extends' Template | TemplateBody] */ def templateOpt(constr: DefDef): Template = if (in.token == EXTENDS) { in.nextToken(); template(constr)._1 } - else { - newLineOptWhenFollowedBy(LBRACE) - if (in.token == LBRACE) template(constr)._1 - else Template(constr, Nil, EmptyValDef, Nil) - } + else templateBodyOpt(constr, Nil) - /** TemplateBody ::= [nl] `{' TemplateStatSeq `}' + /** TemplateBody ::= [with] [nl] `{' TemplateStatSeq `}' */ - def templateBodyOpt(constr: DefDef, parents: List[Tree]) = { + def templateBodyOpt(constr: DefDef, parents: List[Tree]): Template = { val (self, stats) = - if (in.token == LBRACE) templateBody() else (EmptyValDef, Nil) + if (in.token == WITH) { + skipToBrace() + templateBody() + } + else { + newLineOptWhenFollowedBy(LBRACE) + if (isBlockStart) templateBody() + else (EmptyValDef, Nil) + } Template(constr, parents, self, stats) } @@ -2247,7 +2282,7 @@ object Parsers { */ def packaging(start: Int): Tree = { val pkg = qualId() - newLineOptWhenFollowedBy(LBRACE) + skipToBrace() val stats = inDefScopeBraces(topStatSeq) makePackaging(start, pkg, stats) } @@ -2411,10 +2446,10 @@ object Parsers { } } else { val pkg = qualId() - newLineOptWhenFollowedBy(LBRACE) + skipToBrace() if (in.token == EOF) ts += makePackaging(start, pkg, List()) - else if (in.token == LBRACE) { + else if (isBlockStart) { ts += inDefScopeBraces(makePackaging(start, pkg, topStatSeq())) acceptStatSepUnlessAtEnd() ts ++= topStatSeq() @@ -2428,6 +2463,7 @@ object Parsers { else ts ++= topStatSeq() + checkEndComments(source, in.endComments, ts.toList) ts.toList } diff --git a/compiler/src/dotty/tools/dotc/parsing/Scanners.scala b/compiler/src/dotty/tools/dotc/parsing/Scanners.scala index b37d5e774c4c..8e2e7d0b4d1f 100644 --- a/compiler/src/dotty/tools/dotc/parsing/Scanners.scala +++ b/compiler/src/dotty/tools/dotc/parsing/Scanners.scala @@ -184,7 +184,7 @@ object Scanners { def nextPos: Int = (lookahead.getc(): @switch) match { case ' ' | '\t' => nextPos case CR | LF | FF => - // if we encounter line delimitng whitespace we don't count it, since + // if we encounter line delimiting whitespace we don't count it, since // it seems not to affect positions in source nextPos - 1 case _ => lookahead.charOffset - 1 @@ -195,6 +195,11 @@ object Scanners { /** Returns the closest docstring preceding the position supplied */ def getDocComment(pos: Int): Option[Comment] = docstringMap.get(pos) + private[this] var myEndComments = new ListBuffer[Comment] + + /** Comments of the form `// end ` */ + def endComments: List[Comment] = myEndComments.toList + /** A buffer for comments */ val commentBuf = new StringBuilder @@ -233,7 +238,73 @@ object Scanners { * (the STRINGLIT appears twice in succession on the stack iff the * expression is a multiline string literal). 
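+   *  In the new scheme each open construct is tracked as a `LexicalRegion`;
+   *  the stack starts out as `Indented(0) :: Nil`, representing top level.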
*/ - var sepRegions: List[Token] = List() + var regionStack: List[LexicalRegion] = Indented(0) :: Nil + +// Indentation handling + + /** The column of current token in source; issues warning if tabs are used */ + private def currentColumn = { + val c = source.column(offset, tabOK = false) + if (c >= 0) c + else { + ctx.warning(s"tab character should not be used for indentation; assuming tab with = ${source.tabInc}") + source.column(offset) + } + } + + def lookaheadScanner: Scanner = new Scanner(source, offset) + + /** If last token on the current line is `=>' and there are no open + * braces or other non-indent lexical regions, the start offset of the next line. + * Otherwise -1 + */ + private def columnIfFollowingArrow(): Int = { + val la = lookaheadScanner + var lastToken = la.token + var withSeen = false + do { + lastToken = la.token + la.nextToken() + withSeen |= la.token == WITH + } while (la.token != EOF && !la.isAfterLineEnd()) + if (lastToken == ARROW && la.inIndented && !withSeen) la.currentColumn else -1 + } + + /** Called when at line end, insert INDENT/UNDENT tokens as needed + * and update region stack. + * @param lastToken The last token before the end of line + * @param column The column of the start of the next line + */ + private def trackIndent(lastToken: Token, column: Int) = regionStack match { + case Indented(col) :: _ => + if (column > col && canStartIndentTokens.contains(lastToken)) { + regionStack = Indented(column) :: regionStack + next.copyFrom(this) + token = INDENT + } else if (column < col) { + regionStack = regionStack.tail + next.copyFrom(this) + token = UNDENT + } + case _ => + } + + /** Track a `with` that occurs not at the end of a line: + * If the line ends in an `=>' and the next line is indented, + * start a new indentation block immediately after the `with'. + */ + private def trackWith() = regionStack match { + case Indented(col) :: _ => + val column = columnIfFollowingArrow() + if (column > col) { + regionStack = Indented(column) :: regionStack + next.copyFrom(this) + token = INDENT + } + case _ => + } + + private def inIndented = regionStack.head.isInstanceOf[Indented] // Scala 2 compatibility @@ -250,13 +321,13 @@ object Scanners { /** Are we directly in a string interpolation expression? */ private def inStringInterpolation = - sepRegions.nonEmpty && sepRegions.head == STRINGLIT + (regionStack.head == InStringLit || regionStack.head == InMultiLineStringLit) /** Are we directly in a multiline string interpolation expression? 
* @pre inStringInterpolation */ private def inMultiLineInterpolation = - inStringInterpolation && sepRegions.tail.nonEmpty && sepRegions.tail.head == STRINGPART + regionStack.head == InMultiLineStringLit /** read next token and return last offset */ @@ -266,36 +337,35 @@ object Scanners { off } - def adjustSepRegions(lastToken: Token): Unit = (lastToken: @switch) match { + private def endRegion(region: LexicalRegion): Unit = + if (regionStack.head == region) regionStack = regionStack.tail + + private def adjustSepRegions(lastToken: Token): Unit = (lastToken: @switch) match { case LPAREN => - sepRegions = RPAREN :: sepRegions + regionStack = InParens :: regionStack case LBRACKET => - sepRegions = RBRACKET :: sepRegions + regionStack = InBrackets :: regionStack case LBRACE => - sepRegions = RBRACE :: sepRegions - case CASE => - sepRegions = ARROW :: sepRegions + regionStack = InBraces :: regionStack case RBRACE => - while (!sepRegions.isEmpty && sepRegions.head != RBRACE) - sepRegions = sepRegions.tail - if (!sepRegions.isEmpty) sepRegions = sepRegions.tail - case RBRACKET | RPAREN => - if (!sepRegions.isEmpty && sepRegions.head == lastToken) - sepRegions = sepRegions.tail - case ARROW => - if (!sepRegions.isEmpty && sepRegions.head == ARROW) - sepRegions = sepRegions.tail - case EXTENDS => - if (!sepRegions.isEmpty && sepRegions.head == ARROW) - sepRegions = sepRegions.tail + while (regionStack.head != InBraces && !inIndented) + regionStack = regionStack.tail + endRegion(InBraces) + case RBRACKET => + endRegion(InBrackets) + case RPAREN => + endRegion(InParens) case STRINGLIT => - if (inMultiLineInterpolation) - sepRegions = sepRegions.tail.tail - else if (inStringInterpolation) - sepRegions = sepRegions.tail + endRegion(if (inMultiLineInterpolation) InMultiLineStringLit else InStringLit) case _ => } + def startPattern() = + regionStack = InPattern :: regionStack + + def endPattern() = + regionStack = regionStack.dropWhile(_ != InPattern).tail + /** Produce next token, filling TokenData fields of Scanner. */ def nextToken(): Unit = { @@ -313,22 +383,29 @@ object Scanners { next.token = EMPTY } - /** Insert NEWLINE or NEWLINES if + /** (1) Insert NEWLINE or NEWLINES if * - we are after a newline - * - we are within a { ... } or on toplevel (wrt sepRegions) + * - we are within a { ... } or on toplevel (wrt regionStack) * - the current token can start a statement and the one before can end it - * insert NEWLINES if we are past a blank line, NEWLINE otherwise + * insert NEWLINES if we are past a blank line, NEWLINE otherwise. + * + * (2) Handle indentation */ - if (isAfterLineEnd() && - (canEndStatTokens contains lastToken) && - (canStartStatTokens contains token) && - (sepRegions.isEmpty || sepRegions.head == RBRACE || - sepRegions.head == ARROW && token == CASE)) { - next copyFrom this - // todo: make offset line-end of previous line? - offset = if (lineStartOffset <= offset) lineStartOffset else lastLineStartOffset - token = if (pastBlankLine()) NEWLINES else NEWLINE + if (isAfterLineEnd()) { + trackIndent(lastToken, if (token == EOF) 0 else currentColumn) + if ((canEndStatTokens contains lastToken) && + (canStartStatTokens contains token) && + (inIndented || + regionStack.head == InBraces || + regionStack.head == InPattern && token == CASE)) { + next copyFrom this + // todo: make offset line-end of previous line? 
+ offset = if (lineStartOffset <= offset) lineStartOffset else lastLineStartOffset + token = if (pastBlankLine()) NEWLINES else NEWLINE + } } + else if (token == EOF) trackIndent(lastToken, 0) + else if (lastToken == WITH) trackWith() postProcessToken() // print("[" + this +"]") @@ -470,15 +547,14 @@ object Scanners { if (ch == '\"') { nextRawChar() getStringPart(multiLine = true) - sepRegions = STRINGPART :: sepRegions // indicate string part - sepRegions = STRINGLIT :: sepRegions // once more to indicate multi line string part + regionStack = InMultiLineStringLit :: regionStack } else { token = STRINGLIT strVal = "" } } else { getStringPart(multiLine = false) - sepRegions = STRINGLIT :: sepRegions // indicate single line string part + regionStack = InStringLit :: regionStack } } else { nextChar() @@ -570,10 +646,8 @@ object Scanners { } private def skipComment(): Boolean = { - def appendToComment(ch: Char) = - if (keepComments) commentBuf.append(ch) def nextChar() = { - appendToComment(ch) + commentBuf.append(ch) Scanner.this.nextChar() } def skipLine(): Unit = { @@ -598,14 +672,11 @@ object Scanners { def nestedComment() = { nextChar(); skipComment() } val start = lastCharOffset def finishComment(): Boolean = { - if (keepComments) { - val pos = Position(start, charOffset - 1, start) - val comment = Comment(pos, flushBuf(commentBuf)) + val pos = Position(start, charOffset - 1, start) + val comment = Comment(pos, flushBuf(commentBuf)) - if (comment.isDocComment) { - addComment(comment) - } - } + if (keepComments && comment.isDocComment) addComment(comment) + if (comment.endCommentString.nonEmpty) myEndComments += comment true } diff --git a/compiler/src/dotty/tools/dotc/parsing/Tokens.scala b/compiler/src/dotty/tools/dotc/parsing/Tokens.scala index 770b826fd9f2..084a401b796f 100644 --- a/compiler/src/dotty/tools/dotc/parsing/Tokens.scala +++ b/compiler/src/dotty/tools/dotc/parsing/Tokens.scala @@ -124,9 +124,11 @@ abstract class TokensCommon { final val RBRACKET = 93; enter(RBRACKET, "']'") final val LBRACE = 94; enter(LBRACE, "'{'") final val RBRACE = 95; enter(RBRACE, "'}'") + final val INDENT = 96; enter(INDENT, "start of indented block") + final val UNDENT = 97; enter(UNDENT, "end of indented block") final val firstParen = LPAREN - final val lastParen = RBRACE + final val lastParen = UNDENT def buildKeywordArray(keywords: TokenSet) = { def start(tok: Token) = tokenString(tok).toTermName.asSimpleName.start @@ -192,7 +194,7 @@ object Tokens extends TokensCommon { final val VIEWBOUND = 84; enter(VIEWBOUND, "<%") // TODO: deprecate /** XML mode */ - final val XMLSTART = 96; enter(XMLSTART, "$XMLSTART$<") // TODO: deprecate + final val XMLSTART = 98; enter(XMLSTART, "$XMLSTART$<") // TODO: deprecate final val alphaKeywords = tokenRange(IF, ENUM) final val symbolicKeywords = tokenRange(USCORE, VIEWBOUND) @@ -208,7 +210,7 @@ object Tokens extends TokensCommon { USCORE, NULL, THIS, SUPER, TRUE, FALSE, RETURN, XMLSTART) final val canStartExpressionTokens = atomicExprTokens | BitSet( - LBRACE, LPAREN, IF, DO, WHILE, FOR, NEW, TRY, THROW) + LBRACE, LPAREN, INDENT, IF, DO, WHILE, FOR, NEW, TRY, THROW) final val canStartTypeTokens = literalTokens | identifierTokens | BitSet( THIS, SUPER, USCORE, LPAREN, AT) @@ -240,7 +242,21 @@ object Tokens extends TokensCommon { AT, CASE) final val canEndStatTokens = atomicExprTokens | BitSet( - TYPE, RPAREN, RBRACE, RBRACKET) + TYPE, RPAREN, RBRACE, RBRACKET, UNDENT) + + final val canStartIndentTokens = + BitSet(EQUALS, IF, THEN, ELSE, MATCH, FOR, YIELD, WHILE, 
DO, TRY, CATCH, FINALLY, WITH) final val numericLitTokens = BitSet(INTLIT, LONGLIT, FLOATLIT, DOUBLELIT) + + class LexicalRegion(override val toString: String) + + final val InParens = new LexicalRegion("(..)") + final val InBrackets = new LexicalRegion("[..]") + final val InBraces = new LexicalRegion("{..}") + final val InPattern = new LexicalRegion("case..=>") + final val InStringLit = new LexicalRegion("\"..\"") + final val InMultiLineStringLit = new LexicalRegion("\"\"\"..\"\"\"") + + case class Indented(column: Int) extends LexicalRegion(s"indent $column") } diff --git a/compiler/src/dotty/tools/dotc/util/SourceFile.scala b/compiler/src/dotty/tools/dotc/util/SourceFile.scala index 1a17fb8ee445..1c00ad046e9c 100644 --- a/compiler/src/dotty/tools/dotc/util/SourceFile.scala +++ b/compiler/src/dotty/tools/dotc/util/SourceFile.scala @@ -42,7 +42,7 @@ case class SourceFile(file: AbstractFile, content: Array[Char]) extends interfac def this(file: AbstractFile, cs: Seq[Char]) = this(file, cs.toArray) /** Tab increment; can be overridden */ - def tabInc = 8 + def tabInc = 2 override def name = file.name override def path = file.path @@ -125,15 +125,22 @@ case class SourceFile(file: AbstractFile, content: Array[Char]) extends interfac def lineContent(offset: Int): String = content.slice(startOfLine(offset), nextLine(offset)).mkString - /** The column corresponding to `offset`, starting at 0 */ - def column(offset: Int): Int = { + /** The column corresponding to `offset`, starting at 0 + * Returns -1 if tab character is encountered and `tabOK` is false. + */ + def column(offset: Int, tabOK: Boolean = true): Int = { var idx = startOfLine(offset) var col = 0 + var tabSeen = false while (idx != offset) { - col += (if (idx < length && content(idx) == '\t') (tabInc - col) % tabInc else 1) + if (idx < length && content(idx) == '\t') { + tabSeen = true + col += (tabInc - col) % tabInc + } + else col += 1 idx += 1 } - col + if (tabSeen && !tabOK) -1 else col } override def toString = file.toString diff --git a/docs/docs/internals/syntax.md b/docs/docs/internals/syntax.md index e4285e20f902..19b057c2b9e0 100644 --- a/docs/docs/internals/syntax.md +++ b/docs/docs/internals/syntax.md @@ -124,6 +124,7 @@ FunArgTypes ::= InfixType | ‘(’ [ FunArgType {‘,’ FunArgType } ] ‘)’ InfixType ::= RefinedType {id [nl] RefinedType} InfixOp(t1, op, t2) RefinedType ::= WithType {[nl] Refinement} RefinedTypeTree(t, ds) + | Refinement WithType ::= AnnotType {‘with’ AnnotType} (deprecated) AnnotType ::= SimpleType {Annotation} Annotated(t, annot) SimpleType ::= SimpleType TypeArgs AppliedTypeTree(t, args) @@ -132,7 +133,6 @@ SimpleType ::= SimpleType TypeArgs | Path ‘.’ ‘type’ SingletonTypeTree(p) | ‘(’ ArgTypes ‘)’ Tuple(ts) | ‘_’ TypeBounds - | Refinement RefinedTypeTree(EmptyTree, refinement) | SimpleLiteral SingletonTypeTree(l) ArgTypes ::= Type {‘,’ Type} | NamedTypeArg {‘,’ NamedTypeArg} @@ -188,6 +188,7 @@ SimpleExpr1 ::= Literal | SimpleExpr ‘.’ id Select(expr, id) | SimpleExpr (TypeArgs | NamedTypeArgs) TypeApply(expr, args) | SimpleExpr1 ArgumentExprs Apply(expr, args) + | SimpleExpr1 ‘with’ BlockExpr | XmlExpr ExprsInParens ::= ExprInParens {‘,’ ExprInParens} ExprInParens ::= PostfixExpr ‘:’ Type @@ -285,7 +286,8 @@ AccessQualifier ::= ‘[’ (id | ‘this’) ‘]’ Annotation ::= ‘@’ SimpleType {ParArgumentExprs} Apply(tpe, args) -TemplateBody ::= [nl] ‘{’ [SelfType] TemplateStat {semi TemplateStat} ‘}’ (self, stats) +TemplateBody ::= [‘with’] [nl] ‘{’ [SelfType] + TemplateStat {semi TemplateStat} ‘}’ (self, stats) 
TemplateStat ::= Import | {Annotation [nl]} {Modifier} Def | {Annotation [nl]} {Modifier} Dcl @@ -326,7 +328,6 @@ PatDef ::= Pattern2 {‘,’ Pattern2} [‘:’ Type] ‘=’ Expr VarDef ::= PatDef | ids ‘:’ Type ‘=’ ‘_’ DefDef ::= DefSig [‘:’ Type] ‘=’ Expr DefDef(_, name, tparams, vparamss, tpe, expr) - | DefSig [nl] ‘{’ Block ‘}’ DefDef(_, name, tparams, vparamss, tpe, Block) | ‘this’ DefParamClause DefParamClauses DefDef(_, , Nil, vparamss, EmptyTree, expr | Block) (‘=’ ConstrExpr | [nl] ConstrBlock) @@ -338,7 +339,7 @@ ClassConstr ::= [ClsTypeParamClause] [ConstrMods] ClsParamClauses ConstrMods ::= AccessModifier | Annotation {Annotation} (AccessModifier | ‘this’) ObjectDef ::= id TemplateOpt ModuleDef(mods, name, template) // no constructor -EnumDef ::= id ClassConstr [`extends' [ConstrApps]] EnumDef(mods, name, tparams, template) +EnumDef ::= id ClassConstr [`extends' [ConstrApps]] [‘with’] EnumDef(mods, name, tparams, template) [nl] ‘{’ EnumCaseStat {semi EnumCaseStat} ‘}’ EnumCaseStat ::= {Annotation [nl]} {Modifier} EnumCase EnumCase ::= `case' (EnumClassDef | ObjectDef | ids) @@ -358,7 +359,7 @@ TopStat ::= {Annotation [nl]} {Modifier} TmplDef | Import | Packaging | PackageObject -Packaging ::= ‘package’ QualId [nl] ‘{’ TopStatSeq ‘}’ Package(qid, stats) +Packaging ::= ‘package’ QualId [nl] [‘with’] ‘{’ TopStatSeq ‘}’ Package(qid, stats) PackageObject ::= ‘package’ ‘object’ ObjectDef object with package in mods. CompilationUnit ::= {‘package’ QualId semi} TopStatSeq Package(qid, stats) diff --git a/tests/pos/withTest.scala b/tests/pos/withTest.scala new file mode 100644 index 000000000000..b11bbb17eb96 --- /dev/null +++ b/tests/pos/withTest.scala @@ -0,0 +1,101 @@ +object Test with + + val xs = List(1, 2, 3) + +// Plain indentation + + xs.map with + x => x + 2 + .filter with + x => x % 2 == 0 + .foldLeft(0) with + _ + _ + +// Using lambdas with `with` + + xs.map with x => + x + 2 + .filter with x => + x % 2 == 0 + .foldLeft(0) with + _ + _ + +// for expressions + + for + x <- List(1, 2, 3) + y <- List(x + 1) + yield + x + y + + for + x <- List(1, 2, 3) + y <- List(x + 1) + do + println(x + y) + + +// Try expressions + + try + val x = 3 + x + catch + case ex: Exception => + 0 + finally + println("done") + +// Match expressions + + xs match + case Nil => + println() + 0 + case x :: Nil => + 1 + case _ => 2 + +// While and Do + + do + println("x") + println("y") + while + println("z") + true + + while + println("z") + true + do + println("x") + println("y") + + // end while + +// end Test + +package p with + + object o with + + class B with self => + def f(x: Int) = ??? + + val x = 3 + + enum Color with + case Red, Green, Blue + + class C extends Object with Serializable with self => + val result = + if x == x then + println("yes") + true + else + println("no") + false + + // end C + // end o
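Aside, not part of the patch: a minimal standalone sketch of the indentation tracking introduced in Scanners.scala above. Only `Indented` regions are modelled, plain strings stand in for the real token constants, and the names (`IndentSketch`, the simplified `trackIndent` signature) are illustrative only.

object IndentSketch {
  sealed trait Region
  final case class Indented(column: Int) extends Region

  sealed trait Pseudo
  case object INDENT extends Pseudo
  case object UNDENT extends Pseudo

  // Stand-in for canStartIndentTokens, using keyword strings instead of token ids.
  private val canStartIndent = Set(
    "=", "if", "then", "else", "match", "for", "yield",
    "while", "do", "try", "catch", "finally", "with")

  /** Mirrors `trackIndent`: given the region stack, the last token of the
   *  previous line and the column of the new line, return the pseudo-token
   *  to emit (if any) together with the updated stack.
   */
  def trackIndent(stack: List[Region], lastToken: String, column: Int): (Option[Pseudo], List[Region]) =
    stack match {
      case Indented(col) :: _ if column > col && canStartIndent(lastToken) =>
        (Some(INDENT), Indented(column) :: stack)
      case Indented(col) :: rest if column < col =>
        (Some(UNDENT), rest)
      case _ =>
        (None, stack)
    }

  def main(args: Array[String]): Unit = {
    val top = List(Indented(0))
    val (t1, s1) = trackIndent(top, "with", 2)   // e.g. the line after `object Test with`
    assert(t1.contains(INDENT) && s1.head == Indented(2))
    val (t2, s2) = trackIndent(s1, "3", 0)       // dedenting back to top level
    assert(t2.contains(UNDENT) && s2 == top)
  }
}

The real scanner additionally special-cases a mid-line `with` on a line ending in `=>` (trackWith / columnIfFollowingArrow) and tracks brace, bracket, string-interpolation and pattern regions, all of which this sketch omits.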