diff --git a/Gemfile.lock b/Gemfile.lock index 06dccdc0b..45a14bc10 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -251,8 +251,8 @@ GEM rb-fsevent (0.11.2) rb-inotify (0.10.1) ffi (~> 1.0) - rexml (3.2.8) - strscan (>= 3.0.9) + rexml (3.3.6) + strscan rouge (3.30.0) ruby2_keywords (0.0.5) rubyzip (2.3.2) @@ -280,7 +280,7 @@ GEM unf_ext (0.0.9.1-x64-mingw-ucrt) unicode-display_width (1.8.0) uri (0.13.0) - webrick (1.8.1) + webrick (1.8.2) yell (2.2.2) zeitwerk (2.6.7) diff --git a/README.md b/README.md index 6a32febe4..013a66267 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Scala Documentation # -[![Build Status](https://ci.scala-lang.org/api/badges/scala/docs.scala-lang/status.svg)](https://platform-ci.scala-lang.org/scala/docs.scala-lang) +[![Build Status](https://github.com/scala/docs.scala-lang/actions/workflows/build.yml/badge.svg)](https://github.com/scala/docs.scala-lang/actions/workflows/build.yml?query=branch%3Amain) This repository contains the source for the Scala documentation website, as well as the source for "Scala Improvement Process" (SIP) documents. 
diff --git a/_ba/tour/automatic-closures.md b/_ba/tour/automatic-closures.md deleted file mode 100644 index 90f751ee2..000000000 --- a/_ba/tour/automatic-closures.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: tour -title: Automatic Type-Dependent Closure Construction -partof: scala-tour - -language: ba ---- diff --git a/_config.yml b/_config.yml index f257fada0..aed90af2e 100644 --- a/_config.yml +++ b/_config.yml @@ -15,9 +15,9 @@ keywords: - Document - Guide -scala-version: 2.13.14 -scala-212-version: 2.12.19 -scala-3-version: 3.4.2 +scala-version: 2.13.15 +scala-212-version: 2.12.20 +scala-3-version: 3.5.2 collections: style: diff --git a/_data/compiler-options.yml b/_data/compiler-options.yml index 0490b2512..67b898f1d 100644 --- a/_data/compiler-options.yml +++ b/_data/compiler-options.yml @@ -469,10 +469,6 @@ schema: type: "Boolean" description: "Don't perform exhaustivity/unreachability analysis. Also, ignore @switch annotation." - - option: "-Xno-uescape" - schema: - type: "Boolean" - description: "Disable handling of \\u unicode escapes." 
- option: "-Xnojline" schema: type: "Boolean" diff --git a/_data/doc-nav-header.yml b/_data/doc-nav-header.yml index 9c2c92166..4f2bbd82c 100644 --- a/_data/doc-nav-header.yml +++ b/_data/doc-nav-header.yml @@ -1,5 +1,10 @@ - title: Getting Started - url: "/getting-started/index.html" + url: "#" + submenu: + - title: Install Scala + url: "/getting-started/install-scala.html" + - title: Scala IDEs + url: "/getting-started/scala-ides.html" - title: Scala 3 url: "#" submenu: diff --git a/_data/footer.yml b/_data/footer.yml index e725cef40..43338e7b7 100644 --- a/_data/footer.yml +++ b/_data/footer.yml @@ -23,6 +23,8 @@ url: "http://scala-lang.org/community/" - title: Governance url: "http://scala-lang.org/community/index.html#governance" + - title: Scala Ambassadors + url: "http://scala-lang.org/ambassadors/" - title: Mailing Lists url: "http://scala-lang.org/community/index.html#mailing-lists" - title: Chat Rooms & More diff --git a/_data/overviews.yml b/_data/overviews.yml index cc1534263..5756db5e3 100644 --- a/_data/overviews.yml +++ b/_data/overviews.yml @@ -161,6 +161,9 @@ description: "A diverse and comprehensive set of libraries is important to any productive software ecosystem. While it is easy to develop and distribute Scala libraries, good library authorship goes beyond just writing code and publishing it. In this guide, we cover the important topic of Binary Compatibility." icon: puzzle-piece url: "core/binary-compatibility-for-library-authors.html" + - title: Nightly Versions of Scala + description: "We regularly publish 'nightlies' of both Scala 3 and Scala 2 so that users can preview and test the contents of upcoming releases. Here's how to find and use these versions." + url: "core/nightlies.html" - category: "Tools" description: "Reference material on core Scala tools like the Scala REPL and Scaladoc generation." 
diff --git a/_data/setup-scala.yml b/_data/setup-scala.yml index ad8db6f59..cda4c2361 100644 --- a/_data/setup-scala.yml +++ b/_data/setup-scala.yml @@ -2,5 +2,5 @@ linux-x86-64: curl -fL https://github.com/coursier/coursier/releases/latest/down linux-arm64: curl -fL https://github.com/VirtusLab/coursier-m1/releases/latest/download/cs-aarch64-pc-linux.gz | gzip -d > cs && chmod +x cs && ./cs setup macOS-x86-64: curl -fL https://github.com/coursier/coursier/releases/latest/download/cs-x86_64-apple-darwin.gz | gzip -d > cs && chmod +x cs && (xattr -d com.apple.quarantine cs || true) && ./cs setup macOS-arm64: curl -fL https://github.com/VirtusLab/coursier-m1/releases/latest/download/cs-aarch64-apple-darwin.gz | gzip -d > cs && chmod +x cs && (xattr -d com.apple.quarantine cs || true) && ./cs setup -macOS-brew: brew install coursier/formulas/coursier && cs setup +macOS-brew: brew install coursier && coursier setup windows-link: https://github.com/coursier/coursier/releases/latest/download/cs-x86_64-pc-win32.zip diff --git a/_es/tour/automatic-closures.md b/_es/tour/automatic-closures.md deleted file mode 100644 index 5d8cb2f99..000000000 --- a/_es/tour/automatic-closures.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -layout: tour -title: Construcción de closures automáticas -partof: scala-tour - -num: 16 -language: es - -next-page: operators -previous-page: multiple-parameter-lists ---- - -Scala permite pasar funciones sin parámetros como parámetros de un método. Cuando un método así es invocado, los parámetros reales de la función enviada sin parámetros no son evaluados y una función "nularia" (de aridad cero, 0-aria, o sin parámetros) es pasada en su lugar. Esta función encapsula el comportamiento del parámetro correspondiente (comunmente conocido como "llamada por nombre"). 
- -Para aclarar un poco esto aquí se muestra un ejemplo: - - object TargetTest1 extends App { - def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } - var i = 10 - whileLoop (i > 0) { - println(i) - i -= 1 - } - } - -La función `whileLoop` recibe dos parámetros `cond` y `body`. Cuando la función es llamada, los parámetros reales no son evaluados en ese momento. Pero cuando los parámetros son utilizados en el cuerpo de la función `whileLoop`, las funciones nularias creadas implícitamente serán evaluadas en su lugar. Así, nuestro método `whileLoop` implementa un bucle tipo Java mediante una implementación recursiva. - -Es posible combinar el uso de [operadores de infijo y postfijo (infix/postfix)](operators.html) con este mecanismo para crear declaraciones más complejas (con una sintaxis agradadable). - -Aquí mostramos la implementación de una declaración tipo repetir-a-menos-que (repetir el bucle a no ser que se cumpla X condición): - - object TargetTest2 extends App { - def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) - protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } - } - var i = 10 - loop { - println("i = " + i) - i -= 1 - } unless (i == 0) - } - -La función `loop` solo acepta el cuerpo de un bucle y retorna una instancia de la clase `LoopUnlessCond` (la cual encapsula el cuerpo del objeto). Es importante notar que en este punto el cuerpo del bucle no ha sido evaluado aún. La clase `LoopUnlessCond` tiene un método `unless` el cual puede ser usado como un *operador de infijo (infix)*. De esta manera podemos lograr una sintaxis muy natural para nuestro nuevo bucle `repetir { a_menos_que ( )`. 
- -A continuación se expone el resultado de la ejecución de `TargetTest2`: - - i = 10 - i = 9 - i = 8 - i = 7 - i = 6 - i = 5 - i = 4 - i = 3 - i = 2 - i = 1 diff --git a/_es/tour/multiple-parameter-lists.md b/_es/tour/multiple-parameter-lists.md index cdb652151..83b7218c0 100644 --- a/_es/tour/multiple-parameter-lists.md +++ b/_es/tour/multiple-parameter-lists.md @@ -6,7 +6,7 @@ partof: scala-tour num: 15 language: es -next-page: automatic-closures +next-page: operators previous-page: nested-functions --- diff --git a/_es/tour/operators.md b/_es/tour/operators.md index a2d3b5e4b..6aeb98e04 100644 --- a/_es/tour/operators.md +++ b/_es/tour/operators.md @@ -7,7 +7,7 @@ num: 17 language: es next-page: higher-order-functions -previous-page: automatic-closures +previous-page: multiple-parameter-lists --- En Scala, cualquier método el cual reciba un solo parámetro puede ser usado como un *operador de infijo (infix)*. Aquí se muestra la definición de la clase `MyBool`, la cual define tres métodos `and`, `or`, y `negate`. diff --git a/_es/tour/tour-of-scala.md b/_es/tour/tour-of-scala.md index 19b4f60af..b742b271a 100644 --- a/_es/tour/tour-of-scala.md +++ b/_es/tour/tour-of-scala.md @@ -37,7 +37,6 @@ El [mecanismo de inferencia de tipos locales](type-inference.html) se encarga de En la práctica, el desarrollo de aplicaciones específicas para un dominio generalmente requiere de "Lenguajes de dominio específico" (DSL). Scala provee una única combinación de mecanismos del lenguaje que simplifican la creación de construcciones propias del lenguaje en forma de bibliotecas: * cualquier método puede ser usado como un operador de [infijo o postfijo](operators.html) -* [las closures son construidas automáticamente dependiendo del tipo esperado](automatic-closures.html) (tipos objetivo). El uso conjunto de ambas características facilita la definición de nuevas sentencias sin tener que extender la sintaxis y sin usar facciones de meta-programación como tipo macros. 
diff --git a/_fr/getting-started/index.md b/_fr/getting-started/install-scala.md similarity index 100% rename from _fr/getting-started/index.md rename to _fr/getting-started/install-scala.md diff --git a/_fr/tour/automatic-closures.md b/_fr/tour/automatic-closures.md deleted file mode 100644 index f5a06a5f5..000000000 --- a/_fr/tour/automatic-closures.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: tour -title: Automatic Closures -partof: scala-tour - -language: fr ---- diff --git a/_includes/_markdown/install-cask.md b/_includes/_markdown/install-cask.md new file mode 100644 index 000000000..afb275432 --- /dev/null +++ b/_includes/_markdown/install-cask.md @@ -0,0 +1,37 @@ +{% altDetails require-info-box 'Getting Cask' %} + +{% tabs cask-install class=tabs-build-tool %} + +{% tab 'Scala CLI' %} +You can declare a dependency on Cask with the following `using` directive: +```scala +//> using dep "com.lihaoyi::cask::0.9.2" +``` +{% endtab %} + +{% tab 'sbt' %} +In your `build.sbt`, you can add a dependency on Cask: +```scala +lazy val example = project.in(file("example")) + .settings( + scalaVersion := "3.4.2", + libraryDependencies += "com.lihaoyi" %% "cask" % "0.9.2", + fork := true + ) +``` +{% endtab %} + +{% tab 'Mill' %} +In your `build.sc`, you can add a dependency on Cask: +```scala +object example extends RootModule with ScalaModule { + def scalaVersion = "3.3.3" + def ivyDeps = Agg( + ivy"com.lihaoyi::cask::0.9.2" + ) +} +``` +{% endtab %} + +{% endtabs %} +{% endaltDetails %} diff --git a/_includes/_markdown/install-munit.md b/_includes/_markdown/install-munit.md index 246716a11..853d18e23 100644 --- a/_includes/_markdown/install-munit.md +++ b/_includes/_markdown/install-munit.md @@ -16,13 +16,14 @@ Alternatively, you can require just a specific version of MUnit: {% tab 'sbt' %} In your build.sbt file, you can add the dependency on toolkit-test: ```scala -lazy val example = project.in(file("example")) +lazy val example = project.in(file(".")) .settings( 
scalaVersion := "3.3.3", libraryDependencies += "org.scala-lang" %% "toolkit-test" % "0.1.7" % Test ) ``` -Here the `Test` configuration means that the dependency is only used by the source files in `example/src/test`. + +Here the `Test` configuration means that the dependency is only used by the source files in `src/test`. Alternatively, you can require just a specific version of MUnit: ```scala diff --git a/_includes/_markdown/install-os-lib.md b/_includes/_markdown/install-os-lib.md index 28bd2ae5e..c35388e0a 100644 --- a/_includes/_markdown/install-os-lib.md +++ b/_includes/_markdown/install-os-lib.md @@ -15,7 +15,7 @@ Alternatively, you can require just a specific version of OS-Lib: {% tab 'sbt' %} In your `build.sbt`, you can add a dependency on the toolkit: ```scala -lazy val example = project.in(file("example")) +lazy val example = project.in(file(".")) .settings( scalaVersion := "3.3.3", libraryDependencies += "org.scala-lang" %% "toolkit" % "0.1.7" diff --git a/_includes/_markdown/install-sttp.md b/_includes/_markdown/install-sttp.md index 4a2b0f117..43ad70f7d 100644 --- a/_includes/_markdown/install-sttp.md +++ b/_includes/_markdown/install-sttp.md @@ -15,7 +15,7 @@ Alternatively, you can require just a specific version of sttp: {% tab 'sbt' %} In your build.sbt file, you can add a dependency on the Toolkit: ```scala -lazy val example = project.in(file("example")) +lazy val example = project.in(file(".")) .settings( scalaVersion := "3.3.3", libraryDependencies += "org.scala-lang" %% "toolkit" % "0.1.7" diff --git a/_includes/_markdown/install-upickle.md b/_includes/_markdown/install-upickle.md index 26e6ecd5b..58f3e23c9 100644 --- a/_includes/_markdown/install-upickle.md +++ b/_includes/_markdown/install-upickle.md @@ -15,7 +15,7 @@ Alternatively, you can require just a specific version of UPickle: {% tab 'sbt' %} In your build.sbt file, you can add the dependency on the Toolkit: ```scala -lazy val example = project.in(file("example")) +lazy val example 
= project.in(file(".")) .settings( scalaVersion := "3.3.3", libraryDependencies += "org.scala-lang" %% "toolkit" % "0.1.7" diff --git a/_includes/alert-banner.html b/_includes/alert-banner.html index 405cbd8fb..94c5ac127 100644 --- a/_includes/alert-banner.html +++ b/_includes/alert-banner.html @@ -1,8 +1,10 @@ {% comment %}use the variable 'message' to include markdown text to display in the alert.{% endcomment %} +{% unless include.message_id == 'disabled' %} +{% endunless %} diff --git a/_ja/getting-started/index.md b/_ja/getting-started/install-scala.md similarity index 100% rename from _ja/getting-started/index.md rename to _ja/getting-started/install-scala.md diff --git a/_ja/index.md b/_ja/index.md index b94f17177..358ef903d 100644 --- a/_ja/index.md +++ b/_ja/index.md @@ -14,7 +14,7 @@ sections: - title: "入門" description: "あなたのコンピューターに Scala をインストールして、Scala コードを書きはじめよう!" icon: "fa fa-rocket" - link: /ja/getting-started/index.html + link: /ja/getting-started/install-scala.html - title: "Scala ツアー" description: "コア言語機能をひと口大で紹介" icon: "fa fa-flag" diff --git a/_ja/overviews/macros/paradise.md b/_ja/overviews/macros/paradise.md index 7c897ff35..5fd8e5de7 100644 --- a/_ja/overviews/macros/paradise.md +++ b/_ja/overviews/macros/paradise.md @@ -19,7 +19,7 @@ title: マクロパラダイス マクロパラダイス (Macro paradise) とは Scala の複数のバージョンをサポートするコンパイラプラグインで、一般向けにリリースされている scalac と共に正しく動作するように設計されている。 これによって、将来の Scala に取り込まれるよりもいち早く最新のマクロ機能を使えるようになっている。 [サポートされている機能とバージョンの一覧](/ja/overviews/macros/roadmap.html))に関してはロードマップページを、 -動作の保証に関しては[マクロパラダイスのアナウンスメント](https://scalamacros.org/news/2013/08/07/roadmap-for-macro-paradise.html)を参照してほしい。 +動作の保証に関しては[マクロパラダイスのアナウンスメント](hxxps://scalamacros.org/news/2013/08/07/roadmap-for-macro-paradise.html)を参照してほしい。 ~/210x $ scalac -Xplugin:paradise_*.jar -Xshow-phases phase name id description diff --git a/_ja/overviews/macros/typemacros.md b/_ja/overviews/macros/typemacros.md index 38dae4318..0ed863eb8 100644 --- a/_ja/overviews/macros/typemacros.md 
+++ b/_ja/overviews/macros/typemacros.md @@ -9,7 +9,7 @@ title: 型マクロ **Eugene Yokota 訳** 型マクロ (type macro) は[マクロパラダイス](/ja/overviews/macros/paradise.html)の以前のバージョンから利用可能だったが、マクロパラダイス 2.0 ではサポートされなくなった。 -[the paradise 2.0 announcement](https://scalamacros.org/news/2013/08/05/macro-paradise-2.0.0-snapshot.html) に説明と移行のための戦略が書かれている。 +[the paradise 2.0 announcement](hxxps://scalamacros.org/news/2013/08/05/macro-paradise-2.0.0-snapshot.html) に説明と移行のための戦略が書かれている。 ## 直観 diff --git a/_ja/overviews/macros/untypedmacros.md b/_ja/overviews/macros/untypedmacros.md index 08ad463cd..7b857f783 100644 --- a/_ja/overviews/macros/untypedmacros.md +++ b/_ja/overviews/macros/untypedmacros.md @@ -9,7 +9,7 @@ title: 型指定の無いマクロ **Eugene Yokota 訳** 型指定の無いマクロ (untyped macro) は[マクロパラダイス](/ja/overviews/macros/paradise.html)の以前のバージョンから利用可能だったが、マクロパラダイス 2.0 ではサポートされなくなった。 -[the paradise 2.0 announcement](https://scalamacros.org/news/2013/08/05/macro-paradise-2.0.0-snapshot.html) に説明と移行のための戦略が書かれている。 +[the paradise 2.0 announcement](hxxps://scalamacros.org/news/2013/08/05/macro-paradise-2.0.0-snapshot.html) に説明と移行のための戦略が書かれている。 ## 直観 diff --git a/_ja/overviews/macros/usecases.md b/_ja/overviews/macros/usecases.md index 9db3bc139..5a6767e37 100644 --- a/_ja/overviews/macros/usecases.md +++ b/_ja/overviews/macros/usecases.md @@ -20,7 +20,7 @@ Scala の商用ユーザと研究ユーザの両方がマクロを利用して ここ EPFL においても我々はマクロを活用して研究を行っている。Lightbend 社もマクロを数々のプロジェクトに採用している。 マクロはコミュニティー内でも人気があり、既にいくつかの興味深い応用が現れている。 -最近行われた講演の ["What Are Macros Good For?"](https://scalamacros.org/paperstalks/2014-02-04-WhatAreMacrosGoodFor.pdf) では Scala 2.10 ユーザのマクロの利用方法を説明し、システム化した。講演の大筋はマクロはコード生成、静的な検査、および DSL に有効であるということで、これを研究や産業からの例を交えながら説明した。 +最近行われた講演の ["What Are Macros Good For?"](https://github.com/scalamacros/scalamacros.github.com/blob/5904f7ef88a439c668204b4bf262835e89fb13cb/paperstalks/2014-02-04-WhatAreMacrosGoodFor.pdf) では Scala 2.10 ユーザのマクロの利用方法を説明し、システム化した。講演の大筋はマクロはコード生成、静的な検査、および DSL に有効であるということで、これを研究や産業からの例を交えながら説明した。 -Scala'13 
ワークショップにおいて ["Scala Macros: Let Our Powers Combine!"](https://scalamacros.org/paperstalks/2013-04-22-LetOurPowersCombine.pdf) という論文を発表した。これは Scala 2.10 における最先端のマクロ論をより学問的な視点から説明した。 +Scala'13 ワークショップにおいて ["Scala Macros: Let Our Powers Combine!"](https://github.com/scalamacros/scalamacros.github.com/blob/5904f7ef88a439c668204b4bf262835e89fb13cb/paperstalks/2013-04-22-LetOurPowersCombine.pdf) という論文を発表した。これは Scala 2.10 における最先端のマクロ論をより学問的な視点から説明した。 この論文では Scala のリッチな構文と静的な型がマクロと相乗することを示し、また既存の言語機能をマクロによって新しい方法で活用できることを考察する。 diff --git a/_ja/overviews/reflection/overview.md b/_ja/overviews/reflection/overview.md index e7d28c55f..d884785fe 100644 --- a/_ja/overviews/reflection/overview.md +++ b/_ja/overviews/reflection/overview.md @@ -81,7 +81,7 @@ Scala コンパイラが持つ型情報を全ては入手できない可能性 上の例では、まず `scala.reflect.runtime.universe` をインポートして (型タグを使うためには必ずインポートされる必要がある)、`l` という名前の `List[Int]` を作る。 -次に、context bound を持った型パラメータ `T` を持つ `getTypeTag` というメソッドは定義する +次に、context bound を持った型パラメータ `T` を持つ `getTypeTag` というメソッドを定義する (REPL が示すとおり、これは暗黙の evidence パラメータを定義することに等価であり、コンパイラは `T` に対する型タグを生成する)。 最後に、このメソッドに `l` を渡して呼び出し、`TypeTag` に格納される型を返す `tpe` を呼び出す。 見ての通り、正しい完全な型 (つまり、`List` の具象型引数を含むということ) である `List[Int]` が返ってきた。 diff --git a/_ja/tour/automatic-closures.md b/_ja/tour/automatic-closures.md deleted file mode 100644 index 80cb7edf7..000000000 --- a/_ja/tour/automatic-closures.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -layout: tour -title: 型依存クロージャの自動構築 -language: ja -partof: scala-tour ---- - -Scalaはメソッドのパラメータとしてパラメータ無しの関数名を渡せます。そのようなメソッドが呼ばれると、パラメータ無しの関数名は実際に評価されず、代わりに、対応するパラメーターの処理をカプセル化した、引数無しの関数が渡されます(いわゆる *名前渡し*評価です)。 - -以下のコードはこの仕組みを説明しています。 - - object TargetTest1 extends Application { - def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } - var i = 10 - whileLoop (i > 0) { - println(i) - i -= 1 - } - } - -関数 whileLoop 
は2つのパラメータ`cond`と`body`を受け取ります。関数が適用される時、実際のパラメータは評価されません。しかし形式上のパラメータが`whileLoop`の本体内で使われる度に、暗黙に生成された引数の無い関数が代わりに評価されます。このようにメソッド`whileLoop`はJavaのようなwhileループを再帰的な方法で実装しています。 - -[中置/後置 演算子](operators.html)とこのメカニズムを組み合わせて利用し、より複雑な命令文を(より良い構文で)作れます。 - -こちらがloop-unless式の実装です。 - - object TargetTest2 extends Application { - def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) - protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } - } - var i = 10 - loop { - println("i = " + i) - i -= 1 - } unless (i == 0) - } -この`loop`関数はループ処理の本体を受け取り、クラス`LoopUnlessCond`(この処理の本体をカプセル化する)のインスタンスを返すだけです。処理の本体はまだ評価されていないことに気をつけてください。クラス`LoopUnlessCond`は *中置演算子* として使えるメソッド`unless`を持ちます。このように、新しいループ処理: `loop { < stats > } unless ( < cond > )`のとても自然な構文を作れます。 - -こちらが`TargetTest2`を実行した時の出力です。 - - i = 10 - i = 9 - i = 8 - i = 7 - i = 6 - i = 5 - i = 4 - i = 3 - i = 2 - i = 1 diff --git a/_ko/tour/annotations.md b/_ko/tour/annotations.md index 924664303..11b5da4b5 100644 --- a/_ko/tour/annotations.md +++ b/_ko/tour/annotations.md @@ -7,7 +7,7 @@ num: 31 language: ko next-page: packages-and-imports -previous-page: automatic-closures +previous-page: operators --- 어노테이션은 메타 정보와 정의 내용을 연결해준다. diff --git a/_ko/tour/automatic-closures.md b/_ko/tour/automatic-closures.md deleted file mode 100644 index 639623c53..000000000 --- a/_ko/tour/automatic-closures.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -layout: tour -title: 타입 의존 클로저의 자동 구성 -partof: scala-tour - -num: 30 -language: ko - -next-page: annotations -previous-page: operators ---- - -스칼라에선 파라미터가 없는 함수의 이름을 메소드의 파라미터로 사용할 수 있다. 이런 메소드가 호출되면 파라미터가 없는 함수의 이름에 해당하는 실제 파라미터를 찾지 않고, 대신 해당 파라미터의 계산을 캡슐화한 무항 함수를 전달하게 된다(소위 말하는 *이름에 의한 호출* 연산). - -다음 코드는 이 방식을 사용하는 방법을 보여준다. 
- - object TargetTest1 extends App { - def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } - var i = 10 - whileLoop (i > 0) { - println(i) - i -= 1 - } - } - -`whileLoop` 함수는 `cond`와 `body`라는 두 파라미터를 받는다. 이 함수가 적용될 때 실제 파라미터는 계산되지 않는다. 대신 `whileLoop`의 내부에서 이 정형 파라미터를 사용할 때마다 암시적으로 생성된 무항 함수로 처리한다. 따라서 `whileLoop` 메소드는 재귀 구현의 방식에 맞춰 자바와 같은 while 반복문을 구현한다. - -[중위/후위 연산자](operators.html)와 이 기법을 함께 사용해 좀 더 복잡한 명령문(보기 좋게 작성된)을 생성할 수 있다. - -다음은 반복문을 제거한 명령문 구현이다. - - object TargetTest2 extends App { - def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) - protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } - } - var i = 10 - loop { - println("i = " + i) - i -= 1 - } unless (i == 0) - } - -`loop` 함수는 단순히 반복문의 내용을 받아서 `LoopUnlessCond` 클래스의 인스턴스(반복문 내용에 해당하는 객체를 캡슐화한)를 반환한다. 해당 내용이 아직 계산되지 않았음을 유념하자. `LoopUnlessCond` 클래스는 *중위 연산자*로 사용할 수 있는 `unless`라는 메소드를 포함하고 있다. 이런 접근을 통해 상당히 자연스럽게 표현된 새로운 반복문을 완성하게 된다: `loop { < stats > } unless ( < cond > )`. - -다음은 `TargetTest2`를 실행한 출력 결과다. 
- - i = 10 - i = 9 - i = 8 - i = 7 - i = 6 - i = 5 - i = 4 - i = 3 - i = 2 - i = 1 - -윤창석, 이한욱 옮김 diff --git a/_ko/tour/operators.md b/_ko/tour/operators.md index fd904bfdd..4306b0874 100644 --- a/_ko/tour/operators.md +++ b/_ko/tour/operators.md @@ -6,7 +6,7 @@ partof: scala-tour num: 29 language: ko -next-page: automatic-closures +next-page: annotations previous-page: type-inference --- diff --git a/_layouts/root-content-layout.html b/_layouts/root-content-layout.html index f8d99b142..b45513d34 100644 --- a/_layouts/root-content-layout.html +++ b/_layouts/root-content-layout.html @@ -3,7 +3,7 @@ -{% include alert-banner.html message=site.data.messages.scam-banner message_id='scam-courses-feb-2024' %} +{% include alert-banner.html message_id='disabled' message=site.data.messages.scam-banner %} {% include navbar-inner.html %} diff --git a/_layouts/root-index-layout.html b/_layouts/root-index-layout.html index baf0558d4..d83b64596 100644 --- a/_layouts/root-index-layout.html +++ b/_layouts/root-index-layout.html @@ -2,7 +2,7 @@ -{% include alert-banner.html message=site.data.messages.scam-banner message_id='scam-courses-feb-2024' %} +{% include alert-banner.html message_id='disabled' message=site.data.messages.scam-banner %} {% include navbar-inner.html %} diff --git a/_overviews/FAQ/index.md b/_overviews/FAQ/index.md index 00f52f41a..7c0b101dd 100644 --- a/_overviews/FAQ/index.md +++ b/_overviews/FAQ/index.md @@ -65,7 +65,7 @@ In short, the only officially sanctioned place is the \#jobs channel ### Who's behind Scala? -This is answered [on the community page](https://www.scala-lang.org/community/#whos-behind-scala). +This is answered [on the Governance page](https://www.scala-lang.org/governance/). ### Can I use the Scala logo? @@ -73,6 +73,10 @@ See [scala/scala-lang#1040](https://github.com/scala/scala-lang/issues/1040). ## Technical questions +### What IDEs are available for Scala? 
+ +See [this doc page](https://docs.scala-lang.org/getting-started/scala-ides.html). + ### What compiler flags are recommended? The list of available options is @@ -244,18 +248,22 @@ differ from a function value such as: val square: Int => Int = x => x * x -For Scala 2, there is a [complete answer on Stack Overflow](https://stackoverflow.com/a/2530007/4111404) +For **Scala 2**, there is a [complete answer on Stack Overflow](https://stackoverflow.com/a/2530007/4111404) and a [summary with practical differences](https://tpolecat.github.io/2014/06/09/methods-functions.html). -Note that in **Scala 3** the differences are fewer; -for example, they will be able to -[accept implicit parameters]({{ site.scala3ref }}/contextual/context-functions.html) -as well as [type parameters]({{ site.scala3ref }}/new-types/polymorphic-function-types.html). +In **Scala 3**, the differences are fewer. +[Context functions]({{ site.scala3ref }}/contextual/context-functions.html) +accept given parameters and +[polymorphic functions]({{ site.scala3ref }}/new-types/polymorphic-function-types.html) +have type parameters. -Nevertheless, it is still recommended to use methods most of the time, -unless you absolutely need a function. And, thanks to -[eta-expansion](https://stackoverflow.com/questions/39445018/what-is-the-eta-expansion-in-scala) -you rarely would need to define a function rather than a method. +It's standard to use methods most of the time, +except when a function value is actually needed. +[Eta-expansion](https://stackoverflow.com/questions/39445018/what-is-the-eta-expansion-in-scala), +converts methods to functions when needed. +For example, a method such as `map` expects a function, +but even if you `def square` as shown above, you can +still `xs.map(square)`. ### What's the difference between types and classes? @@ -273,6 +281,15 @@ for multiple reasons, most notoriously For an in-depth treatment of types vs. 
classes, see the blog post ["There are more types than classes"](https://typelevel.org/blog/2017/02/13/more-types-than-classes.html). +### Should I declare my parameterless method with or without parentheses? + +In other words, should one write `def foo()` or just `def foo`? + +Answer: by convention, the former is used to indicate that a method +has side effects. + +For more details, see the Scala Style Guide, [here](https://docs.scala-lang.org/style/naming-conventions.html#parentheses). + ### How can a method in a superclass return a value of the “current” type? First, note that using `this.type` won't work. People often try that, @@ -332,14 +349,14 @@ setting in a multi-project build. For example, if you add this to your `build.sbt`: - scalaVersion := "2.13.14" + scalaVersion := "2.13.15" that's a "bare" setting, and you might expect it to apply build-wide. But it doesn't. _It only applies to the root project._ In many cases one should instead write: - ThisBuild / scalaVersion := "2.13.14" + ThisBuild / scalaVersion := "2.13.15" Other possibilities include: diff --git a/_overviews/collections-2.13/maps.md b/_overviews/collections-2.13/maps.md index 81580815b..34d9696f1 100644 --- a/_overviews/collections-2.13/maps.md +++ b/_overviews/collections-2.13/maps.md @@ -51,8 +51,8 @@ Immutable maps support in addition operations to add and remove mappings by retu | **Additions and Updates:**| | | `ms.updated(k, v)`
or `ms + (k -> v)` |The map containing all mappings of `ms` as well as the mapping `k -> v` from key `k` to value `v`.| | **Removals:** | | -| `ms.remove(k)`
or `ms - k` |The map containing all mappings of `ms` except for any mapping of key `k`.| -| `ms.removeAll(ks)`
or `ms -- ks` |The map containing all mappings of `ms` except for any mapping with a key in `ks`.| +| `ms.removed(k)`
or `ms - k` |The map containing all mappings of `ms` except for any mapping of key `k`.| +| `ms.removedAll(ks)`
or `ms -- ks` |The map containing all mappings of `ms` except for any mapping with a key in `ks`.| Mutable maps support in addition the operations summarized in the following table. @@ -64,7 +64,7 @@ Mutable maps support in addition the operations summarized in the following tabl | **Additions and Updates:** | | | `ms(k) = v` |(Or, written out, `ms.update(k, v)`). Adds mapping from key `k` to value `v` to map ms as a side effect, overwriting any previous mapping of `k`.| | `ms.addOne(k -> v)`
or `ms += (k -> v)` |Adds mapping from key `k` to value `v` to map `ms` as a side effect and returns `ms` itself.| -| `ms.addAll(xvs)`
or `ms ++= kvs` |Adds all mappings in `kvs` to `ms` as a side effect and returns `ms` itself.| +| `ms.addAll(kvs)`
or `ms ++= kvs` |Adds all mappings in `kvs` to `ms` as a side effect and returns `ms` itself.| | `ms.put(k, v)` |Adds mapping from key `k` to value `v` to `ms` and returns any value previously associated with `k` as an option.| | `ms.getOrElseUpdate(k, d)` |If key `k` is defined in map `ms`, return its associated value. Otherwise, update `ms` with the mapping `k -> d` and return `d`.| | **Removals:** | | diff --git a/_overviews/collections-2.13/seqs.md b/_overviews/collections-2.13/seqs.md index a5fe562ad..cabd0b8a0 100644 --- a/_overviews/collections-2.13/seqs.md +++ b/_overviews/collections-2.13/seqs.md @@ -103,7 +103,7 @@ Two often used implementations of buffers are `ListBuffer` and `ArrayBuffer`. A | ------ | ------ | | **Additions:** | | | `buf.append(x)`
or `buf += x` |Appends element `x` to buffer, and returns `buf` itself as result.| -| `buf.appendAll(xs)`
or`buf ++= xs` |Appends all elements in `xs` to buffer.| +| `buf.appendAll(xs)`
or `buf ++= xs` |Appends all elements in `xs` to buffer.| | `buf.prepend(x)`
or `x +=: buf` |Prepends element `x` to buffer.| | `buf.prependAll(xs)`
or `xs ++=: buf` |Prepends all elements in `xs` to buffer.| | `buf.insert(i, x)` |Inserts element `x` at index `i` in buffer.| diff --git a/_overviews/collections-2.13/trait-iterable.md b/_overviews/collections-2.13/trait-iterable.md index 3cf041028..21b28e228 100644 --- a/_overviews/collections-2.13/trait-iterable.md +++ b/_overviews/collections-2.13/trait-iterable.md @@ -133,7 +133,7 @@ res7: List[Int] = List(3, 4, 5) | `xs.count(p)` |The number of elements in `xs` that satisfy the predicate `p`.| | **Folds:** | | | `xs.foldLeft(z)(op)` |Apply binary operation `op` between successive elements of `xs`, going left to right and starting with `z`.| -| `xs.foldRight(z)(op)` |Apply binary operation `op` between successive elements of `xs`, going right to left and ending with `z`.| +| `xs.foldRight(z)(op)` |Apply binary operation `op` between successive elements of `xs`, going right to left and starting with `z`.| | `xs.reduceLeft(op)` |Apply binary operation `op` between successive elements of non-empty collection `xs`, going left to right.| | `xs.reduceRight(op)` |Apply binary operation `op` between successive elements of non-empty collection `xs`, going right to left.| | **Specific Folds:** | | diff --git a/_overviews/contribute/add-guides.md b/_overviews/contribute/add-guides.md index 5a4bed14f..4840739cd 100644 --- a/_overviews/contribute/add-guides.md +++ b/_overviews/contribute/add-guides.md @@ -338,7 +338,7 @@ You must also add the tutorial to the drop-down list in the navigation bar. To d --- - title: Getting Started - url: "/getting-started/index.html" + url: "/getting-started/install-scala.html" - title: Learn ... - title: Tutorials diff --git a/_overviews/contribute/bug-reporting-guide.md b/_overviews/contribute/bug-reporting-guide.md index c7bc2636d..d168797a2 100644 --- a/_overviews/contribute/bug-reporting-guide.md +++ b/_overviews/contribute/bug-reporting-guide.md @@ -65,7 +65,7 @@ If you cannot find your issue in the issue tracker, create a new bug. 
The detail Please make sure to fill in as many fields as possible. Make sure you've indicated the following: - 1. **Exact Scala version** that you are using. For example, `2.13.14` or `3.3.3`. If the bug happens in multiple versions indicate all of them. + 1. **Exact Scala version** that you are using. For example, `2.13.15` or `3.3.3`. If the bug happens in multiple versions indicate all of them. 2. **The component** that is affected by the bug. For example, the Standard Library, Scaladoc, etc. 3. **Labels** related to your issue. For example, if you think your issue is related to the typechecker, and if you have successfully minimized your issue, label your bug as "typechecker" and "minimized". Issue tracker will suggest names for existing labels as you type them so try not to create duplicates. 4. **Running environment**. Are you running on Linux? Windows? What JVM version are you using? diff --git a/_overviews/contribute/guide.md b/_overviews/contribute/guide.md index beffb0186..f5307a325 100644 --- a/_overviews/contribute/guide.md +++ b/_overviews/contribute/guide.md @@ -47,7 +47,7 @@ This is the impatient developer's checklist for the steps to submit a bug-fix pu 5. [Fix the bug, or implement the new small feature][hackers-implement], include new tests (yes, for bug fixes too). 6. [Test, rinse][hackers-test] and [test some more][partest-guide] until [all the tests pass][hackers-verify]. 7. [Commit your changes][hackers-commit] to your feature branch in your fork. Please choose your commit message based on the [Git Hygiene](https://github.com/scala/scala#user-content-git-hygiene) section of the Scala project README. -8. If necessary [re-write git history](https://git-scm.com/book/en/Git-Branching-Rebasing) so that [commits are organized by major steps to the fix/feature]( +8. 
If necessary [re-write git history](https://git-scm.com/book/en/v2/Git-Branching-Rebasing) so that [commits are organized by major steps to the fix/feature]( https://github.com/scala/scala#git-hygiene). For bug fixes, a single commit is requested, for features several commits may be desirable (but each separate commit must compile and pass all tests) 9. [Submit a pull request][hackers-submit]. 10. [Work with a reviewer](https://github.com/scala/scala#reviewing) to [get your pull request merged in][hackers-review]. diff --git a/_overviews/contribute/hacker-guide.md b/_overviews/contribute/hacker-guide.md index e78df88b1..ea77feee0 100644 --- a/_overviews/contribute/hacker-guide.md +++ b/_overviews/contribute/hacker-guide.md @@ -51,7 +51,7 @@ Hacking Scala begins with creating a branch for your work item. To develop Scala and [GitHub](https://github.com/). This section of the guide provides a short walkthrough, but if you are new to Git, it probably makes sense to familiarize yourself with Git first. We recommend -* the [Git Pro](https://git-scm.com/book/en/) online book. +* the [Git Pro](https://git-scm.com/book/en/v2) online book. * the help page on [Forking a Git Repository](https://help.github.com/articles/fork-a-repo). * this great training tool [LearnGitBranching](https://pcottle.github.io/learnGitBranching/). One-hour hands-on training helps more than 1000 hours reading. @@ -96,7 +96,7 @@ Since in our example, we're going to fix an existing bug 16:39 ~/Projects/scala (master)$ git checkout -b ticket/6725 Switched to a new branch 'ticket/6725' -If you are new to Git and branching, read the [Branching Chapter](https://git-scm.com/book/en/Git-Branching) in the Git Pro book. +If you are new to Git and branching, read the [Branching Chapter](https://git-scm.com/book/en/v2/Git-Branching-Branches-in-a-Nutshell) in the Git Pro book. ### Build @@ -333,11 +333,11 @@ Let's go into each of these points in more detail. 
### Commit -The [Git Basics](https://git-scm.com/book/en/Git-Basics) chapter in the Git online book covers most of the basic workflow during this stage. +The [Git Basics](https://git-scm.com/book/en/v2/Git-Basics-Getting-a-Git-Repository) chapter in the Git online book covers most of the basic workflow during this stage. There are two things you should know here: 1. Commit messages are often the only way to understand the intentions of authors of code written a few years ago. Thus, writing a quality commit message is of utmost importance. The more context you provide for the change you've introduced, the larger the chance that some future maintainer understands your intentions. Consult [the pull request policies](https://github.com/scala/scala/blob/2.12.x/CONTRIBUTING.md) for more information about the desired style of your commits. -2. Keeping Scala's git history clean is also important. Therefore we won't accept pull requests for bug fixes that have more than one commit. For features, it is okay to have several commits, but all tests need to pass after every single commit. To clean up your commit structure, you want to [rewrite history](https://git-scm.com/book/en/Git-Branching-Rebasing) using `git rebase` so that your commits are against the latest revision of `master`. +2. Keeping Scala's git history clean is also important. Therefore we won't accept pull requests for bug fixes that have more than one commit. For features, it is okay to have several commits, but all tests need to pass after every single commit. To clean up your commit structure, you want to [rewrite history](https://git-scm.com/book/en/v2/Git-Branching-Rebasing) using `git rebase` so that your commits are against the latest revision of `master`. Once you are satisfied with your work, synced with `master` and cleaned up your commits you are ready to submit a patch to the central Scala repository. Before proceeding make sure you have pushed all of your local changes to your fork on GitHub. 
diff --git a/_overviews/core/binary-compatibility-of-scala-releases.md b/_overviews/core/binary-compatibility-of-scala-releases.md index ebe205403..e65407c92 100644 --- a/_overviews/core/binary-compatibility-of-scala-releases.md +++ b/_overviews/core/binary-compatibility-of-scala-releases.md @@ -23,7 +23,7 @@ Thus, backward compatibility precludes the removal of (non-private) methods, as For Scala 2, the *minor* version is the *third* number in a version, e.g., 10 in v2.13.10. The major version is the second number, which is 13 in our example. -Scala 2 up to 2.13.14 guarantees both backward and forward compatibility across *minor* releases within a single major release. +Scala 2 up to 2.13.15 guarantees both backward and forward compatibility across *minor* releases within a single major release. This is about to change now that [SIP-51 has been accepted](https://docs.scala-lang.org/sips/drop-stdlib-forwards-bin-compat.html), future Scala 2.13 releases may be backward compatible only. For Scala 3, the minor version is the *second* number in a version, e.g., 2 in v3.2.1. diff --git a/_overviews/core/implicit-classes.md b/_overviews/core/implicit-classes.md index eca05c593..ed141370c 100644 --- a/_overviews/core/implicit-classes.md +++ b/_overviews/core/implicit-classes.md @@ -77,7 +77,7 @@ Implicit classes have the following restrictions: **2. They may only take one non-implicit argument in their constructor.** - implicit class RichDate(date: java.util.Date) // OK! + implicit class RichDate(date: java.time.LocalDate) // OK! implicit class Indexer[T](collection: Seq[T], index: Int) // BAD! implicit class Indexer[T](collection: Seq[T])(implicit index: Index) // OK! 
diff --git a/_overviews/core/nightlies.md b/_overviews/core/nightlies.md new file mode 100644 index 000000000..8155ea2bf --- /dev/null +++ b/_overviews/core/nightlies.md @@ -0,0 +1,87 @@ +--- +layout: singlepage-overview +title: Nightly Versions of Scala +permalink: /overviews/core/:title.html +--- + +We regularly publish nightly versions of both Scala 3 and 2 so that users can preview and test the contents of upcoming releases. + +Here's how to find and use these versions. + +## Scala 3 + +Scala 3 nightly versions are published to Maven Central. If you know the full version number of the nightly you want to use, you can use it just like any other Scala 3 version. + +One quick way to get that version number is to visit [https://dotty.epfl.ch](https://dotty.epfl.ch) and look in the upper left corner. + +Another way is to scrape Maven Central, as shown in this script: [https://raw.githubusercontent.com/VirtusLab/community-build3/master/scripts/lastVersionNightly.sc](https://raw.githubusercontent.com/VirtusLab/community-build3/master/scripts/lastVersionNightly.sc) + +A third way is to use [scala-cli](https://scala-cli.virtuslab.org), as follows. (Since Scala 3.5.0, the `scala` command runs `scala-cli`.) + +### scala-cli + +You can run nightlies with commands such as: + + scala-cli -S 3.nightly + scala-cli -S 3.3.nightly + +The default command is `repl`, but all the other scala-cli subcommands such as `compile` and `run` work, too. It also works with `//>` directives in your script itself, for example: + + //> using scala 3.nightly + +See this [scala-cli doc page](https://scala-cli.virtuslab.org/docs/commands/compile#scala-nightlies) for details. + +## Scala 2.13 or 2.12 + +We informally refer to Scala 2 “nightly” versions, but technically it's a misnomer. A so-called “nightly” is built for every merged PR. + +Scala 2 nightly versions are published to a special resolver. 
Unless you are using scala-cli, you'll need to add that resolver to your build configuration in order to use these versions. + +### quick version (sbt) + + Global / resolvers += "scala-integration" at + "https://scala-ci.typesafe.com/artifactory/scala-integration/" + scalaVersion := "2.13.15-bin-abcd123" + +For a 2.12 nightly, substitute e.g. `2.12.20` for `2.13.15`; in either case, it's the version number of the _next_ release on that branch. + +For `abcd123`, substitute the first 7 characters of the SHA of the latest commit to the [2.13.x branch](https://github.com/scala/scala/commits/2.13.x) or [2.12.x branch](https://github.com/scala/scala/commits/2.12.x) that has a green checkmark. (Clicking the checkmark will show a CI job name with the whole version in its name.) + +A quick way to find out the full version number of a current nightly is to use [scala-cli](https://scala-cli.virtuslab.org), as follows. + +### quick version (scala-cli) + +You can run nightlies with: + + scala-cli -S 2.13.nightly + scala-cli -S 2.nightly # same as 2.13.nightly + scala-cli -S 2.12.nightly + +The default command is `repl`, but all the other scala-cli subcommands such as `compile` and `run` work, too. It also works with `//>` directives in your script itself, for example: + + //> using scala 2.nightly + +### Longer explanation + +We no longer publish `-SNAPSHOT` versions of Scala 2. + +But the team does publish nightly versions, each with its own fixed version number. The version number of a nightly looks like e.g. `2.13.1-bin-abcd123`. (`-bin-` signals binary compatibility to sbt; all 2.13.x releases since 2.13.0 are binary compatible with each other.) + +To tell sbt to use one of these nightlies, you need to do three things. 
+ +First, add the resolver where the nightlies are kept: + + Global / resolvers += "scala-integration" at + "https://scala-ci.typesafe.com/artifactory/scala-integration/" + +Second, specify the Scala version: + + scalaVersion := "2.13.1-bin-abcd123" + +But that isn't a real version number. Manually substitute a version number containing the 7-character SHA of the last commit in the [scala/scala repository](https://github.com/scala/scala) for which a nightly version was published. Look at [https://travis-ci.org/scala/scala/branches](https://travis-ci.org/scala/scala/branches) and you'll see the SHA in the upper right corner of the 2.13.x (or 2.12.x) section. + +As soon as 2.13.1 is released, the version number in the nightly will bump to 2.13.2, and so on. + +If you have a multiproject build, be sure you set these settings across all projects when you modify your build definition. Or, you may set them temporarily in the sbt shell with `++2.13.1-bin-abcd123` (sbt 0.13.x) or `++2.13.1-bin-abcd123!` (sbt 1.x; the added exclamation point is necessary to force a version not included in `crossScalaVersions` to be used). + +Ideally, we would suggest an automated way to ask Travis-CI for the right SHA. This is presumably possible via Travis-CI's API, but as far as we know, nobody has looked into it yet. (Is there a volunteer?) diff --git a/_overviews/getting-started/index.md b/_overviews/getting-started/index.md deleted file mode 100644 index 8c33b915d..000000000 --- a/_overviews/getting-started/index.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -layout: singlepage-overview -title: Getting Started -partof: getting-started -languages: [fr, ja, ru, uk] -includeTOC: true -newcomer_resources: - - title: Are You Coming From Java? - description: What you should know to get to speed with Scala after your initial setup. 
- icon: "fa fa-coffee" - link: /tutorials/scala-for-java-programmers.html - - title: Scala in the Browser - description: > - To start experimenting with Scala right away, use "Scastie" in your browser. - icon: "fa fa-cloud" - link: https://scastie.scala-lang.org/pEBYc5VMT02wAGaDrfLnyw - -redirect_from: - - /getting-started.html - - /scala3/getting-started.html # we deleted the scala 3 version of this page ---- - -The instructions below cover both Scala 2 and Scala 3. - -
-{% altDetails need-help-info-box 'Need Help?' class=help-info %} -*If you are having trouble with setting up Scala, feel free to ask for help in the `#scala-users` channel of -[our Discord](https://discord.com/invite/scala).* -{% endaltDetails %} -
- -## Resources For Newcomers - -{% include inner-documentation-sections.html links=page.newcomer_resources %} - -## Install Scala on your computer - -Installing Scala means installing various command-line tools such as the Scala compiler and build tools. -We recommend using the Scala installer tool "Coursier" that automatically installs all the requirements, but you can still manually install each tool. - -### Using the Scala Installer (recommended way) - -The Scala installer is a tool named [Coursier](https://get-coursier.io/docs/cli-overview), whose main command is named `cs`. -It ensures that a JVM and standard Scala tools are installed on your system. -Install it on your system with the following instructions. - - -{% tabs install-cs-setup-tabs class=platform-os-options %} - - -{% tab macOS for=install-cs-setup-tabs %} -Run the following command in your terminal, following the on-screen instructions: -{% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-brew %} -{% altDetails cs-setup-macos-nobrew "Alternatively for Apple Silicon, or if you don't use Homebrew:" %} - On the Apple Silicon (M1, M2, …) architecture: - {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-arm64 %} - Otherwise, on the x86-64 architecture: - {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-x86-64 %} -{% endaltDetails %} -{% endtab %} - - - -{% tab Linux for=install-cs-setup-tabs %} - Run the following command in your terminal, following the on-screen instructions. 
- - On the x86-64 architecture: - {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.linux-x86-64 %} - Otherwise, on the ARM64 architecture: - {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.linux-arm64 %} -{% endtab %} - - - -{% tab Windows for=install-cs-setup-tabs %} - Download and execute [the Scala installer for Windows]({{site.data.setup-scala.windows-link}}) - based on Coursier, and follow the on-screen instructions. -{% endtab %} - - - -{% tab Other for=install-cs-setup-tabs defaultTab %} - - Follow the documentation from Coursier on - [how to install and run `cs setup`](https://get-coursier.io/docs/cli-installation). -{% endtab %} - - -{% endtabs %} - - ->    You may need to restart your terminal, log out, -> or reboot in order for the changes to take effect. -{: .help-info} - - -{% altDetails testing-your-setup 'Testing your setup' %} -Check your setup with the command `scala -version`, which should output: -```bash -$ scala -version -Scala code runner version {{site.scala-3-version}} -- Copyright 2002-2022, LAMP/EPFL -``` -{% endaltDetails %} - - - -Along with managing JVMs, `cs setup` also installs useful command-line tools: - -| Commands | Description | -|----------|-------------| -| `scalac` | the Scala compiler | -| `scala` | the Scala REPL and script runner | -| `scala-cli`| [Scala CLI](https://scala-cli.virtuslab.org), interactive toolkit for Scala | -| `sbt`, `sbtn` | The [sbt](https://www.scala-sbt.org/) build tool | -| `amm` | [Ammonite](https://ammonite.io/) is an enhanced REPL | -| `scalafmt` | [Scalafmt](https://scalameta.org/scalafmt/) is the Scala code formatter | - -For more information about `cs`, read -[coursier-cli documentation](https://get-coursier.io/docs/cli-overview). - -> `cs setup` installs the Scala 3 compiler and runner by default (the `scalac` and -> `scala` commands, respectively). 
Whether you intend to use Scala 2 or 3, -> this is usually not an issue because most projects use a build tool that will -> use the correct version of Scala irrespective of the one installed "globally". -> Nevertheless, you can always launch a specific version of Scala using -> ``` -> $ cs launch scala:{{ site.scala-version }} -> $ cs launch scalac:{{ site.scala-version }} -> ``` -> If you prefer Scala 2 to be run by default, you can force that version to be installed with: -> ``` -> $ cs install scala:{{ site.scala-version }} scalac:{{ site.scala-version }} -> ``` - -### ...or manually - -You only need two tools to compile, run, test, and package a Scala project: Java 8 or 11, -and sbt. -To install them manually: - -1. if you don't have Java 8 or 11 installed, download - Java from [Oracle Java 8](https://www.oracle.com/java/technologies/javase-jdk8-downloads.html), [Oracle Java 11](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html), - or [AdoptOpenJDK 8/11](https://adoptopenjdk.net/). Refer to [JDK Compatibility](/overviews/jdk-compatibility/overview.html) for Scala/Java compatibility detail. -1. Install [sbt](https://www.scala-sbt.org/download.html) - -## Create a "Hello World" project with sbt - -Once you have installed sbt, you are ready to create a Scala project, which -is explained in the following sections. - -To create a project, you can either use the command line or an IDE. -If you are familiar with the command line, we recommend that approach. - -### Using the command line - -sbt is a build tool for Scala. sbt compiles, runs, -and tests your Scala code. (It can also publish libraries and do many other tasks.) - -To create a new Scala project with sbt: - -1. `cd` to an empty folder. -1. Run the command `sbt new scala/scala3.g8` to create a Scala 3 project, or `sbt new scala/hello-world.g8` to create a Scala 2 project. - This pulls a project template from GitHub. - It will also create a `target` folder, which you can ignore. -1. 
When prompted, name the application `hello-world`. This will - create a project called "hello-world". -1. Let's take a look at what just got generated: - -``` -- hello-world - - project (sbt uses this for its own files) - - build.properties - - build.sbt (sbt's build definition file) - - src - - main - - scala (all of your Scala code goes here) - - Main.scala (Entry point of program) <-- this is all we need for now -``` - -More documentation about sbt can be found in the [Scala Book](/scala3/book/tools-sbt.html) (see [here](/overviews/scala-book/scala-build-tool-sbt.html) for the Scala 2 version) -and in the official sbt [documentation](https://www.scala-sbt.org/1.x/docs/index.html) - -### With an IDE - -You can skip the rest of this page and go directly to [Building a Scala Project with IntelliJ and sbt](/getting-started/intellij-track/building-a-scala-project-with-intellij-and-sbt.html) - - -## Open hello-world project - -Let's use an IDE to open the project. The most popular ones are [IntelliJ](https://www.jetbrains.com/idea/) and -[VSCode](https://scalameta.org/metals/docs/editors/vscode). -They both offer rich IDE features, but you can still use [many other editors](https://scalameta.org/metals/docs/editors/overview.html). - -### Using IntelliJ - -1. Download and install [IntelliJ Community Edition](https://www.jetbrains.com/help/idea/installation-guide.html) -1. Install the Scala plugin by following [the instructions on how to install IntelliJ plugins](https://www.jetbrains.com/help/idea/discover-intellij-idea-for-scala.html) -1. Open the `build.sbt` file then choose *Open as a project* - -### Using VSCode with metals - -1. Download [VSCode](https://code.visualstudio.com/Download) -1. Install the Metals extension from [the Marketplace](https://marketplace.visualstudio.com/items?itemName=scalameta.metals) -1. Next, open the directory containing a `build.sbt` file (this should be the directory `hello-world` if you followed the previous instructions). 
When prompted to do so, select *Import build*. - -### Play with the source code - -View these two files in your IDE: - -- _build.sbt_ -- _src/main/scala/Main.scala_ - -When you run your project in the next step, the configuration in _build.sbt_ will be used to run the code in _src/main/scala/Main.scala_. - -## Run Hello World - -If you’re comfortable using your IDE, you can run the code in _Main.scala_ from your IDE. - -Otherwise, you can run the application from a terminal with these steps: - -1. `cd` into `hello-world`. -1. Run `sbt`. This opens up the sbt console. -1. Type `~run`. The `~` is optional and causes sbt to re-run on every file save, - allowing for a fast edit/run/debug cycle. sbt will also generate a `target` directory - which you can ignore. - -When you’re finished experimenting with this project, press `[Enter]` to interrupt the `run` command. -Then type `exit` or press `[Ctrl+D]` to exit sbt and return to your command line prompt. - -## Next Steps - -Once you've finished the above tutorials, consider checking out: - -* [The Scala Book](/scala3/book/introduction.html) (see the Scala 2 version [here](/overviews/scala-book/introduction.html)), which provides a set of short lessons introducing Scala’s main features. -* [The Tour of Scala](/tour/tour-of-scala.html) for bite-sized introductions to Scala's features. -* [Learning Resources](/learn.html), which includes online interactive tutorials and courses. -* [Our list of some popular Scala books](/books.html). -* [The migration guide](/scala3/guides/migration/compatibility-intro.html) helps you to migrate your existing Scala 2 code base to Scala 3. - -## Getting Help -There are a multitude of mailing lists and real-time chat rooms in case you want to quickly connect with other Scala users. Check out our [community](https://scala-lang.org/community/) page for a list of these resources, and for where to reach out for help. 
diff --git a/_overviews/getting-started/install-scala.md b/_overviews/getting-started/install-scala.md new file mode 100644 index 000000000..9514530fa --- /dev/null +++ b/_overviews/getting-started/install-scala.md @@ -0,0 +1,345 @@ +--- +layout: singlepage-overview +title: Getting Started +partof: getting-started +languages: [fr, ja, ru, uk] +includeTOC: true +newcomer_resources: + - title: Are You Coming From Java? + description: What you should know to get to speed with Scala after your initial setup. + icon: "fa fa-coffee" + link: /tutorials/scala-for-java-programmers.html + - title: Scala in the Browser + description: > + To start experimenting with Scala right away, use "Scastie" in your browser. + icon: "fa fa-cloud" + link: https://scastie.scala-lang.org/pEBYc5VMT02wAGaDrfLnyw + +redirect_from: + - /getting-started.html + - /scala3/getting-started.html # we deleted the scala 3 version of this page +--- + +The instructions below cover both Scala 2 and Scala 3. + +
+{% altDetails need-help-info-box 'Need Help?' class=help-info %} +*If you are having trouble with setting up Scala, feel free to ask for help in the `#scala-users` channel of +[our Discord](https://discord.com/invite/scala).* +{% endaltDetails %} +
+ +## Resources For Newcomers + +{% include inner-documentation-sections.html links=page.newcomer_resources %} + +## Install Scala on your computer + +Installing Scala means installing various command-line tools such as the Scala compiler and build tools. +We recommend using the Scala installer tool "Coursier" that automatically installs all the requirements, but you can still manually install each tool. + +### Using the Scala Installer (recommended way) + +The Scala installer is a tool named [Coursier](https://get-coursier.io/docs/cli-overview), whose main command is named `cs`. +It ensures that a JVM and standard Scala tools are installed on your system. +Install it on your system with the following instructions. + + +{% tabs install-cs-setup-tabs class=platform-os-options %} + + +{% tab macOS for=install-cs-setup-tabs %} +Run the following command in your terminal, following the on-screen instructions: +{% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-brew %} +{% altDetails cs-setup-macos-nobrew "Alternatively, if you don't use Homebrew:" %} + On the Apple Silicon (M1, M2, …) architecture: + {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-arm64 %} + Otherwise, on the x86-64 architecture: + {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.macOS-x86-64 %} +{% endaltDetails %} +{% endtab %} + + + +{% tab Linux for=install-cs-setup-tabs %} + Run the following command in your terminal, following the on-screen instructions. 
+ + On the x86-64 architecture: + {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.linux-x86-64 %} + Otherwise, on the ARM64 architecture: + {% include code-snippet.html language='bash' codeSnippet=site.data.setup-scala.linux-arm64 %} +{% endtab %} + + + +{% tab Windows for=install-cs-setup-tabs %} + Download and execute [the Scala installer for Windows]({{site.data.setup-scala.windows-link}}) + based on Coursier, and follow the on-screen instructions. +{% endtab %} + + + +{% tab Other for=install-cs-setup-tabs defaultTab %} + + Follow the documentation from Coursier on + [how to install and run `cs setup`](https://get-coursier.io/docs/cli-installation). +{% endtab %} + + +{% endtabs %} + + +>    You may need to restart your terminal, log out, +> or reboot in order for the changes to take effect. +{: .help-info} + + +{% altDetails testing-your-setup 'Testing your setup' %} +Check your setup with the command `scala -version`, which should output: +```bash +$ scala -version +Scala code runner version: 1.4.3 +Scala version (default): {{site.scala-3-version}} +``` +{% endaltDetails %} + + + +Along with managing JVMs, `cs setup` also installs useful command-line tools: + +| Commands | Description | +|----------|-------------| +| `scalac` | the Scala compiler | +| `scala`, `scala-cli` | [Scala CLI](https://scala-cli.virtuslab.org), interactive toolkit for Scala | +| `sbt`, `sbtn` | The [sbt](https://www.scala-sbt.org/) build tool | +| `amm` | [Ammonite](https://ammonite.io/) is an enhanced REPL | +| `scalafmt` | [Scalafmt](https://scalameta.org/scalafmt/) is the Scala code formatter | + +For more information about `cs`, read +[coursier-cli documentation](https://get-coursier.io/docs/cli-overview). + +> `cs setup` installs the Scala 3 compiler and runner by default (the `scalac` and +> `scala` commands, respectively). 
Whether you intend to use Scala 2 or 3, +> this is usually not an issue because most projects use a build tool that will +> use the correct version of Scala irrespective of the one installed "globally". +> Nevertheless, you can always launch a specific version of Scala using +> ``` +> $ cs launch scala:{{ site.scala-version }} +> $ cs launch scalac:{{ site.scala-version }} +> ``` +> If you prefer Scala 2 to be run by default, you can force that version to be installed with: +> ``` +> $ cs install scala:{{ site.scala-version }} scalac:{{ site.scala-version }} +> ``` + +### ...or manually + +You only need two tools to compile, run, test, and package a Scala project: Java 8 or 11, +and Scala CLI. +To install them manually: + +1. if you don't have Java 8 or 11 installed, download + Java from [Oracle Java 8](https://www.oracle.com/java/technologies/javase-jdk8-downloads.html), [Oracle Java 11](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html), + or [AdoptOpenJDK 8/11](https://adoptopenjdk.net/). Refer to [JDK Compatibility](/overviews/jdk-compatibility/overview.html) for Scala/Java compatibility detail. +1. Install [Scala CLI](https://scala-cli.virtuslab.org/install) + +## Using the Scala CLI + +In a directory of your choice, which we will call ``, create a file named `hello.scala` with the following code: +```scala +//> using scala {{site.scala-3-version}} + +@main +def hello(): Unit = + println("Hello, World!") +``` + +You can define a method with the `def` keyword and mark it as a "main" method with the `@main` annotation, designating it as +the entry point in program execution. The method's type is `Unit`, which means it does not return a value. `Unit` +can be thought of as an analogue to the `void` keyword found in other languages. The `println` method will print the `"Hello, World!"` +string to standard output. + +To run the program, execute `scala run hello.scala` command from a terminal, within the `` directory. 
The file will be compiled and executed, with console output +similar to following: +``` +$ scala run hello.scala +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) +Hello, World! +``` + +### Handling command-line arguments + +Rewrite the `hello.scala` file so that the program greets the person running it. +```scala +//> using scala {{site.scala-3-version}} + +@main +def hello(name: String): Unit = + println(s"Hello, $name!") +``` + +The `name` argument is expected to be provided when executing the program, and if it's not found, the execution will fail. +The `println` method receives an interpolated string, as indicated by the `s` letter preceding its content. `$name` will be substituted by +the content of the `name` argument. + +To pass the arguments when executing the program, put them after `--`: +``` +$ scala run hello.scala -- Gabriel +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) +Hello, Gabriel! +``` + +You can read more about [main methods](/scala3/book/methods-main-methods.html) and [string interpolation](/scala3/book/string-interpolation.html) in the Scala Book. + +### Adding dependencies + +We now write a program that will count the files and directories present in its working directory. +We use the [os-lib](https://github.com/com-lihaoyi/os-lib) library from the [Scala toolkit](/toolkit/introduction.html) +for that purpose. A dependency on the library can be added with the `//> using` directive. Put the following code in `counter.scala`. +```scala +//> using scala {{site.scala-3-version}} +//> using dep "com.lihaoyi::os-lib:0.10.7" + +@main +def countFiles(): Unit = + val paths = os.list(os.pwd) + println(paths.length) +``` + +In the code above, `os.pwd` returns the current working directory. We pass it to `os.list`, which returns a sequence +of paths directly within the directory passed as an argument. 
We use a `val` to declare an immutable value, in this example storing the +sequence of paths. + +Execute the program. The dependency will be automatically downloaded. The execution should result in a similar output: +``` +$ scala run counter.scala +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) +4 +``` +The printed number should be 4: `hello.scala`, `counter.scala` and two hidden directories created automatically when a program is executed: +`.bsp` containing information about project used by IDEs, and `.scala-build` containing the results of compilation. + +As it turns out, the `os-lib` library is a part of Scala Toolkit, a collection of libraries recommended for tasks like testing, +operating system interaction or handling JSONs. You can read more about the libraries included in the toolkit [here](/toolkit/introduction.html). +To include the toolkit libraries, use the `//> using toolkit 0.5.0` directive: +```scala +//> using scala {{site.scala-3-version}} +//> using toolkit 0.5.0 + +@main +def countFiles(): Unit = + val paths = os.list(os.pwd) + println(paths.length) +``` + +This program is identical to the one above. However, other toolkit libraries will also be available to use, should you need them. + +### Using the REPL + +You can execute code interactively using the REPL provided by the `scala` command. Execute `scala` in the console without any arguments. +``` +$ scala +Welcome to Scala {{site.scala-3-version}} (20-ea, Java OpenJDK 64-Bit Server VM). +Type in expressions for evaluation. Or try :help. + +scala> +``` + +Write a line of code to be executed and press enter. +``` +scala> println("Hello, World!") +Hello, World! + +scala> +``` + +The result will be printed immediately after executing the line. You can declare values: +``` +scala> val i = 1 +val i: Int = 1 + +scala> +``` + +A new value of type `Int` has been created. 
If you provide an expression that can be evaluated, its result will be stored in an automatically created value. +``` +scala> i + 3 +val res0: Int = 4 + +scala> +``` +You can exit the REPL with `:exit`. + +## Using an IDE + +> You can read a short summary of Scala IDEs on [a dedicated page](/getting-started/scala-ides.html). + +Let's use an IDE to open the code we wrote above. The most popular ones are [IntelliJ](https://www.jetbrains.com/idea/) and +[VSCode](https://scalameta.org/metals/docs/editors/vscode). +They both offer rich IDE features, but you can still use [many other editors](https://scalameta.org/metals/docs/editors/overview.html). + +### Prepare the project + +First, remove all the using directives, and put them in a single file `project.scala` in the `` directory. +This makes it easier to import as a project in an IDE: + +```scala +//> using scala {{site.scala-3-version}} +//> using toolkit 0.5.0 +``` + +> Optionally, you can re-initialise the necessary IDE files from within the `` directory with the command `scala setup-ide .`, but these files will already exist if you have previously run the project with the Scala CLI `run` command. + +### Using IntelliJ + +1. Download and install [IntelliJ Community Edition](https://www.jetbrains.com/help/idea/installation-guide.html) +1. Install the Scala plugin by following [the instructions on how to install IntelliJ plugins](https://www.jetbrains.com/help/idea/discover-intellij-idea-for-scala.html) +1. Open the `` directory, which should be imported automatically as a BSP project. + +### Using VSCode with Metals + +1. Download [VSCode](https://code.visualstudio.com/Download) +1. Install the Metals extension from [the Marketplace](https://marketplace.visualstudio.com/items?itemName=scalameta.metals) +1. Next, open the `` directory in VSCode. Metals should activate and begin importing the project automatically. 
+
+### Play with the source code
+
+View these three files in your IDE:
+
+- _project.scala_
+- _hello.scala_
+- _counter.scala_
+
+You should notice the benefits of an IDE, such as syntax highlighting and smart code interactions.
+For example, you can place the cursor over any part of the code, such as `os.pwd` in _counter.scala_, and documentation for the method will appear.
+
+When you run your project in the next step, the configuration in _project.scala_ will be used to run the code in the other source files.
+
+### Run the code
+
+If you’re comfortable using your IDE, you can run the code in _counter.scala_ from your IDE.
+Attached to the `countFiles` method should be a prompt button. Click it to run the method. This should run without issue.
+The `hello` method in _hello.scala_ needs arguments, however, so will require extra configuration via the IDE to provide the argument.
+
+Otherwise, you can run either application from the IDE's built-in terminal as described in the sections above.
+
+## Next steps
+
+Now that you have tasted a little bit of Scala, you can further explore the language itself; consider checking out:
+
+* [The Scala Book](/scala3/book/introduction.html) (see the Scala 2 version [here](/overviews/scala-book/introduction.html)), which provides a set of short lessons introducing Scala’s main features.
+* [The Tour of Scala](/tour/tour-of-scala.html) for bite-sized introductions to Scala's features.
+* [Learning Resources](/learn.html), which includes online interactive tutorials and courses.
+* [Our list of some popular Scala books](/books.html).
+
+There are also tutorials for other build tools you can use with Scala:
+* [Getting Started with Scala and sbt](/getting-started/sbt-track/getting-started-with-scala-and-sbt-on-the-command-line.html)
+* [Using Scala and Maven](/tutorials/scala-with-maven.html)
+
+## Getting Help
+There are a multitude of mailing lists and real-time chat rooms in case you want to quickly connect with other Scala users. Check out our [community](https://scala-lang.org/community/) page for a list of these resources, and for where to reach out for help.
diff --git a/_overviews/getting-started/scala-ides.md b/_overviews/getting-started/scala-ides.md
new file mode 100644
index 000000000..9f210d4b1
--- /dev/null
+++ b/_overviews/getting-started/scala-ides.md
@@ -0,0 +1,55 @@
+---
+layout: singlepage-overview
+title: Scala IDEs
+
+partof: scala-ides
+
+permalink: /getting-started/:title.html
+
+keywords:
+- Scala
+- IDE
+- JetBrains
+- IntelliJ
+- VSCode
+- Metals
+---
+
+It's of course possible to write Scala code in any editor and compile and run the code from the command line. But most developers prefer to use an IDE (Integrated Development Environment), especially for coding anything beyond simple exercises.
+
+The following IDEs are available for Scala:
+
+## IntelliJ IDEA + Scala plugin
+
+[https://jetbrains.com/scala](https://jetbrains.com/scala)
+
+![](../../resources/images/getting-started/IntelliJScala.png)
+
+IntelliJ IDEA is a cross-platform IDE developed by JetBrains that provides a consistent experience for a wide range of programming languages and technologies. It also supports Scala through the IntelliJ Scala Plugin, which is being developed at JetBrains. First, install IntelliJ IDEA Community Edition (unless you already use the Ultimate edition) and then add the IntelliJ Scala Plugin.
+
+IntelliJ IDEA and Scala Plugin will assist you in virtually every part of a Scala software developer's work.
Use it if you like a solid integrated experience, sane default settings, and tested solutions. + +For more information, check out our tutorial [Getting Started with Scala in IntelliJ](/getting-started/intellij-track/building-a-scala-project-with-intellij-and-sbt.html) + +## Visual Studio Code + Metals + +[https://scalameta.org/metals](https://scalameta.org/metals) + +![](../../resources/images/getting-started/VSCodeMetals.png) + +Visual Studio Code, commonly called VS Code, is a source code editor from Microsoft. To add Scala support, you install an extension called Metals. + +(Why "Metals"? Because the underlying technologies are Scalameta and LSP ([Language Server Protocol](https://microsoft.github.io/language-server-protocol/)), and "Meta" + "LS" equals "Metals".) + +In contrast to IntelliJ IDEA + Scala Plugin, VS Code + Metals is aimed at people who like to get feedback and code intelligence straight from the compiler, which enables them to also try out experimental Scala features. + +## Your favorite editor + Metals + +Metals is most commonly used with VS Code, but it's also available for the following popular editors: + +* Emacs +* Vim +* Sublime Text +* Helix + +as documented [here](https://scalameta.org/metals/docs/#editor-support). diff --git a/_overviews/jdk-compatibility/overview.md b/_overviews/jdk-compatibility/overview.md index 8d8849cc7..99b953306 100644 --- a/_overviews/jdk-compatibility/overview.md +++ b/_overviews/jdk-compatibility/overview.md @@ -14,7 +14,7 @@ Minimum Scala versions: | JDK | 3 | 2.13 | 2.12 | 2.11 | |:-----------:|:--------:|:---------:|:---------:|:----------:| -| 23 (ea) | 3.3.5* | 2.13.15* | 2.12.20* | | +| 23 | 3.3.5* | 2.13.15 | 2.12.20 | | | 22 | 3.3.4* | 2.13.13 | 2.12.19 | | | 21 (LTS) | 3.3.1 | 2.13.11 | 2.12.18 | | | 17 (LTS) | 3.0.0 | 2.13.6 | 2.12.15 | | @@ -119,16 +119,18 @@ Scala 2.13.13+ and 2.12.19+ support JDK 22. We are working on adding JDK 22 support to the 3.3.x release series. 
(Support may be available in nightly builds.) -For possible Scala issues, see the [jdk11](https://github.com/scala/bug/labels/jdk11), [jdk17](https://github.com/scala/bug/labels/jdk17), and [jdk21](https://github.com/scala/bug/labels/jdk21) labels in the Scala 2 bug tracker. +For possible Scala 2 issues, see the [jdk11](https://github.com/scala/bug/labels/jdk11), [jdk17](https://github.com/scala/bug/labels/jdk17), and [jdk21](https://github.com/scala/bug/labels/jdk21) labels in the Scala 2 bug tracker. ## JDK 23 compatibility notes -Early access builds of JDK 23 are available. JDK 23 will be non-LTS. +JDK 23 is non-LTS. -We are working on adding JDK 23 support to Scala 3 and Scala 2. -(Support may be available in nightly builds.) +Scala 2.13.15+ and Scala 2.12.20+ support JDK 23. -For possible Scala issues, see the [jdk11](https://github.com/scala/bug/labels/jdk11), [jdk17](https://github.com/scala/bug/labels/jdk17), and [jdk21](https://github.com/scala/bug/labels/jdk21) labels in the Scala 2 bug tracker. +We are working on adding JDK 23 support to Scala 3. +(Support may be available in nightly builds and/or release candidates.) + +For possible Scala 2 issues, see the [jdk11](https://github.com/scala/bug/labels/jdk11), [jdk17](https://github.com/scala/bug/labels/jdk17), and [jdk21](https://github.com/scala/bug/labels/jdk21) labels in the Scala 2 bug tracker. 
## GraalVM Native Image compatibility notes diff --git a/_overviews/plugins/index.md b/_overviews/plugins/index.md index ccbdad19e..0b1ea54d5 100644 --- a/_overviews/plugins/index.md +++ b/_overviews/plugins/index.md @@ -268,12 +268,12 @@ object foo extends ScalaModule { ``` Please notice, that compiler plugins are typically bound to the full -version of the compiler, hence you have to use the `:::` (instead of -normal `::`) between the organization and the artifact name, +version of the compiler, hence you have to use the `:::` (instead of +normal `::`) between the organization and the artifact name, to declare your dependency. For more information about plugin usage in Mill, please refer to the -[Mill documentation](https://com-lihaoyi.github.io/mill/mill/Configuring_Mill.html#_scala_compiler_plugins). +[Mill documentation for Scala compiler plugins](https://mill-build.org/mill/Scala_Module_Config.html#_scala_compiler_plugins). ## Developing compiler plugins with an IDE diff --git a/_overviews/scala-book/preliminaries.md b/_overviews/scala-book/preliminaries.md index cbc5221df..8308f5981 100644 --- a/_overviews/scala-book/preliminaries.md +++ b/_overviews/scala-book/preliminaries.md @@ -21,7 +21,7 @@ That being said, there are a few good things to know before you read this book. ## Installing Scala -First, to run the examples in this book you’ll need to install Scala on your computer. See our general [Getting Started]({{site.baseurl}}/getting-started/index.html) page for details on how to use Scala (a) in an IDE and (b) from the command line. +First, to run the examples in this book you’ll need to install Scala on your computer. See our general [Getting Started]({{site.baseurl}}/getting-started/install-scala.html) page for details on how to use Scala (a) in an IDE and (b) from the command line. 
diff --git a/_overviews/scala-book/scala-build-tool-sbt.md b/_overviews/scala-book/scala-build-tool-sbt.md index ebc04193a..c329d06aa 100644 --- a/_overviews/scala-book/scala-build-tool-sbt.md +++ b/_overviews/scala-book/scala-build-tool-sbt.md @@ -161,5 +161,5 @@ Here’s a list of other build tools you can use to build Scala projects: - [Ant](http://ant.apache.org/) - [Gradle](https://gradle.org/) - [Maven](https://maven.apache.org/) -- [Fury](https://propensive.com/opensource/fury) +- [Fury](https://github.com/propensive/fury) - [Mill](https://com-lihaoyi.github.io/mill/) diff --git a/_overviews/scala-book/two-types-variables.md b/_overviews/scala-book/two-types-variables.md index 8c6a105c7..3ce00a0e5 100644 --- a/_overviews/scala-book/two-types-variables.md +++ b/_overviews/scala-book/two-types-variables.md @@ -94,8 +94,7 @@ object Hello3 extends App { As before: - Save that code in a file named *Hello3.scala* -- Compile it with `scalac Hello3.scala` -- Run it with `scala Hello3` +- Compile and run it with `scala run Hello3.scala` diff --git a/_overviews/scala3-book/ca-implicit-conversions.md b/_overviews/scala3-book/ca-implicit-conversions.md index b35902d71..0fb665eda 100644 --- a/_overviews/scala3-book/ca-implicit-conversions.md +++ b/_overviews/scala3-book/ca-implicit-conversions.md @@ -150,7 +150,7 @@ object `Conversions`: import scala.language.implicitConversions object Conversions { - implicit def fromStringToUser(name: String): User = (name: String) => User(name) + implicit def fromStringToUser(name: String): User = User(name) } ~~~ {% endtab %} diff --git a/_overviews/scala3-book/control-structures.md b/_overviews/scala3-book/control-structures.md index 6598cb604..b982cf654 100644 --- a/_overviews/scala3-book/control-structures.md +++ b/_overviews/scala3-book/control-structures.md @@ -810,6 +810,53 @@ speak(Person("Bam Bam")) // "Bam Bam says, Bam bam!" 
{% endtab %} {% endtabs %} +#### Binding matched patterns to variables + +You can bind the matched pattern to a variable to use type-specific behavior. + +{% tabs pattern-binding class=tabs-scala-version %} +{% tab 'Scala 2' for=pattern-binding %} +```scala +trait Animal { + val name: String +} +case class Cat(name: String) extends Animal { + def meow: String = "Meow" +} +case class Dog(name: String) extends Animal { + def bark: String = "Bark" +} + +def speak(animal: Animal) = animal match { + case c @ Cat(name) if name == "Felix" => println(s"$name says, ${c.meow}!") + case d @ Dog(name) if name == "Rex" => println(s"$name says, ${d.bark}!") + case _ => println("I don't know you!") +} + +speak(Cat("Felix")) // "Felix says, Meow!" +speak(Dog("Rex")) // "Rex says, Bark!" +``` +{% endtab %} +{% tab 'Scala 3' for=pattern-binding %} +```scala +trait Animal: + val name: String +case class Cat(name: String) extends Animal: + def meow: String = "Meow" +case class Dog(name: String) extends Animal: + def bark: String = "Bark" + +def speak(animal: Animal) = animal match + case c @ Cat(name) if name == "Felix" => println(s"$name says, ${c.meow}!") + case d @ Dog(name) if name == "Rex" => println(s"$name says, ${d.bark}!") + case _ => println("I don't know you!") + +speak(Cat("Felix")) // "Felix says, Meow!" +speak(Dog("Rex")) // "Rex says, Bark!" +``` +{% endtab %} +{% endtabs %} + ### Using a `match` expression as the body of a method Because `match` expressions return a value, they can be used as the body of a method. diff --git a/_overviews/scala3-book/domain-modeling-fp.md b/_overviews/scala3-book/domain-modeling-fp.md index ce72aca42..bc08f034c 100644 --- a/_overviews/scala3-book/domain-modeling-fp.md +++ b/_overviews/scala3-book/domain-modeling-fp.md @@ -777,7 +777,7 @@ extension (p: Pizza) p.copy(crustType = ct) ``` In the above code, we define the different methods on pizzas as _extension methods_. 
-With `extension (p: Pizza)` we say that we want to make the methods available on instances of `Pizza`. The reciever +With `extension (p: Pizza)` we say that we want to make the methods available on instances of `Pizza`. The receiver in this case is `p`. {% endtab %} diff --git a/_overviews/scala3-book/fun-eta-expansion.md b/_overviews/scala3-book/fun-eta-expansion.md index e45f48cc6..0854c0977 100644 --- a/_overviews/scala3-book/fun-eta-expansion.md +++ b/_overviews/scala3-book/fun-eta-expansion.md @@ -1,7 +1,7 @@ --- -title: Eta Expansion +title: Eta-Expansion type: section -description: This page discusses Eta Expansion, the Scala technology that automatically and transparently converts methods into functions. +description: This page discusses Eta-Expansion, the Scala technology that automatically and transparently converts methods into functions. languages: [ru, zh-cn] num: 31 previous-page: fun-function-variables @@ -9,14 +9,14 @@ next-page: fun-hofs --- -When you look at the Scaladoc for the `map` method on Scala collections classes, you see that it’s defined to accept a _function_: +When you look at the Scaladoc for the `map` method on Scala collections classes, you see that it’s defined to accept a _function_ value: {% tabs fun_1 %} {% tab 'Scala 2 and 3' for=fun_1 %} ```scala -def map[B](f: (A) => B): List[B] - ----------- +def map[B](f: A => B): List[B] +// ^^^^^^ function type from `A` to `B` ``` {% endtab %} @@ -26,7 +26,7 @@ Indeed, the Scaladoc clearly states, “`f` is the _function_ to apply to each e But despite that, somehow you can pass a _method_ into `map`, and it still works: {% tabs fun_2 %} -{% tab 'Scala 2 and 3' for=fun_2 %} +{% tab 'Scala 2 and 3' %} ```scala def times10(i: Int) = i * 10 // a method @@ -36,80 +36,96 @@ List(1, 2, 3).map(times10) // List(10,20,30) {% endtab %} {% endtabs %} -Have you ever wondered how this works---how you can pass a _method_ into `map`, which expects a _function_? 
- -The technology behind this is known as _Eta Expansion_. +Why does this work? The process behind this is known as _eta-expansion_. It converts an expression of _method type_ to an equivalent expression of _function type_, and it does so seamlessly and quietly. ## The differences between methods and functions -{% comment %} -NOTE: I got the following “method” definition from this page (https://dotty.epfl.ch/docs/reference/changed-features/eta-expansion-spec.html), but I’m not sure it’s 100% accurate now that methods can exist outside of classes/traits/objects. -I’ve made a few changes to that description that I hope are more accurate and up to date. -{% endcomment %} - -Historically, _methods_ have been a part of the definition of a class, although in Scala 3 you can now have methods outside of classes, such as [Toplevel definitions][toplevel] and [extension methods][extension]. +The key difference between methods and functions is that _a function is an object_, i.e. it is an instance of a class, and in turn has its own methods (e.g. try `f.apply` on a function `f`). -Unlike methods, _functions_ are complete objects themselves, making them first-class entities. +_Methods_ are not values that can be passed around, i.e. they can only be called via method application (e.g. `foo(arg1, arg2, ...)`). Methods can be _converted_ to a value by creating a function value that will call the method when supplied with the required arguments. This is known as eta-expansion. -Their syntax is also different. -This example shows how to define a method and a function that perform the same task, determining if the given integer is even: +More concretely: with automatic eta-expansion, the compiler automatically converts any _method reference_, without supplied arguments, to an equivalent _anonymous function_ that will call the method. 
For example, the reference to `times10` in the code above gets rewritten to `x => times10(x)`, as seen here: -{% tabs fun_3 %} -{% tab 'Scala 2 and 3' for=fun_3 %} +{% tabs fun_2_expanded %} +{% tab 'Scala 2 and 3' %} ```scala -def isEvenMethod(i: Int) = i % 2 == 0 // a method -val isEvenFunction = (i: Int) => i % 2 == 0 // a function +def times10(i: Int) = i * 10 +List(1, 2, 3).map(x => times10(x)) // eta expansion of `.map(times10)` ``` {% endtab %} {% endtabs %} -The function truly is an object, so you can use it just like any other variable, such as putting it in a list: +> For the curious, the term eta-expansion has its origins in the [Lambda Calculus](https://en.wikipedia.org/wiki/Lambda_calculus). + +## When does eta-expansion happen? -{% tabs fun_4 %} -{% tab 'Scala 2 and 3' for=fun_4 %} +Automatic eta-expansion is a desugaring that is context-dependent (i.e. the expansion conditionally activates, depending on the surrounding code of the method reference.) + +{% tabs fun_5 class=tabs-scala-version %} +{% tab 'Scala 2' %} +In Scala 2 eta-expansion only occurs automatically when the expected type is a function type. +For example, the following will fail: ```scala -val functions = List(isEvenFunction) +def isLessThan(x: Int, y: Int): Boolean = x < y + +val methods = List(isLessThan) +// ^^^^^^^^^^ +// error: missing argument list for method isLessThan +// Unapplied methods are only converted to functions when a function type is expected. +// You can make this conversion explicit by writing `isLessThan _` or `isLessThan(_,_)` instead of `isLessThan`. ``` +See [below](#manual-eta-expansion) for how to solve this issue with manual eta-expansion. {% endtab %} -{% endtabs %} -{% tabs fun_5 class=tabs-scala-version %} -{% tab 'Scala 2' for=fun_5 %} +{% tab 'Scala 3' %} + +New to Scala 3, method references can be used everywhere as a value, they will be automatically converted to a function object with a matching type. e.g. 
```scala -// this example shows the Scala 2 error message -val methods = List(isEvenMethod) - ^ -error: missing argument list for method isEvenMethod -Unapplied methods are only converted to functions when a function type is expected. -You can make this conversion explicit by writing `isEvenMethod _` or `isEvenMethod(_)` instead of `isEvenMethod`. -``` +def isLessThan(x: Int, y: Int): Boolean = x < y -Conversely, a method technically isn’t an object, so in Scala 2 you couldn’t put a method in a `List`, at least not directly, as shown in this example: +val methods = List(isLessThan) // works +``` {% endtab %} +{% endtabs %} + +## Manual eta-expansion -{% tab 'Scala 3' for=fun_5 %} +You can always manually eta-expand a method to a function value, here are some examples how: + +{% tabs fun_6 class=tabs-scala-version %} +{% tab 'Scala 2' %} ```scala -val functions = List(isEvenFunction) // works -val methods = List(isEvenMethod) // works +val methodsA = List(isLessThan _) // way 1: expand all parameters +val methodsB = List(isLessThan(_, _)) // way 2: wildcard application +val methodsC = List((x, y) => isLessThan(x, y)) // way 3: anonymous function ``` -The important part for Scala 3 is that the Eta Expansion technology is improved, so now when you attempt to use a method as a variable, it just works---you don’t have to handle the manual conversion yourself. 
+{% endtab %}
+
+{% tab 'Scala 3' %}
+
+```scala
+val methodsA = List(isLessThan(_, _)) // way 1: wildcard application
+val methodsB = List((x, y) => isLessThan(x, y)) // way 2: anonymous function
+```
 {% endtab %}
 {% endtabs %}
 
+## Summary
+
 For the purpose of this introductory book, the important things to know are:
 
-- Eta Expansion is the Scala technology that lets you use methods just like functions
-- The technology has been improved in Scala 3 to be almost completely seamless
+- eta-expansion is a helpful desugaring that lets you use methods just like functions,
+- the automatic eta-expansion has been improved in Scala 3 to be almost completely seamless.
 
 For more details on how this works, see the [Eta Expansion page][eta_expansion] in the Reference documentation.
diff --git a/_overviews/scala3-book/methods-main-methods.md b/_overviews/scala3-book/methods-main-methods.md
index 797d7d04a..e2a30abfb 100644
--- a/_overviews/scala3-book/methods-main-methods.md
+++ b/_overviews/scala3-book/methods-main-methods.md
@@ -25,7 +25,7 @@ Scala 3 offers a new way to define programs that can be invoked from the command
 To run this program, save the line of code in a file named as e.g. *Hello.scala*---the filename doesn’t have to match the method name---and run it with `scala`:
 
 ```bash
-$ scala Hello.scala
+$ scala run Hello.scala
 Hello, World
 ```
 
@@ -64,10 +64,10 @@ For example, given this `@main` method that takes an `Int`, a `String`, and a va
 {% endtab %}
 {% endtabs %}
 
-When you compile that code, it creates a main program named `happyBirthday` that’s called like this:
+Pass the arguments after `--`:
 
 ```
-$ scala happyBirthday 23 Lisa Peter
+$ scala run happyBirthday.scala -- 23 Lisa Peter
 Happy 23rd Birthday, Lisa and Peter!
``` @@ -79,10 +79,10 @@ The program implemented from an `@main` method checks that there are enough argu If a check fails, the program is terminated with an error message: ``` -$ scala happyBirthday 22 +$ scala run happyBirthday.scala -- 22 Illegal command line after first argument: more arguments expected -$ scala happyBirthday sixty Fred +$ scala run happyBirthday.scala -- sixty Fred Illegal command line: java.lang.NumberFormatException: For input string: "sixty" ``` @@ -176,11 +176,9 @@ object happyBirthday { {% endtab %} {% endtabs %} -If you place that code in a file named *happyBirthday.scala*, you can then compile it with `scalac` and run it with `scala`, as shown previously: +If you place that code in a file named *happyBirthday.scala*, you can then compile and run it with `scala`, as shown previously: ```bash -$ scalac happyBirthday.scala - -$ scala happyBirthday 23 Lisa Peter +$ scala run happyBirthday.scala -- 23 Lisa Peter Happy 23rd Birthday, Lisa and Peter! ``` diff --git a/_overviews/scala3-book/scala-for-javascript-devs.md b/_overviews/scala3-book/scala-for-javascript-devs.md index 0def7dcbc..b488ed8a9 100644 --- a/_overviews/scala3-book/scala-for-javascript-devs.md +++ b/_overviews/scala3-book/scala-for-javascript-devs.md @@ -970,7 +970,7 @@ val nums = List(1, 2, 3) // preferred
for i <- nums do -
  val i = i * 2 +
  val j = i * 2
  println(j)

// also available @@ -992,7 +992,7 @@ val nums = List(1, 2, 3) let str = "ab";
for (let i = 1; i < 3; i++) {
  for (var j = 0; j < str.length; j++) { -
    for (let k = 1; k < 11; k++) { +
    for (let k = 1; k < 11; k += 5) {
      let c = str.charAt(j);
      console.log(`i: ${i} j: ${c} k: ${k}`);
    } diff --git a/_overviews/scala3-book/string-interpolation.md b/_overviews/scala3-book/string-interpolation.md index 2e47a872b..e1c4f1005 100644 --- a/_overviews/scala3-book/string-interpolation.md +++ b/_overviews/scala3-book/string-interpolation.md @@ -34,7 +34,7 @@ The `s` that you place before the string is just one possible interpolator that provides. Scala provides three string interpolation methods out of the box: `s`, `f` and `raw`. -Further, a string interpolator is a just special method so it is possible to define your +Further, a string interpolator is just a special method, so it is possible to define your own. For instance, some database libraries define a `sql` interpolator that returns a database query. diff --git a/_overviews/scala3-book/taste-hello-world.md b/_overviews/scala3-book/taste-hello-world.md index 4bdf996f0..52fc532e5 100644 --- a/_overviews/scala3-book/taste-hello-world.md +++ b/_overviews/scala3-book/taste-hello-world.md @@ -47,53 +47,23 @@ object hello { {% endtabs %} -Next, compile the code with `scalac`: +Next, compile and run the code with `scala`: ```bash -$ scalac hello.scala +$ scala run hello.scala ``` -If you’re coming to Scala from Java, `scalac` is just like `javac`, so that command creates several files: - - -{% tabs hello-world-outputs class=tabs-scala-version %} - -{% tab 'Scala 2' for=hello-world-outputs %} -```bash -$ ls -1 -hello$.class -hello.class -hello.scala +The command should produce an output similar to: ``` -{% endtab %} - -{% tab 'Scala 3' for=hello-world-outputs %} -```bash -$ ls -1 -hello$package$.class -hello$package.class -hello$package.tasty -hello.scala -hello.class -hello.tasty -``` -{% endtab %} - -{% endtabs %} - - -Like Java, the _.class_ files are bytecode files, and they’re ready to run in the JVM. 
- -Now you can run the `hello` method with the `scala` command: - -```bash -$ scala hello +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) Hello, World! ``` Assuming that worked, congratulations, you just compiled and ran your first Scala application. > More information about sbt and other tools that make Scala development easier can be found in the [Scala Tools][scala_tools] chapter. +> The Scala CLI documentation can be found [here](https://scala-cli.virtuslab.org/). ## Ask For User Input @@ -152,16 +122,13 @@ use the `+` operator on strings to join `"Hello, "` with `name` and `"!"`, makin > You can learn more about using `val` by reading [Variables and Data Types](/scala3/book/taste-vars-data-types.html). -Then compile the code with `scalac`: - -```bash -$ scalac helloInteractive.scala -``` -Then run it with `scala helloInteractive`, this time the program will pause after asking for your name, +Then run the code with `scala`. This time the program will pause after asking for your name, and wait until you type a name and press return on the keyboard, looking like this: ```bash -$ scala helloInteractive +$ scala run helloInteractive.scala +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) Please enter your name: ▌ ``` @@ -169,7 +136,9 @@ Please enter your name: When you enter your name at the prompt, the final interaction should look like this: ```bash -$ scala helloInteractive +$ scala run helloInteractive.scala +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) Please enter your name: Alvin Alexander Hello, Alvin Alexander! 
diff --git a/_overviews/scala3-book/taste-intro.md b/_overviews/scala3-book/taste-intro.md index 4820bdfe2..72f74faee 100644 --- a/_overviews/scala3-book/taste-intro.md +++ b/_overviews/scala3-book/taste-intro.md @@ -23,4 +23,4 @@ can be installed by following our [getting started guide][get-started]. [reference]: {{ site.scala3ref }}/overview.html -[get-started]: {% link _overviews/getting-started/index.md %} +[get-started]: {% link _overviews/getting-started/install-scala.md %} diff --git a/_overviews/scala3-macros/tutorial/macros.md b/_overviews/scala3-macros/tutorial/macros.md index 0768813b4..52c7a725d 100644 --- a/_overviews/scala3-macros/tutorial/macros.md +++ b/_overviews/scala3-macros/tutorial/macros.md @@ -290,7 +290,7 @@ Note, that `matches` only performs a limited amount of normalization and while f ### Arbitrary Expressions Last but not least, it is possible to create an `Expr[T]` from arbitary Scala code by enclosing it in [quotes][quotes]. -For example, `'{ ${expr}; true }` will generate an `Expr[Int]` equivalent to `Expr.block(List(expr), Expr(true))`. +For example, `'{ ${expr}; true }` will generate an `Expr[Boolean]` equivalent to `Expr.block(List(expr), Expr(true))`. The subsequent section on [Quoted Code][quotes] presents quotes in more detail. [contributing]: {% link scala3/contribute-to-docs.md %} diff --git a/_overviews/scala3-migration/incompat-type-inference.md b/_overviews/scala3-migration/incompat-type-inference.md index a5f1552c0..bb6fc3052 100644 --- a/_overviews/scala3-migration/incompat-type-inference.md +++ b/_overviews/scala3-migration/incompat-type-inference.md @@ -14,26 +14,15 @@ The new algorithm is better than the old one, but sometime it can fail where Sca > It is always good practice to write the result types of all public values and methods explicitly. > It prevents the public API of your library from changing with the Scala version, because of different inferred types. 
-> +> > This can be done prior to the Scala 3 migration by using the [ExplicitResultTypes](https://scalacenter.github.io/scalafix/docs/rules/ExplicitResultTypes.html) rule in Scalafix. ## Return Type of an Override Method In Scala 3 the return type of an override method is inferred by inheritance from the base method, whereas in Scala 2.13 it is inferred from the left hand side of the override method. -```scala -class Parent { - def foo: Foo = new Foo -} - -class Child extends Parent { - override def foo = new RichFoo(super.foo) -} -``` - -In this example, `Child#foo` returns a `RichFoo` in Scala 2.13 but a `Foo` in Scala 3. -It can lead to compiler errors as demonstrated below. - +{% tabs define_parent_child %} +{% tab 'Scala 2 and 3' %} ```scala class Foo @@ -48,13 +37,24 @@ class Parent { class Child extends Parent { override def foo = new RichFoo(super.foo) } +``` +{% endtab %} +{% endtabs %} + +In this example, `Child#foo` returns a `RichFoo` in Scala 2.13 but a `Foo` in Scala 3. +It can lead to compiler errors as demonstrated below. +{% tabs extend_parent_child %} +{% tab 'Scala 3 Only' %} +```scala (new Child).foo.show // Scala 3 error: value show is not a member of Foo ``` +{% endtab %} +{% endtabs %} In some rare cases involving implicit conversions and runtime casting it could even cause a runtime failure. -The solution is to make the return type of the override method explicit: +The solution is to make the return type of the override method explicit so that it matches what is inferred in 2.13: {% highlight diff %} class Child extends Parent { @@ -68,19 +68,32 @@ class Child extends Parent { Scala 2 reflective calls are dropped and replaced by the broader [Programmatic Structural Types]({{ site.scala3ref }}/changed-features/structural-types.html). Scala 3 can imitate Scala 2 reflective calls by making `scala.reflect.Selectable.reflectiveSelectable` available wherever `scala.language.reflectiveCalls` is imported. 
-However the Scala 3 compiler does not infer structural types by default, and thus fails at compiling: +{% tabs define_structural %} +{% tab 'Scala 2 and 3' %} ```scala import scala.language.reflectiveCalls val foo = new { def bar: Unit = ??? } +``` +{% endtab %} +{% endtabs %} +However the Scala 3 compiler does not infer structural types by default. +It infers the type `Object` for `foo` instead of `{ def bar: Unit }`. +Therefore, the following structural selection fails to compile: + +{% tabs use_structural %} +{% tab 'Scala 3 Only' %} +```scala foo.bar // Error: value bar is not a member of Object ``` +{% endtab %} +{% endtabs %} -The straightforward solution is to write down the structural type. +The straightforward solution is to explicitly write down the structural type. {% highlight diff %} import scala.language.reflectiveCalls diff --git a/_overviews/scala3-migration/plugin-kind-projector.md b/_overviews/scala3-migration/plugin-kind-projector.md index 39639d128..ab996ec58 100644 --- a/_overviews/scala3-migration/plugin-kind-projector.md +++ b/_overviews/scala3-migration/plugin-kind-projector.md @@ -9,11 +9,11 @@ next-page: external-resources In the future, Scala 3 will use the `_` underscore symbol for placeholders in type lambdas---just as the underscore is currently used for placeholders in (ordinary) term-level lambdas. -The new type lambda syntax is not enabled by default, to enable it, use a compiler flag `-Ykind-projector:underscores`. Note that enabling underscore type lambdas will disable usage of `_` as a wildcard, you will only be able to write wildcards using the `?` symbol. +The new type lambda syntax is not enabled by default, to enable it, use a compiler flag `-Ykind-projector:underscores`. Note that enabling underscore type lambdas will disable usage of `_` as a wildcard, you will only be able to write wildcards using the `?` symbol. 
If you wish to cross-compile a project for Scala 2 & Scala 3 while using underscore type lambdas for both, you may do so starting with [kind-projector](https://github.com/typelevel/kind-projector) version `0.13.0` and up and Scala 2 versions `2.13.6` and `2.12.14`. To enable it, add the compiler flags `-Xsource:3 -P:kind-projector:underscore-placeholders` to your build. -As in Scala 3, this will disable usage of `_` as a wildcard, however, the flag `-Xsource:3` will allow you to replace it with the `?` symbol. +As in Scala 3, this will disable usage of `_` as a wildcard, however, the flag `-Xsource:3` will allow you to replace it with the `?` symbol. The following `sbt` configuration will set up the correct flags to cross-compile with new syntax: @@ -34,31 +34,49 @@ In turn, you will also have to rewrite all usages of `_` as the wildcard to use For example the following usage of the wildcard: +{% tabs wildcard_scala2 %} +{% tab 'Scala 2 Only' %} ```scala -def getWidget(widgets: Set[_ <: Widget], name: String): Option[Widget] = widgets.find(_.name == name) +def getWidget(widgets: Set[_ <: Widget], name: String): Option[Widget] = + widgets.find(_.name == name) ``` +{% endtab %} +{% endtabs %} Must be rewritten to: +{% tabs wildcard_scala3 %} +{% tab 'Scala 3 Only' %} ```scala -def getWidget(widgets: Set[? <: Widget], name: String): Option[Widget] = widgets.find(_.name == name) +def getWidget(widgets: Set[? 
<: Widget], name: String): Option[Widget] = + widgets.find(_.name == name) ``` +{% endtab %} +{% endtabs %} And the following usages of kind-projector's `*` placeholder: +{% tabs kind_projector_scala2 %} +{% tab 'Scala 2 Only' %} ```scala Tuple2[*, Double] // equivalent to: type R[A] = Tuple2[A, Double] Either[Int, +*] // equivalent to: type R[+A] = Either[Int, A] Function2[-*, Long, +*] // equivalent to: type R[-A, +B] = Function2[A, Long, B] ``` +{% endtab %} +{% endtabs %} Must be rewritten to: +{% tabs kind_projector_scala3 %} +{% tab 'Scala 3 Only' %} ```scala Tuple2[_, Double] // equivalent to: type R[A] = Tuple2[A, Double] Either[Int, +_] // equivalent to: type R[+A] = Either[Int, A] Function2[-_, Long, +_] // equivalent to: type R[-A, +B] = Function2[A, Long, B] ``` +{% endtab %} +{% endtabs %} ## Compiling Existing Code @@ -66,11 +84,15 @@ Even without migrating to underscore type lambdas, you will likely be able to co Use the flag `-Ykind-projector` to enable support for `*`-based type lambdas (without enabling underscore type lambdas), the following forms will now compile: +{% tabs kind_projector_cross %} +{% tab 'Scala 2 and 3' %} ```scala Tuple2[*, Double] // equivalent to: type R[A] = Tuple2[A, Double] Either[Int, +*] // equivalent to: type R[+A] = Either[Int, A] Function2[-*, Long, +*] // equivalent to: type R[-A, +B] = Function2[A, Long, B] ``` +{% endtab %} +{% endtabs %} ## Rewriting Incompatible Constructs @@ -82,6 +104,8 @@ Scala 3's `-Ykind-projector` & `-Ykind-projector:underscores` implement only a s You must rewrite ALL of the following forms: +{% tabs kind_projector_illegal_scala2 %} +{% tab 'Scala 2 Only' %} ```scala // classic EitherT[*[_], Int, *] // equivalent to: type R[F[_], B] = EitherT[F, Int, B] @@ -92,36 +116,58 @@ EitherT[_[_], Int, _] // equivalent to: type R[F[_], B] = EitherT[F, Int, B] // named Lambda Lambda[(F[_], A) => EitherT[F, Int, A]] ``` +{% endtab %} +{% endtabs %} Into the following long-form to cross-compile with 
Scala 3: +{% tabs kind_projector_illegal_cross %} +{% tab 'Scala 2 and 3' %} ```scala type MyLambda[F[_], A] = EitherT[F, Int, A] MyLambda ``` +{% endtab %} +{% endtabs %} Alternatively you may use Scala 3's [Native Type Lambdas]({{ site.scala3ref }}/new-types/type-lambdas.html) if you do not need to cross-compile: +{% tabs kind_projector_illegal_scala3 %} +{% tab 'Scala 3 Only' %} ```scala [F[_], A] =>> EitherT[F, Int, A] ``` +{% endtab %} +{% endtabs %} For `Lambda` you must rewrite the following form: +{% tabs kind_projector_illegal_lambda_scala2 %} +{% tab 'Scala 2 Only' %} ```scala Lambda[(`+E`, `+A`) => Either[E, A]] ``` +{% endtab %} +{% endtabs %} To the following to cross-compile: +{% tabs kind_projector_illegal_lambda_cross %} +{% tab 'Scala 2 and 3' %} ```scala λ[(`+E`, `+A`) => Either[E, A]] ``` +{% endtab %} +{% endtabs %} Or alternatively to Scala 3 type lambdas: +{% tabs kind_projector_illegal_lambda_scala3 %} +{% tab 'Scala 3 Only' %} ```scala [E, A] =>> Either[E, A] ``` +{% endtab %} +{% endtabs %} Note: Scala 3 type lambdas no longer need `-` or `+` variance markers on parameters, these are now inferred. 
diff --git a/_overviews/scala3-migration/tooling-scala2-xsource3.md b/_overviews/scala3-migration/tooling-scala2-xsource3.md index 63ecbb6e8..e88f711c3 100644 --- a/_overviews/scala3-migration/tooling-scala2-xsource3.md +++ b/_overviews/scala3-migration/tooling-scala2-xsource3.md @@ -105,6 +105,7 @@ The following table shows backported Scala 3 language semantics available in `-X | `case-apply-copy-access`: modifiers of synthetic methods | fatal warning | constructor modifiers are used for apply / copy methods of case classes | | `case-companion-function`: companions are Functions | fatal warning at use site | synthetic case companion objects no longer extend FunctionN, but are adapted at use site with warning | | `infer-override`: override type inference | fatal warning | inferred type of member uses type of overridden member | +| `double-definitions`: definitions differing in empty parens 2 | fatal warning | double definition error | Example 1: @@ -114,9 +115,17 @@ Example 1: + 2 {% endhighlight %} +Example 2: + +{% highlight scala %} +class C(x: Int) { + def x(): Int = x // allowed in Scala 2, double definition error in Scala 3 +} +{% endhighlight %} + ### Changes affecting binary encoding -As of Scala 2.13.14, there are 3 changes in `-Xsource-features` that affect binary encoding of classfiles: +As of Scala 2.13.15, there are 3 changes in `-Xsource-features` that affect binary encoding of classfiles: 1. `case-apply-copy-access`: the constructor modifiers of case classes (`case class C private[p] (x: Int)`) are copied to the synthetic `apply` and `copy` methods. 1. `case-companion-function`: the synthetic companion objects of case classes no longer extend `FunctionN`. 
diff --git a/_overviews/scala3-migration/tooling-syntax-rewriting.md b/_overviews/scala3-migration/tooling-syntax-rewriting.md index 9ce122c1a..5cd51e0cc 100644 --- a/_overviews/scala3-migration/tooling-syntax-rewriting.md +++ b/_overviews/scala3-migration/tooling-syntax-rewriting.md @@ -52,7 +52,7 @@ Each of the first four options corresponds to a specific syntax: | Syntax | Compiler Option | |-|-| | Significant Indentation | `-indent` | -| Classical Braces | `-noindent` | +| Classical Braces | `-no-indent` | As we will see in further detail these options can be used in combination with the `-rewrite` option to automate the conversion to a particular syntax. diff --git a/_overviews/scala3-migration/tutorial-macro-cross-building.md b/_overviews/scala3-migration/tutorial-macro-cross-building.md index b456b521a..8d45ed3f9 100644 --- a/_overviews/scala3-migration/tutorial-macro-cross-building.md +++ b/_overviews/scala3-migration/tutorial-macro-cross-building.md @@ -101,11 +101,11 @@ If you try to compile with Scala 3 you should see some errors of the same kind a {% highlight text %} sbt:example> ++3.3.1 sbt:example> example / compile -[error] -- Error: /example/src/main/scala/location/Location.scala:15:35 +[error] -- Error: /example/src/main/scala/location/Location.scala:15:35 [error] 15 | val location = typeOf[Location] [error] | ^ [error] | No TypeTag available for location.Location -[error] -- Error: /example/src/main/scala/location/Location.scala:18:4 +[error] -- Error: /example/src/main/scala/location/Location.scala:18:4 [error] 18 | q"new $location($path, $line)" [error] | ^ [error] |Scala 2 macro cannot be used in Dotty. See https://dotty.epfl.ch/docs/reference/dropped-features/macros.html @@ -165,7 +165,7 @@ They must have the exact same signature than their Scala 2.13 counterparts. package location object Macros: - def location: Location = ??? + inline def location: Location = ??? 
``` {% endtab %} {% endtabs %} @@ -191,7 +191,7 @@ object Macros: private def locationImpl(using quotes: Quotes): Expr[Location] = import quotes.reflect.Position val pos = Position.ofMacroExpansion - val file = Expr(pos.sourceFile.jpath.toString) + val file = Expr(pos.sourceFile.path.toString) val line = Expr(pos.startLine + 1) '{new Location($file, $line)} ``` diff --git a/_overviews/scala3-migration/tutorial-macro-mixing.md b/_overviews/scala3-migration/tutorial-macro-mixing.md index 34389b9d1..4a013b428 100644 --- a/_overviews/scala3-migration/tutorial-macro-mixing.md +++ b/_overviews/scala3-migration/tutorial-macro-mixing.md @@ -7,7 +7,7 @@ previous-page: tutorial-macro-mixing next-page: tooling-syntax-rewriting --- -This tutorial shows how to mix Scala 2.13 and Scala 3 macros in a single artifact. This means that consumers can use '-Ytasty-reader' from Scala 2.13 code that uses your macros. +This tutorial shows how to mix Scala 2.13 and Scala 3 macros in a single artifact. This means that consumers can use `-Ytasty-reader` from Scala 2.13 code that uses your macros. 
There are two main benefits of this: diff --git a/_overviews/toolkit/OrderedListOfMdFiles b/_overviews/toolkit/OrderedListOfMdFiles index ea248772f..b2790bd58 100644 --- a/_overviews/toolkit/OrderedListOfMdFiles +++ b/_overviews/toolkit/OrderedListOfMdFiles @@ -27,3 +27,10 @@ http-client-request-body.md http-client-json.md http-client-upload-file.md http-client-what-else.md +web-server-intro.md +web-server-static.md +web-server-dynamic.md +web-server-query-parameters.md +web-server-input.md +web-server-websockets.md +web-server-cookies-and-decorators.md diff --git a/_overviews/toolkit/http-client-what-else.md b/_overviews/toolkit/http-client-what-else.md index 11b577449..865a03155 100644 --- a/_overviews/toolkit/http-client-what-else.md +++ b/_overviews/toolkit/http-client-what-else.md @@ -4,7 +4,7 @@ type: section description: An incomplete list of features of sttp num: 29 previous-page: http-client-upload-file -next-page: +next-page: web-server-intro --- {% include markdown.html path="_markdown/install-upickle.md" %} diff --git a/_overviews/toolkit/introduction.md b/_overviews/toolkit/introduction.md index 1656ed966..9bc97cb2d 100644 --- a/_overviews/toolkit/introduction.md +++ b/_overviews/toolkit/introduction.md @@ -22,6 +22,10 @@ toolkit-index: description: Sending HTTP requests and uploading files with sttp. icon: "fa fa-globe" link: /toolkit/http-client-intro.html + - title: Web servers + description: Building web servers with Cask. + icon: "fa fa-server" + link: /toolkit/web-server-intro.html --- ## What is the Scala Toolkit? 
diff --git a/_overviews/toolkit/testing-run-only.md b/_overviews/toolkit/testing-run-only.md index fcd56fde3..1469fbd57 100644 --- a/_overviews/toolkit/testing-run-only.md +++ b/_overviews/toolkit/testing-run-only.md @@ -22,7 +22,7 @@ scala-cli test example --test-only example.MyTests {% tab 'sbt' %} To run a single `example.MyTests` suite in sbt, use the `testOnly` task: ``` -sbt:example> example/testOnly example.MyTests +sbt:example> testOnly example.MyTests ``` {% endtab %} {% tab 'Mill' %} diff --git a/_overviews/toolkit/testing-run.md b/_overviews/toolkit/testing-run.md index 12b087128..09bc23e73 100644 --- a/_overviews/toolkit/testing-run.md +++ b/_overviews/toolkit/testing-run.md @@ -27,7 +27,7 @@ scala-cli test example {% tab 'sbt' %} In the sbt shell, the following command runs all the tests of the project `example`: ``` -sbt:example> example/test +sbt:example> test # MyTests: # + sum of two integers 0.006s # [info] Passed: Total 1, Failed 0, Errors 0, Passed 1 @@ -74,7 +74,7 @@ test("failing test") { ``` # MyTests: # + sum of two integers 0.008s -# ==> X MyTests.failing test 0.015s munit.ComparisonFailException: ./example/MyTests.test.scala:13 +# ==> X MyTests.failing test 0.015s munit.ComparisonFailException: ./MyTests.test.scala:13 # 12: val expected = 4 # 13: assertEquals(obtained, expected) # 14: } diff --git a/_overviews/toolkit/web-server-cookies-and-decorators.md b/_overviews/toolkit/web-server-cookies-and-decorators.md new file mode 100644 index 000000000..36caeac4d --- /dev/null +++ b/_overviews/toolkit/web-server-cookies-and-decorators.md @@ -0,0 +1,188 @@ +--- +title: How to use cookies and decorators? +type: section +description: Using cookies and decorators with Cask +num: 36 +previous-page: web-server-websockets +next-page: +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +## Using cookies + +Cookies are saved by adding them to the `cookies` parameter of the `cask.Response` constructor. 
+ +In this example, we are building a rudimentary authentication service. The `getLogin` method provides a form where +the user can enter their username and password. The `postLogin` method reads the credentials. If they match the expected ones, it generates a session +identifier, saves it in the application state, and sends back a cookie with the identifier. + +Cookies can be read either with a method parameter of `cask.Cookie` type or by accessing the `cask.Request` directly. +If using the former method, the names of parameters have to match the names of cookies. If a cookie with a matching name is not +found, an error response will be returned. In the `checkLogin` function, the former method is used, as the cookie is not +present before the user logs in. + +To delete a cookie, set its `expires` parameter to an instant in the past, for example `Instant.EPOCH`. + +{% tabs web-server-cookies-1 class=tabs-scala-version %} +{% tab 'Scala 2' %} + +```scala +import java.util.UUID +import java.util.concurrent.ConcurrentHashMap + +object Example extends cask.MainRoutes { + +  val sessionIds = ConcurrentHashMap.newKeySet[String]() + +  @cask.get("/login") +  def getLogin(): cask.Response[String] = { +    val html = +      """ + | + | +        |
+ |
+ |
+ |
+ |

+ | + |
+ | + |""".stripMargin + + cask.Response(data = html, headers = Seq("Content-Type" -> "text/html")) + } + + @cask.postForm("/login") + def postLogin(name: String, password: String): cask.Response[String] = { + if (name == "user" && password == "password") { + val sessionId = UUID.randomUUID().toString + sessionIds.add(sessionId) + cask.Response(data = "Success!", cookies = Seq(cask.Cookie("sessionId", sessionId))) + } else { + cask.Response(data = "Authentication failed", statusCode = 401) + } + } + + @cask.get("/check") + def checkLogin(request: cask.Request): String = { + val sessionId = request.cookies.get("sessionId") + if (sessionId.exists(cookie => sessionIds.contains(cookie.value))) { + "You are logged in" + } else { + "You are not logged in" + } + } + + @cask.get("/logout") + def logout(sessionId: cask.Cookie) = { + sessionIds.remove(sessionId.value) + cask.Response(data = "Successfully logged out!", cookies = Seq(cask.Cookie("sessionId", "", expires = Instant.EPOCH))) + } + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.util.UUID +import java.util.concurrent.ConcurrentHashMap + +object Example extends cask.MainRoutes: + + val sessionIds = ConcurrentHashMap.newKeySet[String]() + + @cask.get("/login") + def getLogin(): cask.Response[String] = + val html = + """ + | + | + |
+ |
+ |
+ |
+ |

+ | + |
+ | + |""".stripMargin + + cask.Response(data = html, headers = Seq("Content-Type" -> "text/html")) + + @cask.postForm("/login") + def postLogin(name: String, password: String): cask.Response[String] = + if name == "user" && password == "password" then + val sessionId = UUID.randomUUID().toString + sessionIds.add(sessionId) + cask.Response(data = "Success!", cookies = Seq(cask.Cookie("sessionId", sessionId))) + else + cask.Response(data = "Authentication failed", statusCode = 401) + + @cask.get("/check") + def checkLogin(request: cask.Request): String = + val sessionId = request.cookies.get("sessionId") + if sessionId.exists(cookie => sessionIds.contains(cookie.value)) then + "You are logged in" + else + "You are not logged in" + + @cask.get("/logout") + def logout(sessionId: cask.Cookie): cask.Response[String] = + sessionIds.remove(sessionId.value) + cask.Response(data = "Successfully logged out!", cookies = Seq(cask.Cookie("sessionId", "", expires = Instant.EPOCH))) + + initialize() +``` +{% endtab %} +{% endtabs %} + +## Using decorators + +Decorators can be used for extending endpoints functionality with validation or new parameters. They are defined by extending +`cask.RawDecorator` class. They are used as annotations. + +In this example, the `loggedIn` decorator is used to check if the user is logged in before accessing the `/decorated` +endpoint. + +The decorator class can pass additional arguments to the decorated endpoint using a map. The passed arguments are available +through the last argument group. Here we are passing the session identifier to an argument named `sessionId`. 
+ +{% tabs web-server-cookies-2 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +class loggedIn extends cask.RawDecorator { + override def wrapFunction(ctx: cask.Request, delegate: Delegate): Result[Raw] = { + ctx.cookies.get("sessionId") match { + case Some(cookie) if sessionIds.contains(cookie.value) => delegate(Map("sessionId" -> cookie.value)) + case _ => cask.router.Result.Success(cask.model.Response("You aren't logged in", 403)) + } + } +} + +@loggedIn() +@cask.get("/decorated") +def decorated()(sessionId: String): String = { + s"You are logged in with id: $sessionId" +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +class loggedIn extends cask.RawDecorator: + override def wrapFunction(ctx: cask.Request, delegate: Delegate): Result[Raw] = + ctx.cookies.get("sessionId") match + case Some(cookie) if sessionIds.contains(cookie.value) => + delegate(Map("sessionId" -> cookie.value)) + case _ => + cask.router.Result.Success(cask.model.Response("You aren't logged in", 403)) + + +@loggedIn() +@cask.get("/decorated") +def decorated()(sessionId: String): String = s"You are logged in with id: $sessionId" +``` +{% endtab %} +{% endtabs %} \ No newline at end of file diff --git a/_overviews/toolkit/web-server-dynamic.md b/_overviews/toolkit/web-server-dynamic.md new file mode 100644 index 000000000..49101505c --- /dev/null +++ b/_overviews/toolkit/web-server-dynamic.md @@ -0,0 +1,237 @@ +--- +title: How to serve a dynamic page? +type: section +description: Serving a dynamic page with Cask +num: 32 +previous-page: web-server-static +next-page: web-server-query-parameters +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +## Serving dynamically generated content + +You can create an endpoint returning dynamically generated content with the `@cask.get` annotation. 
+ +{% tabs web-server-dynamic-1 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +import java.time.ZonedDateTime + +object Example extends cask.MainRoutes { + @cask.get("/time") + def dynamic(): String = s"Current date is: ${ZonedDateTime.now()}" + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.time.ZonedDateTime + +object Example extends cask.MainRoutes: + @cask.get("/time") + def dynamic(): String = s"Current date is: ${ZonedDateTime.now()}" + + initialize() +``` +{% endtab %} +{% endtabs %} + +The example above creates an endpoint that returns the current date and time available at `/time`. The exact response will be +recreated every time you refresh the webpage. + +Since the endpoint method has the `String` output type, the result will be sent with the `text/plain` [content type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type). +If you want an HTML output to be interpreted by the browser, you will need to set the `Content-Type` header manually +or [use the Scalatags templating library](/toolkit/web-server-dynamic.html#using-html-templates), supported by Cask. + +### Running the example + +Run the example the same way as before, assuming you use the same project structure as described in [the static file tutorial](/toolkit/web-server-static.html). + +{% tabs web-server-dynamic-2 class=tabs-build-tool %} +{% tab 'Scala CLI' %} +In the terminal, the following command will start the server: +``` +scala-cli run Example.scala +``` +{% endtab %} +{% tab 'sbt' %} +In the terminal, the following command will start the server: +``` +sbt example/run +``` +{% endtab %} +{% tab 'Mill' %} +In the terminal, the following command will start the server: +``` +./mill run +``` +{% endtab %} +{% endtabs %} + +Access the endpoint at [http://localhost:8080/time](http://localhost:8080/time). You should see a result similar to the one below. 
+ +``` +Current date is: 2024-07-22T09:07:05.752534+02:00[Europe/Warsaw] +``` + +## Using path segments + +Cask gives you the ability to access segments of the URL path within the endpoint function. +Building on the example above, you can add a segment to specify that the endpoint should return the date and time +in a given city. + +{% tabs web-server-dynamic-3 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes { + + private def getZoneIdForCity(city: String): Option[ZoneId] = { + import scala.jdk.CollectionConverters._ + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + } + + @cask.get("/time/:city") + def dynamicWithCity(city: String): String = { + getZoneIdForCity(city) match { + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + } + } + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes: + + private def getZoneIdForCity(city: String): Option[ZoneId] = + import scala.jdk.CollectionConverters.* + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + + @cask.get("/time/:city") + def dynamicWithCity(city: String): String = + getZoneIdForCity(city) match + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + + initialize() +``` +{% endtab %} +{% endtabs %} + +In the example above, the `:city` segment in `/time/:city` is available through the `city` argument of the endpoint method. +The name of the argument must be identical to the segment name. The `getZoneIdForCity` helper method finds the timezone for +a given city, and then the current date and time are translated to that timezone. 
+ +Accessing the endpoint at [http://localhost:8080/time/Paris](http://localhost:8080/time/Paris) will result in: +``` +Current date is: 2024-07-22T09:08:33.806259+02:00[Europe/Paris] +``` + +You can use more than one path segment in an endpoint by adding more arguments to the endpoint method. It's also possible to use paths +with an unspecified number of segments (for example `/path/foo/bar/baz/`) by giving the endpoint method an argument with `cask.RemainingPathSegments` type. +Consult the [documentation](https://com-lihaoyi.github.io/cask/index.html#variable-routes) for more details. + +## Using HTML templates + +To create an HTML response, you can combine Cask with the [Scalatags](https://com-lihaoyi.github.io/scalatags/) templating library. + +Import the Scalatags library: + +{% tabs web-server-dynamic-4 class=tabs-build-tool %} +{% tab 'Scala CLI' %} +Add the Scalatags dependency in the `Example.scala` file: +```scala +//> using dep "com.lihaoyi::scalatags::0.13.1" +``` +{% endtab %} +{% tab 'sbt' %} +Add the Scalatags dependency in the `build.sbt` file: +```scala +libraryDependencies += "com.lihaoyi" %% "scalatags" % "0.13.1" +``` +{% endtab %} +{% tab 'Mill' %} +Add the Scalatags dependency in the `build.sc` file: +```scala +ivy"com.lihaoyi::scalatags::0.13.1" +``` +{% endtab %} +{% endtabs %} + +Now the example above can be rewritten to use a template. Cask will build a response out of the `doctype` automatically, +setting the `Content-Type` header to `text/html`. 
+ +{% tabs web-server-dynamic-5 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +import java.time.{ZoneId, ZonedDateTime} +import scalatags.Text.all._ + +object Example extends cask.MainRoutes { + + private def getZoneIdForCity(city: String): Option[ZoneId] = { + import scala.jdk.CollectionConverters._ + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + } + + @cask.get("/time/:city") + def dynamicWithCity(city: String): doctype = { + val text = getZoneIdForCity(city) match { + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + } + + doctype("html")( + html( + body( + p(text) + ) + ) + ) + } + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.time.{ZoneId, ZonedDateTime} +import scalatags.Text.all.* + +object Example extends cask.MainRoutes: + + private def getZoneIdForCity(city: String): Option[ZoneId] = + import scala.jdk.CollectionConverters.* + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + + @cask.get("/time/:city") + def dynamicWithCity(city: String): doctype = + val text = getZoneIdForCity(city) match + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + doctype("html")( + html( + body( + p(text) + ) + ) + ) + + initialize() +``` +{% endtab %} +{% endtabs %} + +Here we get the text of the response and wrap it in a Scalatags template. Notice that the return type changed from `String` +to `doctype`. \ No newline at end of file diff --git a/_overviews/toolkit/web-server-input.md b/_overviews/toolkit/web-server-input.md new file mode 100644 index 000000000..8be4c659d --- /dev/null +++ b/_overviews/toolkit/web-server-input.md @@ -0,0 +1,243 @@ +--- +title: How to handle user input? 
+type: section +description: Handling user input with Cask +num: 34 +previous-page: web-server-query-parameters +next-page: web-server-websockets +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +## Handling form-encoded input + +To create an endpoint that handles the data provided in an HTML form, use the `@cask.postForm` annotation. Add arguments to the endpoint method +with names corresponding to names of fields in the form and set the form method to `post`. + +{% tabs web-server-input-1 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +object Example extends cask.MainRoutes { + + @cask.get("/form") + def getForm(): cask.Response[String] = { + val html = + """ + | + | + |
+ |
+ |
+ |
+ |

+ | + |
+ | + |""".stripMargin + + cask.Response(data = html, headers = Seq("Content-Type" -> "text/html")) + } + + @cask.postForm("/form") + def formEndpoint(name: String, surname: String): String = + "Hello " + name + " " + surname + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +object Example extends cask.MainRoutes: + + @cask.get("/form") + def getForm(): cask.Response[String] = + val html = + """ + | + | + |
+ |
+ |
+ |
+ |

+ | + |
+ | + |""".stripMargin + + cask.Response(data = html, headers = Seq("Content-Type" -> "text/html")) + + @cask.postForm("/form") + def formEndpoint(name: String, surname: String): String = + "Hello " + name + " " + surname + + initialize() +``` +{% endtab %} +{% endtabs %} + +In this example we create a form asking for name and surname of a user and then redirect the user to a greeting page. Notice the +use of `cask.Response`. The `cask.Response` type allows the user to set the status code, headers and cookies. The default +content type for an endpoint method returning a `String` is `text/plain`. Set it to `text/html` in order for the browser to display the form correctly. + +The `formEndpoint` endpoint reads the form data using the `name` and `surname` parameters. The names of parameters must +be identical to the field names of the form. + +## Handling JSON-encoded input + +JSON fields are handled in the same way as form fields, except that we use the `@cask.PostJson` annotation. The fields +will be read into the endpoint method arguments. + +{% tabs web-server-input-2 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +object Example extends cask.MainRoutes { + + @cask.postJson("/json") + def jsonEndpoint(name: String, surname: String): String = + "Hello " + name + " " + surname + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +object Example extends cask.MainRoutes: + + @cask.postJson("/json") + def jsonEndpoint(name: String, surname: String): String = + "Hello " + name + " " + surname + + initialize() +``` +{% endtab %} +{% endtabs %} + +Send the POST request using `curl`: + +```shell +curl --header "Content-Type: application/json" \ + --data '{"name":"John","surname":"Smith"}' \ + http://localhost:8080/json +``` + +The response will be: +``` +Hello John Smith +``` + +The endpoint will accept JSONs that have only the fields with names specified as the endpoint method arguments. 
If there +are more fields than expected, some fields are missing or have an incorrect data type, an error message +will be returned with the response code 400. + +To handle the case when the fields of the JSON are not known in advance, you can use an argument with the `ujson.Value` type, +from uPickle library. + +{% tabs web-server-input-3 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +object Example extends cask.MainRoutes { + +  @cask.postJson("/json") +  def jsonEndpoint(value: ujson.Value): String = +    value.toString + +  initialize() +} + +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +object Example extends cask.MainRoutes: + +  @cask.postJson("/json") +  def jsonEndpoint(value: ujson.Value): String = +    value.toString + +  initialize() + +``` +{% endtab %} +{% endtabs %} + +In this example the JSON is merely converted to `String`. Check the [*uPickle tutorial*](/toolkit/json-intro.html) for more information +on what can be done with the `ujson.Value` type. + +Send a POST request. +```shell +curl --header "Content-Type: application/json" \ + --data '{"value":{"name":"John","surname":"Smith"}}' \ + http://localhost:8080/json +``` + +The server will respond with: +``` +"{\"name\":\"John\",\"surname\":\"Smith\"}" +``` + +## Handling JSON-encoded output + +Cask endpoints can return JSON objects returned by uPickle library functions. Cask will automatically handle the `ujson.Value` +type and set the `Content-Type` header to `application/json`. + +In this example, the `TimeData` case class stores the information about the time zone and current time in a chosen +location. To serialize a case class into JSON, use type class derivation or define the serializer in its companion object in the case of Scala 2. 
+ +{% tabs web-server-input-4 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes { + import upickle.default.{ReadWriter, macroRW, writeJs} + case class TimeData(timezone: Option[String], time: String) + object TimeData { + implicit val rw: ReadWriter[TimeData] = macroRW + } + + private def getZoneIdForCity(city: String): Option[ZoneId] = { + import scala.jdk.CollectionConverters._ + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + } + + @cask.get("/time_json/:city") + def timeJSON(city: String): ujson.Value = { + val timezone = getZoneIdForCity(city) + val time = timezone match { + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + } + writeJs(TimeData(timezone.map(_.toString), time)) + } + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes: + import upickle.default.{ReadWriter, writeJs} + case class TimeData(timezone: Option[String], time: String) derives ReadWriter + + private def getZoneIdForCity(city: String): Option[ZoneId] = + import scala.jdk.CollectionConverters.* + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + + @cask.get("/time_json/:city") + def timeJSON(city: String): ujson.Value = + val timezone = getZoneIdForCity(city) + val time = timezone match + case Some(zoneId)=> s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + writeJs(TimeData(timezone.map(_.toString), time)) + + initialize() +``` +{% endtab %} +{% endtabs %} \ No newline at end of file diff --git a/_overviews/toolkit/web-server-intro.md b/_overviews/toolkit/web-server-intro.md new file mode 100644 index 000000000..4a1efcdc6 --- /dev/null +++ 
b/_overviews/toolkit/web-server-intro.md @@ -0,0 +1,23 @@ +--- +title: Building web servers with Cask +type: chapter +description: An introduction to the Cask library +num: 30 +previous-page: http-client-what-else +next-page: web-server-static +--- + +Cask is an HTTP micro-framework, providing a simple and flexible way to build web applications. + +Its main focus is on ease of use, which makes it ideal for newcomers, at the cost of eschewing some features other +frameworks provide, like asynchronicity. + +To define an endpoint, it's enough to annotate a function with an annotation specifying the request path. +Cask allows for building the response manually using tools that the library provides, specifying the content, headers, +status code, etc. An endpoint function can also return a string, a [uPickle](https://com-lihaoyi.github.io/upickle/) JSON type, or a [Scalatags](https://com-lihaoyi.github.io/scalatags/) +template. In that case, Cask will automatically create a response with the appropriate headers. + +Cask comes bundled with the uPickle library for handling JSON, supports WebSockets and allows for extending endpoints with +decorators, which can be used to handle authentication or rate limiting. + +{% include markdown.html path="_markdown/install-cask.md" %} diff --git a/_overviews/toolkit/web-server-query-parameters.md b/_overviews/toolkit/web-server-query-parameters.md new file mode 100644 index 000000000..8da1dc9cc --- /dev/null +++ b/_overviews/toolkit/web-server-query-parameters.md @@ -0,0 +1,74 @@ +--- +title: How to handle query parameters? +type: section +description: Handling query parameters with Cask +num: 33 +previous-page: web-server-dynamic +next-page: web-server-input +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +Query parameters are the key-value pairs coming after the question mark in a URL. They can be used for filtering, +sorting or limiting the results provided by the server.
For example, in the `/time?city=Paris` URL, the `city` part +is the name of a parameter, and `Paris` is its value. Cask allows for reading the query parameters by defining an endpoint +method with arguments matching the names of the expected parameters and not matching any of the URL segments. + +In this example, we give an `Option` type and the default value `None` to the `city` parameter. This tells Cask that it is optional. +If not provided, the time for the current timezone will be returned. + +{% tabs web-server-query-1 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes { + + private def getZoneIdForCity(city: String): Option[ZoneId] = { + import scala.jdk.CollectionConverters._ + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + } + + @cask.get("/time") + def dynamicWithParam(city: Option[String] = None): String = { + city match { + case Some(value) => getZoneIdForCity(value) match { + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $value" + } + case None => s"Current date is: ${ZonedDateTime.now()}" + } + } + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +import java.time.{ZoneId, ZonedDateTime} + +object Example extends cask.MainRoutes: + + private def getZoneIdForCity(city: String): Option[ZoneId] = + import scala.jdk.CollectionConverters.* + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + + @cask.get("/time") + def dynamicWithParam(city: Option[String] = None): String = + city match + case Some(value) => getZoneIdForCity(value) match + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $value" + case None => s"Current date is: ${ZonedDateTime.now()}" + + initialize() +``` +{% endtab %} +{% endtabs 
%} + +Run the example as before and access the endpoint at [http://localhost:8080/time?city=Paris](http://localhost:8080/time?city=Paris). +You should get a result similar to the following one. +``` +Current date is: 2024-07-22T10:08:18.218736+02:00[Europe/Paris] +``` diff --git a/_overviews/toolkit/web-server-static.md b/_overviews/toolkit/web-server-static.md new file mode 100644 index 000000000..780941efb --- /dev/null +++ b/_overviews/toolkit/web-server-static.md @@ -0,0 +1,159 @@ +--- +title: How to serve a static file? +type: section +description: Serving a static file with Cask +num: 31 +previous-page: web-server-intro +next-page: web-server-dynamic +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +## Serving a static file + +An endpoint is a specific URL where a particular webpage can be accessed. In Cask, an endpoint is a function returning the +webpage data, together with an annotation describing its URL. + +To create an endpoint serving static files, we need two things: an HTML file with the page content and a function that +points to that file. + +Create a minimal HTML file named `hello.html` with the following contents. + +```html + + + + Hello World + + +

Hello world

+ + +``` + +Place it in the `resources` directory. + +{% tabs web-server-static-1 class=tabs-build-tool %} +{% tab 'Scala CLI' %} +``` +example +├── Example.scala +└── resources + └── hello.html +``` +{% endtab %} +{% tab 'sbt' %} +``` +example +└──src + └── main + ├── resources + │ └── hello.html + └── scala + └── Example.scala +``` +{% endtab %} +{% tab 'Mill' %} +``` +example +├── src +│ └── Example.scala +└── resources + └── hello.html +``` +{% endtab %} +{% endtabs %} + +The `@cask.staticFiles` annotation specifies at which path the webpage will be available. The endpoint function returns +the location of the file. + +{% tabs web-server-static-2 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +object Example extends cask.MainRoutes { + @cask.staticFiles("/static") + def staticEndpoint(): String = "src/main/resources" // or "resources" if not using SBT + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +object Example extends cask.MainRoutes: + @cask.staticFiles("/static") + def staticEndpoint(): String = "src/main/resources" // or "resources" if not using SBT + + initialize() +``` +{% endtab %} +{% endtabs %} + +In the example above, `@cask.staticFiles` instructs the server to look for files accessed at the `/static` path in the +`src/main/resources` directory. Cask will match any subpath coming after `/static` and append it to the directory path. +If you access the `/static/hello.html` file, it will serve the file available at `src/main/resources/hello.html`. +The directory path can be any path available to the server, relative or not. If the requested file cannot be found in the +specified location, the server will return a 404 response with an error message. + +The `Example` object inherits from the `cask.MainRoutes` class. It provides the main function that starts the server. The `initialize()` +method call initializes the server routes, i.e., the association between URL paths and the code that handles them. 
+ +### Using the resources directory + +The `@cask.staticResources` annotation works in the same way as the `@cask.staticFiles` used above, with the difference that +the path returned by the endpoint method describes the location of files _inside_ the resources directory. Since the +previous example conveniently used the resources directory, it can be simplified with `@cask.staticResources`. + +{% tabs web-server-static-3 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +object Example extends cask.MainRoutes { + @cask.staticResources("/static") + def staticEndpoint(): String = "." + + initialize() +} +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +object Example extends cask.MainRoutes: + @cask.staticResources("/static") + def staticEndpoint(): String = "." + + initialize() +``` +{% endtab %} +{% endtabs %} + +In the endpoint method, the location is set to `"."`, telling the server that the files are available directly in the +resources directory. In general, you can use any nested location within the resources directory. For instance, you could opt +for placing your HTML files in the `static` directory inside the resources directory or using different directories to sort out +files used by different endpoints. + +## Running the example + +Run the example with the build tool of your choice. + +{% tabs munit-unit-test-4 class=tabs-build-tool %} +{% tab 'Scala CLI' %} +In the terminal, the following command will start the server: +``` +scala run Example.scala +``` +{% endtab %} +{% tab 'sbt' %} +In the terminal, the following command will start the server: +``` +sbt example/run +``` +{% endtab %} +{% tab 'Mill' %} +In the terminal, the following command will start the server: +``` +./mill run +``` +{% endtab %} +{% endtabs %} + +The example page will be available at [http://localhost:8080/static/hello.html](http://localhost:8080/static/hello.html). 
diff --git a/_overviews/toolkit/web-server-websockets.md b/_overviews/toolkit/web-server-websockets.md new file mode 100644 index 000000000..653bd7f15 --- /dev/null +++ b/_overviews/toolkit/web-server-websockets.md @@ -0,0 +1,118 @@ +--- +title: How to use websockets? +type: section +description: Using websockets with Cask +num: 35 +previous-page: web-server-input +next-page: web-server-cookies-and-decorators +--- + +{% include markdown.html path="_markdown/install-cask.md" %} + +You can create a WebSocket endpoint with the `@cask.websocket` annotation. The endpoint method should return a +`cask.WsHandler` instance defining how the communication should take place. It can also return a `cask.Response`, which rejects the +attempt at forming a WebSocket connection. + +The connection can also be closed by sending a `cask.Ws.close()` message through the WebSocket channel. + +Create an HTML file named `websockets.html` with the following content and place it in the `resources ` directory. + +```html + + + +
+ + +
+
+ + + +``` + +The JavaScript code opens a WebSocket connection using the `ws://localhost:8080/websocket` endpoint. The `ws.onmessage` +event handler is executed when the server pushes a message to the browser and `ws.onclose` when the connection is closed. + +Create an endpoint for serving static files using the `@cask.staticResources` annotation and an endpoint for handling +the WebSocket connection. + +{% tabs web-server-websocket-1 class=tabs-scala-version %} +{% tab 'Scala 2' %} +```scala +@cask.staticResources("/static") +def static() = "." + +private def getZoneIdForCity(city: String): Option[ZoneId] = { + import scala.jdk.CollectionConverters._ + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) +} + +@cask.websocket("/websocket") +def websocket(): cask.WsHandler = { + cask.WsHandler { channel => + cask.WsActor { + case cask.Ws.Text("") => channel.send(cask.Ws.Close()) + case cask.Ws.Text(city) => + val text = getZoneIdForCity(city) match { + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + } + channel.send(cask.Ws.Text(text)) + } + } +} + +initialize() +``` +{% endtab %} +{% tab 'Scala 3' %} +```scala +@cask.staticResources("/static") +def static() = "." 
+ +private def getZoneIdForCity(city: String): Option[ZoneId] = + import scala.jdk.CollectionConverters.* + ZoneId.getAvailableZoneIds.asScala.find(_.endsWith("/" + city)).map(ZoneId.of) + +@cask.websocket("/websocket") +def websocket(): cask.WsHandler = + cask.WsHandler { channel => + cask.WsActor { + case cask.Ws.Text("") => channel.send(cask.Ws.Close()) + case cask.Ws.Text(city) => + val text = getZoneIdForCity(city) match + case Some(zoneId) => s"Current date is: ${ZonedDateTime.now().withZoneSameInstant(zoneId)}" + case None => s"Couldn't find time zone for city $city" + channel.send(cask.Ws.Text(text)) + } + } + +initialize() +``` +{% endtab %} +{% endtabs %} + +In the `cask.WsHandler` we define a `cask.WsActor`. It reacts to events (of type `cask.util.Ws.Event`) and uses the +WebSocket channel to send messages. In this example, we receive the name of a city and return the current time there. If the server +receives an empty message, the connection is closed. \ No newline at end of file diff --git a/_overviews/tutorials/scala-for-java-programmers.md b/_overviews/tutorials/scala-for-java-programmers.md index bc4a551d9..ac31c42dd 100644 --- a/_overviews/tutorials/scala-for-java-programmers.md +++ b/_overviews/tutorials/scala-for-java-programmers.md @@ -160,38 +160,49 @@ package, so can be accessed from anywhere in a program. > **Note:** The following assumes you are using Scala on the command line +If we save the above program in a file called +`HelloWorld.scala`, we can run it by issuing the following +command (the greater-than sign `>` represents the shell prompt +and should not be typed): + +```shell +> scala run HelloWorld.scala +``` + +The program will be automatically compiled (with compiled classes somewhere in the newly created `.scala-build` directory) +and executed, producing an output similar to: +``` +Compiling project (Scala {{site.scala-3-version}}, JVM (20)) +Compiled project (Scala {{site.scala-3-version}}, JVM (20)) +Hello, World! 
+``` + #### Compiling From the Command Line -To compile the example, we use `scalac`, the Scala compiler. `scalac` +To compile the example, we use `scala compile` command, which will invoke the Scala compiler, `scalac`. `scalac` works like most compilers: it takes a source file as argument, maybe some options, and produces one or several output files. The outputs it produces are standard Java class files. -If we save the above program in a file called -`HelloWorld.scala`, we can compile it by issuing the following -command (the greater-than sign `>` represents the shell prompt -and should not be typed): - ```shell -> scalac HelloWorld.scala +> scala compile HelloWorld.scala -d . ``` -This will generate a few class files in the current directory. One of +This will generate a few class files in the current directory (`-d` option sets the compilation output directory). One of them will be called `HelloWorld.class`, and contains a class which can be directly executed using the `scala` command, as the following section shows. #### Running From the Command Line -Once compiled, a Scala program can be run using the `scala` command. +Once compiled, the program can be run using the `scala run` command. Its usage is very similar to the `java` command used to run Java -programs, and accepts the same options. The above example can be +programs, and accepts similar options. The above example can be executed using the following command, which produces the expected output: ```shell -> scala -classpath . HelloWorld - +> scala run --main-class HelloWorld -classpath . Hello, World! ``` @@ -649,7 +660,7 @@ they can be used to define the type of the trees for our example: ```scala abstract class Tree object Tree { - case class Sum(l: Tree, r: Tree) extends Tree + case class Sum(left: Tree, right: Tree) extends Tree case class Var(n: String) extends Tree case class Const(v: Int) extends Tree } @@ -682,7 +693,7 @@ but also to implement ADTs. 
Here is how they can be used to define the type of the trees for our example: ```scala enum Tree: - case Sum(l: Tree, r: Tree) + case Sum(left: Tree, right: Tree) case Var(n: String) case Const(v: Int) ``` @@ -750,7 +761,7 @@ Scala as follows, using a pattern match on a tree value `t`: import Tree._ def eval(t: Tree, ev: Environment): Int = t match { - case Sum(l, r) => eval(l, ev) + eval(r, ev) + case Sum(left, right) => eval(left, ev) + eval(right, ev) case Var(n) => ev(n) case Const(v) => v } @@ -762,7 +773,7 @@ def eval(t: Tree, ev: Environment): Int = t match { import Tree.* def eval(t: Tree, ev: Environment): Int = t match - case Sum(l, r) => eval(l, ev) + eval(r, ev) + case Sum(left, right) => eval(left, ev) + eval(right, ev) case Var(n) => ev(n) case Const(v) => v ``` @@ -773,12 +784,12 @@ def eval(t: Tree, ev: Environment): Int = t match You can understand the precise meaning of the pattern match as follows: 1. it first checks if the tree `t` is a `Sum`, and if it - is, it binds the left sub-tree to a new variable called `l` and - the right sub-tree to a variable called `r`, and then proceeds + is, it binds the left sub-tree to a new variable called `left` and + the right sub-tree to a variable called `right`, and then proceeds with the evaluation of the expression following the arrow; this expression can (and does) make use of the variables bound by the - pattern appearing on the left of the arrow, i.e., `l` and - `r`, + pattern appearing on the left of the arrow, i.e., `left` and + `right`, 2. 
if the first check does not succeed, that is, if the tree is not a `Sum`, it goes on and checks if `t` is a `Var`; if it is, it binds the name contained in the `Var` node to a @@ -841,7 +852,7 @@ obtain the following definition: import Tree._ def derive(t: Tree, v: String): Tree = t match { - case Sum(l, r) => Sum(derive(l, v), derive(r, v)) + case Sum(left, right) => Sum(derive(left, v), derive(right, v)) case Var(n) if v == n => Const(1) case _ => Const(0) } @@ -853,7 +864,7 @@ def derive(t: Tree, v: String): Tree = t match { import Tree.* def derive(t: Tree, v: String): Tree = t match - case Sum(l, r) => Sum(derive(l, v), derive(r, v)) + case Sum(left, right) => Sum(derive(left, v), derive(right, v)) case Var(n) if v == n => Const(1) case _ => Const(0) ``` diff --git a/_overviews/tutorials/scala-on-android.md b/_overviews/tutorials/scala-on-android.md index 459e485ce..49749aa85 100644 --- a/_overviews/tutorials/scala-on-android.md +++ b/_overviews/tutorials/scala-on-android.md @@ -68,7 +68,7 @@ Make sure your `gcc` is at least version 6. [You can try following these steps]( #### The example app -if you reached this point and everything seems to work, it means you probably should be able to compile and run the example app called [HelloScala](https://github.com/makingthematrix/scalaonandroid/tree/main/helloscala). HelloScala is based on [HelloGluon](https://github.com/gluonhq/gluon-samples/tree/master/HelloGluon) from [Gluon samples](https://github.com/gluonhq/gluon-samples). Gluon is a company that maintains JavaFX and provides libraries that give us a layer of abstraction between our code and the device — be it desktop, Android, or iOS. It has some interesting implications: for example, you will see in the code that we check if we are on the desktop instead of Android, because if yes then we need to provide window size for our app. If we are on Android, we can just let the app’s window take the whole screen. 
If you decide to write something more complex with this tech stack, you will quickly see that you can use Gluon’s libraries and JavaFX (maybe together with [ScalaFX](http://www.scalafx.org/)) to achieve the same results other developers get by tinkering with Android SDK, while you are writing code that can be easily re-used on other platforms as well. +if you reached this point and everything seems to work, it means you probably should be able to compile and run the example app called [HelloScala](https://github.com/makingthematrix/scalaonandroid/tree/main/helloscala). HelloScala is based on [HelloGluon](https://github.com/gluonhq/gluon-samples/tree/master/HelloGluon) from [Gluon samples](https://github.com/gluonhq/gluon-samples). Gluon is a company that maintains JavaFX and provides libraries that give us a layer of abstraction between our code and the device — be it desktop, Android, or iOS. It has some interesting implications: for example, you will see in the code that we check if we are on the desktop instead of Android, because if yes then we need to provide window size for our app. If we are on Android, we can just let the app’s window take the whole screen. If you decide to write something more complex with this tech stack, you will quickly see that you can use Gluon’s libraries and JavaFX (maybe together with [ScalaFX](https://scalafx.github.io/)) to achieve the same results other developers get by tinkering with Android SDK, while you are writing code that can be easily re-used on other platforms as well. In the `pom.xml` of HelloScala you will find a list of plugins and dependencies our example app uses. Let’s take a look at some of them. @@ -136,7 +136,7 @@ If you managed to build one of the example apps and want to code something more - Install [Scene Builder](https://gluonhq.com/products/scene-builder/) and learn how to build GUI with it. Apart from the docs, you can find a lot of tutorials about it on YouTube. 
- Look through [Gluon’s documentation of Glisten and Attach](https://docs.gluonhq.com/) to learn how to make your app look better on a mobile device, and how to get access to your device’s features. - Download an example from [Gluon’s list of samples](https://docs.gluonhq.com/) and rewrite it to Scala. And when you do, let me know! -- Look into [ScalaFX](http://www.scalafx.org/) — a more declarative, Scala-idiomatic wrapper over JavaFX. +- Look into [ScalaFX](https://scalafx.github.io/) — a more declarative, Scala-idiomatic wrapper over JavaFX. - Download some other examples from [the “Scala on Android” repository on GitHub](https://github.com/makingthematrix/scalaonandroid). Contact me, if you write an example app of your own and want me to include it. - Join us on the official Scala discord — we have a [#scala-android channel](https://discord.gg/UuDawpq7) there. - There is also an [#android channel](https://discord.gg/XHMt6Yq4) on the “Learning Scala” discord. diff --git a/_pl/tour/automatic-closures.md b/_pl/tour/automatic-closures.md deleted file mode 100644 index 8251a6be3..000000000 --- a/_pl/tour/automatic-closures.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -layout: tour -title: Automatyczna konstrukcja domknięć -partof: scala-tour - -num: 30 -language: pl -next-page: annotations -previous-page: operators ---- - -Scala pozwala na przekazywanie funkcji bezparametrycznych jako argumenty dla metod. Kiedy tego typu metoda jest wywołana, właściwe parametry dla funkcji bezparametrycznych nie są ewaluowane i przekazywana jest pusta funkcja, która enkapsuluje obliczenia odpowiadającego parametru (tzw. *wywołanie-przez-nazwę*). - -Poniższy kod demonstruje działanie tego mechanizmu: - -```scala mdoc -def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } -var i = 10 -whileLoop (i > 0) { - println(i) - i -= 1 -} -``` - -Funkcja `whileLoop` pobiera dwa parametry: `cond` i `body`. 
Kiedy funkcja jest aplikowana, jej właściwe parametry nie są ewaluowane. Lecz gdy te parametry są wykorzystane w ciele `whileLoop`, zostanie ewaluowana niejawnie utworzona funkcja zwracająca ich prawdziwą wartość. Zatem metoda `whileLoop` implementuje rekursywnie pętlę while w stylu Javy. - -Możemy połączyć ze sobą wykorzystanie [operatorów infiksowych/postfiksowych](operators.html) z tym mechanizmem aby utworzyć bardziej złożone wyrażenia. - -Oto implementacja pętli w stylu wykonaj-dopóki: - -```scala mdoc:reset -def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) -protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } -} -var i = 10 -loop { - println("i = " + i) - i -= 1 -} unless (i == 0) -``` - -Funkcja `loop` przyjmuje ciało pętli oraz zwraca instancję klasy `LoopUnlessCond` (która enkapsuluje to ciało). Warto zwrócić uwagę, że ciało tej funkcji nie zostało jeszcze ewaluowane. Klasa `LoopUnlessCond` posiada metodę `unless`, którą możemy wykorzystać jako *operator infiksowy*. W ten sposób uzyskaliśmy całkiem naturalną składnię dla naszej nowej pętli: `loop { < stats > } unless ( < cond > )`. 
- -Oto wynik działania programu `TargetTest2`: - -``` -i = 10 -i = 9 -i = 8 -i = 7 -i = 6 -i = 5 -i = 4 -i = 3 -i = 2 -i = 1 -``` diff --git a/_pt-br/tour/annotations.md b/_pt-br/tour/annotations.md index 5d0842e42..55d49cc44 100644 --- a/_pt-br/tour/annotations.md +++ b/_pt-br/tour/annotations.md @@ -5,7 +5,7 @@ partof: scala-tour num: 31 next-page: packages-and-imports -previous-page: automatic-closures +previous-page: operators language: pt-br --- diff --git a/_pt-br/tour/automatic-closures.md b/_pt-br/tour/automatic-closures.md deleted file mode 100644 index dfa5b9dd1..000000000 --- a/_pt-br/tour/automatic-closures.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -layout: tour -title: Construção Automática de Closures de Tipo-Dependente -partof: scala-tour - -num: 30 -next-page: annotations -previous-page: operators -language: pt-br ---- - -_Nota de tradução: A palavra `closure` em pode ser traduzida como encerramento/fechamento, porém é preferível utilizar a notação original_ - -Scala permite funções sem parâmetros como parâmetros de métodos. Quando um tal método é chamado, os parâmetros reais para nomes de função sem parâmetros não são avaliados e uma função nula é passada em vez disso, tal função encapsula a computação do parâmetro correspondente (isso é conhecido por avaliação *call-by-name*). - -O código a seguir demonstra esse mecanismo: - -```scala mdoc -def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } -var i = 10 -whileLoop (i > 0) { - println(i) - i -= 1 -} -``` - -A função `whileLoop` recebe dois parâmetros: `cond` e `body`. Quando a função é aplicada, os parâmetros reais não são avaliados. Mas sempre que os parâmetros formais são usados no corpo de `whileLoop`, as funções nulas criadas implicitamente serão avaliadas em seu lugar. Assim, o nosso método `whileLoop` implementa um while-loop Java-like com um esquema de implementação recursiva. 
- -Podemos combinar o uso de [operadores infix/postfix](operators.html) com este mecanismo para criar declarações mais complexas (com uma sintaxe agradável). - -Aqui está a implementação de uma instrução que executa loop a menos que uma condição seja satisfeita: - -```scala mdoc:reset -def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) -protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } -} -var i = 10 -loop { - println("i = " + i) - i -= 1 -} unless (i == 0) -``` - -A função `loop` aceita apenas um corpo e retorna uma instância da classe` LoopUnlessCond` (que encapsula este objeto de corpo). Note que o corpo ainda não foi avaliado. A classe `LoopUnlessCond` tem um método `unless` que podemos usar como um *operador infix*. Dessa forma, obtemos uma sintaxe bastante natural para nosso novo loop: `loop { } unless ( )`. - -Aqui está a saída de quando o `TargetTest2` é executado: - -``` -i = 10 -i = 9 -i = 8 -i = 7 -i = 6 -i = 5 -i = 4 -i = 3 -i = 2 -i = 1 -``` diff --git a/_pt-br/tour/operators.md b/_pt-br/tour/operators.md index b38e0e310..627b968e7 100644 --- a/_pt-br/tour/operators.md +++ b/_pt-br/tour/operators.md @@ -4,7 +4,7 @@ title: Operadores partof: scala-tour num: 29 -next-page: automatic-closures +next-page: annotations previous-page: type-inference language: pt-br --- diff --git a/_pt-br/tour/tour-of-scala.md b/_pt-br/tour/tour-of-scala.md index a455ddd00..970b47d0c 100644 --- a/_pt-br/tour/tour-of-scala.md +++ b/_pt-br/tour/tour-of-scala.md @@ -38,7 +38,6 @@ Um [mecanismo de inferência de tipo local](type-inference.html) se encarrega pa Na prática, o desenvolvimento de aplicações de um determinado domínio geralmente requer uma linguagem de domínio específico. 
Scala fornece uma combinação única de mecanismos de linguagem que facilitam a adição suave de novas construções de linguagem na forma de bibliotecas: * qualquer método pode ser utilizado como um [operador infix ou postfix](operators.html) -* [closures são construídas automaticamente dependendo do tipo esperado](automatic-closures.html) (tipo alvo). Uma utilização conjunta de ambos os recursos facilita a definição de novas instruções sem estender a sintaxe e sem usar meta-programação como macros. diff --git a/_ru/getting-started/index.md b/_ru/getting-started/install-scala.md similarity index 99% rename from _ru/getting-started/index.md rename to _ru/getting-started/install-scala.md index e9d8e0bee..f6968753f 100644 --- a/_ru/getting-started/index.md +++ b/_ru/getting-started/install-scala.md @@ -90,7 +90,8 @@ newcomer_resources: Проверьте корректность установки с помощью команды `scala -version`, которая должна вывести: ```bash $ scala -version -Scala code runner version {{site.scala-3-version}} -- Copyright 2002-2022, LAMP/EPFL +Scala code runner version: 1.4.3 +Scala version (default): {{site.scala-3-version}} ``` Если сообщение не выдано, возможно, необходимо перезайти в терминал (или перезагрузиться), чтобы изменения вступили в силу. diff --git a/_ru/index.md b/_ru/index.md index df628955a..7ac8c2a45 100644 --- a/_ru/index.md +++ b/_ru/index.md @@ -11,7 +11,7 @@ sections: - title: "Приступая к работе" description: "Установите Scala на свой компьютер и начните писать код на Scala!" icon: "fa fa-rocket" - link: /ru/getting-started/index.html + link: /ru/getting-started/install-scala.html - title: "Тур по Scala" description: "Вступительный обзор по основным возможностям языка." 
icon: "fa fa-flag" diff --git a/_ru/scala3/book/taste-intro.md b/_ru/scala3/book/taste-intro.md index 74f5996a5..58e15d8e2 100644 --- a/_ru/scala3/book/taste-intro.md +++ b/_ru/scala3/book/taste-intro.md @@ -28,4 +28,4 @@ next-page: taste-hello-world [reference]: {{ site.scala3ref }}/overview.html -[get-started]: {% link _overviews/getting-started/index.md %} +[get-started]: {% link _overviews/getting-started/install-scala.md %} diff --git a/_ru/scala3/book/tools-sbt.md b/_ru/scala3/book/tools-sbt.md index 6dd330c28..be70b9b05 100644 --- a/_ru/scala3/book/tools-sbt.md +++ b/_ru/scala3/book/tools-sbt.md @@ -485,4 +485,4 @@ sbt:HelloScalaTest> test - [The sbt documentation](https://www.scala-sbt.org/1.x/docs/) - [The ScalaTest website](https://www.scalatest.org/) -[getting_started]: {{ site.baseurl }}/ru/getting-started/index.html +[getting_started]: {{ site.baseurl }}/ru/getting-started/install-scala.html diff --git a/_ru/tour/automatic-closures.md b/_ru/tour/automatic-closures.md deleted file mode 100644 index 8f0a8d445..000000000 --- a/_ru/tour/automatic-closures.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -layout: tour -title: Конструкция Автоматического Замыкания Зависимого Типа -language: ru -partof: scala-tour -num: 14 ---- - -Scala допускает использование в качестве параметров методов имена беспараметрических функций. При вызове такого метода фактические параметры для беспараметрических функций не вычисляются, а передается функция с нулем аргументов, которая захватывает вычисление соответствующего параметра (так называемый *вызов по имени*). - -Следующий код демонстрирует этот механизм: - - object TargetTest1 extends Application { - def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } - var i = 10 - whileLoop (i > 0) { - println(i) - i -= 1 - } - } - -Функция whileLoop принимает два параметра `cond` и `body`. При использовании функции значения этих параметров не вычисляются. 
Но всякий раз, когда параметры используются в теле `whileLoop`, их значение будет вычисляться заново через использование автоматически созданных неявно вызываемых функций. Таким образом, наш метод `whileLoop` реализует Java-подобный цикл while-loop со схемой рекурсивной реализации. - -Мы можем комбинировать использование [инфиксных/постфиксных операторов](operators.html) с этим механизмом для создания более сложных выражений (с хорошим синтаксисом). - -Вот реализация loop-unless выражения: - - object TargetTest2 extends Application { - def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) - protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } - } - var i = 10 - loop { - println("i = " + i) - i -= 1 - } unless (i == 0) - } -Функция `loop` принимает только тело цикла и возвращает экземпляр класса `LoopUnlessCond` (который захватывает это тело цикла). Обратите внимание, что тело еще не вычислено. Класс `LoopUnlessCond` имеет метод `unless`, который мы можем использовать как *инфиксный оператор*. Таким образом, мы получаем вполне естественный синтаксис для нашего нового цикла: `loop { < выражение > } unless ( < условие > )`. 
- - -Ниже приведен вывод выполнения `TargetTest2`: - - i = 10 - i = 9 - i = 8 - i = 7 - i = 6 - i = 5 - i = 4 - i = 3 - i = 2 - i = 1 diff --git a/_sips/process-specification.md b/_sips/process-specification.md index 8263fae13..93a15ce81 100644 --- a/_sips/process-specification.md +++ b/_sips/process-specification.md @@ -266,12 +266,11 @@ The current committee members are: - Lukas Rytz ([@lrytz](https://github.com/lrytz)), Lightbend - Martin Odersky ([@odersky](https://github.com/odersky)), EPFL - Oron Port ([@soronpo](https://github.com/soronpo)), DFiant Inc -- Paweł Marks ([@Kordyjan](https://github.com/Kordyjan)), VirtusLab - Sébastien Doeraene ([@sjrd](https://github.com/sjrd)), Scala Center The current Chairperson is: -- Anatolii Kmetiuk ([@Toli](https://github.com/anatoliykmetyuk)), Scala Center +- Dimi Racordon ([@kyouko-taiga](https://github.com/kyouko-taiga)), EPFL The current Secretary is: diff --git a/_sips/sips/alternative-bind-patterns.md b/_sips/sips/alternative-bind-patterns.md deleted file mode 100644 index dd6b1fd86..000000000 --- a/_sips/sips/alternative-bind-patterns.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: SIP-60 - Alternative bind patterns -status: waiting-for-implementation -pull-request-number: 74 -stage: implementation - ---- diff --git a/_sips/sips/alternative-bind-variables.md b/_sips/sips/alternative-bind-variables.md new file mode 100644 index 000000000..58cc4d4dd --- /dev/null +++ b/_sips/sips/alternative-bind-variables.md @@ -0,0 +1,331 @@ +--- +layout: sip +permalink: /sips/:title.html +stage: implementation +status: waiting-for-implementation +presip-thread: https://contributors.scala-lang.org/t/pre-sip-bind-variables-for-alternative-patterns/6321/13 +title: SIP-60 - Bind variables within alternative patterns +--- + +**By: Yilin Wei** + +## History + +| Date | Version | +|---------------|--------------------| +| Sep 17th 2023 | Initial Draft | +| Jan 16th 2024 | Amendments | + +## Summary + +Pattern matching is one of the most 
commonly used features in Scala by beginners and experts alike. Most of +the features of pattern matching compose beautifully — for example, a user who learns about bind variables +and guard patterns can mix the two features intuitively. + +One of the few outstanding cases where this is untrue, is when mixing bind variables and alternative patterns. The part of +current [specification](https://scala-lang.org/files/archive/spec/2.13/08-pattern-matching.html) which we are concerned with is under section **8.1.12** and is copied below, with the relevant clause +highlighted. + +> … All alternative patterns are type checked with the expected type of the pattern. **They may not bind variables other than wildcards**. The alternative … + +We propose that this restriction be lifted and this corner case be eliminated. + +Removing the corner case would make the language easier to teach, reduce friction and allow users to express intent in a more natural manner. + +## Motivation + +## Scenario + +The following scenario is shamelessly stolen from [PEP 636](https://peps.python.org/pep-0636), which introduces pattern matching to the +Python language. + +Suppose a user is writing classic text adventure game such as [Zork](https://en.wikipedia.org/wiki/Zork). For readers unfamiliar with +text adventure games, the player typically enters freeform text into the terminal in the form of commands to interact with the game +world. Examples of commands might be `"pick up rabbit"` or `"open door"`. + +Typically, the commands are tokenized and parsed. After a parsing stage we may end up with a encoding which is similar to the following: + +```scala +enum Word + case Get, North, Go, Pick, Up + case Item(name: String) + + case class Command(words: List[Word]) +``` + +In this encoding, the string `pick up jar`, would be parsed as `Command(List(Pick, Up, Item("jar")))`. + +Once the command is parsed, we want to actually *do* something with the command. 
With this particular encoding, +we would naturally reach for a pattern match — in the simplest case, we could get away with a single recursive function for +our whole program. + +Suppose we take the simplest example where we want to match on a command like `"north"`. The pattern match consists of +matching on a single stable identifier, `North` and the code would look like this: + +~~~ scala +import Command.* + +def loop(cmd: Command): Unit = + cmd match + case Command(North :: Nil) => // Code for going north +~~~ + +However, as we begin play-testing the actual text adventure, we observe that users type `"go north"`. We decide +our program should treat the two distinct commands as synonyms. At this point we would reach for an alternative pattern `|` and +refactor the code like so: + +~~~ scala + case Command(North :: Nil | Go :: North :: Nil) => // Code for going north +~~~ + +This clearly expresses our intent that the two commands map to the same underlying logic. + +Later we decide that we want more complex logic in our game; perhaps allowing the user to pick up +items with a command like `pick up jar`. We would then extend our function with another case, binding the variable `name`: + +~~~ scala + case Command(Pick :: Up :: Item(name) :: Nil) => // Code for picking up items +~~~ + +Again, we might realise through our play-testing that users type `get` as a synonym for `pick up`. After playing around +with alternative patterns, we may reasonably write something like: + +~~~ scala + case Command(Pick :: Up :: Item(name) :: Nil | Get :: Item(name) :: Nil) => // Code for picking up items +~~~ + +Unfortunately at this point, we are stopped in our tracks by the compiler. The bind variable for `name` cannot be used in conjunction with alternative patterns. +We must instead choose a different encoding. We carefully consult the specification and find that this is not possible.
+ +We can, of course, work around it by hoisting the logic to a helper function in the nearest scope which allows function definitions: + +~~~ scala +def loop(cmd: Command): Unit = + def pickUp(item: String): Unit = // Code for picking up item + cmd match + case Command(Pick :: Up :: Item(name)) => pickUp(name) + case Command(Get :: Item(name)) => pickUp(name) +~~~ + +Or any number of different encodings. However, all of them are less intuitive and less obvious than the code we tried to write. + +## Commentary + +Removing the restriction leads to more obvious encodings in the case of alternative patterns. Arguably, the language +would be simpler and easier to teach — we do not have to remember that bind patterns and alternatives +do not mix and need to teach newcomers the workarounds. + +For languages which have pattern matching, a significant number also support the same feature. Languages such as [Rust](https://github.com/rust-lang/reference/pull/957) and [Python](https://peps.python.org/pep-0636/#or-patterns) have +supported it for some time. While +this is not a great reason for Scala to do the same, having the feature exist in other languages means that users +are more likely to expect the feature. + +A smaller benefit for existing users is that removing the corner case leads to code which is +easier to review; the absolute code difference between adding a bind variable within an alternative versus switching to a different +encoding entirely is smaller and conveys the intent of such changesets better. + +It is acknowledged, however, that such cases where we share the same logic with alternative branches are relatively rare compared to +the usage of pattern matching in general. The current restrictions are not too arduous to work around for experienced practitioners, which +can be inferred from the relatively low number of comments from the original [issue](https://github.com/scala/bug/issues/182) first raised in 2007.
+ +To summarize, the main arguments for the proposal are to make the language more consistent, simpler and easier to teach. The arguments +against a change are that it will be low impact for the majority of existing users. + +## Proposed solution + +Removing the alternative restriction means that we need to specify some additional constraints. Intuitively, we +need to consider the restrictions on variable bindings within each alternative branch, as well as the types inferred +for each binding within the scope of the pattern. + +## Bindings + +The simplest case of mixing an alternative pattern and bind variables is where we have two `UnApply` methods, with +a single alternative pattern. For now, we specifically only consider the case where each bind variable is of the same +type, like so: + +~~~ scala +enum Foo: + case Bar(x: Int) + case Baz(y: Int) + + def fun = this match + case Bar(z) | Baz(z) => ... // z: Int +~~~ + +For the expression to make sense with the current semantics around pattern matches, `z` must be defined in both branches; otherwise the +case body would be nonsensical if `z` was referenced within it (see [missing variables](#missing-variables) for a proposed alternative). + +Removing the restriction would also allow recursive alternative patterns: + +~~~ scala +enum Foo: + case Bar(x: Int) + case Baz(x: Int) + +enum Qux: + case Quux(y: Int) + case Corge(x: Foo) + + def fun = this match + case Quux(z) | Corge(Bar(z) | Baz(z)) => ... // z: Int +~~~ + +Using an `Ident` within an `UnApply` is not the only way to introduce a binding within the pattern scope. +We also expect to be able to use an explicit binding using an `@` like this: + +~~~ scala +enum Foo: + case Bar() + case Baz(bar: Bar) + + def fun = this match + case Baz(x) | x @ Bar() => ... // x: Foo.Bar +~~~ + +## Types + +We propose that the type of each variable introduced in the scope of the pattern be the least upper-bound of the type +inferred within each branch.
+ +~~~ scala +enum Foo: + case Bar(x: Int) + case Baz(y: String) + + def fun = this match + case Bar(x) | Baz(x) => // x: Int | String +~~~ + +We do not expect any inference to happen between branches. For example, in the case of a GADT we would expect the second branch of +the following case to match all instances of `Bar`, regardless of the type of `A`. + +~~~ scala +enum Foo[A]: + case Bar(a: A) + case Baz(i: Int) extends Foo[Int] + + def fun = this match + case Baz(x) | Bar(x) => // x: Int | A +~~~ + +### Given bind variables + +It is possible to introduce bindings to the contextual scope within a pattern match branch. + +Since most bindings will be anonymous but be referred to within the branches, we expect the _types_ present in the contextual scope for each branch to be the same rather than the _names_. + +~~~ scala + case class Context() + + def run(using ctx: Context): Unit = ??? + + enum Foo: + case Bar(ctx: Context) + case Baz(i: Int, ctx: Context) + + def fun = this match + case Bar(given Context) | Baz(_, given Context) => run // `Context` appears in both branches +~~~ + +This begs the question of what to do in the case of an explicit `@` binding where the user binds a variable to the same _name_ but to different types. We can either expose a `String | Int` within the contextual scope, or simply reject the code as invalid. + +~~~ scala + enum Foo: + case Bar(s: String) + case Baz(i: Int) + + def fun = this match + case Bar(x @ given String) | Baz(x @ given Int) => ??? +~~~ + +To be consistent with the named bindings, we argue that the code should compile and a contextual variable added to the scope with the type of `String | Int`. + +### Quoted patterns + +[Quoted patterns](https://docs.scala-lang.org/scala3/guides/macros/quotes.html#quoted-patterns) will not be supported in this SIP and the behaviour of quoted patterns will remain the same as currently i.e. 
any quoted pattern appearing in an alternative pattern binding a variable or type variable will be rejected as illegal. + +### Alternatives + +#### Enforcing a single type for a bound variable + +We could constrain the type for each bound variable within each alternative branch to be the same type. Notably, this is what languages such as Rust, which do not have sub-typing do. + +However, since untagged unions are part of Scala 3 and the fact that both are represented by the `|`, it felt more natural to discard this restriction. + +#### Type ascriptions in alternative branches + +Another suggestion is that an _explicit_ type ascription by a user ought to be defined for all branches. For example, in the currently proposed rules, the following code would infer the return type to be `Int | A` even though the user has written the statement `id: Int`. + +~~~scala +enum Foo[A]: + case Bar[A](a: A) + case Baz[A](a: A) + + def test = this match + case Bar(id: Int) | Baz(id) => id +~~~ + +In the author's subjective opinion, it is more natural to view the alternative arms as separate branches — which would be equivalent to the function below. + +~~~scala +def test = this match + case Bar(id: Int) => id + case Baz(id) => id +~~~ + +On the other hand, if it is decided that each bound variable ought to be the same type, then arguably "sharing" explicit type ascriptions across branches would reduce boilerplate. + +#### Missing variables + +Unlike in other languages, we could assign a type, `A | Null`, to a bind variable which is not present in all of the alternative branches. Rust, for example, is constrained by the fact that the size of a variable must be known and untagged unions do not exist. + +Arguably, missing a variable entirely is more likely to be an error — the absence of a requirement for `var` declarations before assigning variables in Python means that beginners can easily assign variables to the wrong variable. 
+ +It may be that the enforcement of having to have the same bind variables within each branch ought to be left to a linter rather than a hard restriction within the language itself. + +## Specification + +We do not believe there are any syntax changes since the current specification already allows the proposed syntax. + +We propose that the following clauses be added to the specification: + +Let $`p_1 | \ldots | p_n`$ be an alternative pattern at an arbitrary depth within a case pattern and let $`\Gamma_n`$ be the named scope associated with each alternative. + +If `p_i` is a quoted pattern binding a variable or type variable, the alternative pattern is considered invalid. Otherwise, let the named variables introduced within each alternative $`p_n`$, be $`x_i \in \Gamma_n`$ and the unnamed contextual variables within each alternative have the type $`T_i \in \Gamma_n`$. + +Each $`p_n`$ must introduce the same set of bindings, i.e. for each $`n`$, $`\Gamma_n`$ must have the same **named** members as $`\Gamma_{n+1}`$ and the set of $`{T_0, ... T_n}`$ must be the same. + +If $`X_{n,i}`$ is the type of the binding $`x_i`$ within an alternative $`p_n`$, then the consequent type, $`X_i`$, of the +variable $`x_i`$ within the pattern scope, $`\Gamma`$, is the least upper-bound of all the types $`X_{n, i}`$ associated with +the variable, $`x_i`$ within each branch. + +## Compatibility + +We believe the changes would be backwards compatible. + +## Related Work + +The language feature exists in multiple languages. Of the more popular languages, Rust added the feature in [2021](https://github.com/rust-lang/reference/pull/957) and +Python within [PEP 636](https://peps.python.org/pep-0636/#or-patterns), the pattern matching PEP in 2020. Of course, Python is untyped and Rust does not have sub-typing +but the semantics proposed are similar to this proposal. + +Within Scala, the [issue](https://github.com/scala/bug/issues/182) was first raised in 2007.
The author is also aware of attempts to fix this issue by [Lionel Parreaux](https://github.com/dotty-staging/dotty/compare/main...LPTK:dotty:vars-in-pat-alts) and the associated [feature request](https://github.com/lampepfl/dotty-feature-requests/issues/12) which +was not submitted to the main dotty repository. + +The associated [thread](https://contributors.scala-lang.org/t/pre-sip-bind-variables-for-alternative-patterns/6321) has some extra discussion around semantics. Historically, there have been multiple similar suggestions — in [2023](https://contributors.scala-lang.org/t/qol-sound-binding-in-pattern-alternatives/6226) by Quentin Bernet and in [2021](https://contributors.scala-lang.org/t/could-it-be-possible-to-allow-variable-binging-in-patmat-alternatives-for-scala-3-x/5235) by Alexey Shuksto. + +## Implementation + +The author has a current in-progress implementation focused on the typer which compiles the examples with the expected types. Interested + parties are welcome to see the WIP [here](https://github.com/lampepfl/dotty/compare/main...yilinwei:dotty:main). + +### Further work + +#### Quoted patterns + +More investigation is needed to see how quoted patterns with bind variables in alternative patterns could be supported. + +## Acknowledgements + +Many thanks to **Zainab Ali** for proof-reading the draft, **Nicolas Stucki** and **Guillaume Martres** for their pointers on the dotty +compiler codebase. 
diff --git a/_sips/sips/better-fors.md b/_sips/sips/better-fors.md new file mode 100644 index 000000000..ed614f712 --- /dev/null +++ b/_sips/sips/better-fors.md @@ -0,0 +1,381 @@ +--- +layout: sip +permalink: /sips/:title.html +stage: implementation +status: under-review +title: SIP-62 - For comprehension improvements +--- + +**By: Kacper Korban (VirtusLab)** + +## History + +| Date | Version | +|---------------|--------------------| +| June 6th 2023 | Initial Draft | +| Feb 15th 2024 | Reviewed Version | + +## Summary + +`for`-comprehensions in Scala 3 improved their usability in comparison to Scala 2, but there are still some pain points relating both usability of `for`-comprehensions and simplicity of their desugaring. + +This SIP tries to address some of those problems, by changing the specification of `for`-comprehensions. From user perspective, the biggest change is allowing aliases at the start of the `for`-comprehensions. e.g. + +``` +for { + x = 1 + y <- Some(2) +} yield x + y +``` + +## Motivation + +There are some clear pain points related to Scala'3 `for`-comprehensions and those can be divided into two categories: + +1. User-facing and code simplicity problems + + Specifically, for the following example written in a Haskell-style do-comprehension + + ```haskell + do + a = largeExpr(arg) + b <- doSth(a) + combineM(a, b) + ``` + in Scala we would have to write + + ```scala + val a = largeExpr(b) + for + b <- doSth(a) + x <- combineM(a, b) + yield x + ``` + + This complicates the code, even in this simple example. +2. The simplicity of desugared code + + The second pain point is that the desugared code of `for`-comprehensions can often be surprisingly complicated. + + e.g. 
+ ```scala + for + a <- doSth(arg) + b = a + yield a + b + ``` + + Intuition would suggest for the desugared code will be of the form + + ```scala + doSth(arg).map { a => + val b = a + a + b + } + ``` + + But because of the possibility of an `if` guard being immediately after the pure alias, the desugared code is of the form + + ```scala + doSth(arg).map { a => + val b = a + (a, b) + }.map { case (a, b) => + a + b + } + ``` + + These unnecessary assignments and additional function calls not only add unnecessary runtime overhead but can also block other optimizations from being performed. + +## Proposed solution + +This SIP suggests the following changes to `for` comprehensions: + +1. Allow `for` comprehensions to start with pure aliases + + e.g. + ```scala + for + a = 1 + b <- Some(2) + c <- doSth(a) + yield b + c + ``` +2. Simpler conditional desugaring of pure aliases. i.e. whenever a series of pure aliases is not immediately followed by an `if`, use a simpler way of desugaring. + + e.g. + ```scala + for + a <- doSth(arg) + b = a + yield a + b + ``` + + will be desugared to + + ```scala + doSth(arg).map { a => + val b = a + a + b + } + ``` + + but + + ```scala + for + a <- doSth(arg) + b = a + if b > 1 + yield a + b + ``` + + will be desugared to + + ```scala + doSth(arg).map { a => + val b = a + (a, b) + }.withFilter { case (a, b) => + b > 1 + }.map { case (a, b) => + a + b + } + ``` + +3. Avoiding redundant `map` calls if the yielded value is the same as the last bound value. + + e.g. + ```scala + for + a <- List(1, 2, 3) + yield a + ``` + + will just be desugared to + + ```scala + List(1, 2, 3) + ``` + +### Detailed description + +#### Ad 1. Allow `for` comprehensions to start with pure aliases + +Allowing `for` comprehensions to start with pure aliases is a straightforward change. 
+ +The Enumerators syntax will be changed from: + +``` +Enumerators ::= Generator {semi Enumerator | Guard} +``` + +to + +``` +Enumerators ::= {Pattern1 `=' Expr semi} Generator {semi Enumerator | Guard} +``` + +Which will allow adding 0 or more aliases before the first generator. + +When desugaring is concerned, a for comprehension starting with pure aliases will generate a block with those aliases as `val` declarations and the rest of the desugared `for` as an expression. Unless the aliases are followed by a guard, then the desugaring should result in an error. + +New desugaring rule will be added: + +```scala +For any N: + for (P_1 = E_1; ... P_N = E_N; ...) + ==> + { + val x_2 @ P_2 = E_2 + ... + val x_N @ P_N = E_N + for (...) + } +``` + +e.g. + +```scala +for + a = 1 + b <- Some(2) + c <- doSth(a) +yield b + c +``` + +will desugar to + +```scala +{ + val a = 1 + for + b <- Some(2) + c <- doSth(a) + yield b + c +} +``` + +#### Ad 2. Simpler conditional desugaring of pure aliases. i.e. whenever a series of pure aliases is not immediately followed by an `if`, use a simpler way of desugaring. + +Currently, for consistency, all pure aliases are desugared as if they are followed by an `if` condition. Which makes the desugaring more complicated than expected. + +e.g. + +The following code: + +```scala +for + a <- doSth(arg) + b = a +yield a + b +``` + +will be desugared to: + +```scala +doSth(arg).map { a => + val b = a + (a, b) +}.map { case (a, b) => + a + b +} +``` + +The proposed change is to introduce a simpler desugaring for common cases, when aliases aren't followed by a guard, and keep the old desugaring method for the other cases. + +A new desugaring rules will be introduced for simple desugaring. + +```scala +For any N: + for (P <- G; P_1 = E_1; ... P_N = E_N; ...) + ==> + G.flatMap (P => for (P_1 = E_1; ... 
P_N = E_N; ...)) + +And: + + for () yield E ==> E + +(Where empty for-comprehensions are excluded by the parser) +``` + +It delegates the desugaring of aliases to the newly introduced rule from the previous improvement. i.e. + +```scala +For any N: + for (P_1 = E_1; ... P_N = E_N; ...) + ==> + { + val x_2 @ P_2 = E_2 + ... + val x_N @ P_N = E_N + for (...) + } +``` + +One other rule also has to be changed, so that the current desugaring method, of passing all the aliases in a tuple with the result, will only be used when desugaring a generator, followed by some aliases, followed by a guard. + +```scala +For any N: + for (P <- G; P_1 = E_1; ... P_N = E_N; if E; ...) + ==> + for (TupleN(P, P_1, ... P_N) <- + for (x @ P <- G) yield { + val x_1 @ P_1 = E_1 + ... + val x_N @ P_N = E_N + TupleN(x, x_1, ..., x_N) + }; if E; ...) +``` + +These changes will make the desugaring work in the following way: + +```scala +for + a <- doSth(arg) + b = a +yield a + b +``` + +will be desugared to + +```scala +doSth(arg).map { a => + val b = a + a + b +} +``` + +but + +```scala +for + a <- doSth(arg) + b = a + if b > 1 +yield a + b +``` + +will be desugared to + +```scala +doSth(arg).map { a => + val b = a + (a, b) +}.withFilter { case (a, b) => + b > 1 +}.map { case (a, b) => + a + b +} +``` + +#### Ad 3. Avoiding redundant `map` calls if the yielded value is the same as the last bound value. + +This change is strictly an optimization. This allows the compiler to get rid of the final `map` call, if the yielded value is the same as the last bound pattern. The pattern can be either a single variable binding or a tuple. + +One desugaring rule has to be modified for this purpose. + +```scala + for (P <- G) yield P ==> G +If P is a variable or a tuple of variables and G is not a withFilter. + + for (P <- G) yield E ==> G.map (P => E) +Otherwise +``` + +e.g. 
+```scala +for + a <- List(1, 2, 3) +yield a +``` + +will just be desugared to + +```scala +List(1, 2, 3) +``` + +### Compatibility + +This change may change the semantics of some programs. It may remove some `map` calls in the desugared code, which may change the program semantics (if the `map` implementation was side-effecting). + +For example the following code will now have only one `map` call, instead of two: +```scala +for + a <- doSth(arg) + b = a +yield a + b +``` + +### Other concerns + +As far as I know, there are no widely used Scala 3 libraries that depend on the desugaring specification of `for`-comprehensions. + +## Links + +1. Scala contributors discussion thread (pre-SIP): https://contributors.scala-lang.org/t/pre-sip-improve-for-comprehensions-functionality/3509/51 +2. Github issue discussion about for desugaring: https://github.com/lampepfl/dotty/issues/2573 +3. Scala 2 implementation of some of the improvements: https://github.com/oleg-py/better-monadic-for +4. Implementation of one of the simplifications: https://github.com/lampepfl/dotty/pull/16703 +5. 
Draft implementation branch: https://github.com/dotty-staging/dotty/tree/improved-fors diff --git a/_sips/sips/binary-integer-literals.md b/_sips/sips/binary-integer-literals.md index 0a854792f..ef761601f 100644 --- a/_sips/sips/binary-integer-literals.md +++ b/_sips/sips/binary-integer-literals.md @@ -1,8 +1,8 @@ --- layout: sip title: SIP-42 - Support Binary Integer Literals -stage: implementation -status: waiting-for-implementation +stage: completed +status: shipped permalink: /sips/:title.html --- diff --git a/_sips/sips/clause-interleaving.md b/_sips/sips/clause-interleaving.md index 120271e8d..69619914d 100644 --- a/_sips/sips/clause-interleaving.md +++ b/_sips/sips/clause-interleaving.md @@ -1,8 +1,8 @@ --- layout: sip title: SIP-47 - Clause Interleaving -stage: implementation -status: under-review +stage: completed +status: accepted permalink: /sips/:title.html --- diff --git a/_sips/sips/for-comprehension-improvements.md b/_sips/sips/for-comprehension-improvements.md deleted file mode 100644 index 8fcaefe85..000000000 --- a/_sips/sips/for-comprehension-improvements.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: SIP-62 - For comprehension improvements -status: waiting-for-implementation -pull-request-number: 79 -stage: implementation - ---- diff --git a/_sips/sips/implicit-macro-conversions.md b/_sips/sips/implicit-macro-conversions.md new file mode 100644 index 000000000..82e7ab2ff --- /dev/null +++ b/_sips/sips/implicit-macro-conversions.md @@ -0,0 +1,7 @@ +--- +title: SIP-66 - Implicit macro conversions +status: under-review +pull-request-number: 86 +stage: design + +--- diff --git a/_sips/sips/match-types-spec.md b/_sips/sips/match-types-spec.md index 1624e64cc..aa868061d 100644 --- a/_sips/sips/match-types-spec.md +++ b/_sips/sips/match-types-spec.md @@ -286,8 +286,18 @@ At the top level, `variance = 1` and `scrutIsWidenedAbstract = false`. * If `q` is a skolem type `∃α:X`, fail as not specific. 
* Otherwise, compute `matchPattern(ti, q.Y, 0, scrutIsWidenedAbstract)`. * Otherwise, the underlying type definition of `q.Y` is of the form `= U`: - * If `q` is a skolem type `∃α:X` and `U` refers to `α`, fail as not specific. - * Otherwise, compute `matchPattern(ti, U, 0, scrutIsWidenedAbstract)`. + * If `q` is not a skolem type `∃α:X`, compute `matchPattern(ti, U, 0, scrutIsWidenedAbstract)`. + * Otherwise, let `U' = dropSkolem(U)` be computed as follow: + * `dropSkolem(q)` is undefined. + * `dropSkolem(p.T) = p'.T` where `p' = dropSkolem(p)` if the latter is defined. Otherwise: + * If the underlying type of `p.T` is of the form `= V`, then `dropSkolem(V)`. + * Otherwise `dropSkolem(p.T)` is undefined. + * `dropSkolem(p.x) = p'.x` where `p' = dropSkolem(p)` if the latter is defined. Otherwise: + * If the dealiased underlying type of `p.x` is a singleton type `r.y`, then `dropSkolem(r.y)`. + * Otherwise `dropSkolem(p.x)` is undefined. + * For all other types `Y`, `dropSkolem(Y)` is the type formed by replacing each component `Z` of `Y` by `dropSkolem(Z)`. + * If `U'` is undefined, fail as not specific. + * Otherwise, compute `matchPattern(ti, U', 0, scrutIsWidenedAbstract)`. * If `T` is a concrete type alias to a type lambda: * Let `P'` be the beta-reduction of `P`. * Compute `matchPattern(P', X, variance, scrutIsWidenedAbstract)`. 
diff --git a/_sips/sips/mprove-the-syntax-of-context-bounds-and-givens.md b/_sips/sips/mprove-the-syntax-of-context-bounds-and-givens.md deleted file mode 100644 index f7c291416..000000000 --- a/_sips/sips/mprove-the-syntax-of-context-bounds-and-givens.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: 'SIP-64: Improve the Syntax of Context Bounds and Givens' -status: under-review -pull-request-number: 81 -stage: design - ---- diff --git a/_sips/sips/multiple-assignments.md b/_sips/sips/multiple-assignments.md index 0eb30e241..b55044b7d 100644 --- a/_sips/sips/multiple-assignments.md +++ b/_sips/sips/multiple-assignments.md @@ -1,7 +1,331 @@ --- -title: SIP-59 - Multiple assignments -status: waiting-for-implementation -pull-request-number: 73 +layout: sip +permalink: /sips/:title.html stage: implementation - +status: under-review +presip-thread: https://contributors.scala-lang.org/t/pre-sip-multiple-assignments/6425 +title: SIP-59 - Multiple Assignments --- + +**By: Dimi Racordon** + +## History + +| Date | Version | +|---------------|--------------------| +| Jan 17th 2024 | Initial Draft | + +## Summary + +This proposal discusses the syntax and semantics of a construct to assign multiple variables with a single expression. +This feature would simplify the implementation of operations expressed in terms of relationships between multiple variables, such as [`std::swap`](https://en.cppreference.com/w/cpp/algorithm/swap) in C++. + +## Motivation + +It happens that one has to assign multiple variables "at once" in an algorithm. +For example, let's consider the Fibonacci sequence: + +```scala +class FibonacciIterator() extends Iterator[Int]: + + private var a: Int = 0 + private var b: Int = 1 + + def hasNext = true + def next() = + val r = a + val n = a + b + a = b + b = n + r +``` + +The same iterator could be rewritten more concisely if we could assign multiple variables at once. 
+For example, we can write the following in Swift: + +```swift +struct FibonacciIterator: IteratorProtocol { + + private var a: Int = 0 + private var b: Int = 1 + init() {} + + mutating func next() -> Int? { + defer { (a, b) = (b, a + b) } + return a + } + +} +``` + +Though the differences may seem frivolous at first glance, they are in fact important. +If we look at a formal definition of the Fibonacci sequence (e.g., on [Wikipedia](https://en.wikipedia.org/wiki/Fibonacci_sequence)), we might see something like: + +> The Fibonacci sequence is given by *F(n) = F(n-1) + F(n+1)* where *F(0) = 0* and *F(1) = 1*. + +Although this declarative description says nothing about an evaluation order, it becomes a concern in our Scala implementation as we must encode the relationship into multiple operational steps. +This decomposition offers opportunities to get things wrong: + +```scala +def next() = + val r = a + a = b + b = a + b // invalid semantics, the value of `a` changed "too early" + r +``` + +In contrast, our Swift implementation can remain closer to the formal definition and is therefore more legible and less error-prone. + +Multiple assignments show up in many general-purpose algorithms (e.g., insertion sort, partition, min-max element, ...). +But perhaps the most fundamental one is `swap`, which consists of exchanging two values. + +We often swap values that are stored in some collection. +In this particular case, all is well in Scala because we can ask the collection to swap elements at given positions: + +```scala +extension [T](self: mutable.ArrayBuffer[T]) + def swapAt(i: Int, j: Int) = + val t = self(i) + self(i) = self(j) + self(j) = t + +val a = mutable.ArrayBuffer(1, 2, 3) +a.swapAt(0, 2) +println(a) // ArrayBuffer(3, 2, 1) +``` + +Sadly, one can't implement a generic swap method that wouldn't rely on the ability to index a container. 
+The only way to express this operation in Scala is to "inline" the pattern implemented by `swapAt` every time we need to swap two values.
+
+Having to rewrite this boilerplate is unfortunate.
+Here is an example in a realistic algorithm:
+
+```scala
+extension [T](self: Seq[T])(using Ordering[T])
+  def minMaxElements: Option[(T, T)] =
+    import math.Ordering.Implicits.infixOrderingOps
+
+    // Return None for collections smaller than 2 elements.
+    var i = self.iterator
+    if (!i.hasNext) { return None }
+    var l = i.next()
+    if (!i.hasNext) { return None }
+    var h = i.next()
+
+    // Confirm the initial bounds.
+    if (h < l) { val t = l; l = h; h = t }
+
+    // Process the remaining elements.
+    def loop(): Option[(T, T)] =
+      if (i.hasNext) {
+        val n = i.next()
+        if (n < l) { l = n } else if (n > h) { h = n }
+        loop()
+      } else {
+        Some((l, h))
+      }
+    loop()
+```
+
+*Note: implementation shamelessly copied from [swift-algorithms](https://github.com/apple/swift-algorithms/blob/main/Sources/Algorithms/MinMax.swift).*
+
+The swap occurs in the middle of the method with the sequence of expressions `val t = l; l = h; h = t`.
+To borrow from the words of Edsger Dijkstra [1, Chapter 11]:
+
+> [that] is cumbersome and ugly compared with the [multiple] assignment.
+
+While `swap` is a very common operation, it's only an instance of a more general class of operations that are expressed in terms of relationships between multiple variables.
+The definition of the Fibonacci sequence is another example.
+
+## Proposed solution
+
+The proposed solution is to add a language construct to assign multiple variables in a single expression.
+Using this construct, swapping two values can be written as follows: + +```scala +var a = 2 +var b = 4 +(a, b) = (b, a) +println(s"$a$b") // 42 +``` + +The above Fibonacci iterator can be rewritten as follows: + +```scala +class FibonacciIterator() extends Iterator[Int]: + + private var a: Int = 0 + private var b: Int = 1 + + def hasNext = true + def next() = + val r = a + (a, b) = (b, a + b) + r +``` + +Multiple assignments also alleviate the need for a swap method on collections, as the same idiomatic pattern can be reused to exchange elements at given indices: + +```scala +val a = mutable.ArrayBuffer(1, 2, 3) +(a(0), a(2)) = (a(2), a(0)) +println(a) // ArrayBuffer(3, 2, 1) +``` + +### Specification + +A multiple assignment is an expression of the form `AssignTarget ‘=’ Expr` where: + +``` +AssignTarget ::= ‘(’ AssignTargetNode {‘,’ AssignTargetNode} ‘)’ +AssignTargetNode ::= Expr | AssignTarget +``` + +An assignment target describes a structural pattern that can only be matched by a compatible composition of tuples. +For example, the following program is legal. + +```scala +def f: (Boolean, Int) = (true, 42) +val a = mutable.ArrayBuffer(1, 2, 3) +def b = a +var x = false + +(x, a(0)) = (false, 1337) +(x, a(1)) = f +((x, a(1)), b(2)) = (f, 9000) +(x) = Tuple1(false) +``` + +A mismatch between the structure of a multiple assignment's target and the result of its RHS is a type error. +It cannot be detected during parsing because at this stage the compiler would not be able to determine the shape of an arbitrary expression's result. +For example, all multiple assignments in the following program are ill-typed: + +```scala +def f: (Boolean, Int) = (true, 42) +val a = mutable.ArrayBuffer(1, 2, 3) +def b = a +var x = false + +(a(1), x) = f // type mismatch +(x, a(1), b(2)) = (f, 9000) // structural mismatch +(x) = false // structural mismatch +(x) = (1, 2) // structural mismatch +``` + +Likewise, `(x) = Tuple1(false)` is _not_ equivalent to `x = Tuple1(false)`. 
+The former is a multiple assignment while the latter is a regular assignment, as described by the [current grammar](https://docs.scala-lang.org/scala3/reference/syntax.html) (see `Expr1`).
+Though this distinction is subtle, multiple assignments involving unary tuples should be rare.
+
+The operational semantics of multiple assignments (aka concurrent assignments) have been studied extensively in scientific literature (e.g., [1, 2]).
+A first intuition is that the most desirable semantics can be achieved by fully evaluating the RHS of the assignment before assigning any expression in the LHS [1].
+However, additional considerations must be given w.r.t. the independence of the variables on the LHS to guarantee deterministic results.
+For example, consider the following expression:
+
+```scala
+(x, x) = (1, 2)
+```
+
+While one may conclude that such an expression should be an error [1], it is in general difficult to guarantee value independence in a language with pervasive reference semantics.
+Further, it is desirable to write expressions of the form `(a(0), a(2)) = (a(2), a(0))`, as shown in the previous section.
+Another complication is that multiple assignments should uphold the general left-to-right evaluation semantics of the Scala language.
+For example, `a.b = c` requires `a` to be evaluated _before_ `c`.
+
+Note that regular assignments desugar to function calls (e.g., `a(b) = c` is sugar for `a.update(b, c)`).
+One property of these desugarings is that the assigned value is always the last expression to be evaluated before the method performing the assignment is called.
+Given this observation, we address the abovementioned issues by defining the following algorithm:
+
+1. Traverse the LHS structure in inorder and for each leaf:
+   - Evaluate each outermost subexpression to its value
+   - Form a closure capturing these values and accepting a single argument to perform the desugared assignment
+   - Associate that closure to the leaf
+2.
Compute the value of the RHS, which forms a tree
+3. Traverse the LHS and RHS structures pairwise in inorder and for each leaf:
+   - Apply the closure formerly associated to the LHS on RHS value
+
+For instance, consider the following definitions.
+
+```scala
+def f: (Boolean, Int) = (true, 42)
+val a = mutable.ArrayBuffer(1, 2, 3)
+def b = a
+var x = false
+```
+
+The evaluation of the expression `((x, a(a(0))), b(2)) = (f, 9000)` is as follows:
+
+1. form a closure `f0 = (rhs) => x_=(rhs)`
+2. evaluate `a(0)`; result is `1`
+3. form a closure `f1 = (rhs) => a.update(1, rhs)`
+4. evaluate `b`; result is `a`
+5. evaluate `2`
+6. form a closure `f2 = (rhs) => a.update(2, rhs)`
+7. evaluate `(f, 9000)`; result is `((true, 42), 9000)`
+8. evaluate `f0(true)`
+9. evaluate `f1(42)`
+10. evaluate `f2(9000)`
+
+After the assignment, `x == true` and `a == List(1, 42, 9000)`.
+
+The compiler is allowed to ignore this procedure and generate different code for optimization purposes as long as it can guarantee that such a change is not observable.
+For example, given two local variables `x` and `y`, their assignments in `(x, y) = (1, 2)` can be reordered or even performed in parallel.
+
+### Compatibility
+
+This proposal is purely additive and has no backward binary or TASTy compatibility consequences.
+The semantics of the proposed new construct is fully expressible in terms of desugaring into current syntax, interpreted with current semantics.
+
+The proposed syntax is not currently legal Scala.
+Therefore no currently existing program could be interpreted with different semantics using a newer compiler version supporting multiple assignments.
+
+### Other concerns
+
+One understandable concern of the proposed syntax is that a multiple assignment resembles a pattern match, yet has different semantics.
+For example:
+
+```scala
+val (a(x), b) = (true, "!") // 1
+
+(a(x), b) = (true, "!") // 2
+```
+
+If `a` is an instance of a type with a companion extractor object, the two lines above have completely different semantics.
+The first declares two local bindings `x` and `b`, applying pattern matching to determine their value from the tuple `(true, "!")`.
+The second is assigning the values `true` and `"!"` to `a(x)` and `b`, respectively.
+
+Though possibly surprising, the difference in behavior is easy to explain.
+The first line applies pattern matching because it starts with `val`.
+The second doesn't because it involves no pattern matching introducer.
+Further, note that a similar situation can already be reproduced in current Scala:
+
+```scala
+val a(x) = true // 1
+
+a(x) = true // 2
+```
+
+## Alternatives
+
+The current proposal supports arbitrary tree structures on the LHS of the assignment.
+A simpler alternative would be to only support flat sequences, allowing the syntax to dispense with parentheses.
+
+```scala
+a, b = b, a
+```
+
+While this approach is more lightweight, the reduced expressiveness inhibits potentially interesting use cases.
+Further, consistently using tuple syntax on both sides of the equality operator clearly distinguishes regular and multiple assignments.
+
+## Related work
+
+A Pre-SIP discussion took place prior to this proposal (see [here](https://contributors.scala-lang.org/t/pre-sip-multiple-assignments/6425/1)).
+
+Multiple assignments are present in many contemporary languages.
+This proposal already illustrated them in Swift, but they are also commonly used in Python.
+Multiple assignments have also been studied extensively in scientific literature (e.g., [1, 2]).
+
+## FAQ
+
+## References
+
+1. Edsger W. Dijkstra: A Discipline of Programming. Prentice-Hall 1976, ISBN 013215871X
+2. Ralph-Johan Back, Joakim von Wright: Refinement Calculus - A Systematic Introduction.
Graduate Texts in Computer Science, Springer 1998, ISBN 978-0-387-98417-9 diff --git a/_sips/sips/named-tuples.md b/_sips/sips/named-tuples.md index 0e6ad99d7..db240571a 100644 --- a/_sips/sips/named-tuples.md +++ b/_sips/sips/named-tuples.md @@ -1,7 +1,777 @@ --- -title: SIP-58 - Named Tuples -status: waiting-for-implementation -pull-request-number: 72 +layout: sip +permalink: /sips/named-tuples.html stage: implementation - +status: under-review +presip-thread: https://contributors.scala-lang.org/t/pre-sip-named-tuples/6403/164 +title: SIP-58 - Named Tuples --- + +**By: Martin Odersky** + +## History + +| Date | Version | +|---------------|--------------------| +| Jan 13th 2024 | Initial Draft | + +## Summary + +We propose to add new form of tuples where the elements are named. +Named tuples can be types, terms, or patterns. Syntax examples: +```scala +type Person = (name: String, age: Int) +val Bob: Person = (name = "Bob", age = 33) + +Bob match + case (name = n, age = 22) => ... +``` + +We also propose to revive SIP 43 to support patterns with named fields. Named pattern fields for case classes are analogous to named patterns for tuple elements. User-defined named pattern matching is supported since named tuples can be results of extractor methods. + +## Motivation + + 1. Named tuples are a convenient lightweight way to return multiple results from a function. But the absence of names obscures their meaning, and makes decomposition with _1, _2 ugly and hard to read. The existing alternative is to define a class instead. This does name fields, but is more heavy-weight, both in terms of notation and generated bytecode. Named tuples give the same convenience of definition as regular tuples at far better readability. + + 1. Named tuples are an almost ideal substrate on which to implement relational algebra and other database oriented operations. 
They are a good representation of database rows and allow the definition of generic operations such as projections and joins since they can draw on Scala 3’s existing generic machinery for tuples based on match types.
+
+ 1. Named tuples make named pattern matching trivial to implement. The discussion on SIP 43 showed that without them it’s unclear how to implement named pattern matching at all.
+
+## Proposed solution
+
+The elements of a tuple can now be named. Example:
+```scala
+type Person = (name: String, age: Int)
+val Bob: Person = (name = "Bob", age = 33)
+
+Bob match
+  case (name, age) =>
+    println(s"$name is $age years old")
+
+val persons: List[Person] = ...
+val minors = persons.filter: p =>
+  p.age < 18
+```
+Named bindings in tuples are similar to function parameters and arguments. We use `name: Type` for element types and `name = value` for element values. It is illegal to mix named and unnamed elements in a tuple, or to use the same
+name for two different elements.
+
+Fields of named tuples can be selected by their name, as in the line `p.age < 18` above.
+
+Example:
+
+~~~ scala
+// This is an @main method
+@main def foo(x: Int): Unit =
+  println(x)
+~~~
+
+### Conformance and Convertibility
+
+The order of names in a named tuple matters. For instance, the type `Person` above and the type `(age: Int, name: String)` would be different, incompatible types.
+
+Values of named tuple types can also be defined using regular tuples. For instance:
+```scala
+val Laura: Person = ("Laura", 25)
+
+def register(person: Person) = ...
+register(person = ("Silvain", 16))
+register(("Silvain", 16))
+```
+This follows since a regular tuple `(T_1, ..., T_n)` is treated as a subtype of a named tuple `(N_1 = T_1, ..., N_n = T_n)` with the same element types.
+
+In the other direction, one can convert a named tuple to an unnamed tuple with the `toTuple` method.
Example: +```scala +val x: (String, Int) = Bob.toTuple // ok +``` +`toTuple` is defined as an extension method in the `NamedTuple` object. +It returns the given tuple unchanged and simply "forgets" the names. + +A `.toTuple` selection is inserted implicitly by the compiler if it encounters a named tuple but the expected type is a regular tuple. So the following works as well: +```scala +val x: (String, Int) = Bob // works, expanded to Bob.toTuple +``` +The difference between subtyping in one direction and automatic `.toTuple` conversions in the other is relatively minor. The main difference is that `.toTuple` conversions don't work inside type constructors. So the following is OK: +```scala + val names = List("Laura", "Silvain") + val ages = List(25, 16) + val persons: List[Person] = names.zip(ages) +``` +But the following would be illegal. +```scala + val persons: List[Person] = List(Bob, Laura) + val pairs: List[(String, Int)] = persons // error +``` +We would need an explicit `_.toTuple` selection to express this: +```scala + val pairs: List[(String, Int)] = persons.map(_.toTuple) +``` +Note that conformance rules for named tuples are analogous to the rules for named parameters. One can assign parameters by position to a named parameter list. +```scala + def f(param: Int) = ... + f(param = 1) // OK + f(2) // Also OK +``` +But one cannot use a name to pass an argument to an unnamed parameter: +```scala + val f: Int => T + f(2) // OK + f(param = 2) // Not OK +``` +The rules for tuples are analogous. Unnamed tuples conform to named tuple types, but the opposite requires a conversion. + +### Pattern Matching + +When pattern matching on a named tuple, the pattern may be named or unnamed. +If the pattern is named it needs to mention only a subset of the tuple names, and these names can come in any order. So the following are all OK: +```scala +Bob match + case (name, age) => ... + +Bob match + case (name = x, age = y) => ... + +Bob match + case (age = x) => ... 
+ +Bob match + case (age = x, name = y) => ... +``` + +### Expansion + +Named tuples are in essence just a convenient syntax for regular tuples. In the internal representation, a named tuple type is represented at compile time as a pair of two tuples. One tuple contains the names as literal constant string types, the other contains the element types. The runtime representation of a named tuples consists of just the element values, whereas the names are forgotten. This is achieved by declaring `NamedTuple` +in package `scala` as an opaque type as follows: +```scala + opaque type NamedTuple[N <: Tuple, +V <: Tuple] >: V = V +``` +For instance, the `Person` type would be represented as the type +```scala +NamedTuple[("name", "age"), (String, Int)] +``` +`NamedTuple` is an opaque type alias of its second, value parameter. The first parameter is a string constant type which determines the name of the element. Since the type is just an alias of its value part, names are erased at runtime, and named tuples and regular tuples have the same representation. + +A `NamedTuple[N, V]` type is publicly known to be a supertype (but not a subtype) of its value paramater `V`, which means that regular tuples can be assigned to named tuples but not _vice versa_. + +The `NamedTuple` object contains a number of extension methods for named tuples hat mirror the same functions in `Tuple`. Examples are +`apply`, `head`, `tail`, `take`, `drop`, `++`, `map`, or `zip`. +Similar to `Tuple`, the `NamedTuple` object also contains types such as `Elem`, `Head`, `Concat` +that describe the results of these extension methods. + +The translation of named tuples to instances of `NamedTuple` is fixed by the specification and therefore known to the programmer. This means that: + + - All tuple operations also work with named tuples "out of the box". + - Macro libraries can rely on this expansion. 
+ +### Computed Field Names + +The `Selectable` trait now has a `Fields` type member that can be instantiated +to a named tuple. + +```scala +trait Selectable: + type Fields <: NamedTuple.AnyNamedTuple +``` + +If `Fields` is instantiated in a subclass of `Selectable` to some named tuple type, +then the available fields and their types will be defined by that type. Assume `n: T` +is an element of the `Fields` type in some class `C` that implements `Selectable`, +that `c: C`, and that `n` is not otherwise legal as a name of a selection on `c`. +Then `c.n` is a legal selection, which expands to `c.selectDynamic("n").asInstanceOf[T]`. + +It is the task of the implementation of `selectDynamic` in `C` to ensure that its +computed result conforms to the predicted type `T` + +As an example, assume we have a query type `Q[T]` defined as follows: + +```scala +trait Q[T] extends Selectable: + type Fields = NamedTuple.Map[NamedTuple.From[T], Q] + def selectDynamic(fieldName: String) = ... +``` + +Assume in the user domain: +```scala +case class City(zipCode: Int, name: String, population: Int) +val city: Q[City] +``` +Then +```scala +city.zipCode +``` +has type `Q[Int]` and it expands to +```scala +city.selectDynamic("zipCode").asInstanceOf[Q[Int]] +``` + +### The NamedTuple.From Type + +The `NamedTuple` object contains a type definition +```scala + type From[T] <: AnyNamedTuple +``` +`From` is treated specially by the compiler. When `NamedTuple.From` is applied to +an argument type that is an instance of a case class, the type expands to the named +tuple consisting of all the fields of that case class. +Here, _fields_ means: elements of the first parameter section. 
For instance, assuming +```scala +case class City(zip: Int, name: String, population: Int) +``` +then `NamedTuple.From[City]` is the named tuple +```scala +(zip: Int, name: String, population: Int) +``` +The same works for enum cases expanding to case classes, abstract types with case classes as upper bound, alias types expanding to case classes +and singleton types with case classes as underlying type (in terms of the implementation, the `classSymbol` of a type must be a case class). + +`From` is also defined on named tuples. If `NT` is a named tuple type, then `From[NT] = NT`. + +### Pattern Matching with Named Fields in General + +We allow named patterns not just for named tuples but also for case classes. For instance: +```scala +city match + case c @ City(name = "London") => println(c.population) + case City(name = n, zip = 1026, population = pop) => println(pop) +``` + +Named constructor patterns are analogous to named tuple patterns. In both cases + + - every name must match the name some field of the selector, + - names can come in any order, + - not all fields of the selector need to be matched. + +This revives SIP 43, with a much simpler desugaring than originally proposed. +Named patterns are compatible with extensible pattern matching simply because +`unapply` results can be named tuples. + +### Operations on Named Tuples + +The operations on named tuples are defined in object `scala.NamedTuple`. The current version of this object is listed in Appendix A. + +### Restrictions + +The following restrictions apply to named tuples and named pattern arguments: + + 1. Either all elements of a tuple or constructor pattern are named or none are named. It is illegal to mix named and unnamed elements in a tuple. For instance, the following is in error: + ```scala + val illFormed1 = ("Bob", age = 33) // error + ``` + 2. Each element name in a named tuple or constructor pattern must be unique. 
For instance, the following is in error: + ```scala + val illFormed2 = (name = "", age = 0, name = true) // error + ``` + 3. Named tuples and case classes can be matched with either named or regular patterns. But regular tuples and other selector types can only be matched with regular tuple patterns. For instance, the following is in error: + ```scala + (tuple: Tuple) match + case (age = x) => // error + ``` + +### Use Case + +As a a use case showing some advanced capabilities of named tuples (including computed field names and the `From` type), +we show an implementation of embedded queries in Scala. For expressions that look like working with collections are instead +used to directly generate a query AST that can be further optimized and mapped to a variety of query languages. The code +is given in Appendix B. + +### Syntax Changes + +The syntax of Scala is extended as follows to support named tuples and +named constructor arguments: +``` +SimpleType ::= ... + | ‘(’ NameAndType {‘,’ NameAndType} ‘)’ +NameAndType ::= id ':' Type + +SimpleExpr ::= ... + | '(' NamedExprInParens {‘,’ NamedExprInParens} ')' +NamedExprInParens ::= id '=' ExprInParens + +Patterns ::= Pattern {‘,’ Pattern} + | NamedPattern {‘,’ NamedPattern} +NamedPattern ::= id '=' Pattern +``` + +### Compatibility + +Named tuple types and expressions are simply desugared to types and trees already known to Scala. The desugaring happens before the checking, so does not influence Tasty generation. + +Pattern matching with named fields requires some small additions to Typer and the PatternMatcher phase. It does not change the Tasty format, though. + +Backward source compatibility is partially preserved since additions to types and patterns come with new syntax that was not expressible before. When looking at tuple expressions, we have two instances of a source incompatibility: + +```scala +var age: Int +(age = 1) +``` +This was an assignment in parentheses before, and is a named tuple of arity one now. 
It is however not idiomatic Scala code, since assignments are not usually enclosed in parentheses. + +Also, if we have +```scala +class C: + infix def f(age: Int) +val c: C +``` +then +```scala +c f (age = 1) +``` +will now construct a tuple as second operand instead of passing a named parameter. + +These problems can be detected and diagnosed fairly straightforwardly: When faced with a unary named tuple, try to interpret it as an assignment, and if that succeeds, issue a migration error and suggest a workaround of these kinds: +```scala + {age = 1} // ok + c.f(age = 1) // ok +``` + +### Open questions + + 1. What is the precise set of types and operations we want to add to `NamedTuple`. This could also evolve after this SIP is completed. + + 2. Should there be an implicit conversion from named tuples to ordinary tuples? + +## Alternatives + +### Structural Types + +We also considered to expand structural types. Structural types allow to abstract over existing classes, but require reflection or some other library-provided mechanism for element access. By contrast, named tuples have a separate representation as tuples, which can be manipulated directly. Since elements are ordered, traversals can be defined, and this allows the definition of type generic algorithms over named tuples. Structural types don’t allow such generic algorithms directly. Be could define mappings between structural types and named tuples, which could be used to implement such algorithms. These mappings would certainly become simpler if they map to/from named tuples than if they had to map to/from user-defined "HMap"s. + +By contrast to named tuples, structural types are unordered and have width subtyping. This comes with the price that no natural element ordering exist, and that one usually needs some kind of dictionary structure for access. 
We believe that the following advantages of named tuples over structural types outweigh the loss of subtyping flexibility: + + - Better integration since named tuples and normal tuples share the same representation. + - Better efficiency, since no dictionary is needed. + - Natural traversal order allows the formulation of generic algorithms such as projections and joins. + +### Conformance + +A large part of Pre-SIP discussion centered around subtyping rules, whether ordinary tuples should subtype named-tuples (as in this proposal) or _vice versa_ or maybe no subtyping at all. + +Looking at precedent in other languages it feels like we we do want some sort of subtyping for easy convertibility and an implicit conversion in the other direction. This proposal picks _unnamed_ <: _named_ for the subtyping and _named_ -> _unnamed_ for the conversion. + +The discussion established that both forms of subtyping are sound. My personal opinion is that the subtyping of this proposal is both more useful and safer than the one in the other direction. There is also the problem that changing the subtyping direction would be incompatible with the current structure of `Tuple` and `NamedTuple` since for instance `zip` is already an inline method on `Tuple` so it could not be overridden in `NamedTuple`. To make this work requires a refactoring of `Tuple` to use more extension methods, and the questions whether this is feasible and whether it can be made binary backwards compatible are unknown. I personally will not work on this, if others are willing to make the effort we can discuss the alternative subtyping as well. + +_Addendum:_ Turning things around, adopting _named_ <: _unnamed_ for the subtyping and `_unnamed_ -> _named_ for the conversion leads to weaker typing with undetected errors. Consider: +```scala +type Person = (name: String, age: Int) +val bob: Person +bob.zip((firstName: String, agee: Int)) +``` +This should report a type error. 
+But in the alternative scheme, we'd have `(firstName: String, agee: Int) <: (String, Int)` by subtyping and then +`(String, Int) -> (name: String, age: Int)` by implicit naming conversion. This is clearly not what we want. + +By contrast, in the implemented scheme, we will not convert `(firstName: String, agee: Int)` to `(String, Int)` since a conversion is only attempted if the expected type is a regular tuple, and in our scenario it is a named tuple instead. + +My takeaway is that these designs have rather subtle consequences and any alterations would need a full implementation before they can be judged. For instance, the situation with `zip` was a surprise to me, which came up since I first implemented `_.toTuple` as a regular implicit conversion instead of a compiler adaptation. + +A possibly simpler design would be to drop all conformance and conversion rules. The problem with this approach is worse usability and problems with smooth migration. Migration will be an issue since right now everything is a regular tuple. If we make it hard to go from there to named tuples, everything will tend to stay a regular tuple and named tuples will be much less used than we would hope for. + + +### Spread Operator + +An idea I was thinking of but that I did not include in this proposal highlights another potential problem with subtyping. Consider adding a _spread_ operator `*` for tuples and named tuples. if `x` is a tuple then `f(x*)` is `f` applied to all fields of `x` expanded as individual arguments. Likewise, if `y` is a named tuple, then `f(y*)` is `f` applied to all elements of `y` as named arguments. +Now, if named tuples would be subtypes of tuples, this would actually be ambiguous since widening `y` in `y*` to a regular tuple would yield a different call. But with the subtyping direction we have, this would work fine. + +I believe tuple spread is a potentially useful addition that would fit in well with Scala. 
But it's not immediately relevant to this proposal, so is left out for now. + + +## Related work + +This section should list prior work related to the proposal, notably: + +- [Pre-SIP Discussion](https://contributors.scala-lang.org/t/pre-sip-named-tuples/6403) + +- [SIP 43 on Pattern Matching with Named Fields](https://github.com/scala/improvement-proposals/pull/44) + +- [Experimental Implementation](https://github.com/lampepfl/dotty/pull/19174) + +## FAQ + +## Appendix A: NamedTuple Definition + +Here is the current definition of `NamedTuple`. This is part of the library and therefore subject to future changes and additions. + +```scala +package scala +import annotation.experimental +import compiletime.ops.boolean.* + +@experimental +object NamedTuple: + + opaque type AnyNamedTuple = Any + opaque type NamedTuple[N <: Tuple, +V <: Tuple] >: V <: AnyNamedTuple = V + + def apply[N <: Tuple, V <: Tuple](x: V): NamedTuple[N, V] = x + + def unapply[N <: Tuple, V <: Tuple](x: NamedTuple[N, V]): Some[V] = Some(x) + + extension [V <: Tuple](x: V) + inline def withNames[N <: Tuple]: NamedTuple[N, V] = x + + export NamedTupleDecomposition.{Names, DropNames} + + extension [N <: Tuple, V <: Tuple](x: NamedTuple[N, V]) + + /** The underlying tuple without the names */ + inline def toTuple: V = x + + /** The number of elements in this tuple */ + inline def size: Tuple.Size[V] = toTuple.size + + // This intentionally works for empty named tuples as well. I think NnEmptyTuple is a dead end + // and should be reverted, justy like NonEmptyList is also appealing at first, but a bad idea + // in the end. 
+ + /** The value (without the name) at index `n` of this tuple */ + inline def apply(n: Int): Tuple.Elem[V, n.type] = + inline toTuple match + case tup: NonEmptyTuple => tup(n).asInstanceOf[Tuple.Elem[V, n.type]] + case tup => tup.productElement(n).asInstanceOf[Tuple.Elem[V, n.type]] + + /** The first element value of this tuple */ + inline def head: Tuple.Elem[V, 0] = apply(0) + + /** The tuple consisting of all elements of this tuple except the first one */ + inline def tail: Tuple.Drop[V, 1] = toTuple.drop(1) + + /** The last element value of this tuple */ + inline def last: Tuple.Last[V] = apply(size - 1).asInstanceOf[Tuple.Last[V]] + + /** The tuple consisting of all elements of this tuple except the last one */ + inline def init: Tuple.Init[V] = toTuple.take(size - 1).asInstanceOf[Tuple.Init[V]] + + /** The tuple consisting of the first `n` elements of this tuple, or all + * elements if `n` exceeds `size`. + */ + inline def take(n: Int): NamedTuple[Tuple.Take[N, n.type], Tuple.Take[V, n.type]] = + toTuple.take(n) + + /** The tuple consisting of all elements of this tuple except the first `n` ones, + * or no elements if `n` exceeds `size`. + */ + inline def drop(n: Int): NamedTuple[Tuple.Drop[N, n.type], Tuple.Drop[V, n.type]] = + toTuple.drop(n) + + /** The tuple `(x.take(n), x.drop(n))` */ + inline def splitAt(n: Int): NamedTuple[Tuple.Split[N, n.type], Tuple.Split[V, n.type]] = + toTuple.splitAt(n) + + /** The tuple consisting of all elements of this tuple followed by all elements + * of tuple `that`. The names of the two tuples must be disjoint. + */ + inline def ++ [N2 <: Tuple, V2 <: Tuple](that: NamedTuple[N2, V2])(using Tuple.Disjoint[N, N2] =:= true) + : NamedTuple[Tuple.Concat[N, N2], Tuple.Concat[V, V2]] + = toTuple ++ that.toTuple + + // inline def :* [L] (x: L): NamedTuple[Append[N, ???], Append[V, L] = ??? + // inline def *: [H] (x: H): NamedTuple[??? *: N], H *: V] = ??? 
+ + /** The named tuple consisting of all element values of this tuple mapped by + * the polymorphic mapping function `f`. The names of elements are preserved. + * If `x = (n1 = v1, ..., ni = vi)` then `x.map(f) = `(n1 = f(v1), ..., ni = f(vi))`. + */ + inline def map[F[_]](f: [t] => t => F[t]): NamedTuple[N, Tuple.Map[V, F]] = + toTuple.map(f).asInstanceOf[NamedTuple[N, Tuple.Map[V, F]]] + + /** The named tuple consisting of all elements of this tuple in reverse */ + inline def reverse: NamedTuple[Tuple.Reverse[N], Tuple.Reverse[V]] = + toTuple.reverse + + /** The named tuple consisting of all elements values of this tuple zipped + * with corresponding element values in named tuple `that`. + * If the two tuples have different sizes, + * the extra elements of the larger tuple will be disregarded. + * The names of `x` and `that` at the same index must be the same. + * The result tuple keeps the same names as the operand tuples. + */ + inline def zip[V2 <: Tuple](that: NamedTuple[N, V2]): NamedTuple[N, Tuple.Zip[V, V2]] = + toTuple.zip(that.toTuple) + + /** A list consisting of all element values */ + inline def toList: List[Tuple.Union[V]] = toTuple.toList.asInstanceOf[List[Tuple.Union[V]]] + + /** An array consisting of all element values */ + inline def toArray: Array[Object] = toTuple.toArray + + /** An immutable array consisting of all element values */ + inline def toIArray: IArray[Object] = toTuple.toIArray + + end extension + + /** The size of a named tuple, represented as a literal constant subtype of Int */ + type Size[X <: AnyNamedTuple] = Tuple.Size[DropNames[X]] + + /** The type of the element value at position N in the named tuple X */ + type Elem[X <: AnyNamedTuple, N <: Int] = Tuple.Elem[DropNames[X], N] + + /** The type of the first element value of a named tuple */ + type Head[X <: AnyNamedTuple] = Elem[X, 0] + + /** The type of the last element value of a named tuple */ + type Last[X <: AnyNamedTuple] = Tuple.Last[DropNames[X]] + + /** The type of 
a named tuple consisting of all elements of named tuple X except the first one */ + type Tail[X <: AnyNamedTuple] = Drop[X, 1] + + /** The type of the initial part of a named tuple without its last element */ + type Init[X <: AnyNamedTuple] = + NamedTuple[Tuple.Init[Names[X]], Tuple.Init[DropNames[X]]] + + /** The type of the named tuple consisting of the first `N` elements of `X`, + * or all elements if `N` exceeds `Size[X]`. + */ + type Take[X <: AnyNamedTuple, N <: Int] = + NamedTuple[Tuple.Take[Names[X], N], Tuple.Take[DropNames[X], N]] + + /** The type of the named tuple consisting of all elements of `X` except the first `N` ones, + * or no elements if `N` exceeds `Size[X]`. + */ + type Drop[X <: AnyNamedTuple, N <: Int] = + NamedTuple[Tuple.Drop[Names[X], N], Tuple.Drop[DropNames[X], N]] + + /** The pair type `(Take[X, N], Drop[X, N])`. */ + type Split[X <: AnyNamedTuple, N <: Int] = (Take[X, N], Drop[X, N]) + + /** Type of the concatenation of two tuples `X` and `Y` */ + type Concat[X <: AnyNamedTuple, Y <: AnyNamedTuple] = + NamedTuple[Tuple.Concat[Names[X], Names[Y]], Tuple.Concat[DropNames[X], DropNames[Y]]] + + /** The type of the named tuple `X` mapped with the type-level function `F`. + * If `X = (n1 : T1, ..., ni : Ti)` then `Map[X, F] = `(n1 : F[T1], ..., ni : F[Ti])`. + */ + type Map[X <: AnyNamedTuple, F[_ <: Tuple.Union[DropNames[X]]]] = + NamedTuple[Names[X], Tuple.Map[DropNames[X], F]] + + /** A named tuple with the elements of tuple `X` in reversed order */ + type Reverse[X <: AnyNamedTuple] = + NamedTuple[Tuple.Reverse[Names[X]], Tuple.Reverse[DropNames[X]]] + + /** The type of the named tuple consisting of all element values of + * named tuple `X` zipped with corresponding element values of + * named tuple `Y`. If the two tuples have different sizes, + * the extra elements of the larger tuple will be disregarded. + * The names of `X` and `Y` at the same index must be the same. + * The result tuple keeps the same names as the operand tuples.
+ * For example, if + * ``` + * X = (n1 : S1, ..., ni : Si) + * Y = (n1 : T1, ..., nj : Tj) where j >= i + * ``` + * then + * ``` + * Zip[X, Y] = (n1 : (S1, T1), ..., ni: (Si, Ti)) + * ``` + * @syntax markdown + */ + type Zip[X <: AnyNamedTuple, Y <: AnyNamedTuple] = + Tuple.Conforms[Names[X], Names[Y]] match + case true => + NamedTuple[Names[X], Tuple.Zip[DropNames[X], DropNames[Y]]] + + type From[T] <: AnyNamedTuple + +end NamedTuple + +/** Separate from NamedTuple object so that we can match on the opaque type NamedTuple. */ +@experimental +object NamedTupleDecomposition: + import NamedTuple.* + + /** The names of a named tuple, represented as a tuple of literal string values. */ + type Names[X <: AnyNamedTuple] <: Tuple = X match + case NamedTuple[n, _] => n + + /** The value types of a named tuple represented as a regular tuple. */ + type DropNames[NT <: AnyNamedTuple] <: Tuple = NT match + case NamedTuple[_, x] => x +``` + +## Appendix B: Embedded Queries Case Study + +```scala +import language.experimental.namedTuples +import NamedTuple.{NamedTuple, AnyNamedTuple} + +/* This is a demonstrator that shows how to map regular for expressions to + * internal data that can be optimized by a query engine. It needs NamedTuples + * and type classes but no macros. It's so far very provisional and experimental, + * intended as a basis for further exploration. + */ + +/** The type of expressions in the query language */ +trait Expr[Result] extends Selectable: + + /** This type is used to support selection with any of the field names + * defined by Fields. + */ + type Fields = NamedTuple.Map[NamedTuple.From[Result], Expr] + + /** A selection of a field name defined by Fields is implemented by `selectDynamic`. + * The implementation will add a cast to the right Expr type corresponding + * to the field type. + */ + def selectDynamic(fieldName: String) = Expr.Select(this, fieldName) + + /** Member methods to implement universal equality on Expr level. 
*/ + def == (other: Expr[?]): Expr[Boolean] = Expr.Eq(this, other) + def != (other: Expr[?]): Expr[Boolean] = Expr.Ne(this, other) + +object Expr: + + /** Sample extension methods for individual types */ + extension (x: Expr[Int]) + def > (y: Expr[Int]): Expr[Boolean] = Gt(x, y) + def > (y: Int): Expr[Boolean] = Gt(x, IntLit(y)) + extension (x: Expr[Boolean]) + def &&(y: Expr[Boolean]): Expr[Boolean] = And(x, y) + def || (y: Expr[Boolean]): Expr[Boolean] = Or(x, y) + + // Note: All field names of constructors in the query language are prefixed with `$` + // so that we don't accidentally pick a field name of a constructor class where we want + // a name in the domain model instead. + + // Some sample constructors for Exprs + case class Gt($x: Expr[Int], $y: Expr[Int]) extends Expr[Boolean] + case class Plus(x: Expr[Int], y: Expr[Int]) extends Expr[Int] + case class And($x: Expr[Boolean], $y: Expr[Boolean]) extends Expr[Boolean] + case class Or($x: Expr[Boolean], $y: Expr[Boolean]) extends Expr[Boolean] + + // So far Select is weakly typed, so `selectDynamic` is easy to implement. 
+ // Todo: Make it strongly typed like the other cases + case class Select[A]($x: Expr[A], $name: String) extends Expr + + case class Single[S <: String, A]($x: Expr[A]) + extends Expr[NamedTuple[S *: EmptyTuple, A *: EmptyTuple]] + + case class Concat[A <: AnyNamedTuple, B <: AnyNamedTuple]($x: Expr[A], $y: Expr[B]) + extends Expr[NamedTuple.Concat[A, B]] + + case class Join[A <: AnyNamedTuple](a: A) + extends Expr[NamedTuple.Map[A, StripExpr]] + + type StripExpr[E] = E match + case Expr[b] => b + + // Also weakly typed in the arguents since these two classes model universal equality */ + case class Eq($x: Expr[?], $y: Expr[?]) extends Expr[Boolean] + case class Ne($x: Expr[?], $y: Expr[?]) extends Expr[Boolean] + + /** References are placeholders for parameters */ + private var refCount = 0 + + case class Ref[A]($name: String = "") extends Expr[A]: + val id = refCount + refCount += 1 + override def toString = s"ref$id(${$name})" + + /** Literals are type-specific, tailored to the types that the DB supports */ + case class IntLit($value: Int) extends Expr[Int] + + /** Scala values can be lifted into literals by conversions */ + given Conversion[Int, IntLit] = IntLit(_) + + /** The internal representation of a function `A => B` + * Query languages are ususally first-order, so Fun is not an Expr + */ + case class Fun[A, B](param: Ref[A], f: B) + + type Pred[A] = Fun[A, Expr[Boolean]] + + /** Explicit conversion from + * (name_1: Expr[T_1], ..., name_n: Expr[T_n]) + * to + * Expr[(name_1: T_1, ..., name_n: T_n)] + */ + extension [A <: AnyNamedTuple](x: A) def toRow: Join[A] = Join(x) + + /** Same as _.toRow, as an implicit conversion */ + given [A <: AnyNamedTuple]: Conversion[A, Expr.Join[A]] = Expr.Join(_) + +end Expr + +/** The type of database queries. So far, we have queries + * that represent whole DB tables and queries that reify + * for-expressions as data. 
+ */ +trait Query[A] + +object Query: + import Expr.{Pred, Fun, Ref} + + case class Filter[A]($q: Query[A], $p: Pred[A]) extends Query[A] + case class Map[A, B]($q: Query[A], $f: Fun[A, Expr[B]]) extends Query[B] + case class FlatMap[A, B]($q: Query[A], $f: Fun[A, Query[B]]) extends Query[B] + + // Extension methods to support for-expression syntax for queries + extension [R](x: Query[R]) + + def withFilter(p: Ref[R] => Expr[Boolean]): Query[R] = + val ref = Ref[R]() + Filter(x, Fun(ref, p(ref))) + + def map[B](f: Ref[R] => Expr[B]): Query[B] = + val ref = Ref[R]() + Map(x, Fun(ref, f(ref))) + + def flatMap[B](f: Ref[R] => Query[B]): Query[B] = + val ref = Ref[R]() + FlatMap(x, Fun(ref, f(ref))) +end Query + +/** The type of query references to database tables */ +case class Table[R]($name: String) extends Query[R] + +// Everything below is code using the model ----------------------------- + +// Some sample types +case class City(zipCode: Int, name: String, population: Int) +type Address = (city: City, street: String, number: Int) +type Person = (name: String, age: Int, addr: Address) + +@main def Test = + + val cities = Table[City]("cities") + + val q1 = cities.map: c => + c.zipCode + val q2 = cities.withFilter: city => + city.population > 10_000 + .map: city => + city.name + + val q3 = + for + city <- cities + if city.population > 10_000 + yield city.name + + val q4 = + for + city <- cities + alt <- cities + if city.name == alt.name && city.zipCode != alt.zipCode + yield + city + + val addresses = Table[Address]("addresses") + val q5 = + for + city <- cities + addr <- addresses + if addr.street == city.name + yield + (name = city.name, num = addr.number) + + val q6 = + cities.map: city => + (name = city.name, zipCode = city.zipCode) + + def run[T](q: Query[T]): Iterator[T] = ??? 
+ + def x1: Iterator[Int] = run(q1) + def x2: Iterator[String] = run(q2) + def x3: Iterator[String] = run(q3) + def x4: Iterator[City] = run(q4) + def x5: Iterator[(name: String, num: Int)] = run(q5) + def x6: Iterator[(name: String, zipCode: Int)] = run(q6) +``` diff --git a/_sips/sips/replace-nonsensical-unchecked-annotation.md b/_sips/sips/replace-nonsensical-unchecked-annotation.md index def0ff658..3bb860994 100644 --- a/_sips/sips/replace-nonsensical-unchecked-annotation.md +++ b/_sips/sips/replace-nonsensical-unchecked-annotation.md @@ -2,7 +2,7 @@ layout: sip permalink: /sips/:title.html stage: implementation -status: waiting-for-implementation +status: under-review presip-thread: https://contributors.scala-lang.org/t/pre-sip-replace-non-sensical-unchecked-annotations/6342 title: SIP-57 - Replace non-sensical @unchecked annotations --- diff --git a/_sips/sips/typeclasses-syntax.md b/_sips/sips/typeclasses-syntax.md new file mode 100644 index 000000000..385bb7dc1 --- /dev/null +++ b/_sips/sips/typeclasses-syntax.md @@ -0,0 +1,685 @@ +--- +layout: sip +stage: implementation +status: under-review +presip-thread: https://contributors.scala-lang.org/t/pre-sip-improve-syntax-for-context-bounds-and-givens/6576/97 +title: SIP-64 - Improve Syntax for Context Bounds and Givens +--- + +**By: Martin Odersky** + +## History + +| Date | Version | +|---------------|--------------------| +| March 11, 2024| Initial Draft | +| July 18, 2024 | Revised Draft | + +## Summary + +We propose some syntactic improvements that make context bounds and given clauses more +expressive and easier to read. The proposed additions and changes comprise: + + - naming context bounds, as in `A: Monoid as a`, + - a new syntax for multiple context bounds, as in `A: {Monoid, Ord}`, + - context bounds for type members, + - replacing abstract givens with a more powerful and convenient mechanism, + - a cleaner syntax for given definitions that eliminates some syntactic warts.
+ +## Motivation + +This SIP is part of an effort to get state-of-the-art typeclasses and generics in Scala. It fixes several existing pain points: + + - The inability to name context bounds causes awkward and obscure workarounds in practice. + - The syntax for multiple context bounds is not very clear or readable. + - The existing syntax for givens is unfortunate, which hinders learning and adoption. + - Abstract givens are hard to specify and implement and their syntax is easily confused + with simple concrete givens. + +These pain points are worth fixing on their own, independently of any other proposed improvements to typeclass support. What's more, the changes +are time sensitive since they affect existing syntax that was introduced in 3.0, so it's better to make the change at a time when not that much code using the new syntax is written yet. + +## Proposed Solution + +### 1. Naming Context Bounds + +Context bounds are a convenient and legible abbreviation. A problem so far is that they are always anonymous, one cannot name the implicit parameter to which a context bound expands. For instance, consider the classical pair of type classes +```scala + trait SemiGroup[A]: + extension (x: A) def combine(y: A): A + + trait Monoid[A] extends SemiGroup[A]: + def unit: A +``` +and a `reduce` method defined like this: +```scala +def reduce[A : Monoid](xs: List[A]): A = ??? +``` +Since we don't have a name for the `Monoid` instance of `A`, we need to resort to `summon` in the body of `reduce`: +```scala +def reduce[A : Monoid](xs: List[A]): A = + xs.foldLeft(summon[Monoid[A]].unit)(_ `combine` _) +``` +That's generally considered too painful to write and read, hence people usually adopt one of two alternatives.
Either, eschew context bounds and switch to using clauses: +```scala +def reduce[A](xs: List[A])(using m: Monoid[A]): A = + xs.foldLeft(m.unit)(_ `combine` _) +``` +Or, plan ahead and define a "trampoline" method in `Monoid`'s companion object: +```scala + trait Monoid[A] extends SemiGroup[A]: + def unit: A + object Monoid: + def unit[A](using m: Monoid[A]): A = m.unit + ... + def reduce[A : Monoid](xs: List[A]): A = + xs.foldLeft(Monoid.unit)(_ `combine` _) +``` +This is all accidental complexity which can be avoided by the following proposal. + +**Proposal:** Allow to name a context bound, like this: +```scala + def reduce[A : Monoid as m](xs: List[A]): A = + xs.foldLeft(m.unit)(_ `combine` _) +``` + +We use `as x` after the type to bind the instance to `x`. This is analogous to import renaming, which also introduces a new name for something that comes before. + +**Benefits:** The new syntax is simple and clear. It avoids the awkward choice between concise context bounds that can't be named and verbose using clauses that can. + +### 2. New Syntax for Aggregate Context Bounds + +Aggregate context bounds like `A : X : Y` are not obvious to read, and it becomes worse when we add names, e.g. `A : X as x : Y as y`. + +**Proposal:** Allow to combine several context bounds inside `{...}`, analogous +to import clauses. Example: + +```scala + trait A: + def showMax[X : {Ordering, Show}](x: X, y: X): String + class B extends A: + def showMax[X : {Ordering as ordering, Show as show}](x: X, y: X): String = + show.asString(ordering.max(x, y)) +``` + +The old syntax with multiple `:` should be phased out over time. There's more about migration at the end of this SIP. + + +### 3. Expansion of Context Bounds + +With named context bounds, we need a revision to how the witness parameters of such bounds are added. Context bounds are currently translated to implicit parameters in the last parameter list of a method or class. 
This is a problem if a context bound is mentioned in one of the preceding parameter types. For example, consider a type class of parsers with associated type members `Input` and `Result` describing the input type on which the parsers operate and the type of results they produce: +```scala +trait Parser[P]: + type Input + type Result +``` +Here is a method `run` that runs a parser on an input of the required type: +```scala +def run[P : Parser as p](in: p.Input): p.Result +``` +With the current translation this does not work since it would be expanded to: +```scala + def run[P](x: p.Input)(using p: Parser[P]): p.Result +``` +Note that the `p` in `p.Input` refers to the `p` introduced in the using clause, which comes later. So this is ill-formed. + +This problem would be fixed by changing the translation of context bounds so that they expand to using clauses immediately after the type parameter. But such a change is infeasible, for two reasons: + + 1. It would be a source- and binary-incompatible change. We cannot simply change the expansion of existing using clauses because + then clients that pass explicit using arguments would no longer work. + 2. Putting using clauses earlier can impair type inference. A type in + a using clause can be constrained by term arguments coming before that + clause. Moving the using clause first would miss those constraints, which could cause ambiguities in implicit search. + +But there is an alternative which is feasible: + +**Proposal:** Map the context bounds of a method or class as follows: + + 1. If one of the bounds is referred to by its term name in a subsequent parameter clause, the context bounds are mapped to a using clause immediately preceding the first such parameter clause. + 2. Otherwise, if the last parameter clause is a using (or implicit) clause, merge all parameters arising from context bounds in front of that clause, creating a single using clause. + 3. 
Otherwise, let the parameters arising from context bounds form a new using clause at the end. + +Rules (2) and (3) are the status quo, and match Scala 2's rules. Rule (1) is new but since context bounds so far could not be referred to, it does not apply to legacy code. Therefore, binary compatibility is maintained. + +**Discussion** More refined rules could be envisaged where context bounds are spread over different using clauses so that each comes as late as possible. But it would make matters more complicated and the gain in expressiveness is not clear to me. + + +### 4. Context Bounds for Type Members, Deferred Givens + +It's not very orthogonal to allow subtype bounds for both type parameters and abstract type members, but context bounds only for type parameters. What's more, we don't even have the fallback of an explicit using clause for type members. The only alternative is to also introduce a set of abstract givens that get implemented in each subclass. This is extremely heavyweight and opaque to newcomers. + +**Proposal**: Allow context bounds for type members. Example: + +```scala + class Collection: + type Element : Ord +``` + +The question is how these bounds are expanded. Context bounds on type parameters +are expanded into using clauses. But for type members this does not work, since we cannot refer to a member type of a class in a parameter type of that class. What we are after is an equivalent of using parameter clauses but represented as class members. + +**Proposal:** +Introduce a new way to implement a given definition in a trait like this: +```scala +given T = deferred +``` +`deferred` is a new method in the `scala.compiletime` package, which can appear only as the right hand side of a given defined in a trait. Any class implementing that trait will provide an implementation of this given. If a definition is not provided explicitly, it will be synthesized by searching for a given of type `T` in the scope of the inheriting class. 
Specifically, the scope in which this given will be searched is the environment of that class augmented by its parameters but not containing its members (since that would lead to recursive resolutions). If an implementation _is_ provided explicitly, it counts as an override of a concrete definition and needs an `override` modifier. + +Deferred givens allow a clean implementation of context bounds in traits, +as in the following example: +```scala +trait Sorted: + type Element : Ord + +class SortedSet[A : Ord] extends Sorted: + type Element = A +``` +The compiler expands this to the following implementation. +```scala +trait Sorted: + type Element + given Ord[Element] = compiletime.deferred + +class SortedSet[A](using evidence$0: Ord[A]) extends Sorted: + type Element = A + override given Ord[Element] = evidence$0 +``` + +The using clause in class `SortedSet` provides an implementation for the deferred given in trait `Sorted`. + +**Benefits:** + + - Better orthogonality, type parameters and abstract type members now accept the same kinds of bounds. + - Better ergonomics, since deferred givens get naturally implemented in inheriting classes, no need for boilerplate to fill in definitions of abstract givens. + +**Alternative:** It was suggested that we use a modifier for a deferred given instead of a `= deferred`. Something like `deferred given C[T]`. But a modifier does not suggest the concept that a deferred given will be implemented automatically in subclasses unless an explicit definition is written. In a sense, we can see `= deferred` as the invocation of a magic macro that is provided by the compiler. So from a user's point of view a given with `deferred` right hand side is not abstract. +It is a concrete definition where the compiler will provide the correct implementation. And if users want to provide their own overriding +implementations, they will need an explicit `override` modifier. + +### 5. 
Abolish Abstract Givens + +With `deferred` givens there is no need anymore to also define abstract givens. The two mechanisms are very similar, but the user experience for +deferred givens is generally more ergonomic. Abstract givens also are uncomfortably close to concrete class instances. Their syntax clashes +with the quite common case where we want to establish a given without any nested definitions. For instance, consider a given that constructs a type tag: +```scala +class Tag[T] +``` +Then this works: +```scala +given Tag[String]() +given Tag[String] with {} +``` +But the following more natural syntax fails: +```scala +given Tag[String] +``` +The last line gives a rather cryptic error: +``` +1 |given Tag[String] + | ^ + | anonymous given cannot be abstract +``` +The underlying problem is that abstract givens are very rare (and should become completely unnecessary once deferred givens are introduced), yet occupy a syntax that looks very close to the more common case of concrete +typeclasses without nested definitions. + +**Proposal:** In the future, let the `= deferred` mechanism be the only way to deliver the functionality of abstract givens. Deprecate the current version of abstract givens, and remove them in a future Scala version. + +**Benefits:** + + - Simplification of the language since a feature is dropped + - Eliminate non-obvious and misleading syntax. + +The only downside is that deferred givens are restricted to be used in traits, whereas abstract givens are also allowed in abstract classes. But I would be surprised if actual code relied on that difference, and such code could in any case be easily rewritten to accommodate the restriction. + + +### 6. Context Bounds for Polymorphic Functions + +Currently, context bounds can be used in methods, but not in function types or function literals. We propose to drop this irregularity and allow context bounds also in these places.
Example: + +```scala +type Comparer = [X: Ord] => (x: X, y: X) => Boolean +val less: Comparer = [X: Ord as ord] => (x: X, y: X) => + ord.compare(x, y) < 0 +``` + +The expansion of such context bounds is analogous to the expansion in method types, except that instead of adding a using clause in a method, we insert a context function type. + +For instance, the `type` and `val` definitions above would expand to +```scala +type Comparer = [X] => (x: X, y: X) => Ord[X] ?=> Boolean +val less: Comparer = [X] => (x: X, y: X) => (ord: Ord[X]) ?=> + ord.compare(x, y) < 0 +``` + +The expansion of using clauses does look inside alias types. For instance, +here is a variation of the previous example that uses a parameterized type alias: +```scala +type Cmp[X] = (x: X, y: X) => Ord[X] ?=> Boolean +type Comparer2 = [X: Ord] => Cmp[X] +``` +The expansion of the right hand side of `Comparer2` expands the `Cmp[X]` alias +and then inserts the context function at the same place as what's done for `Comparer`. + +### 7. Cleanup of Given Syntax + +A good language syntax is like a Bach fugue: A small set of motifs is combined in a multitude of harmonic ways. Dissonances and irregularities should be avoided. + +When designing Scala 3, I believe that, by and large, we achieved that goal, except in one area, which is the syntax of givens. There _are_ some glaring dissonances, as seen in this code for defining an ordering on lists: +```scala +given [A](using Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... +``` +The `:` feels utterly foreign in this position. It's definitely not a type ascription, so what is its role? Just as bad is the trailing `with`. Everywhere else we use braces or trailing `:` to start a scope of nested definitions, so the need of `with` sticks out like a sore thumb. + +Sometimes unconventional syntax grows on you and becomes natural after a while. But here it was unfortunately the opposite. 
The longer I used given definitions in this style the more awkward they felt, in particular since the rest of the language seemed so much better put together by comparison. And I believe many others agree with me on this. Since the current syntax is unnatural and esoteric, this means it's difficult to discover and very foreign even after that. This makes it much harder to learn and apply givens than it need be. + +The previous conditional given syntax was inspired by method definitions. If we add the optional name to the previous example, we obtain something akin to an implicit method in Scala 2: +```scala +given listOrd[A](using Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... +``` +The anonymous syntax was then obtained by simply dropping the name. +But without a name, the syntax looks weird and inconsistent. + +This is a problem since at least for typeclasses, anonymous givens should be the norm. +Givens are like extends clauses. We state a _fact_, that a +type implements a type class, or that a value can be used implicitly. We don't need a name for that fact. It's analogous to extends clauses, where we state that a class is a subclass of some other class or trait. We would not think it useful to name an extends clause, it's simply a fact that is stated. +It's also telling that every other language that defines type classes uses anonymous syntax. Somehow, nobody ever found it necessary to name these instances. + +A more intuitive and in my opinion cleaner alternative is to decree that a given should always look like it _implements a type_. Conditional givens should look like they implement function types. The `Ord` typeclass instances for `Int` and `List` would then look like this: +```scala +given Ord[String]: + def compare(x: String, y: String) = ... + +given [A : Ord] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... 
+``` +The second, conditional instance looks like it implements the function type +```scala +[A : Ord] => Ord[List[A]] +``` +Another way to see this is as an implication: +If `A` is a type that is `Ord`, then `List[A]` is `Ord` (and the rest of the given clause gives the implementation that makes it so). +Equivalently, `A` is `Ord` _implies_ `List[A]` is `Ord`, hence the `=>`. + +Yet another related meaning is that the given clause establishes a _context function_ of type `[A: Ord] ?=> Ord[List[A]]` that is automatically applied to evidence arguments of type `Ord[A]` and that yields instances of type `Ord[List[A]]`. Since givens are in any case applied automatically to all their arguments, we don't need to specify that separately with `?=>`, a simple `=>` arrow is sufficiently clear and is easier to read. + +All these viewpoints are equivalent, in a deep sense. This is exactly the Curry Howard isomorphism, which equates function types and implications. + +**Proposal:** Change the syntax for given clauses so that a `given` clause consists of the following elements: + + - An optional name binding `id :` + - Zero or more _conditions_, which introduce type or value parameters. Each precondition ends in a `=>`. + - the implemented _type_, + - an implementation which consists of either an `=` and an expression, + or a template body. + +**Examples:** + +Here is an enumeration of common forms of given definitions in the new syntax. We show the following use cases: + + 1. A simple typeclass instance, such as `Ord[Int]`. + 2. A parameterized type class instance, such as `Ord` for lists. + 3. A type class instance with an explicit context parameter. + 4. A type class instance with a named explicit context parameter. + 5. A simple given alias. + 6. A parameterized given alias + 7. A given alias with an explicit context parameter. + 8. An abstract or deferred given + 9. A by-name given, e.g.
if we have a given alias of a mutable variable, and we + want to make sure that it gets re-evaluated on each access. +```scala + // Simple typeclass + given Ord[Int]: + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass with context bound + given [A: Ord] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with context parameter + given [A] => Ord[A] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with named context parameter + given [A] => (ord: Ord[A]) => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Simple alias + given Ord[Int] = IntOrd() + + // Parameterized alias with context bound + given [A: Ord] => Ord[List[A]] = + ListOrd[A] + + // Parameterized alias with context parameter + given [A] => Ord[A] => Ord[List[A]] = + ListOrd[A] + + // Abstract or deferred given + given Context = deferred + + // By-name given + given () => Context = curCtx +``` +Here are the same examples, with optional names provided: +```scala + // Simple typeclass + given intOrd: Ord[Int]: + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass with context bound + given listOrd: [A: Ord] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with context parameter + given listOrd: [A] => Ord[A] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with named context parameter + given listOrd: [A] => (ord: Ord[A]) => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... 
+ + // Simple alias + given intOrd: Ord[Int] = IntOrd() + + // Parameterized alias with context bound + given listOrd: [A: Ord] => Ord[List[A]] = + ListOrd[A] + + // Parameterized alias with context parameter + given listOrd: [A] => Ord[A] => Ord[List[A]] = + ListOrd[A] + + // Abstract or deferred given + given context: Context = deferred + + // By-name given + given context: () => Context = curCtx +``` + +**By Name Givens** + +We sometimes find it necessary that a given alias is re-evaluated each time it is called. For instance, say we have a mutable variable `curCtx` and we want to define a given that returns the current value of that variable. A normal given alias will not do since by default given aliases are mapped to +lazy vals. + +In general, we want to avoid re-evaluation of the given. But there are situations like the one above where we want to specify _by-name_ evaluation instead. The proposed new syntax for this is shown in the last clause above. This is arguably the natural way to express by-name givens. We want to use a conditional given, since these map to methods, but the set of preconditions is empty, hence the `()` parameter. Equivalently, under the context function viewpoint, we are defining a context function of the form `() ?=> T`, and these are equivalent to by-name parameters. + +Compare with the current best way to achieve this, which is to use a dummy type parameter. +```scala + given [DummySoThatItsByName]: Context = curCtx +``` +This has the same effect, but feels more like a hack than a clean solution. + +**Dropping `with`** + +In the new syntax, all typeclass instances introduce definitions like normal +class bodies, enclosed in braces `{...}` or following a `:`. The irregular +requirement to use `with` is dropped. In retrospect, the main reason to introduce `with` was since a definition like + +```scala +given [A](using Ord[A]): Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ...
+``` +was deemed to be too cryptic, with the double meaning of colons. But since that syntax is gone, we don't need `with` anymore. There's still a double meaning of colons, e.g. in +```scala +given intOrd: Ord[Int]: + ... +``` +but since now both uses of `:` are very familiar (type ascription _vs_ start of nested definitions), it's manageable. Besides, the problem occurs only for named typeclass instances, which should be the exceptional case anyway. + + +**Possible ambiguities** + +If one wants to define a given for an actual function type (which is probably not advisable in practice), one needs to enclose the function type in parentheses, i.e. `given ([A] => F[A])`. This is true in the currently implemented syntax and stays true for all discussed change proposals. + +The double meaning of : with optional prefix names is resolved as usual. A : at the end of a line starts a nested definition block. If for some obscure reason one wants to define a named given on multiple lines, one has to format it as follows: +```scala + given intOrd + : Ord = ... +``` +**Comparison with Status Quo** + +To facilitate a systematic comparison, here is the listing of all 9x2 cases discussed previously with the current syntax. + +Unnamed: +```scala + // Simple typeclass + given Ord[Int] with + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass with context bound + given [A: Ord]: Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with context parameter + given [A](using Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with named context parameter + given [A](using ord: Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ...
+ + // Simple alias + given Ord[Int] = IntOrd() + + // Parameterized alias with context bound + given [A: Ord]: Ord[List[A]] = + ListOrd[A] + + // Parameterized alias with context parameter + given [A](using Ord[A]): Ord[List[A]] = + ListOrd[A] + + // Abstract or deferred given: no unnamed form possible + + // By-name given + given [DummySoItsByName]: Context = curCtx +``` +Named: +```scala + // Simple typeclass + given intOrd: Ord[Int] with + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass with context bound + given listOrd[A: Ord]: Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with context parameter + given listOrd[A](using Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... + + // Parameterized typeclass with named context parameter + given listOrd[A](using ord: Ord[A]): Ord[List[A]] with + def compare(x: List[A], y: List[A]) = ... + + // Simple alias + given intOrd: Ord[Int] = IntOrd() + + // Parameterized alias with context bound + given listOrd[A: Ord]: Ord[List[A]] = + ListOrd[A] + + // Parameterized alias with context parameter + given listOrd[A](using Ord[A]): Ord[List[A]] = + ListOrd[A] + + // Abstract or deferred given + given context: Context + + // By-name given + given context[DummySoItsByName]: Context = curCtx +``` + +**Summary** + +This will be a fairly significant change to the given syntax. I believe there's still a possibility to do this. Not so much code has migrated to new style givens yet, and code that was written can be changed fairly easily. Specifically, there are about a 900K definitions of `implicit def`s +in Scala code on Github and about 10K definitions of `given ... with`. So about 1% of all code uses the Scala 3 syntax, which would have to be changed again. + +Changing something introduced just recently in Scala 3 is not fun, +but I believe these adjustments are preferable to let bad syntax +sit there and fester. 
The cost of changing should be amortized by improved developer experience over time, and better syntax would also help in migrating Scala 2 style implicits to Scala 3. But we should do it quickly before a lot more code +starts migrating. + +Migration to the new syntax is straightforward, and can be supported by automatic rewrites. For a transition period we can support both the old and the new syntax. It would be a good idea to backport the new given syntax to the LTS version of Scala so that code written in this version can already use it. The current LTS would then support old and new-style givens indefinitely, whereas new Scala 3.x versions would phase out the old syntax over time. + + +## Summary of Syntax Changes + +Here is the complete context-free syntax for all proposed features. +``` +TmplDef ::= 'given' GivenDef +GivenDef ::= [id ':'] GivenSig +GivenSig ::= GivenImpl + | '(' ')' '=>' GivenImpl + | GivenConditional '=>' GivenSig +GivenImpl ::= GivenType ([‘=’ Expr] | TemplateBody) + | ConstrApps TemplateBody +GivenConditional ::= DefTypeParamClause + | DefTermParamClause + | '(' FunArgTypes ')' + | GivenType +GivenType ::= AnnotType1 {id [nl] AnnotType1} + +TypeDef ::= id [TypeParamClause] TypeAndCtxBounds +TypeParamBounds ::= TypeAndCtxBounds +TypeAndCtxBounds ::= TypeBounds [‘:’ ContextBounds] +ContextBounds ::= ContextBound | '{' ContextBound {',' ContextBound} '}' +ContextBound ::= Type ['as' id] + +FunType ::= FunTypeArgs (‘=>’ | ‘?=>’) Type + | DefTypeParamClause '=>' Type +FunExpr ::= FunParams (‘=>’ | ‘?=>’) Expr + | DefTypeParamClause ‘=>’ Expr +``` + +## Compatibility + +All additions are fully compatible with existing Scala 3. The prototype implementation contains a parser that accepts both old and new idioms. That said, we would +want to deprecate and remove over time the following existing syntax: + + 1. Multiple context bounds of the form `X : A : B : C`. + 2. 
The previous syntax for given clauses which required a `:` in front of the implemented type and a `with` after it. + 3. Abstract givens + +The changes under (1) and (2) can be automated using existing rewrite technology in the compiler or Scalafix. The changes in (3) are more global in nature but are still straightforward. + +## Alternatives + +One alternative put forward in the Pre-SIP was to deprecate context bounds altogether and only promote using clauses. This would still be a workable system and arguably lead to a smaller language. On the other hand, dropping context bounds for using clauses worsens +some of the ergonomics of expressing type classes. First, it is longer. Second, it separates the introduction of a type name and the constraints on that type name. Typically, there can be many normal parameters between a type parameter and the using clause that characterizes it. By contrast, context bounds follow the +general principle that an entity should be declared together with its type, and in a very concrete sense context bounds define types of types. So I think context bounds are here to stay, and improvements to the ergonomics of context bounds will be appreciated. + +The Pre-SIP also contained a proposal for a default naming convention of context bounds. If no explicit `as` clause is given, the name of the witness for +`X : C` would be `X`, instead of a synthesized name as is the case now. This led to extensive discussions on how to accommodate multiple context bounds. +I believe that a default naming convention for witnesses will be very beneficial in the long run, but as of today there are several possible candidate solutions, including: + + 1. Use default naming for single bounds only. + 2. If there are multiple bounds, as in `X: {A, B, C}` create a synthetic companion object `X` where selections `X.m` translate into + witness selections `A.m`, `B.m`, or `C.m`. Disallow any references to the companion that remain after that expansion. + 3. 
Like (2), but use the synthetic companion approach also for single bounds. + 4. Create real aggregate given objects that represent multiple bounds. + +Since it is at present not clear what the best solution would be, I decided to defer the question of default names to a later SIP. + +This SIP proposed originally a different syntax for givens that made use +of postfix `as name` for optional names and still followed method syntax in some elements. The 9x2 variants of the original proposal are as follows. + +```scala + // Simple typeclass + given Ord[Int]: + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass + given [A: Ord] => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Typeclass with context parameter + given [A](using Ord[A]) => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Typeclass with named context parameter + given [A](using ord: Ord[A]) => Ord[List[A]]: + def compare(x: List[A], y: List[A]) = ... + + // Simple alias + given Ord[Int] = IntOrd() + + // Parameterized alias + given [A: Ord] => Ord[List[A]] = + ListOrd[A] + + // Alias with explicit context parameter + given [A](using Ord[A]) => Ord[List[A]] = + ListOrd[A] + + // Abstract or deferred given + given Context = deferred + + // By-name given + given => Context = curCtx +``` +Named: + +```scala + // Simple typeclass + given Ord[Int] as intOrd: + def compare(x: Int, y: Int) = ... + + // Parameterized typeclass + given [A: Ord] => Ord[List[A]] as listOrd: + def compare(x: List[A], y: List[A]) = ... + + // Typeclass with context parameter + given [A](using Ord[A]) => Ord[List[A]] as listOrd: + def compare(x: List[A], y: List[A]) = ... + + // Typeclass with named context parameter + given [A](using ord: Ord[A]) => Ord[List[A]] as listOrd: + def compare(x: List[A], y: List[A]) = ... 
+ + // Simple alias + given Ord[Int] as intOrd = IntOrd() + + // Parameterized alias + given [A: Ord] => Ord[List[A]] as listOrd = + ListOrd[A] + + // Alias with using clause + given [A](using Ord[A]) => Ord[List[A]] as listOrd = + ListOrd[A] + + // Abstract or deferred given + given Context as context = deferred + + // By-name given + given => Context as context = curCtx +``` + +The discussion on contributors raised some concerns with that original proposal. One concern was that changing to postfix `as` for optional names +would be too much of a change, in particular for simple given aliases. Another concern was that the `=>` felt unfamiliar in this place since it resembled a function type yet other syntactic elements followed method syntax. The revised proposal given here addresses these points by +going back to the usual `name:` syntax for optional names and doubling down +on function syntax to reinforce the intuition that givens implement types. + + +## Summary + +The proposed set of changes removes awkward syntax and makes dealing with context bounds and givens a lot more regular and pleasant. In summary, the main proposed changes are: + + 1. Allow to name context bounds with `as` clauses. + 2. Introduce a less cryptic syntax for multiple context bounds. + 3. Refine the rules how context bounds are expanded to account for explicit names. + 4. Allow context bounds on type members which expand to deferred givens. + 5. Drop abstract givens since they are largely redundant with deferred givens. + 6. Allow context bounds for polymorphic functions. + 7. Introduce a more regular and clearer syntax for givens. + +These changes were implemented under the experimental language import +```scala +import language.experimental.modularity +``` +which also covers some other prospective changes slated to be proposed future SIPs. 
The new system has proven to work well and to address several fundamental issues people were having with +existing implementation techniques for type classes. + +The changes proposed in this SIP are time-sensitive since we would like to correct some awkward syntax choices in Scala 3 before more code migrates to the new constructs (so far, it seems most code still uses Scala 2 style implicits, which will eventually be phased out). It is easy to migrate to the new syntax and to support both old and new for a transition period. diff --git a/_sips/sips/unroll-default-arguments-for-binary-compatibility.md b/_sips/sips/unroll-default-arguments-for-binary-compatibility.md deleted file mode 100644 index 0acee8e40..000000000 --- a/_sips/sips/unroll-default-arguments-for-binary-compatibility.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: SIP-61 - Unroll Default Arguments for Binary Compatibility -status: under-review -pull-request-number: 78 -stage: design - ---- diff --git a/_sips/sips/unroll-default-arguments.md b/_sips/sips/unroll-default-arguments.md new file mode 100644 index 000000000..5af8cddeb --- /dev/null +++ b/_sips/sips/unroll-default-arguments.md @@ -0,0 +1,934 @@ +--- +layout: sip +permalink: /sips/:title.html +stage: implementation +status: waiting-for-implementation +title: SIP-61 - Unroll Default Arguments for Binary Compatibility +--- + +**By: Li Haoyi** + +## History + +| Date | Version | +|---------------|--------------------| +| Feb 14th 2024 | Initial Draft | + +## Summary + +This SIP proposes an `@unroll` annotation that lets you add additional parameters +to method `def`s, `class` constructors, or `case class`es, without breaking binary +compatibility. 
`@unroll` works by generating "unrolled" or "telescoping" forwarders: + +```scala +// Original +def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l + +// Generated +def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0) +def foo(s: String, n: Int) = foo(s, n, true, 0) +``` + +In contrast to most existing or proposed alternatives that require you to contort your +code to become binary compatible (see [Major Alternatives](#major-alternatives)), +`@unroll` allows you to write Scala with vanilla `def`s/`class`es/`case class`es, add +a single annotation, and your code will maintain binary compatibility as new default +parameters and fields are added over time. + +`@unroll`'s only constraints are that: + +1. New parameters need to have a default value +2. New parameters can only be added on the right +3. The `@unroll`ed methods must be abstract or final + +These are existing industry-wide standards when dealing with data and schema evolution +(e.g. [Schema evolution in Avro, Protocol Buffers and Thrift — Martin Kleppmann’s blog](https://martin.kleppmann.com/2012/12/05/schema-evolution-in-avro-protocol-buffers-thrift.html)), +and are also the way the new parameters interact with _source compatibility_ in +the Scala language. Thus these constraints should be immediately familiar to any +experienced programmers, and would be easy to follow without confusion. + +Prior Discussion can be found [here](https://contributors.scala-lang.org/t/can-we-make-adding-a-parameter-with-a-default-value-binary-compatible/6132) + +## Motivation + +Maintaining binary compatibility of Scala libraries as they evolve over time is +difficult. Although tools like https://github.com/lightbend/mima help _surface_ +issues, actually _resolving_ those issues is a different challenge. + +Some kinds of library changes are fundamentally impossible to make compatible, +e.g. removing methods or classes. 
But there is one big class of binary compatibility +issues that are "spurious": adding default parameters to methods, `class` constructors, +or `case class`es. + +Adding a default parameter is source-compatible, but not binary compatible: a user +downstream of a library that adds a default parameter does not need to make any +changes to their code, but _does_ need to re-compile it. This is "spurious" because +there is no _fundamental_ incompatibility here: semantically, a new default parameter +is meant to be optional! Old code invoking that method without a new default parameter +is exactly the user intent, and works just fine if the downstream code is re-compiled. + +Other languages, such as Python, have the same default parameter language feature but face +no such compatibility issues with their use. Even Scala codebases compiled from source +do not suffer these restrictions: adding a default parameter to the right side of a parameter +list is for all intents and purposes backwards compatible in a mono-repo setup. +The fact that such addition is binary incompatible is purely an implementation restriction +of Scala's binary artifact format and distribution strategy. + +**Binary compatibility is generally more important than Source compatibility**. When +you hit a source compatibility issue, you can always change the source code you are +compiling, whether manually or via your build tool. In contrast, when you hit binary +compatibility issues, it can come in the form of diamond dependencies that would require +_re-compiling all of your transitive dependencies_, a task that is far more difficult and +often impractical. + +There are many approaches to resolving these "spurious" binary compatibility issues, +but most of them involve either tremendous amounts of boilerplate writing +binary-compatibility forwarders, giving up on core language features like Case Classes +or Default Parameters, or both. 
Consider the following code snippet +([link](https://github.com/com-lihaoyi/mainargs/blob/1d04a6bd19aaca401d11fe26da31615a8bc9213c/mainargs/src/Parser.scala)) +from the [com-lihaoyi/mainargs](https://github.com/com-lihaoyi/mainargs) library, which +duplicates the parameters of `def constructEither` no less than five times in +order to maintain binary compatibility as the library evolves and more default +parameters are added to `def constructEither`: + +```scala + def constructEither( + args: Seq[String], + allowPositional: Boolean, + allowRepeats: Boolean, + totalWidth: Int, + printHelpOnExit: Boolean, + docsOnNewLine: Boolean, + autoPrintHelpAndExit: Option[(Int, PrintStream)], + customName: String, + customDoc: String, + sorted: Boolean, + ): Either[String, T] = constructEither( + args, + allowPositional, + allowRepeats, + totalWidth, + printHelpOnExit, + docsOnNewLine, + autoPrintHelpAndExit, + customName, + customDoc, + sorted, + ) + + def constructEither( + args: Seq[String], + allowPositional: Boolean = false, + allowRepeats: Boolean = false, + totalWidth: Int = 100, + printHelpOnExit: Boolean = true, + docsOnNewLine: Boolean = false, + autoPrintHelpAndExit: Option[(Int, PrintStream)] = Some((0, System.out)), + customName: String = null, + customDoc: String = null, + sorted: Boolean = true, + nameMapper: String => Option[String] = Util.kebabCaseNameMapper + ): Either[String, T] = ??? + + /** binary compatibility shim. 
*/ + private[mainargs] def constructEither( + args: Seq[String], + allowPositional: Boolean, + allowRepeats: Boolean, + totalWidth: Int, + printHelpOnExit: Boolean, + docsOnNewLine: Boolean, + autoPrintHelpAndExit: Option[(Int, PrintStream)], + customName: String, + customDoc: String, + nameMapper: String => Option[String] + ): Either[String, T] = constructEither( + args, + allowPositional, + allowRepeats, + totalWidth, + printHelpOnExit, + docsOnNewLine, + autoPrintHelpAndExit, + customName, + customDoc, + sorted = true, + nameMapper = nameMapper + ) +``` + +Apart from being extremely verbose and full of boilerplate, like any boilerplate this is +also extremely error-prone. Bugs like [com-lihaoyi/mainargs#106](https://github.com/com-lihaoyi/mainargs/issues/106) +slip through when a mistake is made in that boilerplate. These bugs are impossible to catch +using a normal test suite, as they only appear in the presence of version skew. The above code +snippet actually _does_ have such a bug, that the test suite _did not_ catch. See if you can +spot it! + +Sébastien Doeraene's talk [Designing Libraries for Source and Binary Compatibility](https://www.youtube.com/watch?v=2wkEX6MCxJs) +explores some of the challenges, and discusses the workarounds. + + +## Requirements + +### Backwards Compatibility + +Given: + +* Two libraries, **Upstream** and **Downstream**, where **Downstream** depends on **Upstream** + +* If we use a _newer_ version of **Upstream** which contains an added + default parameter together with an _older_ version of **Downstream** compiled + against an _older_ version of **Upstream** before that default parameter was added + +* The behavior should be binary compatible and semantically indistinguishable from using + a version of **Downstream** compiled against the _newer_ version of **Upstream** + +**Note:** we do not aim for _Forwards_ compatibility. 
Using an _older_ +version of **Upstream** with a _newer_ version of **Downstream** compiled against a +_newer_ version of **Upstream** is not a use case we want to support. The vast majority +of OSS software does not promise forwards compatibility, including software such as +the JVM, so we should just follow suit + +### All Overrides Are Equivalent + +All versions of an `@unroll`ed method `def foo` should have the same semantics when called +with the same parameters. We must be careful to ensure: + +1. All our different method overrides point at the same underlying implementation +2. Abstract methods are properly implemented, and no method would fail with an + `AbstractMethodError` when called +3. We properly forward the necessary argument and default parameter values when + calling the respective implementation. + +## Proposed solution + + +The proposed solution is to provide a `scala.annotation.unroll` annotation, that +can be applied to method `def`s, `class` constructors, or `case class`es to generate +"unrolled" or "telescoping" versions of a method that forward to the primary implementation: + +```scala + def constructEither( + args: Seq[String], + allowPositional: Boolean = false, + allowRepeats: Boolean = false, + totalWidth: Int = 100, + printHelpOnExit: Boolean = true, + docsOnNewLine: Boolean = false, + autoPrintHelpAndExit: Option[(Int, PrintStream)] = Some((0, System.out)), + customName: String = null, + customDoc: String = null, + @unroll sorted: Boolean = true, + @unroll nameMapper: String => Option[String] = Util.kebabCaseNameMapper + ): Either[String, T] = ??? +``` + +This allows the developer to write the minimal amount of code they _want_ to write, +and add a single annotation to allow binary compatibility to old versions. 
In this +case, we annotated `sorted` and `nameMapper` with `@unroll`, which generates forwarders that make +`def constructEither` binary compatible with older versions that have fewer parameters, +up to a version before `sorted` or `nameMapper` was added. Any existing method `def`, `class`, or +`case class` can be evolved in this way, by addition of `@unroll` the first time +a default argument is added to their signature after its initial definition. + +### Unrolling `def`s + +Consider a library that is written as follows: + +```scala +object Unrolled{ + def foo(s: String, n: Int = 1) = s + n +} +``` + +If over time a new default parameter is added: + +```scala +object Unrolled{ + def foo(s: String, n: Int = 1, b: Boolean = true) = s + n + b +} +``` + +And another + +```scala +object Unrolled{ + def foo(s: String, n: Int = 1, b: Boolean = true, l: Long = 0) = s + n + b + l +} +``` + +This is a source-compatible change, but not binary-compatible: JVM bytecode compiled against an +earlier version of the library would be expecting to call `def foo(String, Int)`, but will fail +because the signature is now `def foo(String, Int, Boolean)` or `def foo(String, Int, Boolean, Long)`. +On the JVM this will result in a `NoSuchMethodError` at runtime, a common experience for anyone +upgrading the versions of their dependencies. 
Similar concerns are present with Scala.js and +Scala-Native, albeit the failure happens at link-time rather than run-time + +`@unroll` is an annotation that can be applied as follows, to the first "additional" default +parameter that was added in each published version of the library (in this case, +`b: Boolean = true` and `l: Long = 0`) + + +```scala +import scala.annotation.unroll + +object Unrolled{ + def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l +} +``` + +The `@unroll` annotation takes `def foo` and generates synthetic forwarders for the purpose +of maintaining binary compatibility for old callers who may be expecting the previous signature. +These forwarders do nothing but forward the call to the current implementation, using the +given default parameter values: + +```scala +object Unrolled{ + def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l + + def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0) + def foo(s: String, n: Int) = foo(s, n, true, 0) +} +``` + +As a result, old callers who expect `def foo(String, Int, Boolean)` or `def foo(String, Int, Boolean, Long)` +can continue to work, even as new parameters are added to `def foo`. The only restriction is that +new parameters can only be added on the right, and they must be provided with a default value. + +If multiple default parameters are added at once (e.g. `b` and `l` below) you can also +choose to only `@unroll` the first default parameter of each batch, to avoid generating +unnecessary forwarders: + +```scala +object Unrolled{ + def foo(s: String, n: Int = 1, @unroll b: Boolean = true, l: Long = 0) = s + n + b + l + + def foo(s: String, n: Int) = foo(s, n, true, 0) +} +``` + +If there are multiple parameter lists (e.g. for curried methods or methods taking implicits) only one +parameter list can be unrolled (though it does not need to be the first one). e.g. 
this works: + +```scala +object Unrolled{ + def foo(s: String, + n: Int = 1, + @unroll b: Boolean = true, + @unroll l: Long = 0) + (implicit blah: Blah) = s + n + b + l +} +``` + +As does this + +```scala +object Unrolled{ + def foo(blah: Blah) + (s: String, + n: Int = 1, + @unroll b: Boolean = true, + @unroll l: Long = 0) = s + n + b + l +} +``` + +`@unroll`ed methods can be defined in `object`s, `class`es, or `trait`s. Other cases are shown below. + +### Unrolling `class`es + +Class constructors and secondary constructors are treated by `@unroll` just like any +other method: + +```scala +class Unrolled(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0){ + def foo = s + n + b + l +} +``` + +Unrolls to: + +```scala +class Unrolled(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0){ + def foo = s + n + b + l + + def this(s: String, n: Int, b: Boolean) = this(s, n, b, 0) + def this(s: String, n: Int) = this(s, n, true, 0) +} +``` + +### Unrolling `class` secondary constructors + +```scala +class Unrolled() { + var foo = "" + + def this(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = { + this() + foo = s + n + b + l + } +} +``` + +Unrolls to: + +```scala +class Unrolled() { + var foo = "" + + def this(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = { + this() + foo = s + n + b + l + } + + def this(s: String, n: Int, b: Boolean) = this(s, n, b, 0) + def this(s: String, n: Int) = this(s, n, true, 0) +} +``` + +### Case Classes + +`case class`es can also be `@unroll`ed. Unlike normal `class` constructors +and method `def`s, `case class`es have several generated methods (`apply`, `copy`) +that need to be kept in sync with their primary constructor. 
`@unroll` thus +generates forwarders for those methods as well, based on the presence of the +`@unroll` annotation in the primary constructor: + +```scala +case class Unrolled(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0L){ + def foo = s + n + b + l +} +``` + +Unrolls to: + +```scala +case class Unrolled(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0L){ + def this(s: String, n: Int) = this(s, n, true, 0L) + def this(s: String, n: Int, b: Boolean) = this(s, n, b, 0L) + + def copy(s: String, n: Int) = copy(s, n, this.b, this.l) + def copy(s: String, n: Int, b: Boolean) = copy(s, n, b, this.l) + + def foo = s + n + b + l +} +object Unrolled{ + def apply(s: String, n: Int) = apply(s, n, true, 0L) + def apply(s: String, n: Int, b: Boolean) = apply(s, n, b, 0L) +} +``` + +Notes: + +1. `@unroll`ed `case class`es are fully binary and backwards compatible in Scala 3, but not in Scala 2 + +2. `.unapply` does not need to be duplicated in Scala 3.x, as its signature + `def unapply(x: Unrolled): Unrolled` does not change when new `case class` fields are + added. + +3. Even in Scala 2.x, where `def unapply(x: Unrolled): Option[TupleN]` is not + binary compatible, pattern matching on `case class`es is already binary compatible + to addition of new fields due to + [Option-less Pattern Matching](https://docs.scala-lang.org/scala3/reference/changed-features/pattern-matching.html). 
+ Thus, only calls to `.tupled` or `.curried` on the `case class` companion `object`, or direct calls + to `.unapply` on an unrolled `case class` in Scala 2.x (shown below) + will cause a crash if additional fields were added: + +```scala +def foo(t: (String, Int)) = println(t) +Unrolled.unapply(unrolled).map(foo) +``` + +In Scala 3, `@unroll`ing a `case class` also needs to generate a `fromProduct` +implementation in the companion object, as shown below: + +```scala +def fromProduct(p: Product): CaseClass = p.productArity match + case 2 => + CaseClass( + p.productElement(0).asInstanceOf[...], + p.productElement(1).asInstanceOf[...], + ) + case 3 => + CaseClass( + p.productElement(0).asInstanceOf[...], + p.productElement(1).asInstanceOf[...], + p.productElement(2).asInstanceOf[...], + ) + ... +``` + +This is not necessary for preserving binary compatibility - the method signature of +`def fromProduct` does not change depending on the number of fields - but it is +necessary to preserve semantic compatibility. `fromProduct` by default does not +take into account field default values, and this change is necessary to make it +use them when the given `p: Product` has a smaller `productArity` than the current +`CaseClass` implementation + + +### Hiding Generated Forwarder Methods + +As the generated forwarder methods are intended only for binary compatibility purposes, +we should generally hide them: IDEs, downstream compilers, ScalaDoc, etc. should behave as +if the generated methods do not exist. + +This is done in two different ways: + +1. In Scala 2, we generate the methods in a post-`pickler` phase. This ensures they do + not appear in the scala signature, and thus are not exposed to downstream tooling + +2. In Scala 3, the generated methods are flagged as `Invisible` + +## Limitations + +### Only the one parameter list of multi-parameter list methods can be `@unroll`ed. 
+ +Unrolling multiple parameter lists would generate a number +of forwarder methods exponential with regard to the number of parameter lists unrolled, +and the generated forwarders may begin to conflict with each other. We can choose to spec +this out and implement it later if necessary, but for 99% of use cases `@unroll`ing one +parameter list should be enough. Typically, only one parameter list in a method has default +arguments, with other parameter lists being `implicit`s or a single callback/blocks, neither +of which usually has default values. + +### Unrolled forwarder methods can collide with manually-defined overrides + +This is similar to any other generated methods. We can raise an error to help users +debug such scenarios, but such name collisions are inevitably possible given how binary +compatibility on the JVM works. + +### `@unroll`ed case classes are only fully binary compatible in Scala 3 + + +They are _almost_ binary compatible in Scala 2. Direct calls to `unapply` are binary +incompatible, but most common pattern matching of `case class`es goes through a different +code path that _is_ binary compatible. There are also the `AbstractFunctionN` traits, from +which the companion object inherits `.curried` and `.tupled` members. Luckily, `unapply` +was made binary compatible in Scala 3, and `AbstractFunctionN`, `.curried`, and `.tupled` +were removed + +### While `@unroll`ed `case class`es are *not* fully _source_ compatible + +This is due to the fact that pattern matching requires all arguments to +be specified. This proposal does not change that. Future improvements related to +[Pattern Matching on Named Fields](https://github.com/scala/improvement-proposals/pull/44) +may bring improvements here. But as we discussed earlier, binary compatibility is generally +more important than source compatibility, and so we do not need to wait for any source +compatibility improvements to land before proceeding with these binary compatibility +improvements. 
+ +### Binary and semantic compatibility for macro-derived typeclasses is out of scope + + +This proposal does not have any opinion on whether or not macro-derivation is binary/source/semantically +compatible. That is up to the +individual macro implementations to decide. e.g., [uPickle](https://github.com/com-lihaoyi/upickle) +has a very similar rule about adding `case class` fields, except that field ordering +does not matter. Trying to standardize this across all possible macros and all possible +typeclasses is out of scope + +### `@unroll` generates a quadratic amount of generated bytecode as more default parameters are added + +Each forwarder has `O(num-params)` size, and there are `O(num-default-params)` +forwarders. We do not expect this to be a problem in practice, as the small size of the +generated forwarder methods means the constant factor is small, but one could imagine +the `O(n^2)` asymptotic complexity becoming a problem if a method accumulates hundreds of +default parameters over time. In such extreme scenarios, some kind of builder pattern +(such as those listed in [Major Alternatives](#major-alternatives)) may be preferable. + +### `@unroll` only supports `final` methods. + +`object` methods and constructors are naturally +final, but `class` or `trait` methods that are `@unroll`ed need to be explicitly marked `final`. +It has proved difficult to implement the semantics of `@unroll` in the presence of downstream +overrides, `super`, etc. where the downstream overrides can be compiled against by different +versions of the upstream code. If we can come up with some implementation that works, we can +lift this restriction later, but for now I have not managed to do so and so this restriction +stays. 
+ +### Challenges of Non-Final Methods and Overriding + +To elaborate a bit on the issues with non-final methods and overriding, consider the following +case with four classes, `Upstream`, `Downstream`, `Main1` and `Main2`, each of which is compiled +against different versions of each other (hence the varying number of parameters for `foo`): + +```scala +class Upstream{ // V2 + def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l +} +``` + +```scala +class Downstream extends Upstream{ // compiled against Upstream V1 + override def foo(s: String, n: Int = 1) = super.foo(s, n) + s + n +} +``` + +```scala +object Main1 { // compiled against Upstream V2 + def main(args: Array[String]): Unit = { + new Downstream().foo("hello", 123, false, 456L) + } +} +``` + +```scala +object Main2 { // compiled against Upstream V1 + def main(args: Array[String]): Unit = { + new Downstream().foo("hello", 123) + } +} +``` + + +The challenge here is: how do we make sure that `Main1` and `Main2`, who call +`new Downstream().foo`, correctly pick up the version of `def foo` that is +provided by `Downstream`? + +With the current implementation, the `override def foo` inside `Downstream` would only +override one of `Upstream`'s synthetic forwarders, but would not override the actual +primary implementation. As a result, we would see `Main1` calling the implementation +of `foo` from `Upstream`, while `Main2` calls the implementation of `foo` from +`Downstream`. So even though both `Main1` and `Main2` have the same +`Upstream` and `Downstream` code on the classpath, they end up calling different +implementations based on what they were compiled against. 
+
+We cannot perform the method search and dispatch _within_ the `def foo` methods,
+because it depends on exactly _how_ `foo` is called: the `InvokeVirtual` call from
+`Main1` is meant to resolve to `Downstream#foo`, while the `InvokeSpecial` call
+from `Downstream#foo`'s `super.foo` is meant to resolve to `Upstream#foo`. But a
+method implementation cannot know how it was called, and thus it is impossible
+for `def foo` to forward the call to the right place.
+
+Like our treatment of [Abstract Methods](#abstract-methods), this scenario can never
+happen according to what version combinations are supported by our definition of
+[Backwards Compatibility](#backwards-compatibility), but nevertheless is a real
+concern due to the requirement that [All Overrides Are Equivalent](#all-overrides-are-equivalent).
+
+It may be possible to loosen this restriction to also allow abstract methods that
+are implemented only once by a final method. See the section about
+[Abstract Methods](#abstract-methods) for details.
+
+## Major Alternatives
+
+The major alternatives to `@unroll` are listed below:
+
+1. [data-class](https://index.scala-lang.org/alexarchambault/data-class)
+2. [SBT Contraband](https://www.scala-sbt.org/contraband/)
+3. [Structural Data Structures](https://contributors.scala-lang.org/t/pre-sip-structural-data-structures-that-can-evolve-in-a-binary-compatible-way/5684)
+4. Avoiding language features like `case class`es or default parameters, as suggested by the
+   [Binary Compatibility for Library Authors](https://docs.scala-lang.org/overviews/core/binary-compatibility-for-library-authors.html) documentation.
+
+While those alternate approaches _do work_ - `data-class` and `SBT Contraband` are used heavily
+in various open-source projects - I believe they are inferior to the approach that `@unroll`
+takes:
+
+### Case Class v.s. not-a-Case-Class
+
+The first major difference between `@unroll` and the above alternatives is that these alternatives
+all introduce something new: some kind of _not-a-case-class_ `class` that is to be used
+when binary compatibility is desired. This _not-a-case-class_ has different syntax from
+`case class`es, different semantics, different methods, and so on.
+
+In contrast, `@unroll` does not introduce any new language-level or library-level constructs.
+The `@unroll` annotation is purely a compiler-backend concern for maintaining binary
+compatibility. At a language level, `@unroll` allows you to keep using normal method `def`s,
+`class`es and `case class`es with exactly the same syntax and semantics you have been using
+all along.
+
+Having people be constantly choosing between _case-class_ and _not-a-case-class_ when
+designing their data types, is inferior to simply using `case class`es all the time.
+
+
+### Scala Syntax v.s. Java-esque Syntax
+
+
+The alternatives linked above all build a
+Java-esque "[inner platform](https://en.wikipedia.org/wiki/Inner-platform_effect)"
+on top of the Scala language, with its own conventions like `.withFoo` methods.
+
+In contrast, `@unroll` makes use of the existing Scala language's default parameters
+to achieve the same effect.
+
+If we think Scala is nicer to write than Java due to its language
+features, then `@unroll`'s approach of leveraging those language features is nicer
+to use than the alternative's Java-esque syntax.
+
+Having implementation-level problems - which is what binary compatibility across version
+skew is - bleed into the syntax and semantics of the language is also inferior to having it
+be controlled by an annotation. Martin Odersky has said that annotations are intended for
+things that do not affect typechecking, and `@unroll` fits the bill perfectly.
+
+
+### Evolving Any Class v.s. Evolving Pre-determined Classes
+
+The alternatives given require that the developer has to decide _up front_ whether their
+data type needs to be evolved while maintaining binary compatibility.
+
+In contrast, `@unroll` allows you to evolve any existing `class` or `case class`.
+
+In general, trying to decide which classes _will need to evolve later on_ is a difficult
+task that is easy to get wrong. `@unroll` totally removes that requirement, allowing
+you to take _any_ `class` or `case class` and evolve it later in a binary compatible way.
+
+
+### Binary Compatibility for Methods and Classes
+
+Lastly, the above alternatives only solve _half_ the problem: how to evolve `case class`es.
+This is _schema evolution_.
+
+Binary compatibility is not just a problem for `case class`es adding new fields: normal
+`class` constructors, instance method `def`s, static method `def`s, etc. have default
+parameters added all the time as well.
+
+In contrast, `@unroll` allows the evolution of `def`s and normal `class`es, in addition
+to `case class`es, all using the same approach:
+
+1. `@unroll`ing `case class`es is about _schema evolution_
+2. `@unroll`ing concrete method `def`s is about _API evolution_
+3. `@unroll`ing abstract method `def`s is about _protocol evolution_
+
+All three cases above have analogous best practices in the broader software engineering
+world: whether you are adding an optional column to a database table, adding an
+optional flag to a command-line tool, or extending an existing protocol with optional
+fields that may need handling by both clients and servers implementing that protocol.
+
+`@unroll` solves all three problems at once - schema evolution, API evolution, and protocol
+evolution. It does so with the same Scala-level syntax and semantics, with the same requirements
+and limitations that common schema/API/protocol-evolution best-practices have in the broader
+software engineering community.
+
+### Abstract Methods
+
+Apart from `final` methods, `@unroll` also supports purely abstract methods. Consider
+the following example with a trait `Unrolled` and an implementation `UnrolledObj`:
+
+```scala
+trait Unrolled{ // version 3
+  def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0): String
+}
+```
+```scala
+object UnrolledObj extends Unrolled{ // version 3
+  def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l
+}
+```
+
+This unrolls to:
+```scala
+trait Unrolled{ // version 3
+  def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0): String = foo(s, n, b)
+  def foo(s: String, n: Int, b: Boolean): String = foo(s, n)
+  def foo(s: String, n: Int): String
+}
+```
+```scala
+object UnrolledObj extends Unrolled{ // version 3
+  def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l
+  def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0)
+  def foo(s: String, n: Int) = foo(s, n, true)
+}
+```
+
+Note that both the abstract methods from `trait Unrolled` and the concrete methods
+from `object UnrolledObj` generate forwarders when `@unroll`ed, but the forwarders
+are generated _in opposite directions_! Unrolled concrete methods forward from longer
+parameter lists to shorter parameter lists, while unrolled abstract methods forward
+from shorter parameter lists to longer parameter lists. For example, we may have a
+version of `object UnrolledObj` that was compiled against an earlier version of `trait Unrolled`:
+
+
+```scala
+object UnrolledObj extends Unrolled{ // version 2
+  def foo(s: String, n: Int = 1, @unroll b: Boolean = true) = s + n + b
+  def foo(s: String, n: Int) = foo(s, n, true)
+}
+```
+
+But further downstream code calling `.foo` on `UnrolledObj` may expect any of the following signatures,
+depending on what version of `Unrolled` and `UnrolledObj` it was compiled against:
+
+```scala
+UnrolledObj.foo(String, Int)
+UnrolledObj.foo(String, Int, Boolean)
+UnrolledObj.foo(String, Int, Boolean, Long)
+```
+
+Because such downstream code cannot know which version of `Unrolled` that `UnrolledObj`
+was compiled against, we need to ensure all such calls find their way to the correct
+implementation of `def foo`, which may be at any of the above signatures. This "double
+forwarding" strategy ensures that regardless of _which_ version of `.foo` gets called,
+it ends up eventually forwarding to the actual implementation of `foo`, with
+the correct combination of passed arguments and default arguments.
+
+```scala
+UnrolledObj.foo(String, Int) // forwards to UnrolledObj.foo(String, Int, Boolean)
+UnrolledObj.foo(String, Int, Boolean) // actual implementation
+UnrolledObj.foo(String, Int, Boolean, Long) // forwards to UnrolledObj.foo(String, Int, Boolean)
+```
+
+As is the case for `@unroll`ed methods on `trait`s and `class`es, `@unroll`ed
+implementations of an abstract method must be final.
+
+#### Are Reverse Forwarders Really Necessary?
+ +This "double forwarding" strategy is not strictly necessary to support +[Backwards Compatibility](#backwards-compatibility): the "reverse" forwarders +generated for abstract methods are only necessary when a downstream callsite +of `UnrolledObj.foo` is compiled against a newer version of the original +`trait Unrolled` than the `object UnrolledObj` was, as shown below: + +```scala +trait Unrolled{ // version 3 + def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0): String = foo(s, n, b) + // generated + def foo(s: String, n: Int, b: Boolean): String = foo(s, n) + def foo(s: String, n: Int): String +} +``` +```scala +object UnrolledObj extends Unrolled{ // version 2 + def foo(s: String, n: Int = 1, @unroll b: Boolean = true) = s + n + b + // generated + def foo(s: String, n: Int) = foo(s, n, true) +} +``` +```scala +// version 3 +UnrolledObj.foo("hello", 123, true, 456L) +``` + +If we did not have the reverse forwarder from `foo(String, Int, Boolean, Long)` to +`foo(String, Int, Boolean)`, this call would fail at runtime with an `AbstractMethodError`. +It also will get caught by MiMa as a `ReversedMissingMethodProblem`. + +This configuration of version is not allowed given our definition of backwards compatibility: +that definition assumes that `Unrolled` must be of a greater or equal version than `UnrolledObj`, +which itself must be of a greater or equal version than the final call to `UnrolledObj.foo`. However, +the reverse forwarders are needed to fulfill our requirement +[All Overrides Are Equivalent](#all-overrides-are-equivalent): +looking at `trait Unrolled // version 3` and `object UnrolledObj // version 2` in isolation, +we find that without the reverse forwarders the signature `foo(String, Int, Boolean, Long)` +is defined but not implemented. Such an un-implemented abstract method is something +we want to avoid, even if our artifact version constraints mean it should technically +never get called. 
+
+## Minor Alternatives:
+
+
+### `@unrollAll`
+
+Currently, `@unroll` generates a forwarder only for the annotated default parameter;
+if you want to generate multiple forwarders, you need to `@unroll` each one. In the
+vast majority of scenarios, we want to unroll every default parameter we add, and in
+many cases default parameters are added one at a time. In this case, an `@unrollAll`
+annotation may be useful, a shorthand for applying `@unroll` to the annotated default
+parameter and every parameter to the right of it:
+
+```scala
+object Unrolled{
+  def foo(s: String, n: Int = 1, @unrollAll b: Boolean = true, l: Long = 0) = s + n + b + l
+}
+```
+```scala
+object Unrolled{
+  def foo(s: String, n: Int = 1, b: Boolean = true, l: Long = 0) = s + n + b + l
+
+  def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0)
+  def foo(s: String, n: Int) = foo(s, n, true, 0)
+}
+```
+
+
+
+### Generating Forwarders For Parameter Type Widening or Result Type Narrowing
+
+While this proposal focuses on generating forwarders for addition of default parameters,
+you can also imagine similar forwarders being generated if method parameter types
+are widened or if result types are narrowed:
+
+```scala
+// Before
+def foo(s: String, n: Int = 1, b: Boolean = true) = s + n + b
+
+// After
+def foo(@unrollType[String] s: Object, n: Int = 1, b: Boolean = true) = s.toString + n + b
+
+// Generated
+def foo(s: Object, n: Int = 1, b: Boolean = true) = s.toString + n + b
+def foo(s: String, n: Int = 1, b: Boolean = true) = foo(s, n, b)
+```
+
+This would follow the precedent of how Java's and Scala's covariant method return
+type overrides are implemented: when a class overrides a method with a new
+implementation with a narrower return type, a forwarder method is generated to
+allow anyone calling the original signature to be forwarded to the narrower signature.
+
+This is not currently implemented in `@unroll`, but would be a straightforward addition.
+
+### Incremental Forwarders or Direct Forwarders
+
+Given this:
+
+```scala
+def foo(s: String, n: Int = 1, @unroll b: Boolean = true, @unroll l: Long = 0) = s + n + b + l
+```
+
+There are two ways to do the forwarders. First option, which I used above, is
+to have each forwarder directly call the primary method:
+
+```scala
+def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0)
+def foo(s: String, n: Int) = foo(s, n, true, 0)
+```
+
+Second option is to have each forwarder incrementally call the next forwarder, which
+will eventually end up calling the primary method:
+
+```scala
+def foo(s: String, n: Int, b: Boolean) = foo(s, n, b, 0)
+def foo(s: String, n: Int) = foo(s, n, true)
+```
+
+The first option results in shorter stack traces, while the second option results in
+roughly half as much generated bytecode in the method bodies (though it's still `O(n^2)`).
+
+In order to allow `@unroll`ing of [Abstract Methods](#abstract-methods), we had to go with
+the second option. This is because when an abstract method is overridden, it is not necessarily
+true that the longest override is the one that contains the implementation. Thus we need to forward
+between the different `def foo` overrides one at a time until the override containing the
+implementation is found.
+
+
+
+## Implementation & Testing
+
+This SIP has a full implementation for Scala {2.12, 2.13, 3} X {JVM, JS, Native}
+in the following repository, as a compiler plugin:
+
+- https://github.com/com-lihaoyi/unroll
+
+As the `@unroll` annotation is purely a compile-time construct and does not need to exist
+at runtime, `@unroll` can be added to Scala 2.13.x without breaking forwards compatibility.
+
+The linked repo also contains an extensive test suite that uses both MIMA as well
+as classpath-mangling to validate that it provides both the binary and semantic
+compatibility benefits claimed in this document. 
In fact, it has even discovered +bugs in the upstream Scala implementation related to binary compatibility, e.g. +[scala-native/scala-native#3747](https://github.com/scala-native/scala-native/issues/3747) + +I have also opened pull requests to a number of popular OSS Scala libraries, +using `@unroll` as a replacement for manually writing binary compatibility stubs, +and the 100s of lines of boilerplate reduction can be seen in the links below: + +- https://github.com/com-lihaoyi/mainargs/pull/113/files +- https://github.com/com-lihaoyi/mill/pull/3008/files +- https://github.com/com-lihaoyi/upickle/pull/555/files +- https://github.com/com-lihaoyi/os-lib/pull/254 + +These pull requests all pass both the test suite as well as the MIMA +`check-binary-compatibility` job, demonstrating that this approach does work +in real-world codebases. At time of writing, these are published under the following +artifacts and can be used in your own projects already: + +- Compiler Plugin: `ivy"com.lihaoyi::unroll-plugin:0.1.12"` +- Annotation: `ivy"com.lihaoyi::unroll-annotation:0.1.12"` diff --git a/_style/naming-conventions.md b/_style/naming-conventions.md index 1e796b414..7cdbc493f 100644 --- a/_style/naming-conventions.md +++ b/_style/naming-conventions.md @@ -195,44 +195,36 @@ getter and setter. For example: ### Parentheses -Unlike Ruby, Scala attaches significance to whether or not a method is -*declared* with parentheses (only applicable to methods of -[arity](https://en.wikipedia.org/wiki/Arity)-0). For example: +Scala allows a parameterless, zero-[arity](https://en.wikipedia.org/wiki/Arity) +method to be declared with an empty parameter list: def foo1() = ... +or with no parameter lists at all: + def foo2 = ... -These are different methods at compile-time. While `foo1` can be called -with or without the parentheses, `foo2` *may not* be called *with* -parentheses. 
- -Thus, it is actually quite important that proper guidelines be observed -regarding when it is appropriate to declare a method without parentheses -and when it is not. - -Methods which act as accessors of any sort (either encapsulating a field -or a logical property) should be declared *without* parentheses except -if they have side effects. While Ruby and Lift use a `!` to indicate -this, the usage of parens is preferred (please note that fluid APIs and -internal domain-specific languages have a tendency to break the -guidelines given below for the sake of syntax. Such exceptions should -not be considered a violation so much as a time when these rules do not -apply. In a DSL, syntax should be paramount over convention). - -Further, the callsite should follow the declaration; if declared with -parentheses, call with parentheses. While there is temptation to save a -few characters, if you follow this guideline, your code will be *much* -more readable and maintainable. - - // doesn't change state, call as birthdate - def birthdate = firstName - - // updates our internal state, call as age() - def age() = { - _age = updateAge(birthdate) - _age - } +By convention, parentheses are used to indicate that a method has +side effects, such as altering the receiver. + +On the other hand, the absence of parentheses indicates that a +method is like an accessor: it returns a value without altering the +receiver, and on the same receiver in the same state, it always +returns the same answer. + +The callsite should follow the declaration; if declared with +parentheses, call with parentheses. + +These conventions are followed in the Scala standard library and +you should follow them in your own code as well. + +Additional notes: + +* Scala 3 errors if you leave out the parentheses at the call site. Scala 2 merely warns. +* Scala 3 and 2 both error if the call site has parentheses where the definition doesn't. 
+* Java-defined methods are exempt from this distinction and may be called either way. +* If a method _does_ take parameters, there isn't any convention for indicating whether it also has side effects. +* Creating an object isn't considered a side effect. So for example, Scala collections have an `iterator` method with no parens. Yes, you get a new iterator each time. And yes, iterators are mutable. But every fresh iterator is the same until it has been altered by calling a side-effecting method such as `Iterator#next()`, which _is_ declared with parentheses. See this [2018 design discussion](https://github.com/scala/collection-strawman/issues/520). ### Symbolic Method Names diff --git a/_style/scaladoc.md b/_style/scaladoc.md index 7a78bd713..12dc9528a 100644 --- a/_style/scaladoc.md +++ b/_style/scaladoc.md @@ -208,7 +208,7 @@ sure to indicate the actual method names: * @return a new Person instance with the age determined by the * birthdate and current date. */ - def apply(name: String, birthDate: java.util.Date) = {} + def apply(name: String, birthDate: java.time.LocalDate) = {} } If your object holds implicit conversions, provide an example in the diff --git a/_th/cheatsheets/index.md b/_th/cheatsheets/index.md index 49861d3f0..b59cf817a 100644 --- a/_th/cheatsheets/index.md +++ b/_th/cheatsheets/index.md @@ -66,9 +66,9 @@ language: "th" | `for (i <- 1 until 5) {`
`println(i)`
`}` | ทำความเข้าใจ for : ทำซ้ำโดยละเว้นขอบเขตบน | | จับคู่รูปแบบ | | | Good
`(xs zip ys) map { case (x,y) => x*y }`
Bad
`(xs zip ys) map( (x,y) => x*y )` | ใช้ case ใน arg ของฟังก์ชันสำหรับ จับคู่รูปแบบ (pattern maching) | -| Bad
`val v42 = 42`
`Some(3) match {`
` case Some(v42) => println("42")`
` case _ => println("Not 42")`
`}` | "v42" ถูกตีความว่าเป็นชื่อที่ตรงกับค่า Int และพิมพ์ "42" | -| Good
`val v42 = 42`
`Some(3) match {`
`` case Some(`v42`) => println("42")``
`case _ => println("Not 42")`
`}` | "v42" กับ backticks ถูกตีความว่าเป็น v42 val ที่มีอยู่และ
พิมพ์ "Not 42" | -| Good
`val UppercaseVal = 42`
`Some(3) match {`
` case Some(UppercaseVal) => println("42")`
` case _ => println("Not 42")`
`}` | UppercaseVal ถือว่าเป็น val ที่มีอยู่ไม่ใช่ตัวแปรรูปแบบใหม่
เพราะมันเริ่มต้นด้วยตัวอักษรตัวพิมพ์ใหญ่ ดังนั้นค่าที่มีอยู่ใน
UppercaseVal จะถูกตรวจสอบเทียบกับ 3 และพิมพ์ "Not 42" | +| Bad
`val v42 = 42`
`Some(3) match {`
` case Some(v42) => println("42")`
` case _ => println("Not 42")`
`}` | "v42" ถูกตีความว่าเป็นชื่อที่ตรงกับค่า Int และแสดงค่า "42" | +| Good
`val v42 = 42`
`Some(3) match {`
`` case Some(`v42`) => println("42")``
`case _ => println("Not 42")`
`}` | "v42" กับ backticks ถูกตีความว่าเป็น v42 val ที่มีอยู่และ
แสดงค่า "Not 42" | +| Good
`val UppercaseVal = 42`
`Some(3) match {`
` case Some(UppercaseVal) => println("42")`
` case _ => println("Not 42")`
`}` | UppercaseVal ถือว่าเป็น val ที่มีอยู่ไม่ใช่ตัวแปรรูปแบบใหม่
เพราะมันเริ่มต้นด้วยตัวอักษรตัวพิมพ์ใหญ่ ดังนั้นค่าที่มีอยู่ใน
UppercaseVal จะถูกตรวจสอบเทียบกับ 3 และแสดงค่า "Not 42" | | การใช้งาน object | | | `class C(x: R)` _เหมือนกับ_
`class C(private val x: R)`
`var c = new C(4)` | ตัวสร้างพารามิเตอร์ - x มีเฉพาะในคลาส body เท่านั้น | | `class C(val x: R)`
`var c = new C(4)`
`c.x` | ตัวสร้างพารามิเตอร์ - กำหนดสมาชิกสาธารณะโดยอัตโนมัติ | diff --git a/_th/tour/automatic-closures.md b/_th/tour/automatic-closures.md deleted file mode 100644 index 1c30b693a..000000000 --- a/_th/tour/automatic-closures.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: tour -title: Automatic Closures -partof: scala-tour - -language: th ---- diff --git a/_th/tour/classes.md b/_th/tour/classes.md index 7800a5f49..3054dbcc8 100644 --- a/_th/tour/classes.md +++ b/_th/tour/classes.md @@ -11,7 +11,7 @@ next-page: traits previous-page: unified-types --- -คลาสใน Scala เป็นพิมพ์เขียวสำหรับสร้าง object ในคลาสสามารถมี method, value, ตัวแปร, type, object, +คลาสใน Scala เป็นพิมพ์เขียวสำหรับสร้าง object ในคลาสสามารถมี method, value, ตัวแปร, type, object, trait และคลาส ซึ่งเรียกรวมๆ กันว่า _members_ หรือ _สมาชิก_ ของคลาส type, object และ trait จะกล่าวถึงภายหลัง ## การกำหนดคลาส @@ -22,7 +22,7 @@ class User val user1 = new User ``` -keyword `new` ใช้เพื่อสร้าง instance ของคลาส `User` มี constructor เริ่มต้นซึ่งไม่รับค่า argument เพราะว่าไม่ได้กำหนด constructor ไว้ตอนประกาสคลาส +keyword `new` ใช้เพื่อสร้าง instance ของคลาส `User` มี constructor เริ่มต้นซึ่งไม่รับค่า argument เพราะว่าไม่ได้กำหนด constructor ไว้ตอนประกาสคลาส อย่างไรก็ตาม, เราอาจจะมี constructor และ body ของคลาส ตัวอย่างดังนี้ เป็นการประกาศคลาสสำหรับจุด (point): @@ -41,11 +41,11 @@ class Point(var x: Int, var y: Int) { val point1 = new Point(2, 3) point1.x // 2 -println(point1) // พิมพ์ (2, 3) +println(point1) // แสดงค่า (2, 3) ``` คลาส `Point` นี้มีสมาชิก 4 ตัว คือ ตัวแปร `x` และ `y` และ method `move` และ `toString` -ไม่เหมือนภาษาอื่นๆ, ซึ่ง constructor หลักจะอยู่ใน class signature `(var x: Int, var y: Int)` +ไม่เหมือนภาษาอื่นๆ, ซึ่ง constructor หลักจะอยู่ใน class signature `(var x: Int, var y: Int)` method `move` รับ argument ชนิดตัวเลข 2 ตัว และ return เป็นค่า Unit `()` ซึ่งไม่มีค่า จะมีผลลัพธ์คลายกับ `void` ในภาษาที่เหมือน Java, `toString` ในทางกลับกัน ไม่รับ argument ใดๆ แต่ return เป็นค่า `String` ซึ่งแทนที่ 
method `toString` จาก [`AnyRef`](unified-types.html) โดยจะมี keyword `override` @@ -58,7 +58,7 @@ class Point(var x: Int = 0, var y: Int = 0) val origin = new Point // x and y are both set to 0 val point1 = new Point(1) -println(point1.x) // พิมพ์ 1 +println(point1.x) // แสดงค่า 1 ``` @@ -68,13 +68,13 @@ println(point1.x) // พิมพ์ 1 ```scala mdoc:nest class Point(var x: Int = 0, var y: Int = 0) val point2 = new Point(y=2) -println(point2.y) // พิมพ์ 2 +println(point2.y) // แสดงค่า 2 ``` นี้เป็นวิธีปฏิบัติที่ดีเพื่อจะทำให้โค้ดชัดเจนมากขึ้น ## Private Members และ Getter/Setter -สมาชิกของคลาสจะเป็น public โดยค่าเริ่มต้น ใช้ access modifier `private` +สมาชิกของคลาสจะเป็น public โดยค่าเริ่มต้น ใช้ access modifier `private` เพื่อซ่อนสมาชิกนั้นจากภายนอกของคลาส ```scala mdoc:reset class Point { @@ -97,10 +97,10 @@ class Point { val point1 = new Point point1.x = 99 -point1.y = 101 // พิมพ์คำเตือน warning +point1.y = 101 // แสดงค่า "WARNING: Out of bounds" ``` -คลาส `Point` เวอร์ชันนี้ ข้อมูลจะถูกเก็บไว้ในตัวแปรชนิด private ที่ชื่อว่า `_x` และ `_y` และมี method ที่ชื่อว่า `def x` และ `def y` ทีจะใช้ในการเข้าถึงข้อมูล private เป็น getter, `def x_=` และ `def y=` -เป็น method สำหรับตรวจสอบข้อมูลและ setting ค่าของตัวแปร `_x` และ `_y` +คลาส `Point` เวอร์ชันนี้ ข้อมูลจะถูกเก็บไว้ในตัวแปรชนิด private ที่ชื่อว่า `_x` และ `_y` และมี method ที่ชื่อว่า `def x` และ `def y` ทีจะใช้ในการเข้าถึงข้อมูล private เป็น getter, `def x_=` และ `def y=` +เป็น method สำหรับตรวจสอบข้อมูลและ setting ค่าของตัวแปร `_x` และ `_y` สังเกตว่า syntax พิเศษนี้สำหรับ setter: คือ method ที่ตามด้วย `_=` ไปยังตัวระบุของ setter และ parameter ตามหลังมา constructor หลักกำหนด parameter ด้วย `val` และ `var` เป็น public อย่างไรก็ตามเพราะว่า `val` เป็นตัวแปรที่เปลี่ยนแปลงไม่ได้ (immutable) เราไม่สามารถเขียบแบบนี้ได้ diff --git a/_th/tour/named-arguments.md b/_th/tour/named-arguments.md index 161b53b50..0257732c5 100644 --- a/_th/tour/named-arguments.md +++ b/_th/tour/named-arguments.md @@ -10,3 +10,56 @@ language: th 
next-page: packages-and-imports previous-page: default-parameter-values --- + +เมื่อเราเรียกใช้ method แล้วเราสามารถระบุชื่อ argument (label the argument) สำหรับ parameter ใดๆ ได้ดังนี้: + +{% tabs named-arguments-when-good %} + +{% tab 'Scala 2 and 3' for=named-arguments-when-good %} + +```scala mdoc +def printName(first: String, last: String): Unit = + println(s"$first $last") + +printName("John", "Public") // แสดงค่า "John Public" +printName(first = "John", last = "Public") // แสดงค่า "John Public" +printName(last = "Public", first = "John") // แสดงค่า "John Public" +printName("Elton", last = "John") // แสดงค่า "Elton John" +``` + +{% endtab %} + +{% endtabs %} + +named argument นั้นมีประโยชน์เมื่อ parameter 2 ตัวมี type เดียวกัน\ +ทำให้ argument ที่เราส่งไปให้ function อาจถูกสลับกันโดยไม่ได้ตั้งใจ + +สังเกตว่าเราจะเขียน argument ที่ระบุชื่อในลำดับใดก็ได้\ +แต่ถ้า argument ไม่ได้อยู่ในลำดับของ parameter ใน function จากซ้ายไปขวา แล้ว argument ที่เหลือจะต้องระบุชื่อทั้งหมด + +ในตัวอย่างข้างล่างนี้ named argument ทำให้เราสามารถเว้น parameter `middle` ได้\ +แต่ในกรณีที่เกิด `error: positional after named argument`\ +เนื่องจาก argument ตัวแรกไม่ได้เรียงตามลำดับของ parameter (ตัวแรกไม่ใช่ parameter `first` และ argument ตัวที่ 2 เป็นต้นไปก็ไม่ได้ระบุชื่อด้วย)\ +ดังนั้น เราจะต้องระบุชื่อ argument ตั้งแต่ตัวที่ 2 เป็นต้นไป + +{% tabs named-arguments-when-error %} + +{% tab 'Scala 2 and 3' for=named-arguments-when-error %} + +```scala mdoc:fail +def printFullName(first: String, middle: String = "Q.", last: String): Unit = + println(s"$first $middle $last") + +printFullName(first = "John", last = "Public") // แสดงค่า "John Q. Public" +printFullName("John", last = "Public") // แสดงค่า "John Q. Public" +printFullName("John", middle = "Quincy", "Public") // แสดงค่า "John Quincy Public" +printFullName(last = "Public", first = "John") // แสดงค่า "John Q. 
Public" +printFullName(last = "Public", "John") // error: positional after named argument +``` + +{% endtab %} + +{% endtabs %} + +เราสามารถใช้ Named Argument กับการเรียกใช้ method ของ Java ได้\ +แต่ทำได้เฉพาะในกรณีที่ Java library นั้นถูกคอมไพล์ด้วยออพชั่น `-parameters` เท่านั้น diff --git a/_th/tour/traits.md b/_th/tour/traits.md index bb6dfddd9..2c2cd6769 100644 --- a/_th/tour/traits.md +++ b/_th/tour/traits.md @@ -173,6 +173,7 @@ val animals = ArrayBuffer.empty[Pet] animals.append(dog) animals.append(cat) animals.foreach(pet => println(pet.name)) // แสดงค่า Harry Sally + ``` {% endtab %} @@ -203,3 +204,5 @@ animals.foreach(pet => println(pet.name)) // แสดงค่า Harry Sally `trait Pet` มี abstract field `name` ซึ่ง implement ไว้ใน constructor ของคลาส `Cat` และ `Dog`\ ในบรรทัดสุดท้าย เราเรียกใช้ `pet.name` ซึ่งได้มีการ implement `name` ไว้ใน subtype ใดๆ ของ trait `Pet` แล้ว +======= + diff --git a/_tour/annotations.md b/_tour/annotations.md index 98f860ba3..a9876fab4 100644 --- a/_tour/annotations.md +++ b/_tour/annotations.md @@ -146,7 +146,7 @@ public class MyJavaClass extends TheirClass ... {% endtab %} {% endtabs %} -An annotation application in Scala looks like a constructor invocation, for instantiating a Java annotation one has to use named arguments: +An annotation application in Scala looks like a constructor invocation, but to instantiate a Java annotation one has to use named arguments: {% tabs annotations_7 %} {% tab 'Scala 2 and 3' for=annotations_7 %} diff --git a/_tour/automatic-closures.md b/_tour/automatic-closures.md deleted file mode 100644 index 2a3d889d3..000000000 --- a/_tour/automatic-closures.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -layout: tour -title: Automatic Type-Dependent Closure Construction -partof: scala-tour - -redirect_from: "/tutorials/tour/automatic-closures.html" ---- - -Scala allows parameterless function names as parameters of methods. 
When such a method is called, the actual parameters for parameterless function names are not evaluated and a nullary function is passed instead which encapsulates the computation of the corresponding parameter (so-called *call-by-name* evaluation). - -The following code demonstrates this mechanism: - -```scala mdoc -def whileLoop(cond: => Boolean)(body: => Unit): Unit = - if (cond) { - body - whileLoop(cond)(body) - } -var i = 10 -whileLoop (i > 0) { - println(i) - i -= 1 -} -``` - -The function whileLoop takes two parameters `cond` and `body`. When the function is applied, the actual parameters do not get evaluated. But whenever the formal parameters are used in the body of `whileLoop`, the implicitly created nullary functions will be evaluated instead. Thus, our method `whileLoop` implements a Java-like while-loop with a recursive implementation scheme. - -We can combine the use of [infix/postfix operators](operators.html) with this mechanism to create more complex statements (with a nice syntax). - -Here is the implementation of a loop-unless statement: - -```scala mdoc:reset -def loop(body: => Unit): LoopUnlessCond = - new LoopUnlessCond(body) -protected class LoopUnlessCond(body: => Unit) { - def unless(cond: => Boolean): Unit = { - body - if (!cond) unless(cond) - } -} -var i = 10 -loop { - println("i = " + i) - i -= 1 -} unless (i == 0) -``` - -The `loop` function just accepts a body of a loop and returns an instance of class `LoopUnlessCond` (which encapsulates this body object). Note that the body didn't get evaluated yet. Class `LoopUnlessCond` has a method `unless` which we can use as a *infix operator*. This way, we achieve a quite natural syntax for our new loop: `loop { < stats > } unless ( < cond > )`. 
- -Here's the output when `TargetTest2` gets executed: - - i = 10 - i = 9 - i = 8 - i = 7 - i = 6 - i = 5 - i = 4 - i = 3 - i = 2 - i = 1 diff --git a/_tour/by-name-parameters.md b/_tour/by-name-parameters.md index b8318783f..441aefa59 100644 --- a/_tour/by-name-parameters.md +++ b/_tour/by-name-parameters.md @@ -8,6 +8,7 @@ next-page: annotations previous-page: operators redirect_from: "/tutorials/tour/by-name-parameters.html" +redirect_from: "/tutorials/tour/automatic-closures.html" --- _By-name parameters_ are evaluated every time they are used. They won't be evaluated at all if they are unused. This is similar to replacing the by-name parameters with the passed expressions. They are in contrast to _by-value parameters_. To make a parameter called by-name, simply prepend `=>` to its type. diff --git a/_tour/higher-order-functions.md b/_tour/higher-order-functions.md index 9863069b6..7dc08ea00 100644 --- a/_tour/higher-order-functions.md +++ b/_tour/higher-order-functions.md @@ -16,7 +16,7 @@ The terminology can get a bit confusing at this point, and we use the phrase "higher order function" for both methods and functions that take functions as parameters or that return a function. -In a pure Object Oriented world a good practice is to avoid exposing methods parameterized with functions that might leak object's internal state. Leaking internal state might break the invariants of the object itself thus violating encapsulation. +In a pure Object Oriented world, a good practice is to avoid exposing methods parameterised with functions that might leak an object's internal state. Leaking internal state might break the invariants of the object itself, thus violating encapsulation. One of the most common examples is the higher-order function `map` which is available for collections in Scala. @@ -51,7 +51,7 @@ val newSalaries = salaries.map(x => x * 2) // List(40000, 140000, 80000) {% endtabs %} Notice how `x` is not declared as an Int in the above example. 
That's because the -compiler can infer the type based on the type of function map expects (see [Currying](/tour/multiple-parameter-lists.html)). An even more idiomatic way to write the same piece of code would be: +compiler can infer the type based on the type of function `map` expects (see [Currying](/tour/multiple-parameter-lists.html)). An even more idiomatic way to write the same piece of code would be: {% tabs map_example_3 %} @@ -187,7 +187,7 @@ object SalaryRaiser: The new method, `promotion`, takes the salaries plus a function of type `Double => Double` (i.e. a function that takes a Double and returns a Double) and returns the product. -Methods and functions usually express behaviours or data transformations, therefore having functions that compose based on other functions can help building generic mechanisms. Those generic operations defer to lock down the entire operation behaviour giving clients a way to control or further customize parts of the operation itself. +Methods and functions usually express behaviours or data transformations. Therefore, having functions that compose based on other functions can allow us to build more generic mechanisms. Such generic operations avoid completely locking down their behaviour in order to give clients a way to control or further customize parts of those operations. ## Functions that return functions diff --git a/_tour/package-objects.md b/_tour/package-objects.md index 52484564a..8be239c93 100644 --- a/_tour/package-objects.md +++ b/_tour/package-objects.md @@ -33,7 +33,7 @@ methods and variables. Any definitions placed at the top level of a package are considered members of the package itself. -> In Scala 2 top-level method, type and variable definitions had to be wrapped in a **package object**. +> In Scala 2, top-level method, type and variable definitions had to be wrapped in a **package object**. > These are still usable in Scala 3 for backwards compatibility. You can see how they work by switching tabs. 
{% endtab %} diff --git a/_tour/pattern-matching.md b/_tour/pattern-matching.md index 1038e6be1..a89c16775 100644 --- a/_tour/pattern-matching.md +++ b/_tour/pattern-matching.md @@ -236,6 +236,27 @@ def goIdle(device: Device): String = device match `def goIdle` has a different behavior depending on the type of `Device`. This is useful when the case needs to call a method on the pattern. It is a convention to use the first letter of the type as the case identifier (`p` and `c` in this case). +## Binding matched patterns to variables +You can use variable binding to get type-dependent behavior while simultaneously extracting fields from the matched pattern. + +{% tabs pattern-matching-variable-binding class=tabs-scala-version %} +{% tab 'Scala 2' for=pattern-matching-variable-binding %} +```scala mdoc +def goIdleWithModel(device: Device): String = device match { + case p @ Phone(model) => s"$model: ${p.screenOff}" + case c @ Computer(model) => s"$model: ${c.screenSaverOn}" +} +``` +{% endtab %} +{% tab 'Scala 3' for=pattern-matching-variable-binding %} +```scala +def goIdleWithModel(device: Device): String = device match + case p @ Phone(model) => s"$model: ${p.screenOff}" + case c @ Computer(model) => s"$model: ${c.screenSaverOn}" +``` +{% endtab %} +{% endtabs %} + ## Sealed types You may have noticed that in the examples above the base types are qualified diff --git a/_tour/tour-of-scala.md b/_tour/tour-of-scala.md index 59e4072e3..49f0d2b7e 100644 --- a/_tour/tour-of-scala.md +++ b/_tour/tour-of-scala.md @@ -28,7 +28,7 @@ Scala is a modern multi-paradigm programming language designed to express common Scala is a pure object-oriented language in the sense that [every value is an object](unified-types.html). Types and behaviors of objects are described by [classes](classes.html) and [traits](traits.html). 
Classes can be extended by subclassing, and by using a flexible [mixin-based composition](mixin-class-composition.html) mechanism as a clean replacement for multiple inheritance. ## Scala is functional ## -Scala is also a functional language in the sense that [every function is a value](unified-types.html). Scala provides a [lightweight syntax](basics.html#functions) for defining anonymous functions, it supports [higher-order functions](higher-order-functions.html), it allows functions to be [nested](nested-functions.html), and it supports [currying](multiple-parameter-lists.html). Scala's [case classes](case-classes.html) and its built-in support for [pattern matching](pattern-matching.html) provide the functionality of algebraic types, which are used in many functional languages. [Singleton objects](singleton-objects.html) provide a convenient way to group functions that aren't members of a class. +Scala is also a functional language in the sense that [every function is a value](unified-types.html). Scala provides a [lightweight syntax](basics.html#functions) for defining anonymous functions, supports [higher-order functions](higher-order-functions.html), allows functions to be [nested](nested-functions.html), and supports [currying](multiple-parameter-lists.html). Scala's [case classes](case-classes.html) and its built-in support for [pattern matching](pattern-matching.html) provide the functionality of algebraic types, which are used in many functional languages. [Singleton objects](singleton-objects.html) provide a convenient way to group functions that aren't members of a class. Furthermore, Scala's notion of pattern matching naturally extends to the [processing of XML data](https://github.com/scala/scala-xml/wiki/XML-Processing) with the help of [right-ignoring sequence patterns](regular-expression-patterns.html), by way of general extension via [extractor objects](extractor-objects.html). 
In this context, [for comprehensions](for-comprehensions.html) are useful for formulating queries. These features make Scala ideal for developing applications like web services. diff --git a/_tour/traits.md b/_tour/traits.md index 3ead3d397..f719f9674 100644 --- a/_tour/traits.md +++ b/_tour/traits.md @@ -98,7 +98,7 @@ class IntIterator(to: Int) extends Iterator[Int]: 0 end IntIterator -val iterator = new IntIterator(10) +val iterator = IntIterator(10) iterator.next() // returns 0 iterator.next() // returns 1 ``` diff --git a/_tour/unified-types.md b/_tour/unified-types.md index 191dc28c1..a6f0e9c0d 100644 --- a/_tour/unified-types.md +++ b/_tour/unified-types.md @@ -57,6 +57,8 @@ true Value types can be cast in the following way: Scala Type Hierarchy +Note that `Long` to `Float` conversion is deprecated in new versions of Scala, because of the potential precision lost. + For example: {% tabs unified-types-2 %} diff --git a/_tour/variances.md b/_tour/variances.md index 87fc6029a..706a28ce1 100644 --- a/_tour/variances.md +++ b/_tour/variances.md @@ -112,7 +112,7 @@ From this, we have to conclude that `Box[Cat]` and `Box[Animal]` can't have a su The problem we ran in to above, is that because we could put a Dog in an Animal Box, a Cat Box can't be an Animal Box. -But what if we couldn't put a Dog in the box? Then we could just get our Cat back out and that's not a problem, so than it could follow the subtyping relationship. It turns out, that's indeed something we can do. +But what if we couldn't put a Dog in the box? Then, we could just get our Cat back out without a problem, and it would adhere to the subtyping relationship. It turns out that that's possible to do. 
{% tabs covariance_1 class=tabs-scala-version %} {% tab 'Scala 2' for=covariance_1 %} diff --git a/_uk/getting-started/index.md b/_uk/getting-started/install-scala.md similarity index 99% rename from _uk/getting-started/index.md rename to _uk/getting-started/install-scala.md index e8275ada9..9725b362e 100644 --- a/_uk/getting-started/index.md +++ b/_uk/getting-started/install-scala.md @@ -75,7 +75,8 @@ _Scastie_ це онлайн “пісочниця”, де ви можете е Перевірте ваші налаштування виконавши команду `scala -version`, яка має вивести: ```bash $ scala -version -Scala code runner version {{site.scala-3-version}} -- Copyright 2002-2022, LAMP/EPFL +Scala code runner version: 1.4.3 +Scala version (default): {{site.scala-3-version}} ``` Якщо це не спрацювало, необхідно завершити сеанс та зайти в систему знову (або перезавантажити), щоб зміни застосувались на вашій системі. {% endaltDetails %} diff --git a/_uk/index.md b/_uk/index.md index 375daaba5..6242b6e80 100644 --- a/_uk/index.md +++ b/_uk/index.md @@ -16,7 +16,7 @@ sections: - title: "Початок роботи" description: "Встанови Scala на свій комп'ютер і почни писати код Scala!" icon: "fa fa-rocket" - link: /uk/getting-started/index.html + link: /uk/getting-started/install-scala.html - title: "Екскурсія по Скала" description: "Короткі введення в основні особливості мови." icon: "fa fa-flag" diff --git a/_zh-cn/overviews/core/implicit-classes.md b/_zh-cn/overviews/core/implicit-classes.md index 6999f419a..e7ec62cad 100644 --- a/_zh-cn/overviews/core/implicit-classes.md +++ b/_zh-cn/overviews/core/implicit-classes.md @@ -61,7 +61,7 @@ Scala 2.10引入了一种叫做隐式类的新特性。隐式类指的是用impl 2. 构造函数只能携带一个非隐式参数。 ```` - implicit class RichDate(date: java.util.Date) // 正确! + implicit class RichDate(date: java.time.LocalDate) // 正确! implicit class Indexer[T](collecton: Seq[T], index: Int) // 错误! implicit class Indexer[T](collecton: Seq[T])(implicit index: Index) // 正确! 
```` diff --git a/_zh-cn/overviews/scala3-book/taste-intro.md b/_zh-cn/overviews/scala3-book/taste-intro.md index 1c0a5a6c3..0d2f0fdf5 100644 --- a/_zh-cn/overviews/scala3-book/taste-intro.md +++ b/_zh-cn/overviews/scala3-book/taste-intro.md @@ -24,4 +24,4 @@ permalink: "/zh-cn/scala3/book/:title.html" > 或者,您可以使用 [Scastie](https://scastie.scala-lang.org) 在 Web 浏览器中运行这些示例,这是一个完全在线的编辑器和 Scala 的代码运行器。 [reference]: {{ site.scala3ref }}/overview.html -[get-started]: {% link _overviews/getting-started/index.md %} +[get-started]: {% link _overviews/getting-started/install-scala.md %} diff --git a/_zh-cn/tour/automatic-closures.md b/_zh-cn/tour/automatic-closures.md deleted file mode 100644 index b51652c94..000000000 --- a/_zh-cn/tour/automatic-closures.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: tour -title: Automatic Closures -partof: scala-tour - -language: zh-cn ---- diff --git a/api/all.md b/api/all.md index 334ecac39..db37824fc 100644 --- a/api/all.md +++ b/api/all.md @@ -8,22 +8,22 @@ redirect_from: ## Latest releases -* Scala 3.4.2 - * [Library API](https://www.scala-lang.org/api/3.4.2/) -* Scala 3.3.3 LTS - * [Library API](https://www.scala-lang.org/api/3.3.3/) -* Scala 2.13.14 - * [Library API](https://www.scala-lang.org/api/2.13.14/) - * [Compiler API](https://www.scala-lang.org/api/2.13.14/scala-compiler/scala/) - * [Reflection API](https://www.scala-lang.org/api/2.13.14/scala-reflect/scala/reflect/) -* Scala 2.12.19 - * [Library API](https://www.scala-lang.org/api/2.12.19/) - * [Compiler API](https://www.scala-lang.org/api/2.12.19/scala-compiler/scala/) - * [Reflection API](https://www.scala-lang.org/api/2.12.19/scala-reflect/scala/reflect/) +* Scala {{site.scala-3-version}} + * [Library API](https://www.scala-lang.org/api/{{site.scala-3-version}}/) +* Scala 3.3.4 LTS + * [Library API](https://www.scala-lang.org/api/3.3.4/) +* Scala 2.13.15 + * [Library API](https://www.scala-lang.org/api/2.13.15/) + * [Compiler 
API](https://www.scala-lang.org/api/2.13.15/scala-compiler/scala/) + * [Reflection API](https://www.scala-lang.org/api/2.13.15/scala-reflect/scala/reflect/) +* Scala 2.12.20 + * [Library API](https://www.scala-lang.org/api/2.12.20/) + * [Compiler API](https://www.scala-lang.org/api/2.12.20/scala-compiler/scala/) + * [Reflection API](https://www.scala-lang.org/api/2.12.20/scala-reflect/scala/reflect/) * Scala Modules - * [XML API](https://www.scala-lang.org/api/2.12.19/scala-xml/scala/xml/) - * [Parser Combinators API](https://www.scala-lang.org/api/2.12.19/scala-parser-combinators/scala/util/parsing/) - * [Swing API](https://www.scala-lang.org/api/2.12.19/scala-swing/scala/swing/) + * [XML API](https://www.scala-lang.org/api/2.12.20/scala-xml/scala/xml/) + * [Parser Combinators API](https://www.scala-lang.org/api/2.12.20/scala-parser-combinators/scala/util/parsing/) + * [Swing API](https://www.scala-lang.org/api/2.12.20/scala-swing/scala/swing/) * Scala 2.11.12 * [Library API](https://www.scala-lang.org/api/2.11.12/) * [Compiler API](https://www.scala-lang.org/api/2.11.12/scala-compiler/) @@ -64,10 +64,20 @@ https://scala-ci.typesafe.com/artifactory/scala-integration/org/scala-lang/ ## Previous releases +* Scala 3.5.1 + * [Library API](https://www.scala-lang.org/api/3.5.1/) +* Scala 3.5.0 + * [Library API](https://www.scala-lang.org/api/3.5.0/) +* Scala 3.4.3 + * [Library API](https://www.scala-lang.org/api/3.4.3/) +* Scala 3.4.2 + * [Library API](https://www.scala-lang.org/api/3.4.2/) * Scala 3.4.1 * [Library API](https://www.scala-lang.org/api/3.4.1/) * Scala 3.4.0 * [Library API](https://www.scala-lang.org/api/3.4.0/) +* Scala 3.3.3 LTS + * [Library API](https://www.scala-lang.org/api/3.3.3/) * Scala 3.3.1 LTS * [Library API](https://www.scala-lang.org/api/3.3.1/) * Scala 3.3.0 LTS @@ -92,6 +102,10 @@ https://scala-ci.typesafe.com/artifactory/scala-integration/org/scala-lang/ * [Library API](https://www.scala-lang.org/api/3.0.1/) * Scala 3.0.0 * [Library 
API](https://www.scala-lang.org/api/3.0.0/) +* Scala 2.13.14 + * [Library API](https://www.scala-lang.org/api/2.13.14/) + * [Compiler API](https://www.scala-lang.org/api/2.13.14/scala-compiler/scala/) + * [Reflection API](https://www.scala-lang.org/api/2.13.14/scala-reflect/scala/reflect/) * Scala 2.13.13 * [Library API](https://www.scala-lang.org/api/2.13.13/) * [Compiler API](https://www.scala-lang.org/api/2.13.13/scala-compiler/scala/) @@ -148,6 +162,14 @@ https://scala-ci.typesafe.com/artifactory/scala-integration/org/scala-lang/ * [Library API](https://www.scala-lang.org/api/2.13.0/) * [Compiler API](https://www.scala-lang.org/api/2.13.0/scala-compiler/scala/) * [Reflection API](https://www.scala-lang.org/api/2.13.0/scala-reflect/scala/reflect/) +* Scala 2.12.19 + * [Library API](https://www.scala-lang.org/api/2.12.19/) + * [Compiler API](https://www.scala-lang.org/api/2.12.19/scala-compiler/scala/) + * [Reflection API](https://www.scala-lang.org/api/2.12.19/scala-reflect/scala/reflect/) + * Scala Modules + * [XML API](https://www.scala-lang.org/api/2.12.19/scala-xml/scala/xml/) + * [Parser Combinators API](https://www.scala-lang.org/api/2.12.19/scala-parser-combinators/scala/util/parsing/) + * [Swing API](https://www.scala-lang.org/api/2.12.19/scala-swing/scala/swing/) * Scala 2.12.18 * [Library API](https://www.scala-lang.org/api/2.12.18/) * [Compiler API](https://www.scala-lang.org/api/2.12.18/scala-compiler/scala/) diff --git a/docker-compose.yml b/docker-compose.yml index 98cd0dda0..75b187639 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,3 @@ -version: '2' - services: jekyll: user: "${UID}:${GID}" diff --git a/index.md b/index.md index fc4605851..22cbc790c 100644 --- a/index.md +++ b/index.md @@ -17,7 +17,7 @@ sections: - title: "Getting Started" description: "Install Scala on your computer and start writing some Scala code!" 
icon: "fa fa-rocket" - link: /getting-started.html + link: /getting-started/install-scala.html - title: "Tour of Scala" description: "Bite-sized introductions to core language features." icon: "fa fa-flag" diff --git a/resources/images/getting-started/IntelliJScala.png b/resources/images/getting-started/IntelliJScala.png new file mode 100644 index 000000000..c567da38d Binary files /dev/null and b/resources/images/getting-started/IntelliJScala.png differ diff --git a/resources/images/getting-started/VSCodeMetals.png b/resources/images/getting-started/VSCodeMetals.png new file mode 100644 index 000000000..bdc509072 Binary files /dev/null and b/resources/images/getting-started/VSCodeMetals.png differ diff --git a/scripts/run-mdoc.sh b/scripts/run-mdoc.sh index 4f3a7da6e..6d729df4f 100755 --- a/scripts/run-mdoc.sh +++ b/scripts/run-mdoc.sh @@ -1,11 +1,11 @@ #!/bin/bash set -eux -cs launch --scala-version 2.13.14 org.scalameta::mdoc:2.3.3 -- \ +cs launch --scala-version 2.13.15 org.scalameta::mdoc:2.3.3 -- \ --in . \ --out /tmp/mdoc-out/ \ --classpath \ - $(cs fetch --scala-version 2.13.14 -p \ + $(cs fetch --scala-version 2.13.15 -p \ com.chuusai::shapeless:2.3.10 \ org.scala-lang::toolkit:0.1.7 \ org.scala-lang::toolkit-test:0.1.7 \