diff --git a/.eslintrc.yaml b/.eslintrc.yaml
index 3bded1a7ce0ec7..6643b4649d0da5 100644
--- a/.eslintrc.yaml
+++ b/.eslintrc.yaml
@@ -10,6 +10,11 @@ env:
parserOptions:
ecmaVersion: 2017
+overrides:
+ - files: ["doc/api/esm.md", "*.mjs"]
+ parserOptions:
+ sourceType: module
+
rules:
# Possible Errors
# http://eslint.org/docs/rules/#possible-errors
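A brief aside on the override above: `sourceType: module` is what lets ESLint parse `import`/`export` syntax, so `*.mjs` files and the code blocks in `doc/api/esm.md` can be linted. An illustrative snippet (not a file in this diff) of the syntax that only parses under that setting:

```js
// Only parses when ESLint's parserOptions.sourceType is 'module',
// which the override above now applies to *.mjs and doc/api/esm.md.
import { format } from 'util';

export function greet(name) {
  return format('hello, %s', name);
}
```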
diff --git a/.gitignore b/.gitignore
index dea969504a90f1..f6c0377bd29d3f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@
!.gitignore
!.gitkeep
!.mailmap
+!.nycrc
!.remarkrc
core
diff --git a/.nycrc b/.nycrc
new file mode 100644
index 00000000000000..9e34a976e21ef6
--- /dev/null
+++ b/.nycrc
@@ -0,0 +1,6 @@
+{
+ "exclude": [
+ "**/internal/process/write-coverage.js"
+ ],
+ "reporter": ["html", "text"]
+}
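For context, the `exclude` glob above keeps nyc from instrumenting the module that writes the coverage data itself. A minimal sketch of how such a glob matches, assuming minimatch-style semantics (the `minimatch` require here is purely illustrative and not part of this change):

```js
// Illustrative check of the .nycrc exclude glob: the coverage writer is
// skipped, while other internal modules are still instrumented.
const minimatch = require('minimatch');

const exclude = '**/internal/process/write-coverage.js';
console.log(minimatch('lib/internal/process/write-coverage.js', exclude)); // true
console.log(minimatch('lib/internal/process/next_tick.js', exclude));      // false
```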
diff --git a/BUILDING.md b/BUILDING.md
index 47630ddf150216..a32b30bb62745c 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -93,6 +93,8 @@ More Developer Tools...`. This step will install `clang`, `clang++`, and
* You may want to setup [firewall rules](tools/macosx-firewall.sh)
to avoid popups asking to accept incoming network connections when running tests:
+If the path to your build directory contains a space, the build will likely fail.
+
```console
$ sudo ./tools/macosx-firewall.sh
```
@@ -126,6 +128,25 @@ To run the tests:
$ make test
```
+To run the tests and generate code coverage reports:
+
+```console
+$ ./configure --coverage
+$ make coverage
+```
+
+This will generate coverage reports for both JavaScript and C++ tests. (If you
+only want to run the JavaScript tests, you do not need to run
+`./configure --coverage` first.)
+
+The `make coverage` command downloads some tools to the project root directory
+and overwrites the `lib/` directory. To clean up after generating the coverage
+reports:
+
+```console
+$ make coverage-clean
+```
+
To build the documentation:
This will build Node.js first (if necessary) and then use it to build the docs:
@@ -134,7 +155,7 @@ This will build Node.js first (if necessary) and then use it to build the docs:
$ make doc
```
-If you have an existing Node.js you can build just the docs with:
+If you have an existing Node.js build, you can build just the docs with:
```console
$ NODE=/path/to/node make doc-only
@@ -170,11 +191,14 @@ Prerequisites:
including the Community edition (remember to select
"Common Tools for Visual C++ 2015" feature during installation).
* [Visual Studio 2017](https://www.visualstudio.com/downloads/), any edition (including the Build Tools SKU).
- **Required Components:** "MSbuild", "VC++ 2017 v141 toolset" and one of the Windows SDKs (10 or 8.1).
+ **Required Components:** "MSbuild", "VC++ 2017 v141 toolset" and at least one of the Windows SDKs.
+ *Note*: For "Windows 10 SDK (10.0.15063.0)" only the "Desktop C++ x86 and x64" flavor is required.
* Basic Unix tools required for some tests,
[Git for Windows](http://git-scm.com/download/win) includes Git Bash
and tools which can be included in the global `PATH`.
+If the path to your build directory contains a space, the build will likely fail.
+
```console
> .\vcbuild
```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba634b10e3f358..0b1e4c96ca8652 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,7 +27,9 @@ release.
-8.3.0
+8.5.0
+8.4.0
+8.3.0
8.2.1
8.2.0
8.1.4
diff --git a/COLLABORATOR_GUIDE.md b/COLLABORATOR_GUIDE.md
index 116624a79aefd0..5aa0fb49cd68a6 100644
--- a/COLLABORATOR_GUIDE.md
+++ b/COLLABORATOR_GUIDE.md
@@ -8,7 +8,7 @@
- [Internal vs. Public API](#internal-vs-public-api)
- [Breaking Changes](#breaking-changes)
- [Deprecations](#deprecations)
- - [Involving the CTC](#involving-the-ctc)
+ - [Involving the TSC](#involving-the-tsc)
* [Landing Pull Requests](#landing-pull-requests)
- [Technical HOWTO](#technical-howto)
- [I Just Made a Mistake](#i-just-made-a-mistake)
@@ -30,7 +30,7 @@ pull requests to the Node.js project.
Collaborators should feel free to take full responsibility for
managing issues and pull requests they feel qualified to handle, as
long as this is done while being mindful of these guidelines, the
-opinions of other Collaborators and guidance of the CTC.
+opinions of other Collaborators and guidance of the TSC.
Collaborators may **close** any issue or pull request they believe is
not relevant for the future of the Node.js project. Where this is
@@ -46,7 +46,7 @@ necessary.
All modifications to the Node.js code and documentation should be
performed via GitHub pull requests, including modifications by
-Collaborators and CTC members.
+Collaborators and TSC members.
All pull requests must be reviewed and accepted by a Collaborator with
sufficient expertise who is able to take full responsibility for the
@@ -70,16 +70,16 @@ For non-breaking changes, if there is no disagreement amongst
Collaborators, a pull request may be landed given appropriate review.
Where there is discussion amongst Collaborators, consensus should be
sought if possible. The lack of consensus may indicate the need to
-elevate discussion to the CTC for resolution (see below).
+elevate discussion to the TSC for resolution (see below).
Breaking changes (that is, pull requests that require an increase in
the major version number, known as `semver-major` changes) must be
-elevated for review by the CTC. This does not necessarily mean that the
-PR must be put onto the CTC meeting agenda. If multiple CTC members
+elevated for review by the TSC. This does not necessarily mean that the
+PR must be put onto the TSC meeting agenda. If multiple TSC members
approve (`LGTM`) the PR and no Collaborators oppose the PR, it can be
-landed. Where there is disagreement among CTC members or objections
+landed. Where there is disagreement among TSC members or objections
from one or more Collaborators, `semver-major` pull requests should be
-put on the CTC meeting agenda.
+put on the TSC meeting agenda.
All bugfixes require a test case which demonstrates the defect. The
test should *fail* before the change, and *pass* after the change.
@@ -150,7 +150,7 @@ Exception to each of these points can be made if use or behavior of a given
internal API can be demonstrated to be sufficiently relied upon by the Node.js
ecosystem such that any changes would cause too much breakage. The threshold
for what qualifies as "too much breakage" is to be decided on a case-by-case
-basis by the CTC.
+basis by the TSC.
If it is determined that a currently undocumented object, property, method,
argument, or event *should* be documented, then a pull request adding the
@@ -171,7 +171,7 @@ making and reviewing such changes. Before landing such commits, an effort
must be made to determine the potential impact of the change in the ecosystem
by analyzing current use and by validating such changes through ecosystem
testing using the [Canary in the Goldmine](https://github.com/nodejs/citgm)
-tool. If a change cannot be made without ecosystem breakage, then CTC review is
+tool. If a change cannot be made without ecosystem breakage, then TSC review is
required before landing the change as anything less than semver-major.
If a determination is made that a particular internal API (for instance, an
@@ -183,7 +183,7 @@ breaking changes are made.
### Breaking Changes
Backwards-incompatible changes may land on the master branch at any time after
-sufficient review by collaborators and approval of at least two CTC members.
+sufficient review by collaborators and approval of at least two TSC members.
Examples of breaking changes include, but are not necessarily limited to,
removal or redefinition of existing API arguments, changing return values
@@ -209,7 +209,7 @@ Exception to this rule is given in the following cases:
Such changes *must* be handled as semver-major changes but MAY be landed
without a [Deprecation cycle](#deprecation-cycle).
-From time-to-time, in particularly exceptional cases, the CTC may be asked to
+From time-to-time, in particularly exceptional cases, the TSC may be asked to
consider and approve additional exceptions to this rule.
Purely additive changes (e.g. adding new events to EventEmitter
@@ -244,7 +244,7 @@ Specifically:
* Resolving critical security issues.
* Fixing a critical bug (e.g. fixing a memory leak) requires a breaking
change.
- * There is CTC consensus that the change is required.
+ * There is TSC consensus that the change is required.
* If a breaking commit does accidentally land in a Current or LTS branch, an
attempt to fix the issue will be made before the next release; If no fix is
provided then the commit will be reverted.
@@ -263,6 +263,32 @@ multiple commits. Commit metadata and the reason for the revert should be
appended. Commit message rules about line length and subsystem can be ignored.
A Pull Request should be raised and approved like any other change.
+### Introducing New Modules
+
+Semver-minor commits that introduce new core modules should be treated with
+extra care.
+
+The name of the new core module should not conflict with any existing
+module in the ecosystem unless a written agreement with the owner of those
+modules is reached to transfer ownership.
+
+If the new module name is free, a Collaborator should register a placeholder
+in the module registry as soon as possible, linking to the pull request that
+introduces the new core module.
+
+Pull requests introducing new core modules:
+
+* Must be left open for at least one week for review.
+* Must be labeled using the `tsc-review` label.
+* Must have signoff from at least two TSC members.
+
+New core modules must be landed with a [Stability Index][] of Experimental,
+and must remain Experimental until a semver-major release.
+
+For new modules that involve significant effort, non-trivial additions to
+Node.js or significant new capabilities, an [Enhancement Proposal][] is
+recommended but not required.
+
### Deprecations
Deprecation refers to the identification of Public APIs that should no longer
@@ -294,7 +320,7 @@ operation of running code and therefore should not be viewed as breaking
changes.
Runtime Deprecations and End-of-life APIs (internal or public) *must* be
-handled as semver-major changes unless there is CTC consensus to land the
+handled as semver-major changes unless there is TSC consensus to land the
deprecation as a semver-minor.
All Documentation-Only and Runtime deprecations will be assigned a unique
@@ -320,10 +346,10 @@ request adding the deprecation lands in master). All deprecations included in
a Node.js release should be listed prominently in the "Notable Changes" section
of the release notes.
-### Involving the CTC
+### Involving the TSC
-Collaborators may opt to elevate pull requests or issues to the CTC for
-discussion by assigning the `ctc-review` label. This should be done
+Collaborators may opt to elevate pull requests or issues to the TSC for
+discussion by assigning the `tsc-review` label. This should be done
where a pull request:
- has a significant impact on the codebase,
@@ -331,7 +357,7 @@ where a pull request:
- has failed to reach consensus amongst the Collaborators who are
actively participating in the discussion.
-The CTC should serve as the final arbiter where required.
+The TSC should serve as the final arbiter where required.
## Landing Pull Requests
@@ -512,9 +538,36 @@ your pull request shows the purple merged status then you should still
add the "Landed in .." comment if you added
multiple commits.
+### Troubleshooting
+
+Sometimes, when running `git push upstream master`, you may get an error message
+like this:
+
+```console
+To https://github.com/nodejs/node
+ ! [rejected] master -> master (fetch first)
+error: failed to push some refs to 'https://github.com/nodejs/node'
+hint: Updates were rejected because the remote contains work that you do
+hint: not have locally. This is usually caused by another repository pushing
+hint: to the same ref. You may want to first integrate the remote changes
+hint: (e.g., 'git pull ...') before pushing again.
+hint: See the 'Note about fast-forwards' in 'git push --help' for details.
+```
+
+That means a commit has landed since your last rebase against `upstream/master`.
+To fix this, fetch, rebase, run the tests again (to make sure no interactions
+between your changes and the new changes cause any problems), and push again:
+
+```sh
+git fetch upstream
+git rebase upstream/master
+make -j4 test
+git push upstream master
+```
+
### I Just Made a Mistake
-* Ping a CTC member.
+* Ping a TSC member.
* `#node-dev` on freenode
* With `git`, there's a way to override remote trees by force pushing
(`git push -f`). This should generally be seen as forbidden (since
@@ -543,9 +596,9 @@ Once a Current branch enters LTS, changes in that branch are limited to bug
fixes, security updates, possible npm updates, documentation updates, and
certain performance improvements that can be demonstrated to not break existing
applications. Semver-minor changes are only permitted if required for bug fixes
-and then only on a case-by-case basis with LTS WG and possibly Core Technical
-Committee (CTC) review. Semver-major changes are permitted only if required for
-security related fixes.
+and then only on a case-by-case basis with LTS WG and possibly Technical
+Steering Committee (TSC) review. Semver-major changes are permitted only if
+required for security related fixes.
Once a Current branch moves into Maintenance mode, only **critical** bugs,
**critical** security fixes, and documentation updates will be permitted.
@@ -553,7 +606,7 @@ Once a Current branch moves into Maintenance mode, only **critical** bugs,
#### Landing semver-minor commits in LTS
The default policy is to not land semver-minor or higher commits in any LTS
-branch. However, the LTS WG or CTC can evaluate any individual semver-minor
+branch. However, the LTS WG or TSC can evaluate any individual semver-minor
commit and decide whether a special exception ought to be made. It is
expected that such exceptions would be evaluated, in part, on the scope
and impact of the changes on the code, the risk to ecosystem stability
@@ -563,7 +616,7 @@ commit will have for the ecosystem.
Any collaborator who feels a semver-minor commit should be landed in an LTS
branch should attach the `lts-agenda` label to the pull request. The LTS WG
will discuss the issue and, if necessary, will escalate the issue up to the
-CTC for further discussion.
+TSC for further discussion.
#### How are LTS Branches Managed?
@@ -615,3 +668,5 @@ release. This process of making a release will be a collaboration between the
LTS working group and the Release team.
[backporting guide]: doc/guides/backporting-to-release-lines.md
+[Stability Index]: doc/api/documentation.md#stability-index
+[Enhancement Proposal]: https://github.com/nodejs/node-eps
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index d7d60671ac1ad2..20b75bd9718be2 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -1,17 +1,30 @@
# Node.js Project Governance
-The Node.js project is governed by its Collaborators, including a Core Technical
-Committee (CTC) which is responsible for high-level guidance of the project.
+The Node.js project is governed by its Collaborators, including a Technical
+Steering Committee (TSC) which is responsible for high-level guidance of the
+project.
## Collaborators
The [nodejs/node](https://github.com/nodejs/node) GitHub repository is
-maintained by Collaborators who are added by the CTC on an ongoing basis.
+maintained by Collaborators who are added by the TSC on an ongoing basis.
-Individuals identified by the CTC as making significant and valuable
-contributions are made Collaborators and given commit access to the project. If
-you make a significant contribution and are not considered for commit access,
-log an issue or contact a CTC member directly.
+Individuals identified by the TSC as making significant and valuable
+contributions across any Node.js repository may be made Collaborators and given
+commit access to the project. Activities taken into consideration include (but
+are not limited to) the quality of:
+
+* code commits and pull requests
+* documentation commits and pull requests
+* comments on issues and pull requests
+* contributions to the Node.js website
+* assistance provided to end users and novice contributors
+* participation in Working Groups
+* other participation in the wider Node.js community
+
+If individuals making valuable contributions do not believe they have been
+considered for commit access, they may log an issue or contact a TSC member
+directly.
Modifications of the contents of the nodejs/node repository are made on
a collaborative basis. Anybody with a GitHub account may propose a
@@ -28,13 +41,13 @@ be accepted unless:
* Discussions and/or additional changes result in no Collaborators objecting to
the change. Previously-objecting Collaborators do not necessarily have to
sign-off on the change, but they should not be opposed to it.
-* The change is escalated to the CTC and the CTC votes to approve the change.
+* The change is escalated to the TSC and the TSC votes to approve the change.
This should only happen if disagreements between Collaborators cannot be
resolved through discussion.
Collaborators may opt to elevate significant or controversial modifications to
-the CTC by assigning the `ctc-review` label to a pull request or issue. The
-CTC should serve as the final arbiter where required.
+the TSC by assigning the `tsc-review` label to a pull request or issue. The
+TSC should serve as the final arbiter where required.
* [Current list of Collaborators](./README.md#current-project-team-members)
* [A guide for Collaborators](./COLLABORATOR_GUIDE.md)
@@ -49,13 +62,13 @@ Typical activities of a Collaborator include:
* participation in working groups
* merging pull requests
-The CTC periodically reviews the Collaborator list to identify inactive
+The TSC periodically reviews the Collaborator list to identify inactive
Collaborators. Past Collaborators are typically given _Emeritus_ status. Emeriti
-may request that the CTC restore them to active status.
+may request that the TSC restore them to active status.
-## Core Technical Committee
+## Technical Steering Committee
-The Core Technical Committee (CTC) has final authority over this project
+The Technical Steering Committee (TSC) has final authority over this project
including:
* Technical direction
@@ -65,59 +78,19 @@ including:
* Conduct guidelines
* Maintaining the list of additional Collaborators
-* [Current list of CTC members](./README.md#current-project-team-members)
-
-## CTC Membership
-
-CTC seats are not time-limited. There is no fixed size of the CTC. The CTC
-should be of such a size as to ensure adequate coverage of important areas of
-expertise balanced with the ability to make decisions efficiently.
-
-There is no specific set of requirements or qualifications for CTC
-membership beyond these rules.
-
-The CTC may add additional members to the CTC by a standard CTC motion.
-
-When a CTC member's participation in [CTC activities](#ctc-activities) has
-become minimal for a sustained period of time, the CTC will request that the
-member either indicate an intention to increase participation or voluntarily
-resign.
+* [Current list of TSC members](./README.md#current-project-team-members)
-CTC members may only be removed by voluntary resignation or through a standard
-CTC motion.
+The operations of the TSC are governed by the [TSC Charter][] as approved by
+the Node.js Foundation Board of Directors.
-Changes to CTC membership should be posted in the agenda, and may be
-suggested as any other agenda item (see [CTC Meetings](#ctc-meetings) below).
+### TSC Meetings
-No more than 1/3 of the CTC members may be affiliated with the same
-employer. If removal or resignation of a CTC member, or a change of
-employment by a CTC member, creates a situation where more than 1/3 of
-the CTC membership shares an employer, then the situation must be
-immediately remedied by the resignation or removal of one or more CTC
-members affiliated with the over-represented employer(s).
-
-### CTC Activities
-
-Typical activities of a CTC member include:
-
-* attending the weekly meeting
-* commenting on the weekly CTC meeting issue and issues labeled `ctc-review`
-* participating in CTC email threads
-* volunteering for tasks that arise from CTC meetings and related discussions
-* other activities (beyond those typical of Collaborators) that facilitate the
- smooth day-to-day operation of the Node.js project
-
-Note that CTC members are also Collaborators and therefore typically perform
-Collaborator activities as well.
-
-### CTC Meetings
-
-The CTC meets weekly in a voice conference call. The meeting is run by a
-designated meeting chair approved by the CTC. Each meeting is streamed on
+The TSC meets regularly in a voice conference call. The meeting is run by a
+designated meeting chair approved by the TSC. Each meeting is streamed on
YouTube.
-Items are added to the CTC agenda which are considered contentious or
-are modifications of governance, contribution policy, CTC membership,
+Items are added to the TSC agenda which are considered contentious or
+are modifications of governance, contribution policy, TSC membership,
or release process.
The intention of the agenda is not to approve or review all patches.
@@ -125,49 +98,40 @@ That should happen continuously on GitHub and be handled by the larger
group of Collaborators.
Any community member or contributor can ask that something be reviewed
-by the CTC by logging a GitHub issue. Any Collaborator, CTC member, or the
-meeting chair can bring the issue to the CTC's attention by applying the
-`ctc-review` label. If consensus-seeking among CTC members fails for a
-particular issue, it may be added to the CTC meeting agenda by adding the
-`ctc-agenda` label.
-
-Prior to each CTC meeting, the meeting chair will share the agenda with
-members of the CTC. CTC members can also add items to the agenda at the
-beginning of each meeting. The meeting chair and the CTC cannot veto or remove
+by the TSC by logging a GitHub issue. Any Collaborator, TSC member, or the
+meeting chair can bring the issue to the TSC's attention by applying the
+`tsc-review` label. If consensus-seeking among TSC members fails for a
+particular issue, it may be added to the TSC meeting agenda by adding the
+`tsc-agenda` label.
+
+Prior to each TSC meeting, the meeting chair will share the agenda with
+members of the TSC. TSC members can also add items to the agenda at the
+beginning of each meeting. The meeting chair and the TSC cannot veto or remove
items.
-The CTC may invite persons or representatives from certain projects to
-participate in a non-voting capacity.
+The TSC may invite additional persons to participate in a non-voting capacity.
The meeting chair is responsible for ensuring that minutes are taken and that a
pull request with the minutes is submitted after the meeting.
Due to the challenges of scheduling a global meeting with participants in
-several timezones, the CTC will seek to resolve as many agenda items as possible
+several timezones, the TSC will seek to resolve as many agenda items as possible
outside of meetings using
-[the CTC issue tracker](https://github.com/nodejs/CTC/issues). The process in
+[the TSC issue tracker](https://github.com/nodejs/TSC/issues). The process in
the issue tracker is:
-* A CTC member opens an issue explaining the proposal/issue and @-mentions
- @nodejs/ctc.
-* After 72 hours, if there are two or more `LGTM`s from other CTC members and no
- explicit opposition from other CTC members, then the proposal is approved.
-* If there are any CTC members objecting, then a conversation ensues until
+* A TSC member opens an issue explaining the proposal/issue and @-mentions
+ @nodejs/tsc.
+* After 72 hours, if there are two or more `LGTM`s from other TSC members and no
+ explicit opposition from other TSC members, then the proposal is approved.
+* If there are any TSC members objecting, then a conversation ensues until
either the proposal is dropped or the objecting members are persuaded. If
there is an extended impasse, a motion for a vote may be made.
## Consensus Seeking Process
-The CTC follows a
-[Consensus Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making)
-decision making model.
-
-When an agenda item has appeared to reach a consensus, the meeting chair will
-ask "Does anyone object?" as a final call for dissent from the consensus.
+The TSC follows a [Consensus Seeking][] decision making model as described by
+the [TSC Charter][].
-If an agenda item cannot reach a consensus, a CTC member can call for either a
-closing vote or a vote to table the issue to the next meeting. All votes
-(including votes to close or table) pass if and only if more than 50% of the CTC
-members (excluding individuals who explicitly abstain) vote in favor. For
-example, if there are 20 CTC members, and 5 of those members indicate that they
-abstain, then 8 votes in favor are required for a resolution to pass.
+[TSC Charter]: https://github.com/nodejs/TSC/blob/master/TSC-Charter.md
+[Consensus Seeking]: http://en.wikipedia.org/wiki/Consensus-seeking_decision-making
diff --git a/Makefile b/Makefile
index 652c591e2bfe96..274df0745871cf 100644
--- a/Makefile
+++ b/Makefile
@@ -139,8 +139,9 @@ coverage: coverage-test
coverage-build: all
mkdir -p node_modules
if [ ! -d node_modules/istanbul-merge ]; then \
- $(NODE) ./deps/npm install istanbul-merge; fi
- if [ ! -d node_modules/nyc ]; then $(NODE) ./deps/npm install nyc; fi
+ $(NODE) ./deps/npm install istanbul-merge --no-save --no-package-lock; fi
+ if [ ! -d node_modules/nyc ]; then \
+ $(NODE) ./deps/npm install nyc --no-save --no-package-lock; fi
if [ ! -d gcovr ]; then git clone --depth=1 \
--single-branch git://github.com/gcovr/gcovr.git; fi
if [ ! -d testing ]; then git clone --depth=1 \
@@ -150,7 +151,7 @@ coverage-build: all
"$(CURDIR)/testing/coverage/gcovr-patches.diff"); fi
if [ -d lib_ ]; then $(RM) -r lib; mv lib_ lib; fi
mv lib lib_
- $(NODE) ./node_modules/.bin/nyc instrument lib_/ lib/
+ $(NODE) ./node_modules/.bin/nyc instrument --extension .js --extension .mjs lib_/ lib/
$(MAKE)
coverage-test: coverage-build
@@ -165,7 +166,7 @@ coverage-test: coverage-build
$(NODE) ./node_modules/.bin/istanbul-merge --out \
.cov_tmp/libcov.json 'out/Release/.coverage/coverage-*.json'
(cd lib && .$(NODE) ../node_modules/.bin/nyc report \
- --temp-directory "$(CURDIR)/.cov_tmp" -r html \
+ --temp-directory "$(CURDIR)/.cov_tmp" \
--report-dir "../coverage")
-(cd out && "../gcovr/scripts/gcovr" --gcov-exclude='.*deps' \
--gcov-exclude='.*usr' -v -r Release/obj.target/node \
@@ -192,6 +193,10 @@ v8:
tools/make-v8.sh
$(MAKE) -C deps/v8 $(V8_ARCH).$(BUILDTYPE_LOWER) $(V8_BUILD_OPTIONS)
+ifeq ($(NODE_TARGET_TYPE),static_library)
+test: all
+ $(MAKE) cctest
+else
test: all
$(MAKE) build-addons
$(MAKE) build-addons-napi
@@ -200,6 +205,7 @@ test: all
$(CI_JS_SUITES) \
$(CI_NATIVE_SUITES)
$(MAKE) lint
+endif
test-parallel: all
$(PYTHON) tools/test.py --mode=release parallel -J
@@ -328,7 +334,7 @@ test-all-valgrind: test-build
$(PYTHON) tools/test.py --mode=debug,release --valgrind
CI_NATIVE_SUITES := addons addons-napi
-CI_JS_SUITES := async-hooks doctool inspector known_issues message parallel pseudo-tty sequential
+CI_JS_SUITES := abort async-hooks doctool inspector known_issues message parallel pseudo-tty sequential
# Build and test addons without building anything else
test-ci-native: LOGLEVEL := info
@@ -492,28 +498,25 @@ out/doc/%: doc/%
# check if ./node is actually set, else use user pre-installed binary
gen-json = tools/doc/generate.js --format=json $< > $@
-out/doc/api/%.json: doc/api/%.md
- @[ -e tools/doc/node_modules/js-yaml/package.json ] || \
+gen-html = tools/doc/generate.js --node-version=$(FULLVERSION) --format=html \
+ --template=doc/template.html --analytics=$(DOCS_ANALYTICS) $< > $@
+
+gen-doc = \
+ [ -e tools/doc/node_modules/js-yaml/package.json ] || \
[ -e tools/eslint/node_modules/js-yaml/package.json ] || \
if [ -x $(NODE) ]; then \
cd tools/doc && ../../$(NODE) ../../$(NPM) install; \
else \
cd tools/doc && node ../../$(NPM) install; \
- fi
- [ -x $(NODE) ] && $(NODE) $(gen-json) || node $(gen-json)
+ fi;\
+ [ -x $(NODE) ] && $(NODE) $(1) || node $(1)
+
+out/doc/api/%.json: doc/api/%.md
+ $(call gen-doc, $(gen-json))
# check if ./node is actually set, else use user pre-installed binary
-gen-html = tools/doc/generate.js --node-version=$(FULLVERSION) --format=html \
- --template=doc/template.html --analytics=$(DOCS_ANALYTICS) $< > $@
out/doc/api/%.html: doc/api/%.md
- @[ -e tools/doc/node_modules/js-yaml/package.json ] || \
- [ -e tools/eslint/node_modules/js-yaml/package.json ] || \
- if [ -x $(NODE) ]; then \
- cd tools/doc && ../../$(NODE) ../../$(NPM) install; \
- else \
- cd tools/doc && node ../../$(NPM) install; \
- fi
- [ -x $(NODE) ] && $(NODE) $(gen-html) || node $(gen-html)
+ $(call gen-doc, $(gen-html))
docopen: $(apidocs_html)
@$(PYTHON) -mwebbrowser file://$(PWD)/out/doc/api/all.html
@@ -672,6 +675,11 @@ release-only:
echo 'Please update REPLACEME in Added: tags in doc/api/*.md (See doc/releases.md)' ; \
exit 1 ; \
fi
+ @if [ "$(DISTTYPE)" != "nightly" ] && [ "$(DISTTYPE)" != "next-nightly" ] && \
+ `grep -q DEP00XX doc/api/deprecations.md`; then \
+ echo 'Please update DEP00XX in doc/api/deprecations.md (See doc/releases.md)' ; \
+ exit 1 ; \
+ fi
@if [ "$(shell git status --porcelain | egrep -v '^\?\? ')" = "" ]; then \
exit 0 ; \
else \
@@ -875,20 +883,20 @@ bench: bench-net bench-http bench-fs bench-tls
bench-ci: bench
+JSLINT_TARGETS = benchmark doc lib test tools
+
jslint:
@echo "Running JS linter..."
- $(NODE) tools/eslint/bin/eslint.js --cache --rulesdir=tools/eslint-rules --ext=.js,.md \
- benchmark doc lib test tools
+ $(NODE) tools/eslint/bin/eslint.js --cache --rulesdir=tools/eslint-rules --ext=.js,.mjs,.md \
+ $(JSLINT_TARGETS)
jslint-ci:
@echo "Running JS linter..."
$(NODE) tools/jslint.js $(PARALLEL_ARGS) -f tap -o test-eslint.tap \
- benchmark doc lib test tools
+ $(JSLINT_TARGETS)
CPPLINT_EXCLUDE ?=
CPPLINT_EXCLUDE += src/node_root_certs.h
-CPPLINT_EXCLUDE += src/queue.h
-CPPLINT_EXCLUDE += src/tree.h
CPPLINT_EXCLUDE += $(wildcard test/addons/??_*/*.cc test/addons/??_*/*.h)
CPPLINT_EXCLUDE += $(wildcard test/addons-napi/??_*/*.cc test/addons-napi/??_*/*.h)
# These files were copied more or less verbatim from V8.
diff --git a/README.md b/README.md
index 1960b333702106..ec1488345adb7b 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,7 @@ If you need help using or installing Node.js, please use the
* [Building Node.js](#building-nodejs)
* [Security](#security)
* [Current Project Team Members](#current-project-team-members)
- * [CTC (Core Technical Committee)](#ctc-core-technical-committee)
+ * [TSC (Technical Steering Committee)](#tsc-technical-steering-committee)
* [Collaborators](#collaborators)
* [Release Team](#release-team)
@@ -55,9 +55,9 @@ If you need help using or installing Node.js, please use the
channel.
_Please note that unofficial resources are neither managed by (nor necessarily
-endorsed by) the Node.js TSC/CTC. Specifically, such resources are not
+endorsed by) the Node.js TSC. Specifically, such resources are not
currently covered by the [Node.js Moderation Policy][] and the selection and
-actions of resource operators/moderators are not subject to TSC/CTC oversight._
+actions of resource operators/moderators are not subject to TSC oversight._
## Release Types
@@ -114,11 +114,11 @@ documentation of the latest stable version.
### Verifying Binaries
-Current, LTS and Nightly download directories all contain a _SHASUM256.txt_
+Current, LTS and Nightly download directories all contain a _SHASUMS256.txt_
file that lists the SHA checksums for each file available for
download.
-The _SHASUM256.txt_ can be downloaded using curl.
+The _SHASUMS256.txt_ can be downloaded using curl.
```console
$ curl -O https://nodejs.org/dist/vx.y.z/SHASUMS256.txt
@@ -135,10 +135,10 @@ _(Where "node-vx.y.z.tar.gz" is the name of the file you have
downloaded)_
Additionally, Current and LTS releases (not Nightlies) have GPG signed
-copies of SHASUM256.txt files available as SHASUM256.txt.asc. You can use
+copies of SHASUMS256.txt files available as SHASUMS256.txt.asc. You can use
`gpg` to verify that the file has not been tampered with.
-To verify a SHASUM256.txt.asc, you will first need to import all of
+To verify a SHASUMS256.txt.asc, you will first need to import all of
the GPG keys of individuals authorized to create releases. They are
listed at the bottom of this README under [Release Team](#release-team).
Use a command such as this to import the keys:
@@ -173,14 +173,59 @@ Your email will be acknowledged within 24 hours, and you’ll receive a more
detailed response to your email within 48 hours indicating the next steps in
handling your report.
+There are no hard and fast rules to determine if a bug is worth reporting as
+a security issue. The general rule is that any issue worth reporting must
+allow an attacker to compromise the confidentiality, integrity, or
+availability of the Node.js application or its system in a way the attacker
+could not already achieve.
+
+To illustrate the point, here are some examples of past issues and what the
+Security Response Team thinks of them. When in doubt, however, please do send
+us a report nonetheless.
+
+
+### Public disclosure preferred
+
+- [#14519](https://github.com/nodejs/node/issues/14519): _Internal domain
+ function can be used to cause segfaults_. Causing program termination using
+ either the public JavaScript APIs or the private bindings layer APIs requires
+ the ability to execute arbitrary JavaScript code, which is already the highest
+ level of privilege possible.
+
+- [#12141](https://github.com/nodejs/node/pull/12141): _buffer: zero fill
+ Buffer(num) by default_. The buffer constructor behaviour was documented,
+ but found to be prone to [mis-use](https://snyk.io/blog/exploiting-buffer/).
+ It has since been changed, but despite much debate, was not considered misuse
+ prone enough to justify fixing in older release lines and breaking our
+ API stability contract.
+
+### Private disclosure preferred
+
+- [CVE-2016-7099](https://nodejs.org/en/blog/vulnerability/september-2016-security-releases/):
+ _Fix invalid wildcard certificate validation check_. This is a high severity
+ defect that would allow a malicious TLS server to serve an invalid wildcard
+ certificate for its hostname and be improperly validated by a Node.js client.
+
+- [#5507](https://github.com/nodejs/node/pull/5507): _Fix a defect that makes
+ the CacheBleed Attack possible_. Many, though not all, OpenSSL vulnerabilities
+ in the TLS/SSL protocols also affect Node.js.
+
+- [CVE-2016-2216](https://nodejs.org/en/blog/vulnerability/february-2016-security-releases/):
+ _Fix defects in HTTP header parsing for requests and responses that can allow
+ response splitting_. While the impact of this vulnerability is application and
+ network dependent, it is remotely exploitable in the HTTP protocol.
+
+When in doubt, please do send us a report.
+
+
## Current Project Team Members
The Node.js project team comprises a group of core collaborators and a sub-group
-that forms the _Core Technical Committee_ (CTC) which governs the project. For
-more information about the governance of the Node.js project, see
+that forms the _Technical Steering Committee_ (TSC) which governs the project.
+For more information about the governance of the Node.js project, see
[GOVERNANCE.md](./GOVERNANCE.md).
-### CTC (Core Technical Committee)
+### TSC (Technical Steering Committee)
* [addaleax](https://github.com/addaleax) -
**Anna Henningsen** <anna@addaleax.net> (she/her)
@@ -200,14 +245,14 @@ more information about the governance of the Node.js project, see
**Fedor Indutny** <fedor.indutny@gmail.com>
* [jasnell](https://github.com/jasnell) -
**James M Snell** <jasnell@gmail.com> (he/him)
+* [joshgav](https://github.com/joshgav) -
+**Josh Gavant** <josh.gavant@outlook.com>
* [joyeecheung](https://github.com/joyeecheung) -
**Joyee Cheung** <joyeec9h3@gmail.com> (she/her)
* [mcollina](https://github.com/mcollina) -
**Matteo Collina** <matteo.collina@gmail.com> (he/him)
* [mhdawson](https://github.com/mhdawson) -
**Michael Dawson** <michael_dawson@ca.ibm.com> (he/him)
-* [misterdjules](https://github.com/misterdjules) -
-**Julien Gilli** <jgilli@nodejs.org>
* [mscdex](https://github.com/mscdex) -
**Brian White** <mscdex@mscdex.net>
* [MylesBorins](https://github.com/MylesBorins) -
@@ -227,7 +272,7 @@ more information about the governance of the Node.js project, see
* [Trott](https://github.com/Trott) -
**Rich Trott** <rtrott@gmail.com> (he/him)
-### CTC Emeriti
+### TSC Emeriti
* [chrisdickinson](https://github.com/chrisdickinson) -
**Chris Dickinson** <christopher.s.dickinson@gmail.com>
@@ -237,6 +282,8 @@ more information about the governance of the Node.js project, see
**Alexis Campailla** <orangemocha@nodejs.org>
* [piscisaureus](https://github.com/piscisaureus) -
**Bert Belder** <bertbelder@gmail.com>
+* [nebrius](https://github.com/nebrius) -
+**Bryan Hughes** <bryan@nebri.us>
### Collaborators
@@ -264,6 +311,8 @@ more information about the governance of the Node.js project, see
**Ben Noordhuis** <info@bnoordhuis.nl>
* [brendanashworth](https://github.com/brendanashworth) -
**Brendan Ashworth** <brendan.ashworth@me.com>
+* [BridgeAR](https://github.com/BridgeAR) -
+**Ruben Bridgewater** <ruben@bridgewater.de>
* [bzoz](https://github.com/bzoz) -
**Bartosz Sosnowski** <bartosz@janeasystems.com>
* [calvinmetcalf](https://github.com/calvinmetcalf) -
@@ -508,7 +557,7 @@ Previous releases may also have been signed with one of the following GPG keys:
### Working Groups
Information on the current Node.js Working Groups can be found in the
-[CTC repository](https://github.com/nodejs/CTC/blob/master/WORKING_GROUPS.md).
+[TSC repository](https://github.com/nodejs/TSC/blob/master/WORKING_GROUPS.md).
[npm]: https://www.npmjs.com
[Website]: https://nodejs.org/en/
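As a supplement to the `SHASUMS256.txt` instructions in the README changes above, here is a hedged sketch (not part of the release tooling) of doing the checksum comparison with Node's `crypto` module; the tarball name is a placeholder, as in the README:

```js
// Sketch: compare a downloaded release file against its SHASUMS256.txt entry.
// Assumes both files are in the current directory; 'node-vx.y.z.tar.gz' is a
// placeholder name.
const crypto = require('crypto');
const fs = require('fs');

const file = 'node-vx.y.z.tar.gz';
const digest = crypto.createHash('sha256')
                     .update(fs.readFileSync(file))
                     .digest('hex');

const entry = fs.readFileSync('SHASUMS256.txt', 'utf8')
                .split('\n')
                .find((line) => line.endsWith(file));

console.log(entry && entry.startsWith(digest) ? 'OK' : 'MISMATCH');
```

This only checks the hash; verifying the GPG signature on SHASUMS256.txt.asc, as described above, is still what establishes that the checksum list itself is authentic.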
diff --git a/benchmark/arrays/var-int.js b/benchmark/arrays/var-int.js
index 9ebad611661929..795d4eb838de2f 100644
--- a/benchmark/arrays/var-int.js
+++ b/benchmark/arrays/var-int.js
@@ -1,28 +1,26 @@
'use strict';
-var common = require('../common.js');
+const common = require('../common.js');
-var types = [
- 'Array',
- 'Buffer',
- 'Int8Array',
- 'Uint8Array',
- 'Int16Array',
- 'Uint16Array',
- 'Int32Array',
- 'Uint32Array',
- 'Float32Array',
- 'Float64Array'
-];
-
-var bench = common.createBenchmark(main, {
- type: types,
+const bench = common.createBenchmark(main, {
+ type: [
+ 'Array',
+ 'Buffer',
+ 'Int8Array',
+ 'Uint8Array',
+ 'Int16Array',
+ 'Uint16Array',
+ 'Int32Array',
+ 'Uint32Array',
+ 'Float32Array',
+ 'Float64Array'
+ ],
n: [25]
});
function main(conf) {
- var type = conf.type;
- var clazz = global[type];
- var n = +conf.n;
+ const type = conf.type;
+ const clazz = global[type];
+ const n = +conf.n;
bench.start();
var arr = new clazz(n * 1e6);
diff --git a/benchmark/arrays/zero-float.js b/benchmark/arrays/zero-float.js
index a74cd8ec5bacca..d61b0598a4ecf0 100644
--- a/benchmark/arrays/zero-float.js
+++ b/benchmark/arrays/zero-float.js
@@ -1,28 +1,26 @@
'use strict';
-var common = require('../common.js');
+const common = require('../common.js');
-var types = [
- 'Array',
- 'Buffer',
- 'Int8Array',
- 'Uint8Array',
- 'Int16Array',
- 'Uint16Array',
- 'Int32Array',
- 'Uint32Array',
- 'Float32Array',
- 'Float64Array'
-];
-
-var bench = common.createBenchmark(main, {
- type: types,
+const bench = common.createBenchmark(main, {
+ type: [
+ 'Array',
+ 'Buffer',
+ 'Int8Array',
+ 'Uint8Array',
+ 'Int16Array',
+ 'Uint16Array',
+ 'Int32Array',
+ 'Uint32Array',
+ 'Float32Array',
+ 'Float64Array'
+ ],
n: [25]
});
function main(conf) {
- var type = conf.type;
- var clazz = global[type];
- var n = +conf.n;
+ const type = conf.type;
+ const clazz = global[type];
+ const n = +conf.n;
bench.start();
var arr = new clazz(n * 1e6);
diff --git a/benchmark/arrays/zero-int.js b/benchmark/arrays/zero-int.js
index 7f61aa1a820042..90d491e8073168 100644
--- a/benchmark/arrays/zero-int.js
+++ b/benchmark/arrays/zero-int.js
@@ -1,28 +1,26 @@
'use strict';
-var common = require('../common.js');
+const common = require('../common.js');
-var types = [
- 'Array',
- 'Buffer',
- 'Int8Array',
- 'Uint8Array',
- 'Int16Array',
- 'Uint16Array',
- 'Int32Array',
- 'Uint32Array',
- 'Float32Array',
- 'Float64Array'
-];
-
-var bench = common.createBenchmark(main, {
- type: types,
+const bench = common.createBenchmark(main, {
+ type: [
+ 'Array',
+ 'Buffer',
+ 'Int8Array',
+ 'Uint8Array',
+ 'Int16Array',
+ 'Uint16Array',
+ 'Int32Array',
+ 'Uint32Array',
+ 'Float32Array',
+ 'Float64Array'
+ ],
n: [25]
});
function main(conf) {
- var type = conf.type;
- var clazz = global[type];
- var n = +conf.n;
+ const type = conf.type;
+ const clazz = global[type];
+ const n = +conf.n;
bench.start();
var arr = new clazz(n * 1e6);
diff --git a/benchmark/buffers/buffer-creation.js b/benchmark/buffers/buffer-creation.js
index d1acd2694e4228..4ca0a049228f6c 100644
--- a/benchmark/buffers/buffer-creation.js
+++ b/benchmark/buffers/buffer-creation.js
@@ -19,6 +19,7 @@ function main(conf) {
const len = +conf.len;
const n = +conf.n;
switch (conf.type) {
+ case '':
case 'fast-alloc':
bench.start();
for (let i = 0; i < n * 1024; i++) {
diff --git a/benchmark/buffers/buffer-iterate.js b/benchmark/buffers/buffer-iterate.js
index 7c2044422245c4..7b49bcca717c2b 100644
--- a/benchmark/buffers/buffer-iterate.js
+++ b/benchmark/buffers/buffer-iterate.js
@@ -17,12 +17,13 @@ var methods = {
};
function main(conf) {
- var len = +conf.size;
- var clazz = conf.type === 'fast' ? Buffer : SlowBuffer;
- var buffer = new clazz(len);
+ const len = +conf.size;
+ const clazz = conf.type === 'fast' ? Buffer : SlowBuffer;
+ const buffer = new clazz(len);
buffer.fill(0);
- methods[conf.method](buffer, conf.n);
+ const method = conf.method || 'for';
+ methods[method](buffer, conf.n);
}
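The `conf.method || 'for'` fallback here, like the `case '':` added in buffer-creation.js above and the `conf.type || 'UInt8'` defaults in the buffer-read/buffer-write/dataview benchmarks below, lets the benchmark still pick a sensible variant when the configuration value comes through empty (for example when the file is run without an explicit option). The underlying JavaScript behaviour, as a tiny sketch:

```js
// An empty string is falsy, so `||` supplies the default; any explicit
// value wins. (Hypothetical conf objects, standing in for what
// common.createBenchmark passes to main().)
function pickMethod(conf) {
  return conf.method || 'for';
}

console.log(pickMethod({ method: 'iterator' })); // 'iterator'
console.log(pickMethod({ method: '' }));         // 'for'
console.log(pickMethod({}));                     // 'for' (undefined is falsy too)
```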
diff --git a/benchmark/buffers/buffer-read.js b/benchmark/buffers/buffer-read.js
index 30f3ff05adb15a..67d86bad4eb05c 100644
--- a/benchmark/buffers/buffer-read.js
+++ b/benchmark/buffers/buffer-read.js
@@ -26,14 +26,15 @@ var bench = common.createBenchmark(main, {
});
function main(conf) {
- var noAssert = conf.noAssert === 'true';
- var len = +conf.millions * 1e6;
- var clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
- var buff = new clazz(8);
- var fn = `read${conf.type}`;
+ const noAssert = conf.noAssert === 'true';
+ const len = +conf.millions * 1e6;
+ const clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
+ const buff = new clazz(8);
+ const type = conf.type || 'UInt8';
+ const fn = `read${type}`;
buff.writeDoubleLE(0, 0, noAssert);
- var testFunction = new Function('buff', `
+ const testFunction = new Function('buff', `
for (var i = 0; i !== ${len}; i++) {
buff.${fn}(0, ${JSON.stringify(noAssert)});
}
diff --git a/benchmark/buffers/buffer-swap.js b/benchmark/buffers/buffer-swap.js
index 9e36985f5cd209..05cde002943f4a 100644
--- a/benchmark/buffers/buffer-swap.js
+++ b/benchmark/buffers/buffer-swap.js
@@ -73,7 +73,7 @@ function genMethod(method) {
}
function main(conf) {
- const method = conf.method;
+ const method = conf.method || 'swap16';
const len = conf.len | 0;
const n = conf.n | 0;
const aligned = conf.aligned || 'true';
diff --git a/benchmark/buffers/buffer-write.js b/benchmark/buffers/buffer-write.js
index 2f1eb08763b241..8fcfc43f70b5bc 100644
--- a/benchmark/buffers/buffer-write.js
+++ b/benchmark/buffers/buffer-write.js
@@ -46,11 +46,12 @@ var mod = {
};
function main(conf) {
- var noAssert = conf.noAssert === 'true';
- var len = +conf.millions * 1e6;
- var clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
- var buff = new clazz(8);
- var fn = `write${conf.type}`;
+ const noAssert = conf.noAssert === 'true';
+ const len = +conf.millions * 1e6;
+ const clazz = conf.buf === 'fast' ? Buffer : require('buffer').SlowBuffer;
+ const buff = new clazz(8);
+ const type = conf.type || 'UInt8';
+ const fn = `write${type}`;
if (/Int/.test(fn))
benchInt(buff, fn, len, noAssert);
diff --git a/benchmark/buffers/dataview-set.js b/benchmark/buffers/dataview-set.js
index 16b2628842a4a4..f9663e6a03953d 100644
--- a/benchmark/buffers/dataview-set.js
+++ b/benchmark/buffers/dataview-set.js
@@ -40,11 +40,12 @@ var mod = {
};
function main(conf) {
- var len = +conf.millions * 1e6;
- var ab = new ArrayBuffer(8);
- var dv = new DataView(ab, 0, 8);
- var le = /LE$/.test(conf.type);
- var fn = `set${conf.type.replace(/[LB]E$/, '')}`;
+ const len = +conf.millions * 1e6;
+ const ab = new ArrayBuffer(8);
+ const dv = new DataView(ab, 0, 8);
+ const type = conf.type || 'Uint8';
+ const le = /LE$/.test(type);
+ const fn = `set${type.replace(/[LB]E$/, '')}`;
if (/int/i.test(fn))
benchInt(dv, fn, len, le);
diff --git a/benchmark/dgram/bind-params.js b/benchmark/dgram/bind-params.js
index 92e9b7f85b1e12..411bef98adcf7c 100644
--- a/benchmark/dgram/bind-params.js
+++ b/benchmark/dgram/bind-params.js
@@ -10,6 +10,7 @@ const configs = {
};
const bench = common.createBenchmark(main, configs);
+const noop = () => {};
function main(conf) {
const n = +conf.n;
@@ -19,19 +20,27 @@ function main(conf) {
if (port !== undefined && address !== undefined) {
bench.start();
for (let i = 0; i < n; i++) {
- dgram.createSocket('udp4').bind(port, address).unref();
+ dgram.createSocket('udp4').bind(port, address)
+ .on('error', noop)
+ .unref();
}
bench.end(n);
} else if (port !== undefined) {
bench.start();
for (let i = 0; i < n; i++) {
- dgram.createSocket('udp4').bind(port).unref();
+ dgram.createSocket('udp4')
+ .bind(port)
+ .on('error', noop)
+ .unref();
}
bench.end(n);
} else if (port === undefined && address === undefined) {
bench.start();
for (let i = 0; i < n; i++) {
- dgram.createSocket('udp4').bind().unref();
+ dgram.createSocket('udp4')
+ .bind()
+ .on('error', noop)
+ .unref();
}
bench.end(n);
}
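Why the no-op 'error' listener: a dgram socket is an EventEmitter, and an 'error' event with no listener attached is thrown, which would abort the benchmark loop on, say, a failed bind. A standalone illustration of that default behaviour (not benchmark code):

```js
// EventEmitter default: 'error' with no listener throws the error object;
// attaching even a no-op listener, as the benchmark now does, prevents that.
const EventEmitter = require('events');

const quiet = new EventEmitter();
quiet.on('error', () => {});                  // the benchmark's `noop`
quiet.emit('error', new Error('EADDRINUSE')); // swallowed

try {
  new EventEmitter().emit('error', new Error('EADDRINUSE'));
} catch (err) {
  console.log('unhandled error event throws:', err.message);
}
```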
diff --git a/benchmark/dgram/multi-buffer.js b/benchmark/dgram/multi-buffer.js
index 6a7fc9bfaf83ee..a0285c8c59e015 100644
--- a/benchmark/dgram/multi-buffer.js
+++ b/benchmark/dgram/multi-buffer.js
@@ -64,7 +64,7 @@ function server() {
}, dur * 1000);
});
- socket.on('message', function(buf, rinfo) {
+ socket.on('message', function() {
received++;
});
diff --git a/benchmark/dgram/offset-length.js b/benchmark/dgram/offset-length.js
index b897707ded5e58..0445f7b70bec8b 100644
--- a/benchmark/dgram/offset-length.js
+++ b/benchmark/dgram/offset-length.js
@@ -56,7 +56,7 @@ function server() {
}, dur * 1000);
});
- socket.on('message', function(buf, rinfo) {
+ socket.on('message', function() {
received++;
});
diff --git a/benchmark/dgram/single-buffer.js b/benchmark/dgram/single-buffer.js
index 8b81d7fbfc0794..e5fcac63f640fc 100644
--- a/benchmark/dgram/single-buffer.js
+++ b/benchmark/dgram/single-buffer.js
@@ -56,7 +56,7 @@ function server() {
}, dur * 1000);
});
- socket.on('message', function(buf, rinfo) {
+ socket.on('message', function() {
received++;
});
diff --git a/benchmark/dns/lookup.js b/benchmark/dns/lookup.js
index ebe9d05695ef23..bb562d528c5b37 100644
--- a/benchmark/dns/lookup.js
+++ b/benchmark/dns/lookup.js
@@ -5,20 +5,20 @@ const lookup = require('dns').lookup;
const bench = common.createBenchmark(main, {
name: ['', '127.0.0.1', '::1'],
- all: [true, false],
+ all: ['true', 'false'],
n: [5e6]
});
function main(conf) {
const name = conf.name;
const n = +conf.n;
- const all = !!conf.all;
+ const all = conf.all === 'true' ? true : false;
var i = 0;
if (all) {
const opts = { all: true };
bench.start();
- (function cb(err, results) {
+ (function cb() {
if (i++ === n) {
bench.end(n);
return;
@@ -27,7 +27,7 @@ function main(conf) {
})();
} else {
bench.start();
- (function cb(err, result) {
+ (function cb() {
if (i++ === n) {
bench.end(n);
return;
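The switch to the strings 'true'/'false' plus an explicit comparison avoids a coercion trap: benchmark option values travel to `main()` as strings, and coercing the string 'false' with `!!` yields true. A one-line demonstration:

```js
// Why `!!conf.all` could never be false once the option was a string:
console.log(!!'false');          // true  (any non-empty string is truthy)
console.log('false' === 'true'); // false (explicit comparison is correct)
```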
diff --git a/benchmark/http2/headers.js b/benchmark/http2/headers.js
index 09449d1e92f208..078e7a356a0df8 100644
--- a/benchmark/http2/headers.js
+++ b/benchmark/http2/headers.js
@@ -5,7 +5,7 @@ const PORT = common.PORT;
var bench = common.createBenchmark(main, {
n: [1e3],
- nheaders: [100, 1000],
+ nheaders: [0, 10, 100, 1000],
}, { flags: ['--expose-http2', '--no-warnings'] });
function main(conf) {
@@ -14,7 +14,16 @@ function main(conf) {
const http2 = require('http2');
const server = http2.createServer();
- const headersObject = { ':path': '/' };
+ const headersObject = {
+ ':path': '/',
+ ':scheme': 'http',
+ 'accept-encoding': 'gzip, deflate',
+ 'accept-language': 'en',
+ 'content-type': 'text/plain',
+ 'referer': 'https://example.org/',
+ 'user-agent': 'SuperBenchmarker 3000'
+ };
+
for (var i = 0; i < nheaders; i++) {
headersObject[`foo${i}`] = `some header value ${i}`;
}
diff --git a/benchmark/http2/write.js b/benchmark/http2/write.js
new file mode 100644
index 00000000000000..df76794468b4de
--- /dev/null
+++ b/benchmark/http2/write.js
@@ -0,0 +1,28 @@
+'use strict';
+
+const common = require('../common.js');
+const PORT = common.PORT;
+
+var bench = common.createBenchmark(main, {
+ streams: [100, 200, 1000],
+ length: [64 * 1024, 128 * 1024, 256 * 1024, 1024 * 1024],
+}, { flags: ['--expose-http2', '--no-warnings'] });
+
+function main(conf) {
+ const m = +conf.streams;
+ const l = +conf.length;
+ const http2 = require('http2');
+ const server = http2.createServer();
+ server.on('stream', (stream) => {
+ stream.respond();
+ stream.write('ü'.repeat(l));
+ stream.end();
+ });
+ server.listen(PORT, () => {
+ bench.http({
+ path: '/',
+ requests: 10000,
+ maxConcurrentStreams: m,
+ }, () => { server.close(); });
+ });
+}
diff --git a/benchmark/misc/function_call/binding.cc b/benchmark/misc/function_call/binding.cc
index 9008f6c437052a..95d2a56de26e87 100644
--- a/benchmark/misc/function_call/binding.cc
+++ b/benchmark/misc/function_call/binding.cc
@@ -14,4 +14,4 @@ extern "C" void init (Local<Object> target) {
NODE_SET_METHOD(target, "hello", Hello);
}
-NODE_MODULE(binding, init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, init)
diff --git a/configure b/configure
index 0b9c3a16ed09ff..cff6324a4d807c 100755
--- a/configure
+++ b/configure
@@ -1,4 +1,15 @@
-#!/usr/bin/env python
+#!/bin/sh
+
+# Locate python2 interpreter and re-execute the script. Note that the
+# mix of single and double quotes is intentional, as is the fact that
+# the ] goes on a new line.
+_=[ 'exec' '/bin/sh' '-c' '''
+which python2.7 >/dev/null && exec python2.7 "$0" "$@"
+which python2 >/dev/null && exec python2 "$0" "$@"
+exec python "$0" "$@"
+''' "$0" "$@"
+]
+del _
import sys
if sys.version_info[0] != 2 or sys.version_info[1] not in (6, 7):
@@ -409,6 +420,8 @@ http2_optgroup.add_option('--debug-nghttp2',
dest='debug_nghttp2',
help='build nghttp2 with DEBUGBUILD (default is false)')
+parser.add_option_group(http2_optgroup)
+
parser.add_option('--with-perfctr',
action='store_true',
dest='with_perfctr',
@@ -437,12 +450,12 @@ parser.add_option('--without-perfctr',
# Dummy option for backwards compatibility
parser.add_option('--with-snapshot',
action='store_true',
- dest='with_snapshot',
+ dest='unused_with_snapshot',
help=optparse.SUPPRESS_HELP)
parser.add_option('--without-snapshot',
action='store_true',
- dest='unused_without_snapshot',
+ dest='without_snapshot',
help=optparse.SUPPRESS_HELP)
parser.add_option('--without-ssl',
@@ -640,8 +653,8 @@ def check_compiler(o):
ok, is_clang, clang_version, gcc_version = try_check_compiler(CXX, 'c++')
if not ok:
warn('failed to autodetect C++ compiler version (CXX=%s)' % CXX)
- elif clang_version < '3.4.2' if is_clang else gcc_version < '4.8.0':
- warn('C++ compiler too old, need g++ 4.8 or clang++ 3.4.2 (CXX=%s)' % CXX)
+ elif clang_version < '3.4.2' if is_clang else gcc_version < '4.9.4':
+ warn('C++ compiler too old, need g++ 4.9.4 or clang++ 3.4.2 (CXX=%s)' % CXX)
ok, is_clang, clang_version, gcc_version = try_check_compiler(CC, 'c')
if not ok:
@@ -827,7 +840,7 @@ def configure_node(o):
cross_compiling = (options.cross_compiling
if options.cross_compiling is not None
else target_arch != host_arch)
- want_snapshots = 1 if options.with_snapshot else 0
+ want_snapshots = not options.without_snapshot
o['variables']['want_separate_host_toolset'] = int(
cross_compiling and want_snapshots)
o['variables']['want_separate_host_toolset_mkpeephole'] = int(
@@ -984,7 +997,7 @@ def configure_v8(o):
o['variables']['v8_optimized_debug'] = 0 # Compile with -O0 in debug builds.
o['variables']['v8_random_seed'] = 0 # Use a random seed for hash tables.
o['variables']['v8_promise_internal_field_count'] = 1 # Add internal field to promises for async hooks.
- o['variables']['v8_use_snapshot'] = b(options.with_snapshot)
+ o['variables']['v8_use_snapshot'] = 'false' if options.without_snapshot else 'true'
o['variables']['v8_trace_maps'] = 1 if options.trace_maps else 0
o['variables']['node_use_v8_platform'] = b(not options.without_v8_platform)
o['variables']['node_use_bundled_v8'] = b(not options.without_bundled_v8)
@@ -1408,6 +1421,8 @@ config = {
'BUILDTYPE': 'Debug' if options.debug else 'Release',
'USE_XCODE': str(int(options.use_xcode or 0)),
'PYTHON': sys.executable,
+ 'NODE_TARGET_TYPE': variables['node_target_type'] if options.enable_static \
+ else '',
}
if options.prefix:
diff --git a/deps/cares/src/ares_gethostbyaddr.c b/deps/cares/src/ares_gethostbyaddr.c
index 9258919a385699..a0a90f6bb1712b 100644
--- a/deps/cares/src/ares_gethostbyaddr.c
+++ b/deps/cares/src/ares_gethostbyaddr.c
@@ -157,7 +157,7 @@ static void addr_callback(void *arg, int status, int timeouts,
}
end_aquery(aquery, status, host);
}
- else if (status == ARES_EDESTRUCTION)
+ else if (status == ARES_EDESTRUCTION || status == ARES_ECANCELLED)
end_aquery(aquery, status, NULL);
else
next_lookup(aquery);
diff --git a/deps/nghttp2/lib/Makefile.msvc b/deps/nghttp2/lib/Makefile.msvc
index cef359ee256cbb..0077dc4e5170a1 100644
--- a/deps/nghttp2/lib/Makefile.msvc
+++ b/deps/nghttp2/lib/Makefile.msvc
@@ -50,7 +50,7 @@ IMP_D := $(OBJ_DIR)/nghttp2d.lib
TARGETS := $(LIB_R) $(DLL_R) $(IMP_R) \
$(LIB_D) $(DLL_D) $(IMP_D)
-EXT_LIBS =
+EXT_LIBS =
NGHTTP2_PDB_R := $(OBJ_DIR)/nghttp2.pdb
NGHTTP2_PDB_D := $(OBJ_DIR)/nghttp2d.pdb
@@ -121,7 +121,7 @@ $(OBJ_DIR):
install: includes/nghttp2/nghttp2.h includes/nghttp2/nghttp2ver.h \
$(TARGETS) \
- copy_headers_and_libs install_nghttp2_pyd_$(USE_CYTHON)
+ copy_headers_and_libs install_nghttp2_pyd_$(USE_CYTHON)
#
# This MUST be done before using the 'install_nghttp2_pyd_1' rule.
@@ -144,14 +144,14 @@ $(LIB_D): $(NGHTTP2_OBJ_D)
$(IMP_R): $(DLL_R)
-$(DLL_R): $(NGHTTP2_OBJ_R) $(OBJ_DIR)/r_nghttp2.res
+$(DLL_R): $(NGHTTP2_OBJ_R) $(OBJ_DIR)/r_nghttp2.res
$(LD) $(LDFLAGS) -dll -out:$@ -implib:$(IMP_R) $(NGHTTP2_OBJ_R) -PDB:$(NGHTTP2_PDB_R) $(OBJ_DIR)/r_nghttp2.res $(EXT_LIBS)
mt -nologo -manifest $@.manifest -outputresource:$@\;2
@echo
$(IMP_D): $(DLL_D)
-
-$(DLL_D): $(NGHTTP2_OBJ_D) $(OBJ_DIR)/d_nghttp2.res
+
+$(DLL_D): $(NGHTTP2_OBJ_D) $(OBJ_DIR)/d_nghttp2.res
$(LD) $(LDFLAGS) -dll -out:$@ -implib:$(IMP_D) $(NGHTTP2_OBJ_D) -PDB:$(NGHTTP2_PDB_D) $(OBJ_DIR)/d_nghttp2.res $(EXT_LIBS)
mt -nologo -manifest $@.manifest -outputresource:$@\;2
@echo
@@ -174,7 +174,7 @@ build_nghttp2_pyd_1: $(addprefix ../python/, setup.py nghttp2.pyx)
python setup.py build_ext -i -f bdist_wininst
install_nghttp2_pyd_0: ;
-
+
install_nghttp2_pyd_1: $(addprefix ../python/, setup.py nghttp2.pyx)
cd ../python ; \
pip install .
diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h
index 159010040c798c..5696a2ef633653 100644
--- a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h
+++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h
@@ -469,6 +469,15 @@ NGHTTP2_EXTERN void nghttp2_rcbuf_decref(nghttp2_rcbuf *rcbuf);
*/
NGHTTP2_EXTERN nghttp2_vec nghttp2_rcbuf_get_buf(nghttp2_rcbuf *rcbuf);
+/**
+ * @function
+ *
+ * Returns nonzero if the underlying buffer is statically allocated,
+ * and 0 otherwise. This can be useful for language bindings that wish
+ * to avoid creating duplicate strings for these buffers.
+ */
+NGHTTP2_EXTERN int nghttp2_rcbuf_is_static(const nghttp2_rcbuf *rcbuf);
+
/**
* @enum
*
@@ -1741,11 +1750,12 @@ typedef int (*nghttp2_on_header_callback2)(nghttp2_session *session,
* The parameter and behaviour are similar to
* :type:`nghttp2_on_header_callback`. The difference is that this
* callback is only invoked when a invalid header name/value pair is
- * received which is silently ignored if this callback is not set.
- * Only invalid regular header field are passed to this callback. In
- * other words, invalid pseudo header field is not passed to this
- * callback. Also header fields which includes upper cased latter are
- * also treated as error without passing them to this callback.
+ * received which is treated as stream error if this callback is not
+ * set. Only invalid regular header field are passed to this
+ * callback. In other words, invalid pseudo header field is not
+ * passed to this callback. Also header fields which includes upper
+ * cased latter are also treated as error without passing them to this
+ * callback.
*
* This callback is only considered if HTTP messaging validation is
* turned on (which is on by default, see
@@ -1754,10 +1764,13 @@ typedef int (*nghttp2_on_header_callback2)(nghttp2_session *session,
* With this callback, application inspects the incoming invalid
* field, and it also can reset stream from this callback by returning
* :enum:`NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE`. By default, the
- * error code is :enum:`NGHTTP2_INTERNAL_ERROR`. To change the error
+ * error code is :enum:`NGHTTP2_PROTOCOL_ERROR`. To change the error
* code, call `nghttp2_submit_rst_stream()` with the error code of
* choice in addition to returning
* :enum:`NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE`.
+ *
+ * If 0 is returned, the header field is ignored, and the stream is
+ * not reset.
*/
typedef int (*nghttp2_on_invalid_header_callback)(
nghttp2_session *session, const nghttp2_frame *frame, const uint8_t *name,
@@ -2448,7 +2461,10 @@ nghttp2_option_set_no_recv_client_magic(nghttp2_option *option, int val);
* `_. See
* :ref:`http-messaging` section for details. For those applications
* who use nghttp2 library as non-HTTP use, give nonzero to |val| to
- * disable this enforcement.
+ * disable this enforcement. Please note that disabling this feature
+ * does not change the fundamental client and server model of HTTP.
+ * That is, even if the validation is disabled, only client can send
+ * requests.
*/
NGHTTP2_EXTERN void nghttp2_option_set_no_http_messaging(nghttp2_option *option,
int val);
@@ -3802,9 +3818,8 @@ nghttp2_submit_response(nghttp2_session *session, int32_t stream_id,
* Submits trailer fields HEADERS against the stream |stream_id|.
*
* The |nva| is an array of name/value pair :type:`nghttp2_nv` with
- * |nvlen| elements. The application is responsible not to include
- * pseudo-header fields (header field whose name starts with ":") in
- * |nva|.
+ * |nvlen| elements. The application must not include pseudo-header
+ * fields (headers whose names starts with ":") in |nva|.
*
* This function creates copies of all name/value pairs in |nva|. It
* also lower-cases all names in |nva|. The order of elements in
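The documentation changes above alter the contract of `nghttp2_on_invalid_header_callback`: without a callback the invalid field is now treated as a stream error (default `NGHTTP2_PROTOCOL_ERROR`), and a callback that returns 0 ignores the field without resetting the stream. A minimal illustrative sketch of such a callback, registered through `nghttp2_session_callbacks_set_on_invalid_header_callback()`; it is not part of this patch:

```c
#include <nghttp2/nghttp2.h>
#include <stdio.h>

/* Restores the old "log and ignore" behaviour under the new semantics:
 * returning 0 ignores the field; returning
 * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE would reset the stream instead. */
static int on_invalid_header(nghttp2_session *session,
                             const nghttp2_frame *frame,
                             const uint8_t *name, size_t namelen,
                             const uint8_t *value, size_t valuelen,
                             uint8_t flags, void *user_data) {
  (void)session; (void)flags; (void)user_data;
  fprintf(stderr, "invalid header on stream %d: %.*s: %.*s\n",
          frame->hd.stream_id, (int)namelen, name, (int)valuelen, value);
  return 0;
}
```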
diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h
index dd0587d1642c38..38c48bf041f1e8 100644
--- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h
+++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h
@@ -29,7 +29,7 @@
* @macro
* Version number of the nghttp2 library release
*/
-#define NGHTTP2_VERSION "1.22.0"
+#define NGHTTP2_VERSION "1.25.0"
/**
* @macro
@@ -37,6 +37,6 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define NGHTTP2_VERSION_NUM 0x011600
+#define NGHTTP2_VERSION_NUM 0x011900
#endif /* NGHTTP2VER_H */
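Because the version number packs major, minor and patch into one byte each, dependents can gate the APIs introduced by this upgrade at compile time; a small illustrative check:

```c
#include <nghttp2/nghttp2ver.h>

/* 1.25.0 encodes as 0x011900; guard use of APIs added in this upgrade. */
#if NGHTTP2_VERSION_NUM >= 0x011900
#  define HAVE_NGHTTP2_RCBUF_IS_STATIC 1
#else
#  define HAVE_NGHTTP2_RCBUF_IS_STATIC 0
#endif
```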
diff --git a/deps/nghttp2/lib/nghttp2_frame.c b/deps/nghttp2/lib/nghttp2_frame.c
index 90efaff5317161..210df0584443df 100644
--- a/deps/nghttp2/lib/nghttp2_frame.c
+++ b/deps/nghttp2/lib/nghttp2_frame.c
@@ -672,6 +672,9 @@ int nghttp2_frame_pack_altsvc(nghttp2_bufs *bufs, nghttp2_extension *frame) {
nghttp2_buf *buf;
nghttp2_ext_altsvc *altsvc;
+ /* This is required with --disable-assert. */
+ (void)rv;
+
altsvc = frame->payload;
buf = &bufs->head->buf;
diff --git a/deps/nghttp2/lib/nghttp2_hd.c b/deps/nghttp2/lib/nghttp2_hd.c
index e9a109dcc1da94..1eb3be33802c44 100644
--- a/deps/nghttp2/lib/nghttp2_hd.c
+++ b/deps/nghttp2/lib/nghttp2_hd.c
@@ -662,9 +662,9 @@ static int hd_context_init(nghttp2_hd_context *context, nghttp2_mem *mem) {
context->mem = mem;
context->bad = 0;
context->hd_table_bufsize_max = NGHTTP2_HD_DEFAULT_MAX_BUFFER_SIZE;
- rv = hd_ringbuf_init(&context->hd_table, context->hd_table_bufsize_max /
- NGHTTP2_HD_ENTRY_OVERHEAD,
- mem);
+ rv = hd_ringbuf_init(
+ &context->hd_table,
+ context->hd_table_bufsize_max / NGHTTP2_HD_ENTRY_OVERHEAD, mem);
if (rv != 0) {
return rv;
}
diff --git a/deps/nghttp2/lib/nghttp2_pq.h b/deps/nghttp2/lib/nghttp2_pq.h
index 6b0ecfb4763494..1426bef760132c 100644
--- a/deps/nghttp2/lib/nghttp2_pq.h
+++ b/deps/nghttp2/lib/nghttp2_pq.h
@@ -42,7 +42,7 @@ typedef struct {
nghttp2_pq_entry **q;
/* Memory allocator */
nghttp2_mem *mem;
- /* The number of items sotred */
+ /* The number of items stored */
size_t length;
/* The maximum number of items this pq can store. This is
automatically extended when length is reached to this value. */
diff --git a/deps/nghttp2/lib/nghttp2_rcbuf.c b/deps/nghttp2/lib/nghttp2_rcbuf.c
index 24f561af97af7c..7e7814d2d3caac 100644
--- a/deps/nghttp2/lib/nghttp2_rcbuf.c
+++ b/deps/nghttp2/lib/nghttp2_rcbuf.c
@@ -96,3 +96,7 @@ nghttp2_vec nghttp2_rcbuf_get_buf(nghttp2_rcbuf *rcbuf) {
nghttp2_vec res = {rcbuf->base, rcbuf->len};
return res;
}
+
+int nghttp2_rcbuf_is_static(const nghttp2_rcbuf *rcbuf) {
+ return rcbuf->ref == -1;
+}
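`nghttp2_rcbuf_is_static()` reports a reference count pinned at -1, i.e. a buffer that is never freed, which lets language bindings avoid copying well-known header names. A hedged sketch of how a consumer might branch on it; the interning and copying helpers are hypothetical:

```c
#include <nghttp2/nghttp2.h>

/* Hypothetical helpers: a pointer-keyed cache and a deep copy. */
extern const char *intern_static_string(const uint8_t *base, size_t len);
extern char *copy_string(const uint8_t *base, size_t len);

static const char *header_name_to_binding(nghttp2_rcbuf *name) {
  nghttp2_vec v = nghttp2_rcbuf_get_buf(name);
  if (nghttp2_rcbuf_is_static(name))
    return intern_static_string(v.base, v.len); /* buffer is immortal */
  return copy_string(v.base, v.len);            /* refcounted: must copy */
}
```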
diff --git a/deps/nghttp2/lib/nghttp2_session.c b/deps/nghttp2/lib/nghttp2_session.c
index 1c060f1b10e612..4bc94cbb1982ad 100644
--- a/deps/nghttp2/lib/nghttp2_session.c
+++ b/deps/nghttp2/lib/nghttp2_session.c
@@ -1524,13 +1524,14 @@ static int session_predicate_response_headers_send(nghttp2_session *session,
if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) {
return NGHTTP2_ERR_INVALID_STREAM_ID;
}
- if (stream->state == NGHTTP2_STREAM_OPENING) {
+ switch (stream->state) {
+ case NGHTTP2_STREAM_OPENING:
return 0;
- }
- if (stream->state == NGHTTP2_STREAM_CLOSING) {
+ case NGHTTP2_STREAM_CLOSING:
return NGHTTP2_ERR_STREAM_CLOSING;
+ default:
+ return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
- return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
/*
@@ -1573,9 +1574,6 @@ session_predicate_push_response_headers_send(nghttp2_session *session,
if (stream->state != NGHTTP2_STREAM_RESERVED) {
return NGHTTP2_ERR_PROTO;
}
- if (stream->state == NGHTTP2_STREAM_CLOSING) {
- return NGHTTP2_ERR_STREAM_CLOSING;
- }
if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) {
return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
}
@@ -1610,19 +1608,18 @@ static int session_predicate_headers_send(nghttp2_session *session,
return rv;
}
assert(stream);
- if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) {
- if (stream->state == NGHTTP2_STREAM_CLOSING) {
- return NGHTTP2_ERR_STREAM_CLOSING;
- }
- return 0;
- }
- if (stream->state == NGHTTP2_STREAM_OPENED) {
+
+ switch (stream->state) {
+ case NGHTTP2_STREAM_OPENED:
return 0;
- }
- if (stream->state == NGHTTP2_STREAM_CLOSING) {
+ case NGHTTP2_STREAM_CLOSING:
return NGHTTP2_ERR_STREAM_CLOSING;
+ default:
+ if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) {
+ return 0;
+ }
+ return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
- return NGHTTP2_ERR_INVALID_STREAM_STATE;
}
/*
@@ -2068,14 +2065,6 @@ static int session_prep_frame(nghttp2_session *session,
/* We don't call nghttp2_session_adjust_closed_stream() here,
since we don't keep closed stream in client side */
- estimated_payloadlen = session_estimate_headers_payload(
- session, frame->headers.nva, frame->headers.nvlen,
- NGHTTP2_PRIORITY_SPECLEN);
-
- if (estimated_payloadlen > session->max_send_header_block_length) {
- return NGHTTP2_ERR_FRAME_SIZE_ERROR;
- }
-
rv = session_predicate_request_headers_send(session, item);
if (rv != 0) {
return rv;
@@ -2087,14 +2076,6 @@ static int session_prep_frame(nghttp2_session *session,
} else {
nghttp2_stream *stream;
- estimated_payloadlen = session_estimate_headers_payload(
- session, frame->headers.nva, frame->headers.nvlen,
- NGHTTP2_PRIORITY_SPECLEN);
-
- if (estimated_payloadlen > session->max_send_header_block_length) {
- return NGHTTP2_ERR_FRAME_SIZE_ERROR;
- }
-
stream = nghttp2_session_get_stream(session, frame->hd.stream_id);
if (stream && stream->state == NGHTTP2_STREAM_RESERVED) {
@@ -2121,6 +2102,14 @@ static int session_prep_frame(nghttp2_session *session,
}
}
+ estimated_payloadlen = session_estimate_headers_payload(
+ session, frame->headers.nva, frame->headers.nvlen,
+ NGHTTP2_PRIORITY_SPECLEN);
+
+ if (estimated_payloadlen > session->max_send_header_block_length) {
+ return NGHTTP2_ERR_FRAME_SIZE_ERROR;
+ }
+
rv = nghttp2_frame_pack_headers(&session->aob.framebufs, &frame->headers,
&session->hd_deflater);
@@ -2190,13 +2179,6 @@ static int session_prep_frame(nghttp2_session *session,
nghttp2_stream *stream;
size_t estimated_payloadlen;
- estimated_payloadlen = session_estimate_headers_payload(
- session, frame->push_promise.nva, frame->push_promise.nvlen, 0);
-
- if (estimated_payloadlen > session->max_send_header_block_length) {
- return NGHTTP2_ERR_FRAME_SIZE_ERROR;
- }
-
/* stream could be NULL if associated stream was already
closed. */
stream = nghttp2_session_get_stream(session, frame->hd.stream_id);
@@ -2209,6 +2191,13 @@ static int session_prep_frame(nghttp2_session *session,
assert(stream);
+ estimated_payloadlen = session_estimate_headers_payload(
+ session, frame->push_promise.nva, frame->push_promise.nvlen, 0);
+
+ if (estimated_payloadlen > session->max_send_header_block_length) {
+ return NGHTTP2_ERR_FRAME_SIZE_ERROR;
+ }
+
rv = nghttp2_frame_pack_push_promise(
&session->aob.framebufs, &frame->push_promise, &session->hd_deflater);
if (rv != 0) {
@@ -3332,7 +3321,7 @@ static int session_call_on_invalid_header(nghttp2_session *session,
session, frame, nv->name->base, nv->name->len, nv->value->base,
nv->value->len, nv->flags, session->user_data);
} else {
- return 0;
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
if (rv == NGHTTP2_ERR_PAUSE || rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) {
@@ -3422,6 +3411,27 @@ static uint32_t get_error_code_from_lib_error_code(int lib_error_code) {
}
}
+/*
+ * Calls on_invalid_frame_recv_callback if it is set to |session|.
+ *
+ * This function returns 0 if it succeeds, or one of the following
+ * negative error codes:
+ *
+ * NGHTTP2_ERR_CALLBACK_FAILURE
+ * User defined callback function fails.
+ */
+static int session_call_on_invalid_frame_recv_callback(nghttp2_session *session,
+ nghttp2_frame *frame,
+ int lib_error_code) {
+ if (session->callbacks.on_invalid_frame_recv_callback) {
+ if (session->callbacks.on_invalid_frame_recv_callback(
+ session, frame, lib_error_code, session->user_data) != 0) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ return 0;
+}
+
static int session_handle_invalid_stream2(nghttp2_session *session,
int32_t stream_id,
nghttp2_frame *frame,
@@ -3579,6 +3589,37 @@ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame,
if (subject_stream && session_enforce_http_messaging(session)) {
rv = nghttp2_http_on_header(session, subject_stream, frame, &nv,
trailer);
+
+ if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) {
+ /* Don't overwrite rv here */
+ int rv2;
+
+ rv2 = session_call_on_invalid_header(session, frame, &nv);
+ if (rv2 == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) {
+ rv = NGHTTP2_ERR_HTTP_HEADER;
+ } else {
+ if (rv2 != 0) {
+ return rv2;
+ }
+
+ /* header is ignored */
+ DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n",
+ frame->hd.type, frame->hd.stream_id, (int)nv.name->len,
+ nv.name->base, (int)nv.value->len, nv.value->base);
+
+ rv2 = session_call_error_callback(
+ session,
+ "Ignoring received invalid HTTP header field: frame type: "
+ "%u, stream: %d, name: [%.*s], value: [%.*s]",
+ frame->hd.type, frame->hd.stream_id, (int)nv.name->len,
+ nv.name->base, (int)nv.value->len, nv.value->base);
+
+ if (nghttp2_is_fatal(rv2)) {
+ return rv2;
+ }
+ }
+ }
+
if (rv == NGHTTP2_ERR_HTTP_HEADER) {
DEBUGF("recv: HTTP error: type=%u, id=%d, header %.*s: %.*s\n",
frame->hd.type, frame->hd.stream_id, (int)nv.name->len,
@@ -3602,34 +3643,6 @@ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame,
}
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
-
- if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) {
- /* Don't overwrite rv here */
- int rv2;
-
- rv2 = session_call_on_invalid_header(session, frame, &nv);
- /* This handles NGHTTP2_ERR_PAUSE and
- NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE as well */
- if (rv2 != 0) {
- return rv2;
- }
-
- /* header is ignored */
- DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n",
- frame->hd.type, frame->hd.stream_id, (int)nv.name->len,
- nv.name->base, (int)nv.value->len, nv.value->base);
-
- rv2 = session_call_error_callback(
- session,
- "Ignoring received invalid HTTP header field: frame type: "
- "%u, stream: %d, name: [%.*s], value: [%.*s]",
- frame->hd.type, frame->hd.stream_id, (int)nv.name->len,
- nv.name->base, (int)nv.value->len, nv.value->base);
-
- if (nghttp2_is_fatal(rv2)) {
- return rv2;
- }
- }
}
if (rv == 0) {
rv = session_call_on_header(session, frame, &nv);
@@ -4772,11 +4785,13 @@ int nghttp2_session_on_altsvc_received(nghttp2_session *session,
if (frame->hd.stream_id == 0) {
if (altsvc->origin_len == 0) {
- return 0;
+ return session_call_on_invalid_frame_recv_callback(session, frame,
+ NGHTTP2_ERR_PROTO);
}
} else {
if (altsvc->origin_len > 0) {
- return 0;
+ return session_call_on_invalid_frame_recv_callback(session, frame,
+ NGHTTP2_ERR_PROTO);
}
stream = nghttp2_session_get_stream(session, frame->hd.stream_id);
@@ -4789,6 +4804,11 @@ int nghttp2_session_on_altsvc_received(nghttp2_session *session,
}
}
+ if (altsvc->field_value_len == 0) {
+ return session_call_on_invalid_frame_recv_callback(session, frame,
+ NGHTTP2_ERR_PROTO);
+ }
+
return session_call_on_frame_received(session, frame);
}
@@ -5573,8 +5593,8 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
iframe->max_niv =
iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1;
- iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) *
- iframe->max_niv);
+ iframe->iv = nghttp2_mem_malloc(
+ mem, sizeof(nghttp2_settings_entry) * iframe->max_niv);
if (!iframe->iv) {
return NGHTTP2_ERR_NOMEM;
@@ -5951,7 +5971,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
DEBUGF("recv: origin_len=%zu\n", origin_len);
- if (2 + origin_len > iframe->payloadleft) {
+ if (origin_len > iframe->payloadleft) {
busy = 1;
iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR;
break;
@@ -6037,9 +6057,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
/* Use promised stream ID for PUSH_PROMISE */
rv = nghttp2_session_add_rst_stream(
- session, iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE
- ? iframe->frame.push_promise.promised_stream_id
- : iframe->frame.hd.stream_id,
+ session,
+ iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE
+ ? iframe->frame.push_promise.promised_stream_id
+ : iframe->frame.hd.stream_id,
NGHTTP2_INTERNAL_ERROR);
if (nghttp2_is_fatal(rv)) {
return rv;
@@ -7129,6 +7150,7 @@ uint32_t nghttp2_session_get_remote_settings(nghttp2_session *session,
}
assert(0);
+ abort(); /* if NDEBUG is set */
}
uint32_t nghttp2_session_get_local_settings(nghttp2_session *session,
@@ -7149,6 +7171,7 @@ uint32_t nghttp2_session_get_local_settings(nghttp2_session *session,
}
assert(0);
+ abort(); /* if NDEBUG is set */
}
static int nghttp2_session_upgrade_internal(nghttp2_session *session,
diff --git a/deps/nghttp2/lib/nghttp2_session.h b/deps/nghttp2/lib/nghttp2_session.h
index 3e4c1440a5ab73..3e1467f6a356d7 100644
--- a/deps/nghttp2/lib/nghttp2_session.h
+++ b/deps/nghttp2/lib/nghttp2_session.h
@@ -311,7 +311,7 @@ struct nghttp2_session {
/* Unacked local SETTINGS_MAX_CONCURRENT_STREAMS value. We use this
to refuse the incoming stream if it exceeds this value. */
uint32_t pending_local_max_concurrent_stream;
- /* The bitwose OR of zero or more of nghttp2_typemask to indicate
+ /* The bitwise OR of zero or more of nghttp2_typemask to indicate
that the default handling of extension frame is enabled. */
uint32_t builtin_recv_ext_types;
/* Unacked local ENABLE_PUSH value. We use this to refuse
diff --git a/deps/openssl/config/opensslconf.h b/deps/openssl/config/opensslconf.h
index 9b20fb6485aa84..1c89babcf6c864 100644
--- a/deps/openssl/config/opensslconf.h
+++ b/deps/openssl/config/opensslconf.h
@@ -37,6 +37,8 @@
| solaris | x64 | solaris64-x86_64-gcc | o |
| freebsd | ia32 | BSD-x86 | o |
| freebsd | x64 | BSD-x86_64 | o |
+ | netbsd | ia32 | BSD-x86 | o |
+ | netbsd | x64 | BSD-x86_64 | o |
| openbsd | ia32 | BSD-x86 | - |
| openbsd | x64 | BSD-x86_64 | - |
| others | others | linux-elf | - |
@@ -51,6 +53,7 @@
| mac | __APPLE__ && __MACH__ |
| solaris | __sun |
| freebsd | __FreeBSD__ |
+ | netbsd | __NetBSD__ |
| openbsd | __OpenBSD__ |
| linux (not andorid)| __linux__ && !__ANDROID__ |
| android | __ANDROID__ |
@@ -94,6 +97,11 @@
# define OPENSSL_LINUX 1
#endif
+#undef OPENSSL_BSD
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+# define OPENSSL_BSD 1
+#endif
+
#if defined(OPENSSL_LINUX) && defined(__i386__)
# include "./archs/linux-elf/opensslconf.h"
#elif defined(OPENSSL_LINUX) && defined(__ILP32__)
@@ -112,9 +120,9 @@
# include "./archs/VC-WIN32/opensslconf.h"
#elif defined(_WIN32) && defined(_M_X64)
# include "./archs/VC-WIN64A/opensslconf.h"
-#elif (defined(__FreeBSD__) || defined(__OpenBSD__)) && defined(__i386__)
+#elif defined(OPENSSL_BSD) && defined(__i386__)
# include "./archs/BSD-x86/opensslconf.h"
-#elif (defined(__FreeBSD__) || defined(__OpenBSD__)) && defined(__x86_64__)
+#elif defined(OPENSSL_BSD) && defined(__x86_64__)
# include "./archs/BSD-x86_64/opensslconf.h"
#elif defined(__sun) && defined(__i386__)
# include "./archs/solaris-x86-gcc/opensslconf.h"
diff --git a/deps/uv/.gitignore b/deps/uv/.gitignore
index eb54f92488d7b2..7536abd54970a2 100644
--- a/deps/uv/.gitignore
+++ b/deps/uv/.gitignore
@@ -39,6 +39,7 @@ Makefile.in
# Generated by gyp for android
*.target.mk
+/android-toolchain
/out/
/build/gyp
diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS
index 4ef241092701d2..3562bb81698b66 100644
--- a/deps/uv/AUTHORS
+++ b/deps/uv/AUTHORS
@@ -299,3 +299,12 @@ Barnabas Gema
Romain Caire
Robert Ayrapetyan
Refael Ackermann
+André Klitzing
+Matthew Taylor
+CurlyMoo
+XadillaX
+Anticrisis
+Jacob Segal
+Maciej Szeptuch (Neverous)
+Joel Winarske
+Gergely Nagy
diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog
index 67c99df82eaee7..d0b55750172152 100644
--- a/deps/uv/ChangeLog
+++ b/deps/uv/ChangeLog
@@ -1,3 +1,118 @@
+2017.09.07, Version 1.14.1 (Stable), b0f9fb2a07a5e638b1580fe9a42a356c3ab35f37
+
+Changes since version 1.14.0:
+
+* fs, win: add support for user symlinks (Bartosz Sosnowski)
+
+* cygwin: include uv-posix.h header (Joel Winarske)
+
+* zos: fix semaphore initialization (jBarz)
+
+* zos: improve loop_count benchmark performance (jBarz)
+
+* zos, test: flush out the oob data in callback (jBarz)
+
+* unix,win: check for bad flags in uv_fs_copyfile() (cjihrig)
+
+* unix: modify argv[0] when process title is set (Matthew Taylor)
+
+* unix: don't use req->loop in uv__fs_copyfile() (cjihrig)
+
+* doc: fix a trivial typo (Vladimír Čunát)
+
+* android: fix uv_cond_timedwait on API level < 21 (Gergely Nagy)
+
+* win: add uv__once_init() calls (Bartosz Sosnowski)
+
+* unix,windows: init all requests in fs calls (cjihrig)
+
+* unix,windows: return UV_EINVAL on NULL fs reqs (cjihrig)
+
+* windows: add POST macro to fs functions (cjihrig)
+
+* unix: handle partial sends in uv_fs_copyfile() (A. Hauptmann)
+
+* Revert "win, test: fix double close in test runner" (Bartosz Sosnowski)
+
+* win, test: remove surplus CloseHandle (Bartosz Sosnowski)
+
+
+2017.08.17, Version 1.14.0 (Stable), e0d31e9e21870f88277746b6d59cf07b977cdfea
+
+Changes since version 1.13.1:
+
+* unix: check for NULL in uv_os_unsetenv for parameter name (André Klitzing)
+
+* doc: add thread safety warning for process title (Matthew Taylor)
+
+* unix: always copy process title into local buffer (Matthew Taylor)
+
+* poll: add support for OOB TCP and GPIO interrupts (CurlyMoo)
+
+* win,build: fix appveyor properly (Refael Ackermann)
+
+* win: include filename in dlopen error message (Ben Noordhuis)
+
+* aix: add netmask, mac address into net interfaces (Gireesh Punathil)
+
+* unix, windows: map EREMOTEIO errno (Ben Noordhuis)
+
+* unix: fix wrong MAC of uv_interface_address (XadillaX)
+
+* win,build: fix building from Windows SDK or VS console (Saúl Ibarra Corretgé)
+
+* github: fix link to help repo in issue template (Ben Noordhuis)
+
+* zos: remove nonexistent include from autotools build (Saúl Ibarra Corretgé)
+
+* misc: remove reference to pthread-fixes.h from LICENSE (Saúl Ibarra Corretgé)
+
+* docs: fix guide source code example paths (Anticrisis)
+
+* android: fix compilation with new NDK versions (Saúl Ibarra Corretgé)
+
+* misc: add android-toolchain to .gitignore (Saúl Ibarra Corretgé)
+
+* win, fs: support unusual reparse points (Bartosz Sosnowski)
+
+* android: fix detection of pthread_condattr_setclock (Saúl Ibarra Corretgé)
+
+* android: remove no longer needed check (Saúl Ibarra Corretgé)
+
+* doc: update instructions for building on Android (Saúl Ibarra Corretgé)
+
+* win, process: support semicolons in PATH variable (Bartosz Sosnowski)
+
+* doc: document uv_async_(init|send) return values (Ben Noordhuis)
+
+* doc: add Android as a tier 3 supported platform (Saúl Ibarra Corretgé)
+
+* unix: add missing semicolon (jBarz)
+
+* win, test: fix double close in test runner (Bartosz Sosnowski)
+
+* doc: update supported windows version baseline (Ben Noordhuis)
+
+* test,zos: skip chown root test (jBarz)
+
+* test,zos: use gid=-1 to test spawn_setgid_fails (jBarz)
+
+* zos: fix hr timer resolution (jBarz)
+
+* android: fix blocking recvmsg due to netlink bug (Jacob Segal)
+
+* zos: read more accurate rss info from RSM (jBarz)
+
+* win: allow bound/connected socket in uv_tcp_open() (Maciej Szeptuch
+ (Neverous))
+
+* doc: differentiate SmartOS and SunOS support (cjihrig)
+
+* unix: make uv_poll_stop() remove fd from pollset (Ben Noordhuis)
+
+* unix, windows: add basic uv_fs_copyfile() (cjihrig)
+
+
2017.07.07, Version 1.13.1 (Stable), 2bb4b68758f07cd8617838e68c44c125bc567ba6
Changes since version 1.13.0:
diff --git a/deps/uv/LICENSE b/deps/uv/LICENSE
index 41ba44c2857a49..28f17339e29ca6 100644
--- a/deps/uv/LICENSE
+++ b/deps/uv/LICENSE
@@ -62,8 +62,8 @@ The externally maintained libraries used by libuv are:
- stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
clause BSD license.
- - pthread-fixes.h, pthread-fixes.c, copyright Google Inc. and Sony Mobile
- Communications AB. Three clause BSD license.
+ - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
+ Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am
index 404674baf211d5..b94fdd63b58eea 100644
--- a/deps/uv/Makefile.am
+++ b/deps/uv/Makefile.am
@@ -169,6 +169,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-env-vars.c \
test/test-error.c \
test/test-fail-always.c \
+ test/test-fs-copyfile.c \
test/test-fs-event.c \
test/test-fs-poll.c \
test/test-fs.c \
@@ -212,10 +213,11 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-pipe-close-stdout-read-stdin.c \
test/test-pipe-set-non-blocking.c \
test/test-platform-output.c \
+ test/test-poll.c \
test/test-poll-close.c \
test/test-poll-close-doesnt-corrupt-stack.c \
test/test-poll-closesocket.c \
- test/test-poll.c \
+ test/test-poll-oob.c \
test/test-process-title.c \
test/test-queue-foreach-delete.c \
test/test-ref.c \
@@ -333,11 +335,11 @@ if ANDROID
include_HEADERS += include/android-ifaddrs.h \
include/pthread-barrier.h
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
- src/unix/pthread-fixes.c \
- src/unix/pthread-barrier.c
+ src/unix/pthread-fixes.c
endif
if CYGWIN
+include_HEADERS += include/uv-posix.h
libuv_la_CFLAGS += -D_GNU_SOURCE
libuv_la_SOURCES += src/unix/cygwin.c \
src/unix/bsd-ifaddrs.c \
@@ -360,8 +362,7 @@ libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
src/unix/darwin-proctitle.c \
src/unix/fsevents.c \
src/unix/kqueue.c \
- src/unix/proctitle.c \
- src/unix/pthread-barrier.c
+ src/unix/proctitle.c
test_run_tests_LDFLAGS += -lutil
endif
@@ -436,7 +437,7 @@ libuv_la_SOURCES += src/unix/no-proctitle.c \
endif
if OS390
-include_HEADERS += include/pthread-fixes.h include/pthread-barrier.h
+include_HEADERS += include/pthread-barrier.h
libuv_la_CFLAGS += -D_UNIX03_THREADS \
-D_UNIX03_SOURCE \
-D_OPEN_SYS_IF_EXT=1 \
@@ -453,7 +454,6 @@ libuv_la_CFLAGS += -D_UNIX03_THREADS \
-qFLOAT=IEEE
libuv_la_LDFLAGS += -qXPLINK
libuv_la_SOURCES += src/unix/pthread-fixes.c \
- src/unix/pthread-barrier.c \
src/unix/no-fsevents.c \
src/unix/os390.c \
src/unix/os390-syscalls.c \
diff --git a/deps/uv/README.md b/deps/uv/README.md
index 372d514e049c39..733171be085ab5 100644
--- a/deps/uv/README.md
+++ b/deps/uv/README.md
@@ -267,7 +267,14 @@ Make sure that you specify the architecture you wish to build for in the
Run:
```bash
-$ source ./android-configure NDK_PATH gyp
+$ source ./android-configure NDK_PATH gyp [API_LEVEL]
+$ make -C out
+```
+
+The default API level is 24, but a different one can be selected as follows:
+
+```bash
+$ source ./android-configure ~/android-ndk-r15b gyp 21
$ make -C out
```
diff --git a/deps/uv/SUPPORTED_PLATFORMS.md b/deps/uv/SUPPORTED_PLATFORMS.md
index 3a000c5dd25e35..08fd5f4a9a1100 100644
--- a/deps/uv/SUPPORTED_PLATFORMS.md
+++ b/deps/uv/SUPPORTED_PLATFORMS.md
@@ -4,13 +4,15 @@
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | |
| macOS | Tier 1 | macOS >= 10.7 | |
-| Windows | Tier 1 | Windows >= XP SP1 | MSVC 2008 and later are supported |
+| Windows | Tier 1 | Windows >= 8.1 | MSVC 2008 and later are supported |
| FreeBSD | Tier 1 | >= 9 (see note) | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
| Linux with musl | Tier 2 | musl >= 1.0 | |
-| SunOS | Tier 2 | Solaris 121 and later | Maintainers: @libuv/sunos |
+| SmartOS | Tier 2 | >= 14.4 | Maintainers: @libuv/smartos |
+| Android | Tier 3 | NDK >= r15b | |
| MinGW | Tier 3 | MinGW32 and MinGW-w64 | |
+| SunOS | Tier 3 | Solaris 121 and later | |
| Other | Tier 3 | N/A | |
#### Note on FreeBSD 9
diff --git a/deps/uv/android-configure b/deps/uv/android-configure
index 7ffc035c1bfbfc..b5c11cd40c6873 100755
--- a/deps/uv/android-configure
+++ b/deps/uv/android-configure
@@ -2,17 +2,20 @@
export TOOLCHAIN=$PWD/android-toolchain
mkdir -p $TOOLCHAIN
+API=${3:-24}
$1/build/tools/make-standalone-toolchain.sh \
--toolchain=arm-linux-androideabi-4.9 \
--arch=arm \
--install-dir=$TOOLCHAIN \
- --platform=android-21
+ --platform=android-$API \
+ --force
export PATH=$TOOLCHAIN/bin:$PATH
export AR=arm-linux-androideabi-ar
export CC=arm-linux-androideabi-gcc
export CXX=arm-linux-androideabi-g++
export LINK=arm-linux-androideabi-g++
export PLATFORM=android
+export CFLAGS="-D__ANDROID_API__=$API"
if [[ $2 == 'gyp' ]]
then
diff --git a/deps/uv/appveyor.yml b/deps/uv/appveyor.yml
index be90ef7b435c74..986c0d44030e5b 100644
--- a/deps/uv/appveyor.yml
+++ b/deps/uv/appveyor.yml
@@ -1,4 +1,7 @@
-version: v1.13.1.build{build}
+version: v1.14.1.build{build}
+
+init:
+ - git config --global core.autocrlf true
install:
- cinst -y nsis
diff --git a/deps/uv/checksparse.sh b/deps/uv/checksparse.sh
index 9782718a23266c..ae0e5374f5efba 100755
--- a/deps/uv/checksparse.sh
+++ b/deps/uv/checksparse.sh
@@ -96,6 +96,7 @@ test/test-embed.c
test/test-env-vars.c
test/test-error.c
test/test-fail-always.c
+test/test-fs-copyfile.c
test/test-fs-event.c
test/test-fs-poll.c
test/test-fs.c
diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac
index a52cfc622cbb5f..41349a092c85e6 100644
--- a/deps/uv/configure.ac
+++ b/deps/uv/configure.ac
@@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
-AC_INIT([libuv], [1.13.1], [https://github.com/libuv/libuv/issues])
+AC_INIT([libuv], [1.14.1], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
diff --git a/deps/uv/docs/src/async.rst b/deps/uv/docs/src/async.rst
index 5c400458244a86..02e6a58e7838db 100644
--- a/deps/uv/docs/src/async.rst
+++ b/deps/uv/docs/src/async.rst
@@ -35,12 +35,16 @@ API
Initialize the handle. A NULL callback is allowed.
+ :returns: 0 on success, or an error code < 0 on failure.
+
.. note::
Unlike other handle initialization functions, it immediately starts the handle.
.. c:function:: int uv_async_send(uv_async_t* async)
- Wakeup the event loop and call the async handle's callback.
+ Wake up the event loop and call the async handle's callback.
+
+ :returns: 0 on success, or an error code < 0 on failure.
.. note::
It's safe to call this function from any thread. The callback will be called on the
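Both calls now document a 0-or-negative return value; a minimal sketch that checks them, assuming the default loop:

```c
#include <stdio.h>
#include <uv.h>

static void on_async(uv_async_t *handle) {
  (void)handle;
  printf("woken up\n");
}

int main(void) {
  uv_loop_t *loop = uv_default_loop();
  uv_async_t async;
  int rc;

  rc = uv_async_init(loop, &async, on_async);  /* also starts the handle */
  if (rc != 0)
    fprintf(stderr, "uv_async_init: %s\n", uv_strerror(rc));

  rc = uv_async_send(&async);                  /* safe from any thread */
  if (rc != 0)
    fprintf(stderr, "uv_async_send: %s\n", uv_strerror(rc));

  return uv_run(loop, UV_RUN_DEFAULT);
}
```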
diff --git a/deps/uv/docs/src/fs.rst b/deps/uv/docs/src/fs.rst
index 3f766e393f2fa7..2db915bc9e6706 100644
--- a/deps/uv/docs/src/fs.rst
+++ b/deps/uv/docs/src/fs.rst
@@ -92,7 +92,8 @@ Data types
UV_FS_READLINK,
UV_FS_CHOWN,
UV_FS_FCHOWN,
- UV_FS_REALPATH
+ UV_FS_REALPATH,
+ UV_FS_COPYFILE
} uv_fs_type;
.. c:type:: uv_dirent_t
@@ -241,6 +242,22 @@ API
Equivalent to :man:`ftruncate(2)`.
+.. c:function:: int uv_fs_copyfile(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, int flags, uv_fs_cb cb)
+
+ Copies a file from `path` to `new_path`. Supported `flags` are described below.
+
+ - `UV_FS_COPYFILE_EXCL`: If present, `uv_fs_copyfile()` will fail with
+ `UV_EEXIST` if the destination path already exists. The default behavior
+ is to overwrite the destination if it exists.
+
+ .. warning::
+ If the destination path is created, but an error occurs while copying
+ the data, then the destination path is removed. There is a brief window
+ of time between closing and removing the file where another process
+ could access the file.
+
+ .. versionadded:: 1.14.0
+
.. c:function:: int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file out_fd, uv_file in_fd, int64_t in_offset, size_t length, uv_fs_cb cb)
Limited equivalent to :man:`sendfile(2)`.
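A minimal sketch of the documented copy API in synchronous form (NULL loop and callback); the file names are placeholders:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_fs_t req;
  /* Fails with UV_EEXIST if "dst.txt" already exists, because of
   * UV_FS_COPYFILE_EXCL; drop the flag to overwrite instead. */
  int rc = uv_fs_copyfile(NULL, &req, "src.txt", "dst.txt",
                          UV_FS_COPYFILE_EXCL, NULL);
  if (rc < 0)
    fprintf(stderr, "copyfile failed: %s\n", uv_strerror(rc));
  uv_fs_req_cleanup(&req);
  return rc < 0;
}
```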
diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst
index 9d7c3617e864bb..3fea708a8d38e2 100644
--- a/deps/uv/docs/src/misc.rst
+++ b/deps/uv/docs/src/misc.rst
@@ -186,17 +186,24 @@ API
.. c:function:: int uv_get_process_title(char* buffer, size_t size)
- Gets the title of the current process. If `buffer` is `NULL` or `size` is
- zero, `UV_EINVAL` is returned. If `size` cannot accommodate the process
- title and terminating `NULL` character, the function returns `UV_ENOBUFS`.
+ Gets the title of the current process. You *must* call `uv_setup_args`
+ before calling this function. If `buffer` is `NULL` or `size` is zero,
+ `UV_EINVAL` is returned. If `size` cannot accommodate the process title and
+ terminating `NULL` character, the function returns `UV_ENOBUFS`.
+
+ .. warning::
+ `uv_get_process_title` is not thread safe on any platform except Windows.
.. c:function:: int uv_set_process_title(const char* title)
- Sets the current process title. On platforms with a fixed size buffer for the
- process title the contents of `title` will be copied to the buffer and
- truncated if larger than the available space. Other platforms will return
- `UV_ENOMEM` if they cannot allocate enough space to duplicate the contents of
- `title`.
+ Sets the current process title. You *must* call `uv_setup_args` before
+ calling this function. On platforms with a fixed size buffer for the process
+ title the contents of `title` will be copied to the buffer and truncated if
+ larger than the available space. Other platforms will return `UV_ENOMEM` if
+ they cannot allocate enough space to duplicate the contents of `title`.
+
+ .. warning::
+ `uv_set_process_title` is not thread safe on any platform except Windows.
.. c:function:: int uv_resident_set_memory(size_t* rss)
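To satisfy the updated contract, call `uv_setup_args()` before either title function and keep the calls on one thread, since they are only thread safe on Windows. A minimal sketch under those assumptions:

```c
#include <stdio.h>
#include <uv.h>

int main(int argc, char **argv) {
  char title[64];

  argv = uv_setup_args(argc, argv);   /* required before the title calls */

  if (uv_set_process_title("my-worker") != 0)
    fprintf(stderr, "could not set process title\n");

  if (uv_get_process_title(title, sizeof(title)) == 0)
    printf("running as: %s\n", title);

  return 0;
}
```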
diff --git a/deps/uv/docs/src/poll.rst b/deps/uv/docs/src/poll.rst
index 004ff4b92e5ea5..aba8915886bb5f 100644
--- a/deps/uv/docs/src/poll.rst
+++ b/deps/uv/docs/src/poll.rst
@@ -54,7 +54,8 @@ Data types
enum uv_poll_event {
UV_READABLE = 1,
UV_WRITABLE = 2,
- UV_DISCONNECT = 4
+ UV_DISCONNECT = 4,
+ UV_PRIORITIZED = 8
};
@@ -84,10 +85,13 @@ API
.. c:function:: int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb)
- Starts polling the file descriptor. `events` is a bitmask consisting made up
- of UV_READABLE, UV_WRITABLE and UV_DISCONNECT. As soon as an event is detected
- the callback will be called with `status` set to 0, and the detected events set on the
- `events` field.
+ Starts polling the file descriptor. `events` is a bitmask made up of
+ UV_READABLE, UV_WRITABLE, UV_PRIORITIZED and UV_DISCONNECT. As soon as an
+ event is detected the callback will be called with `status` set to 0, and the
+ detected events set on the `events` field.
+
+ The UV_PRIORITIZED event is used to watch for sysfs interrupts or TCP out-of-band
+ messages.
The UV_DISCONNECT event is optional in the sense that it may not be
reported and the user is free to ignore it, but it can help optimize the shutdown
@@ -108,6 +112,7 @@ API
on the `events` field in the callback.
.. versionchanged:: 1.9.0 Added the UV_DISCONNECT event.
+ .. versionchanged:: 1.14.0 Added the UV_PRIORITIZED event.
.. c:function:: int uv_poll_stop(uv_poll_t* poll)
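A hedged sketch of watching the new `UV_PRIORITIZED` event next to `UV_READABLE`; the file descriptor and its setup (a connected TCP socket or a sysfs attribute) are assumed to exist:

```c
#include <stdio.h>
#include <uv.h>

static void on_poll(uv_poll_t *handle, int status, int events) {
  (void)handle;
  if (status < 0) {
    fprintf(stderr, "poll error: %s\n", uv_strerror(status));
    return;
  }
  if (events & UV_PRIORITIZED)
    printf("out-of-band / sysfs interrupt data is ready\n");
  if (events & UV_READABLE)
    printf("regular data is ready\n");
}

/* `fd` is assumed to be owned by the caller and already set up. */
static int watch_fd(uv_loop_t *loop, uv_poll_t *handle, int fd) {
  int rc = uv_poll_init(loop, handle, fd);
  if (rc != 0)
    return rc;
  return uv_poll_start(handle, UV_READABLE | UV_PRIORITIZED, on_poll);
}
```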
diff --git a/deps/uv/docs/src/stream.rst b/deps/uv/docs/src/stream.rst
index de492b3578152b..1f4e87e63a9db3 100644
--- a/deps/uv/docs/src/stream.rst
+++ b/deps/uv/docs/src/stream.rst
@@ -6,7 +6,7 @@
Stream handles provide an abstraction of a duplex communication channel.
:c:type:`uv_stream_t` is an abstract type, libuv provides 3 stream implementations
-in the for of :c:type:`uv_tcp_t`, :c:type:`uv_pipe_t` and :c:type:`uv_tty_t`.
+in the form of :c:type:`uv_tcp_t`, :c:type:`uv_pipe_t` and :c:type:`uv_tty_t`.
Data types
diff --git a/deps/uv/include/pthread-barrier.h b/deps/uv/include/pthread-barrier.h
index 900ebedd308b26..07db9b8a6a27e0 100644
--- a/deps/uv/include/pthread-barrier.h
+++ b/deps/uv/include/pthread-barrier.h
@@ -23,6 +23,7 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#endif
#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345
+#define UV__PTHREAD_BARRIER_FALLBACK 1
/*
* To maintain ABI compatibility with
diff --git a/deps/uv/include/uv-errno.h b/deps/uv/include/uv-errno.h
index f1371517cc32fe..32bbc5177e4b2d 100644
--- a/deps/uv/include/uv-errno.h
+++ b/deps/uv/include/uv-errno.h
@@ -416,4 +416,10 @@
# define UV__EHOSTDOWN (-4031)
#endif
+#if defined(EREMOTEIO) && !defined(_WIN32)
+# define UV__EREMOTEIO (-EREMOTEIO)
+#else
+# define UV__EREMOTEIO (-4030)
+#endif
+
#endif /* UV_ERRNO_H_ */
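The new `UV__EREMOTEIO` mapping surfaces as the public `UV_EREMOTEIO` code added to the error map in `uv.h` below; a tiny illustrative translation to name and message:

```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  /* Prints something like "EREMOTEIO: remote I/O error". */
  printf("%s: %s\n", uv_err_name(UV_EREMOTEIO), uv_strerror(UV_EREMOTEIO));
  return 0;
}
```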
diff --git a/deps/uv/include/uv-version.h b/deps/uv/include/uv-version.h
index c80c40ea7f1bb0..9b891499eb5f4a 100644
--- a/deps/uv/include/uv-version.h
+++ b/deps/uv/include/uv-version.h
@@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
-#define UV_VERSION_MINOR 13
+#define UV_VERSION_MINOR 14
#define UV_VERSION_PATCH 1
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h
index f076094ccdc24c..eac63dde445829 100644
--- a/deps/uv/include/uv.h
+++ b/deps/uv/include/uv.h
@@ -140,6 +140,7 @@ extern "C" {
XX(ENXIO, "no such device or address") \
XX(EMLINK, "too many links") \
XX(EHOSTDOWN, "host is down") \
+ XX(EREMOTEIO, "remote I/O error") \
#define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \
@@ -719,7 +720,8 @@ struct uv_poll_s {
enum uv_poll_event {
UV_READABLE = 1,
UV_WRITABLE = 2,
- UV_DISCONNECT = 4
+ UV_DISCONNECT = 4,
+ UV_PRIORITIZED = 8
};
UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd);
@@ -1112,7 +1114,8 @@ typedef enum {
UV_FS_READLINK,
UV_FS_CHOWN,
UV_FS_FCHOWN,
- UV_FS_REALPATH
+ UV_FS_REALPATH,
+ UV_FS_COPYFILE
} uv_fs_type;
/* uv_fs_t is a subclass of uv_req_t. */
@@ -1157,6 +1160,18 @@ UV_EXTERN int uv_fs_write(uv_loop_t* loop,
unsigned int nbufs,
int64_t offset,
uv_fs_cb cb);
+/*
+ * This flag can be used with uv_fs_copyfile() to return an error if the
+ * destination already exists.
+ */
+#define UV_FS_COPYFILE_EXCL 0x0001
+
+UV_EXTERN int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb);
UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
diff --git a/deps/uv/src/unix/aix.c b/deps/uv/src/unix/aix.c
index 426f7f4735fd39..56a8f4ffe753e8 100644
--- a/deps/uv/src/unix/aix.c
+++ b/deps/uv/src/unix/aix.c
@@ -1108,9 +1108,10 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
uv_interface_address_t* address;
- int sockfd, size = 1;
+ int sockfd, inet6, size = 1;
struct ifconf ifc;
struct ifreq *ifr, *p, flg;
+ struct sockaddr_dl* sa_addr;
*count = 0;
@@ -1174,6 +1175,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
p->ifr_addr.sa_family == AF_INET))
continue;
+ inet6 = (p->ifr_addr.sa_family == AF_INET6);
+
memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
uv__close(sockfd);
@@ -1187,13 +1190,23 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
address->name = uv__strdup(p->ifr_name);
- if (p->ifr_addr.sa_family == AF_INET6) {
+ if (inet6)
address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
- } else {
+ else
address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+
+ sa_addr = (struct sockaddr_dl*) &p->ifr_addr;
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+
+ if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) {
+ uv__close(sockfd);
+ return -ENOSYS;
}
- /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
+ if (inet6)
+ address->netmask.netmask6 = *((struct sockaddr_in6*) &p->ifr_addr);
+ else
+ address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
diff --git a/deps/uv/src/unix/android-ifaddrs.c b/deps/uv/src/unix/android-ifaddrs.c
index 30f681b7d04a41..1a842ced48f9b5 100644
--- a/deps/uv/src/unix/android-ifaddrs.c
+++ b/deps/uv/src/unix/android-ifaddrs.c
@@ -43,9 +43,10 @@ typedef struct NetlinkList
unsigned int m_size;
} NetlinkList;
-static int netlink_socket(void)
+static int netlink_socket(pid_t *p_pid)
{
struct sockaddr_nl l_addr;
+ socklen_t l_len;
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if(l_socket < 0)
@@ -61,6 +62,14 @@ static int netlink_socket(void)
return -1;
}
+ l_len = sizeof(l_addr);
+ if(getsockname(l_socket, (struct sockaddr *)&l_addr, &l_len) < 0)
+ {
+ close(l_socket);
+ return -1;
+ }
+ *p_pid = l_addr.nl_pid;
+
return l_socket;
}
@@ -128,7 +137,7 @@ static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
}
}
-static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done)
+static struct nlmsghdr *getNetlinkResponse(int p_socket, pid_t p_pid, int *p_size, int *p_done)
{
size_t l_size = 4096;
void *l_buffer = NULL;
@@ -153,11 +162,10 @@ static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_don
}
if(l_read >= 0)
{
- pid_t l_pid = getpid();
struct nlmsghdr *l_hdr;
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
{
- if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
@@ -207,7 +215,7 @@ static void freeResultList(NetlinkList *p_list)
}
}
-static NetlinkList *getResultList(int p_socket, int p_request)
+static NetlinkList *getResultList(int p_socket, int p_request, pid_t p_pid)
{
int l_size;
int l_done;
@@ -227,7 +235,7 @@ static NetlinkList *getResultList(int p_socket, int p_request)
{
NetlinkList *l_item;
- struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done);
+ struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, p_pid, &l_size, &l_done);
/* Error */
if(!l_hdr)
{
@@ -578,18 +586,17 @@ static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList,
return 0;
}
-static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
+static int interpretLinks(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
{
int l_numLinks = 0;
- pid_t l_pid = getpid();
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
- if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
@@ -612,16 +619,15 @@ static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifadd
return l_numLinks;
}
-static int interpretAddrs(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
+static int interpretAddrs(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
{
- pid_t l_pid = getpid();
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
- if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
@@ -648,6 +654,7 @@ int getifaddrs(struct ifaddrs **ifap)
int l_socket;
int l_result;
int l_numLinks;
+ pid_t l_pid;
NetlinkList *l_linkResults;
NetlinkList *l_addrResults;
@@ -657,20 +664,20 @@ int getifaddrs(struct ifaddrs **ifap)
}
*ifap = NULL;
- l_socket = netlink_socket();
+ l_socket = netlink_socket(&l_pid);
if(l_socket < 0)
{
return -1;
}
- l_linkResults = getResultList(l_socket, RTM_GETLINK);
+ l_linkResults = getResultList(l_socket, RTM_GETLINK, l_pid);
if(!l_linkResults)
{
close(l_socket);
return -1;
}
- l_addrResults = getResultList(l_socket, RTM_GETADDR);
+ l_addrResults = getResultList(l_socket, RTM_GETADDR, l_pid);
if(!l_addrResults)
{
close(l_socket);
@@ -679,8 +686,8 @@ int getifaddrs(struct ifaddrs **ifap)
}
l_result = 0;
- l_numLinks = interpretLinks(l_socket, l_linkResults, ifap);
- if(l_numLinks == -1 || interpretAddrs(l_socket, l_addrResults, ifap, l_numLinks) == -1)
+ l_numLinks = interpretLinks(l_socket, l_pid, l_linkResults, ifap);
+ if(l_numLinks == -1 || interpretAddrs(l_socket, l_pid, l_addrResults, ifap, l_numLinks) == -1)
{
l_result = -1;
}
diff --git a/deps/uv/src/unix/bsd-ifaddrs.c b/deps/uv/src/unix/bsd-ifaddrs.c
index 414789451ab3d6..ffcf156440d559 100644
--- a/deps/uv/src/unix/bsd-ifaddrs.c
+++ b/deps/uv/src/unix/bsd-ifaddrs.c
@@ -31,11 +31,18 @@
#include
#endif
-static int uv__ifaddr_exclude(struct ifaddrs *ent) {
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
if (ent->ifa_addr == NULL)
return 1;
+ /*
+ * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just see whether `sa_family`
+ * equals to `AF_LINK` or not. Otherwise, the result depends on the operation
+ * system with `AF_LINK` or `PF_INET`.
+ */
+ if (exclude_type == UV__EXCLUDE_IFPHYS)
+ return (ent->ifa_addr->sa_family != AF_LINK);
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
/*
* On BSD getifaddrs returns information related to the raw underlying
@@ -63,7 +70,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
(*count)++;
}
@@ -78,7 +85,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
address->name = uv__strdup(ent->ifa_name);
@@ -102,7 +109,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
continue;
address = *addresses;
diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c
index 4c744925e29607..bee641cb440410 100644
--- a/deps/uv/src/unix/core.c
+++ b/deps/uv/src/unix/core.c
@@ -838,7 +838,7 @@ void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
- assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
assert(0 != events);
assert(w->fd >= 0);
assert(w->fd < INT_MAX);
@@ -866,7 +866,7 @@ void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
- assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
assert(0 != events);
if (w->fd == -1)
@@ -898,7 +898,7 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
- uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP);
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
QUEUE_REMOVE(&w->pending_queue);
/* Remove stale events for this file descriptor */
@@ -913,7 +913,7 @@ void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
int uv__io_active(const uv__io_t* w, unsigned int events) {
- assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
assert(0 != events);
return 0 != (w->pevents & events);
}
@@ -1292,6 +1292,9 @@ int uv_os_setenv(const char* name, const char* value) {
int uv_os_unsetenv(const char* name) {
+ if (name == NULL)
+ return -EINVAL;
+
if (unsetenv(name) != 0)
return -errno;
diff --git a/deps/uv/src/unix/freebsd.c b/deps/uv/src/unix/freebsd.c
index c3c4902be9a568..dba94298d1c06d 100644
--- a/deps/uv/src/unix/freebsd.c
+++ b/deps/uv/src/unix/freebsd.c
@@ -160,9 +160,13 @@ char** uv_setup_args(int argc, char** argv) {
int uv_set_process_title(const char* title) {
int oid[4];
+ char* new_title;
+ new_title = uv__strdup(title);
+ if (process_title == NULL)
+ return -ENOMEM;
uv__free(process_title);
- process_title = uv__strdup(title);
+ process_title = new_title;
oid[0] = CTL_KERN;
oid[1] = KERN_PROC;
diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c
index f9513ea55a03eb..5a172cc7805171 100644
--- a/deps/uv/src/unix/fs.c
+++ b/deps/uv/src/unix/fs.c
@@ -60,8 +60,14 @@
# include
#endif
+#if defined(__APPLE__)
+# include <copyfile.h>
+#endif
+
#define INIT(subtype) \
do { \
+ if (req == NULL) \
+ return -EINVAL; \
req->type = UV_FS; \
if (cb != NULL) \
uv__req_init(loop, req, UV_FS); \
@@ -766,6 +772,112 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
return r;
}
+static ssize_t uv__fs_copyfile(uv_fs_t* req) {
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+ /* On macOS, use the native copyfile(3). */
+ copyfile_flags_t flags;
+
+ flags = COPYFILE_ALL;
+
+ if (req->flags & UV_FS_COPYFILE_EXCL)
+ flags |= COPYFILE_EXCL;
+
+ return copyfile(req->path, req->new_path, NULL, flags);
+#else
+ uv_fs_t fs_req;
+ uv_file srcfd;
+ uv_file dstfd;
+ struct stat statsbuf;
+ int dst_flags;
+ int result;
+ int err;
+ size_t bytes_to_send;
+ int64_t in_offset;
+
+ dstfd = -1;
+
+ /* Open the source file. */
+ srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (srcfd < 0)
+ return srcfd;
+
+ /* Get the source file's mode. */
+ if (fstat(srcfd, &statsbuf)) {
+ err = -errno;
+ goto out;
+ }
+
+ dst_flags = O_WRONLY | O_CREAT;
+
+ if (req->flags & UV_FS_COPYFILE_EXCL)
+ dst_flags |= O_EXCL;
+
+ /* Open the destination file. */
+ dstfd = uv_fs_open(NULL,
+ &fs_req,
+ req->new_path,
+ dst_flags,
+ statsbuf.st_mode,
+ NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (dstfd < 0) {
+ err = dstfd;
+ goto out;
+ }
+
+ bytes_to_send = statsbuf.st_size;
+ in_offset = 0;
+ while (bytes_to_send != 0) {
+ err = uv_fs_sendfile(NULL,
+ &fs_req,
+ dstfd,
+ srcfd,
+ in_offset,
+ bytes_to_send,
+ NULL);
+ uv_fs_req_cleanup(&fs_req);
+ if (err < 0)
+ break;
+ bytes_to_send -= fs_req.result;
+ in_offset += fs_req.result;
+ }
+
+out:
+ if (err < 0)
+ result = err;
+ else
+ result = 0;
+
+ /* Close the source file. */
+ err = uv__close_nocheckstdio(srcfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Close the destination file if it is open. */
+ if (dstfd >= 0) {
+ err = uv__close_nocheckstdio(dstfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Remove the destination file if something went wrong. */
+ if (result != 0) {
+ uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
+ /* Ignore the unlink return value, as an error already happened. */
+ uv_fs_req_cleanup(&fs_req);
+ }
+ }
+
+ return result;
+#endif
+}
+
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
dst->st_dev = src->st_dev;
dst->st_mode = src->st_mode;
@@ -946,6 +1058,7 @@ static void uv__fs_work(struct uv__work* w) {
X(CHMOD, chmod(req->path, req->mode));
X(CHOWN, chown(req->path, req->uid, req->gid));
X(CLOSE, close(req->file));
+ X(COPYFILE, uv__fs_copyfile(req));
X(FCHMOD, fchmod(req->file, req->mode));
X(FCHOWN, fchown(req->file, req->uid, req->gid));
X(FDATASYNC, uv__fs_fdatasync(req));
@@ -1186,10 +1299,11 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
unsigned int nbufs,
int64_t off,
uv_fs_cb cb) {
+ INIT(READ);
+
if (bufs == NULL || nbufs == 0)
return -EINVAL;
- INIT(READ);
req->file = file;
req->nbufs = nbufs;
@@ -1324,10 +1438,11 @@ int uv_fs_write(uv_loop_t* loop,
unsigned int nbufs,
int64_t off,
uv_fs_cb cb) {
+ INIT(WRITE);
+
if (bufs == NULL || nbufs == 0)
return -EINVAL;
- INIT(WRITE);
req->file = file;
req->nbufs = nbufs;
@@ -1349,6 +1464,9 @@ int uv_fs_write(uv_loop_t* loop,
void uv_fs_req_cleanup(uv_fs_t* req) {
+ if (req == NULL)
+ return;
+
/* Only necessary for asychronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the
@@ -1367,3 +1485,20 @@ void uv_fs_req_cleanup(uv_fs_t* req) {
uv__free(req->ptr);
req->ptr = NULL;
}
+
+
+int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(COPYFILE);
+
+ if (flags & ~UV_FS_COPYFILE_EXCL)
+ return -EINVAL;
+
+ PATH2;
+ req->flags = flags;
+ POST;
+}
diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h
index 2e3afa6c856827..c0898d982e9815 100644
--- a/deps/uv/src/unix/internal.h
+++ b/deps/uv/src/unix/internal.h
@@ -110,6 +110,12 @@ int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
# define UV__POLLRDHUP 0x2000
#endif
+#ifdef POLLPRI
+# define UV__POLLPRI POLLPRI
+#else
+# define UV__POLLPRI 0
+#endif
+
#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
/*
* It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
@@ -145,6 +151,12 @@ enum {
UV_LOOP_BLOCK_SIGPROF = 1
};
+/* flags of excluding ifaddr */
+enum {
+ UV__EXCLUDE_IFPHYS,
+ UV__EXCLUDE_IFADDR
+};
+
typedef enum {
UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c
index 6bc60bbe46885e..300bac07c3322f 100644
--- a/deps/uv/src/unix/kqueue.c
+++ b/deps/uv/src/unix/kqueue.c
@@ -34,6 +34,17 @@
#include
#include
+/*
+ * Required on
+ * - Until at least FreeBSD 11.0
+ * - Older versions of Mac OS X
+ *
+ * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
+ */
+#ifndef EV_OOBAND
+#define EV_OOBAND EV_FLAG1
+#endif
+
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
@@ -166,6 +177,16 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
}
+ if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
+ EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
w->events = w->pevents;
}
@@ -275,6 +296,20 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
}
+ if (ev->filter == EV_OOBAND) {
+ if (w->pevents & UV__POLLPRI) {
+ revents |= UV__POLLPRI;
+ w->rcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
if (ev->filter == EVFILT_WRITE) {
if (w->pevents & POLLOUT) {
revents |= POLLOUT;
diff --git a/deps/uv/src/unix/linux-core.c b/deps/uv/src/unix/linux-core.c
index 2866e938545cb3..4d480ce10a2585 100644
--- a/deps/uv/src/unix/linux-core.c
+++ b/deps/uv/src/unix/linux-core.c
@@ -388,7 +388,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
- pe->events |= w->pevents & (POLLIN | POLLOUT);
+ pe->events |= w->pevents & (POLLIN | POLLOUT | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
@@ -837,7 +837,7 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
uv__free(cpu_infos);
}
-static int uv__ifaddr_exclude(struct ifaddrs *ent) {
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
if (ent->ifa_addr == NULL)
@@ -847,8 +847,8 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent) {
* devices. We're not interested in this information yet.
*/
if (ent->ifa_addr->sa_family == PF_PACKET)
- return 1;
- return 0;
+ return exclude_type;
+ return !exclude_type;
}
int uv_interface_addresses(uv_interface_address_t** addresses,
@@ -869,7 +869,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
(*count)++;
@@ -887,7 +887,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
address->name = uv__strdup(ent->ifa_name);
@@ -911,7 +911,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
- if (uv__ifaddr_exclude(ent))
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
continue;
address = *addresses;
diff --git a/deps/uv/src/unix/netbsd.c b/deps/uv/src/unix/netbsd.c
index 9b5546b7e6959b..c54c04df28c201 100644
--- a/deps/uv/src/unix/netbsd.c
+++ b/deps/uv/src/unix/netbsd.c
@@ -124,9 +124,13 @@ char** uv_setup_args(int argc, char** argv) {
int uv_set_process_title(const char* title) {
- if (process_title) uv__free(process_title);
+ char* new_title;
- process_title = uv__strdup(title);
+ new_title = uv__strdup(title);
+ if (process_title == NULL)
+ return -ENOMEM;
+ uv__free(process_title);
+ process_title = new_title;
setproctitle("%s", title);
return 0;
diff --git a/deps/uv/src/unix/openbsd.c b/deps/uv/src/unix/openbsd.c
index 56f0af15c3ef3f..d1c90289e5691e 100644
--- a/deps/uv/src/unix/openbsd.c
+++ b/deps/uv/src/unix/openbsd.c
@@ -146,8 +146,13 @@ char** uv_setup_args(int argc, char** argv) {
int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ new_title = uv__strdup(title);
+ if (process_title == NULL)
+ return -ENOMEM;
uv__free(process_title);
- process_title = uv__strdup(title);
+ process_title = new_title;
setproctitle("%s", title);
return 0;
}
diff --git a/deps/uv/src/unix/os390-syscalls.c b/deps/uv/src/unix/os390-syscalls.c
index 7edf2358d43a49..08623f4eafa137 100644
--- a/deps/uv/src/unix/os390-syscalls.c
+++ b/deps/uv/src/unix/os390-syscalls.c
@@ -183,33 +183,22 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
int pollret;
int reventcount;
- uv_mutex_lock(&global_epoll_lock);
- uv_mutex_unlock(&global_epoll_lock);
size = lst->size;
pfds = lst->items;
pollret = poll(pfds, size, timeout);
- if(pollret == -1)
+ if (pollret <= 0)
return pollret;
reventcount = 0;
- for (int i = 0; i < lst->size && i < maxevents; ++i) {
+ for (int i = 0;
+ i < lst->size && i < maxevents && reventcount < pollret; ++i) {
struct epoll_event ev;
- ev.events = 0;
- ev.fd = pfds[i].fd;
- if(!pfds[i].revents)
+ if (pfds[i].fd == -1 || pfds[i].revents == 0)
continue;
- if(pfds[i].revents & POLLRDNORM)
- ev.events = ev.events | POLLIN;
-
- if(pfds[i].revents & POLLWRNORM)
- ev.events = ev.events | POLLOUT;
-
- if(pfds[i].revents & POLLHUP)
- ev.events = ev.events | POLLHUP;
-
- pfds[i].revents = 0;
+ ev.fd = pfds[i].fd;
+ ev.events = pfds[i].revents;
events[reventcount++] = ev;
}
diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c
index de7df91169ad8f..559970de2c3d59 100644
--- a/deps/uv/src/unix/os390.c
+++ b/deps/uv/src/unix/os390.c
@@ -33,6 +33,7 @@
#endif
#define CVT_PTR 0x10
+#define PSA_PTR 0x00
#define CSD_OFFSET 0x294
/*
@@ -70,6 +71,18 @@
/* CPC model length from the CSRSI Service. */
#define CPCMODEL_LENGTH 16
+/* Pointer to the home (current) ASCB. */
+#define PSAAOLD 0x224
+
+/* Pointer to rsm address space block extension. */
+#define ASCBRSME 0x16C
+
+/*
+ NUMBER OF FRAMES CURRENTLY IN USE BY THIS ADDRESS SPACE.
+ It does not include 2G frames.
+*/
+#define RAXFMCT 0x2C
+
/* Thread Entry constants */
#define PGTH_CURRENT 1
#define PGTH_LEN 26
@@ -77,6 +90,9 @@
#pragma linkage(BPX4GTH, OS)
#pragma linkage(BPX1GTH, OS)
+/* TOD Clock resolution in nanoseconds */
+#define TOD_RES 4.096
+
typedef unsigned data_area_ptr_assign_type;
typedef union {
@@ -122,7 +138,7 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
unsigned long long timestamp;
__stckf(&timestamp);
/* Convert to nanoseconds */
- return timestamp / 10;
+ return timestamp / TOD_RES;
}
@@ -339,13 +355,17 @@ uint64_t uv_get_total_memory(void) {
int uv_resident_set_memory(size_t* rss) {
- W_PSPROC buf;
+ char* psa;
+ char* ascb;
+ char* rax;
+ size_t nframes;
- memset(&buf, 0, sizeof(buf));
- if (w_getpsent(0, &buf, sizeof(W_PSPROC)) == -1)
- return -EINVAL;
+ psa = PSA_PTR;
+ ascb = *(char* __ptr32 *)(psa + PSAAOLD);
+ rax = *(char* __ptr32 *)(ascb + ASCBRSME);
+ nframes = *(unsigned int*)(rax + RAXFMCT);
- *rss = buf.ps_size;
+ *rss = nframes * sysconf(_SC_PAGESIZE);
return 0;
}
@@ -747,9 +767,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
- timeout = real_timeout - timeout;
- if (timeout > 0)
+
+ if (timeout > 0) {
+ timeout = real_timeout - timeout;
continue;
+ }
return;
}
diff --git a/deps/uv/src/unix/poll.c b/deps/uv/src/unix/poll.c
index 370994bd572ceb..816c7dc2eb9e0b 100644
--- a/deps/uv/src/unix/poll.c
+++ b/deps/uv/src/unix/poll.c
@@ -33,8 +33,19 @@ static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
handle = container_of(w, uv_poll_t, io_watcher);
- if (events & POLLERR) {
- uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP);
+ /*
+ * As documented in the kernel source fs/kernfs/file.c #780
+ * poll will return POLLERR|POLLPRI in case of sysfs
+ * polling. This does not happen in case of out-of-band
+ * TCP messages.
+ *
+ * The above is the case on (at least) FreeBSD and Linux.
+ *
+ * So to properly determine a POLLPRI or a POLLERR we need
+ * to check for both.
+ */
+ if ((events & POLLERR) && !(events & UV__POLLPRI)) {
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
uv__handle_stop(handle);
handle->poll_cb(handle, -EBADF, 0);
return;
@@ -43,6 +54,8 @@ static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
pevents = 0;
if (events & POLLIN)
pevents |= UV_READABLE;
+ if (events & UV__POLLPRI)
+ pevents |= UV_PRIORITIZED;
if (events & POLLOUT)
pevents |= UV_WRITABLE;
if (events & UV__POLLRDHUP)
@@ -86,8 +99,9 @@ int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
static void uv__poll_stop(uv_poll_t* handle) {
uv__io_stop(handle->loop,
&handle->io_watcher,
- POLLIN | POLLOUT | UV__POLLRDHUP);
+ POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
uv__handle_stop(handle);
+ uv__platform_invalidate_fd(handle->loop, handle->io_watcher.fd);
}
@@ -101,7 +115,8 @@ int uv_poll_stop(uv_poll_t* handle) {
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
int events;
- assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
+ assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
+ UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
uv__poll_stop(handle);
@@ -112,6 +127,8 @@ int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
events = 0;
if (pevents & UV_READABLE)
events |= POLLIN;
+ if (pevents & UV_PRIORITIZED)
+ events |= UV__POLLPRI;
if (pevents & UV_WRITABLE)
events |= POLLOUT;
if (pevents & UV_DISCONNECT)
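
The poll.c hunks above add a `UV_PRIORITIZED` event (mapped to `POLLPRI`) and stop tearing the watcher down when the kernel reports `POLLERR|POLLPRI` for sysfs-style polling. A minimal sketch of how a caller might use the new flag; the socket setup and the callback bodies are illustrative assumptions, not part of this patch:

```c
#include <uv.h>

/* Illustrative callback: priority (OOB) data is reported separately from
 * normal readability. */
static void poll_cb(uv_poll_t* handle, int status, int events) {
  if (status < 0)
    return;
  if (events & UV_PRIORITIZED) {
    /* e.g. drain the urgent byte with recv(fd, buf, len, MSG_OOB). */
  }
  if (events & UV_READABLE) {
    /* Regular in-band data is available. */
  }
}

/* `sock` is assumed to be an already-created, connected socket. */
int watch_priority(uv_loop_t* loop, uv_os_sock_t sock, uv_poll_t* watcher) {
  int rc = uv_poll_init_socket(loop, watcher, sock);
  if (rc != 0)
    return rc;
  return uv_poll_start(watcher, UV_READABLE | UV_PRIORITIZED, poll_cb);
}
```
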
diff --git a/deps/uv/src/unix/pthread-barrier.c b/deps/uv/src/unix/pthread-barrier.c
deleted file mode 100644
index b6e604d46d825c..00000000000000
--- a/deps/uv/src/unix/pthread-barrier.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
-Copyright (c) 2016, Kari Tristan Helgason
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-#include "uv-common.h"
-#include "pthread-barrier.h"
-
-#include
-#include
-
-/* TODO: support barrier_attr */
-int pthread_barrier_init(pthread_barrier_t* barrier,
- const void* barrier_attr,
- unsigned count) {
- int rc;
- _uv_barrier* b;
-
- if (barrier == NULL || count == 0)
- return EINVAL;
-
- if (barrier_attr != NULL)
- return ENOTSUP;
-
- b = uv__malloc(sizeof(*b));
- if (b == NULL)
- return ENOMEM;
-
- b->in = 0;
- b->out = 0;
- b->threshold = count;
-
- if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
- goto error2;
- if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
- goto error;
-
- barrier->b = b;
- return 0;
-
-error:
- pthread_mutex_destroy(&b->mutex);
-error2:
- uv__free(b);
- return rc;
-}
-
-int pthread_barrier_wait(pthread_barrier_t* barrier) {
- int rc;
- _uv_barrier* b;
-
- if (barrier == NULL || barrier->b == NULL)
- return EINVAL;
-
- b = barrier->b;
- /* Lock the mutex*/
- if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
- return rc;
-
- /* Increment the count. If this is the first thread to reach the threshold,
- wake up waiters, unlock the mutex, then return
- PTHREAD_BARRIER_SERIAL_THREAD. */
- if (++b->in == b->threshold) {
- b->in = 0;
- b->out = b->threshold - 1;
- rc = pthread_cond_signal(&b->cond);
- assert(rc == 0);
-
- pthread_mutex_unlock(&b->mutex);
- return PTHREAD_BARRIER_SERIAL_THREAD;
- }
- /* Otherwise, wait for other threads until in is set to 0,
- then return 0 to indicate this is not the first thread. */
- do {
- if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
- break;
- } while (b->in != 0);
-
- /* mark thread exit */
- b->out--;
- pthread_cond_signal(&b->cond);
- pthread_mutex_unlock(&b->mutex);
- return rc;
-}
-
-int pthread_barrier_destroy(pthread_barrier_t* barrier) {
- int rc;
- _uv_barrier* b;
-
- if (barrier == NULL || barrier->b == NULL)
- return EINVAL;
-
- b = barrier->b;
-
- if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
- return rc;
-
- if (b->in > 0 || b->out > 0)
- rc = EBUSY;
-
- pthread_mutex_unlock(&b->mutex);
-
- if (rc)
- return rc;
-
- pthread_cond_destroy(&b->cond);
- pthread_mutex_destroy(&b->mutex);
- uv__free(barrier->b);
- barrier->b = NULL;
- return 0;
-}
diff --git a/deps/uv/src/unix/thread.c b/deps/uv/src/unix/thread.c
index a9b5e4c02a8e9a..f8846225910a80 100644
--- a/deps/uv/src/unix/thread.c
+++ b/deps/uv/src/unix/thread.c
@@ -41,6 +41,110 @@
#define NANOSEC ((uint64_t) 1e9)
+#if defined(UV__PTHREAD_BARRIER_FALLBACK)
+/* TODO: support barrier_attr */
+int pthread_barrier_init(pthread_barrier_t* barrier,
+ const void* barrier_attr,
+ unsigned count) {
+ int rc;
+ _uv_barrier* b;
+
+ if (barrier == NULL || count == 0)
+ return EINVAL;
+
+ if (barrier_attr != NULL)
+ return ENOTSUP;
+
+ b = uv__malloc(sizeof(*b));
+ if (b == NULL)
+ return ENOMEM;
+
+ b->in = 0;
+ b->out = 0;
+ b->threshold = count;
+
+ if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
+ goto error2;
+ if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
+ goto error;
+
+ barrier->b = b;
+ return 0;
+
+error:
+ pthread_mutex_destroy(&b->mutex);
+error2:
+ uv__free(b);
+ return rc;
+}
+
+int pthread_barrier_wait(pthread_barrier_t* barrier) {
+ int rc;
+ _uv_barrier* b;
+
+ if (barrier == NULL || barrier->b == NULL)
+ return EINVAL;
+
+ b = barrier->b;
+ /* Lock the mutex*/
+ if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+ return rc;
+
+ /* Increment the count. If this is the first thread to reach the threshold,
+ wake up waiters, unlock the mutex, then return
+ PTHREAD_BARRIER_SERIAL_THREAD. */
+ if (++b->in == b->threshold) {
+ b->in = 0;
+ b->out = b->threshold - 1;
+ rc = pthread_cond_signal(&b->cond);
+ assert(rc == 0);
+
+ pthread_mutex_unlock(&b->mutex);
+ return PTHREAD_BARRIER_SERIAL_THREAD;
+ }
+ /* Otherwise, wait for other threads until in is set to 0,
+ then return 0 to indicate this is not the first thread. */
+ do {
+ if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
+ break;
+ } while (b->in != 0);
+
+ /* mark thread exit */
+ b->out--;
+ pthread_cond_signal(&b->cond);
+ pthread_mutex_unlock(&b->mutex);
+ return rc;
+}
+
+int pthread_barrier_destroy(pthread_barrier_t* barrier) {
+ int rc;
+ _uv_barrier* b;
+
+ if (barrier == NULL || barrier->b == NULL)
+ return EINVAL;
+
+ b = barrier->b;
+
+ if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+ return rc;
+
+ if (b->in > 0 || b->out > 0)
+ rc = EBUSY;
+
+ pthread_mutex_unlock(&b->mutex);
+
+ if (rc)
+ return rc;
+
+ pthread_cond_destroy(&b->cond);
+ pthread_mutex_destroy(&b->mutex);
+ uv__free(barrier->b);
+ barrier->b = NULL;
+ return 0;
+}
+#endif
+
+
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
int err;
pthread_attr_t* attr;
@@ -283,16 +387,19 @@ int uv_sem_init(uv_sem_t* sem, unsigned int value) {
uv_sem_t semid;
struct sembuf buf;
int err;
+ union {
+ int val;
+ struct semid_ds* buf;
+ unsigned short* array;
+ } arg;
- buf.sem_num = 0;
- buf.sem_op = value;
- buf.sem_flg = 0;
semid = semget(IPC_PRIVATE, 1, S_IRUSR | S_IWUSR);
if (semid == -1)
return -errno;
- if (-1 == semop(semid, &buf, 1)) {
+ arg.val = value;
+ if (-1 == semctl(semid, 0, SETVAL, arg)) {
err = errno;
if (-1 == semctl(*sem, 0, IPC_RMID))
abort();
@@ -424,7 +531,7 @@ int uv_cond_init(uv_cond_t* cond) {
if (err)
return -err;
-#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+#if !(defined(__ANDROID_API__) && __ANDROID_API__ < 21)
err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
if (err)
goto error2;
@@ -511,7 +618,8 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
timeout += uv__hrtime(UV_CLOCK_PRECISE);
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
-#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
+
/*
* The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
* but has this alternative function instead.
@@ -519,7 +627,7 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
r = pthread_cond_timedwait(cond, mutex, &ts);
-#endif /* __ANDROID__ */
+#endif /* __ANDROID_API__ */
#endif
diff --git a/deps/uv/src/win/dl.c b/deps/uv/src/win/dl.c
index 39e400ab2dbd19..d454014d8adf6e 100644
--- a/deps/uv/src/win/dl.c
+++ b/deps/uv/src/win/dl.c
@@ -22,7 +22,7 @@
#include "uv.h"
#include "internal.h"
-static int uv__dlerror(uv_lib_t* lib, int errorno);
+static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno);
int uv_dlopen(const char* filename, uv_lib_t* lib) {
@@ -37,12 +37,12 @@ int uv_dlopen(const char* filename, uv_lib_t* lib) {
-1,
filename_w,
ARRAY_SIZE(filename_w))) {
- return uv__dlerror(lib, GetLastError());
+ return uv__dlerror(lib, filename, GetLastError());
}
lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
if (lib->handle == NULL) {
- return uv__dlerror(lib, GetLastError());
+ return uv__dlerror(lib, filename, GetLastError());
}
return 0;
@@ -65,7 +65,7 @@ void uv_dlclose(uv_lib_t* lib) {
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
*ptr = (void*) GetProcAddress(lib->handle, name);
- return uv__dlerror(lib, *ptr ? 0 : GetLastError());
+ return uv__dlerror(lib, "", *ptr ? 0 : GetLastError());
}
@@ -88,31 +88,46 @@ static void uv__format_fallback_error(uv_lib_t* lib, int errorno){
-static int uv__dlerror(uv_lib_t* lib, int errorno) {
+static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno) {
+ static const char not_win32_app_msg[] = "%1 is not a valid Win32 application";
+ DWORD_PTR arg;
DWORD res;
if (lib->errmsg) {
- LocalFree((void*)lib->errmsg);
+ LocalFree(lib->errmsg);
lib->errmsg = NULL;
}
- if (errorno) {
+ if (errorno == 0)
+ return 0;
+
+ res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+ MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+ (LPSTR) &lib->errmsg, 0, NULL);
+
+ if (!res && GetLastError() == ERROR_MUI_FILE_NOT_FOUND) {
res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
- MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
- (LPSTR) &lib->errmsg, 0, NULL);
- if (!res && GetLastError() == ERROR_MUI_FILE_NOT_FOUND) {
- res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
- 0, (LPSTR) &lib->errmsg, 0, NULL);
- }
-
- if (!res) {
- uv__format_fallback_error(lib, errorno);
- }
+ 0, (LPSTR) &lib->errmsg, 0, NULL);
+ }
+
+ /* Inexpert hack to get the filename into the error message. */
+ if (res && strstr(lib->errmsg, not_win32_app_msg)) {
+ LocalFree(lib->errmsg);
+ lib->errmsg = NULL;
+ arg = (DWORD_PTR) filename;
+ res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_ARGUMENT_ARRAY |
+ FORMAT_MESSAGE_FROM_STRING,
+ not_win32_app_msg,
+ 0, 0, (LPSTR) &lib->errmsg, 0, (va_list*) &arg);
}
- return errorno ? -1 : 0;
+ if (!res)
+ uv__format_fallback_error(lib, errorno);
+
+ return -1;
}
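
With the reworked `uv__dlerror()` above, the Windows error string can now carry the offending file name (the new `test-dlerror.c` assertions below rely on this). A hedged sketch of the caller-side effect; the module name is a placeholder:

```c
#include <stdio.h>
#include <uv.h>

/* Illustrative only: "no_such_module.dll" is a placeholder name. */
void report_dlopen_failure(void) {
  uv_lib_t lib;
  if (uv_dlopen("no_such_module.dll", &lib) != 0) {
    /* After this change the message may include the file name, e.g. for
     * the "%1 is not a valid Win32 application" case. */
    fprintf(stderr, "uv_dlopen: %s\n", uv_dlerror(&lib));
  }
  uv_dlclose(&lib);
}
```
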
diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c
index 8223d6f655d1df..c374a82ca01f75 100644
--- a/deps/uv/src/win/fs.c
+++ b/deps/uv/src/win/fs.c
@@ -43,11 +43,26 @@
#define UV_FS_CLEANEDUP 0x0010
-#define QUEUE_FS_TP_JOB(loop, req) \
- do { \
- uv__req_register(loop, req); \
- uv__work_submit((loop), &(req)->work_req, uv__fs_work, uv__fs_done); \
- } while (0)
+#define INIT(subtype) \
+ do { \
+ if (req == NULL) \
+ return UV_EINVAL; \
+ uv_fs_req_init(loop, req, subtype, cb); \
+ } \
+ while (0)
+
+#define POST \
+ do { \
+ if (cb != NULL) { \
+ uv__req_register(loop, req); \
+ uv__work_submit(loop, &req->work_req, uv__fs_work, uv__fs_done); \
+ return 0; \
+ } else { \
+ uv__fs_work(&req->work_req); \
+ return req->result; \
+ } \
+ } \
+ while (0)
#define SET_REQ_RESULT(req, result_value) \
do { \
@@ -113,6 +128,7 @@ const WCHAR LONG_PATH_PREFIX_LEN = 4;
const WCHAR UNC_PATH_PREFIX[] = L"\\\\?\\UNC\\";
const WCHAR UNC_PATH_PREFIX_LEN = 8;
+static int uv__file_symlink_usermode_flag = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE;
void uv_fs_init(void) {
_fmode = _O_BINARY;
@@ -220,6 +236,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req,
uv_fs_type fs_type, const uv_fs_cb cb) {
+ uv__once_init();
UV_REQ_INIT(req, UV_FS);
req->loop = loop;
req->flags = 0;
@@ -1118,8 +1135,6 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf) {
*/
if (fs__readlink_handle(handle, NULL, &statbuf->st_size) == 0) {
statbuf->st_mode |= S_IFLNK;
- } else if (GetLastError() != ERROR_NOT_A_REPARSE_POINT) {
- return -1;
}
}
@@ -1333,6 +1348,22 @@ static void fs__ftruncate(uv_fs_t* req) {
}
+static void fs__copyfile(uv_fs_t* req) {
+ int flags;
+ int overwrite;
+
+ flags = req->fs.info.file_flags;
+ overwrite = flags & UV_FS_COPYFILE_EXCL;
+
+ if (CopyFileW(req->file.pathw, req->fs.info.new_pathw, overwrite) == 0) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ SET_REQ_RESULT(req, 0);
+}
+
+
static void fs__sendfile(uv_fs_t* req) {
int fd_in = req->file.fd, fd_out = req->fs.info.fd_out;
size_t length = req->fs.info.bufsml[0].len;
@@ -1699,25 +1730,46 @@ static void fs__create_junction(uv_fs_t* req, const WCHAR* path,
static void fs__symlink(uv_fs_t* req) {
- WCHAR* pathw = req->file.pathw;
- WCHAR* new_pathw = req->fs.info.new_pathw;
- int flags = req->fs.info.file_flags;
- int result;
+ WCHAR* pathw;
+ WCHAR* new_pathw;
+ int flags;
+ int err;
+ pathw = req->file.pathw;
+ new_pathw = req->fs.info.new_pathw;
- if (flags & UV_FS_SYMLINK_JUNCTION) {
+ if (req->fs.info.file_flags & UV_FS_SYMLINK_JUNCTION) {
fs__create_junction(req, pathw, new_pathw);
- } else if (pCreateSymbolicLinkW) {
- result = pCreateSymbolicLinkW(new_pathw,
- pathw,
- flags & UV_FS_SYMLINK_DIR ? SYMBOLIC_LINK_FLAG_DIRECTORY : 0) ? 0 : -1;
- if (result == -1) {
- SET_REQ_WIN32_ERROR(req, GetLastError());
- } else {
- SET_REQ_RESULT(req, result);
- }
- } else {
+ return;
+ }
+ if (!pCreateSymbolicLinkW) {
SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED);
+ return;
+ }
+
+ if (req->fs.info.file_flags & UV_FS_SYMLINK_DIR)
+ flags = SYMBOLIC_LINK_FLAG_DIRECTORY;
+ else
+ flags = uv__file_symlink_usermode_flag;
+
+ if (pCreateSymbolicLinkW(new_pathw, pathw, flags)) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ /* Something went wrong. We will test if it is because of user-mode
+ * symlinks.
+ */
+ err = GetLastError();
+ if (err == ERROR_INVALID_PARAMETER &&
+ flags & SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE) {
+ /* This system does not support user-mode symlinks. We will clear the
+ * unsupported flag and retry.
+ */
+ uv__file_symlink_usermode_flag = 0;
+ fs__symlink(req);
+ } else {
+ SET_REQ_WIN32_ERROR(req, err);
}
}
@@ -1855,6 +1907,7 @@ static void uv__fs_work(struct uv__work* w) {
XX(CLOSE, close)
XX(READ, read)
XX(WRITE, write)
+ XX(COPYFILE, copyfile)
XX(SENDFILE, sendfile)
XX(STAT, stat)
XX(LSTAT, lstat)
@@ -1901,6 +1954,9 @@ static void uv__fs_done(struct uv__work* w, int status) {
void uv_fs_req_cleanup(uv_fs_t* req) {
+ if (req == NULL)
+ return;
+
if (req->flags & UV_FS_CLEANEDUP)
return;
@@ -1931,8 +1987,7 @@ int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
int mode, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_OPEN, cb);
-
+ INIT(UV_FS_OPEN);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
@@ -1940,28 +1995,14 @@ int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
req->fs.info.file_flags = flags;
req->fs.info.mode = mode;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__open(req);
- return req->result;
- }
+ POST;
}
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_CLOSE, cb);
+ INIT(UV_FS_CLOSE);
req->file.fd = fd;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__close(req);
- return req->result;
- }
+ POST;
}
@@ -1972,11 +2013,11 @@ int uv_fs_read(uv_loop_t* loop,
unsigned int nbufs,
int64_t offset,
uv_fs_cb cb) {
+ INIT(UV_FS_READ);
+
if (bufs == NULL || nbufs == 0)
return UV_EINVAL;
- uv_fs_req_init(loop, req, UV_FS_READ, cb);
-
req->file.fd = fd;
req->fs.info.nbufs = nbufs;
@@ -1990,14 +2031,7 @@ int uv_fs_read(uv_loop_t* loop,
memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
req->fs.info.offset = offset;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__read(req);
- return req->result;
- }
+ POST;
}
@@ -2008,11 +2042,11 @@ int uv_fs_write(uv_loop_t* loop,
unsigned int nbufs,
int64_t offset,
uv_fs_cb cb) {
+ INIT(UV_FS_WRITE);
+
if (bufs == NULL || nbufs == 0)
return UV_EINVAL;
- uv_fs_req_init(loop, req, UV_FS_WRITE, cb);
-
req->file.fd = fd;
req->fs.info.nbufs = nbufs;
@@ -2026,14 +2060,7 @@ int uv_fs_write(uv_loop_t* loop,
memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
req->fs.info.offset = offset;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__write(req);
- return req->result;
- }
+ POST;
}
@@ -2041,20 +2068,13 @@ int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_UNLINK, cb);
-
+ INIT(UV_FS_UNLINK);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__unlink(req);
- return req->result;
- }
+ POST;
}
@@ -2062,22 +2082,14 @@ int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_MKDIR, cb);
-
+ INIT(UV_FS_MKDIR);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
req->fs.info.mode = mode;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__mkdir(req);
- return req->result;
- }
+ POST;
}
@@ -2085,39 +2097,25 @@ int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_MKDTEMP, cb);
-
+ INIT(UV_FS_MKDTEMP);
err = fs__capture_path(req, tpl, NULL, TRUE);
if (err)
return uv_translate_sys_error(err);
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__mkdtemp(req);
- return req->result;
- }
+ POST;
}
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_RMDIR, cb);
-
+ INIT(UV_FS_RMDIR);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__rmdir(req);
- return req->result;
- }
+ POST;
}
@@ -2125,22 +2123,14 @@ int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_SCANDIR, cb);
-
+ INIT(UV_FS_SCANDIR);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
req->fs.info.file_flags = flags;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__scandir(req);
- return req->result;
- }
+ POST;
}
@@ -2148,20 +2138,13 @@ int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path,
const char* new_path, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_LINK, cb);
-
+ INIT(UV_FS_LINK);
err = fs__capture_path(req, path, new_path, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__link(req);
- return req->result;
- }
+ POST;
}
@@ -2169,22 +2152,14 @@ int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
const char* new_path, int flags, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_SYMLINK, cb);
-
+ INIT(UV_FS_SYMLINK);
err = fs__capture_path(req, path, new_path, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
req->fs.info.file_flags = flags;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__symlink(req);
- return req->result;
- }
+ POST;
}
@@ -2192,20 +2167,13 @@ int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_READLINK, cb);
-
+ INIT(UV_FS_READLINK);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__readlink(req);
- return req->result;
- }
+ POST;
}
@@ -2213,24 +2181,18 @@ int uv_fs_realpath(uv_loop_t* loop, uv_fs_t* req, const char* path,
uv_fs_cb cb) {
int err;
- if (!req || !path) {
+ INIT(UV_FS_REALPATH);
+
+ if (!path) {
return UV_EINVAL;
}
- uv_fs_req_init(loop, req, UV_FS_REALPATH, cb);
-
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__realpath(req);
- return req->result;
- }
+ POST;
}
@@ -2238,88 +2200,53 @@ int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid,
uv_gid_t gid, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_CHOWN, cb);
-
+ INIT(UV_FS_CHOWN);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__chown(req);
- return req->result;
- }
+ POST;
}
int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_uid_t uid,
uv_gid_t gid, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FCHOWN, cb);
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__fchown(req);
- return req->result;
- }
+ INIT(UV_FS_FCHOWN);
+ POST;
}
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_STAT, cb);
-
+ INIT(UV_FS_STAT);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__stat(req);
- return req->result;
- }
+ POST;
}
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_LSTAT, cb);
-
+ INIT(UV_FS_LSTAT);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__lstat(req);
- return req->result;
- }
+ POST;
}
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FSTAT, cb);
+ INIT(UV_FS_FSTAT);
req->file.fd = fd;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__fstat(req);
- return req->result;
- }
+ POST;
}
@@ -2327,85 +2254,70 @@ int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path,
const char* new_path, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_RENAME, cb);
-
+ INIT(UV_FS_RENAME);
err = fs__capture_path(req, path, new_path, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__rename(req);
- return req->result;
- }
+ POST;
}
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FSYNC, cb);
+ INIT(UV_FS_FSYNC);
req->file.fd = fd;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__fsync(req);
- return req->result;
- }
+ POST;
}
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FDATASYNC, cb);
+ INIT(UV_FS_FDATASYNC);
req->file.fd = fd;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__fdatasync(req);
- return req->result;
- }
+ POST;
}
int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file fd,
int64_t offset, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FTRUNCATE, cb);
-
+ INIT(UV_FS_FTRUNCATE);
req->file.fd = fd;
req->fs.info.offset = offset;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__ftruncate(req);
- return req->result;
- }
+ POST;
}
+int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_COPYFILE);
+
+ if (flags & ~UV_FS_COPYFILE_EXCL)
+ return UV_EINVAL;
+
+ err = fs__capture_path(req, path, new_path, cb != NULL);
+
+ if (err)
+ return uv_translate_sys_error(err);
+
+ req->fs.info.file_flags = flags;
+ POST;
+}
+
int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
uv_file fd_in, int64_t in_offset, size_t length, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_SENDFILE, cb);
-
+ INIT(UV_FS_SENDFILE);
req->file.fd = fd_in;
req->fs.info.fd_out = fd_out;
req->fs.info.offset = in_offset;
req->fs.info.bufsml[0].len = length;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__sendfile(req);
- return req->result;
- }
+ POST;
}
@@ -2416,21 +2328,13 @@ int uv_fs_access(uv_loop_t* loop,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_ACCESS, cb);
-
+ INIT(UV_FS_ACCESS);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err)
return uv_translate_sys_error(err);
req->fs.info.mode = flags;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- }
-
- fs__access(req);
- return req->result;
+ POST;
}
@@ -2438,39 +2342,23 @@ int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_CHMOD, cb);
-
+ INIT(UV_FS_CHMOD);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
}
req->fs.info.mode = mode;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__chmod(req);
- return req->result;
- }
+ POST;
}
int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file fd, int mode,
uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FCHMOD, cb);
-
+ INIT(UV_FS_FCHMOD);
req->file.fd = fd;
req->fs.info.mode = mode;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__fchmod(req);
- return req->result;
- }
+ POST;
}
@@ -2478,8 +2366,7 @@ int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime,
double mtime, uv_fs_cb cb) {
int err;
- uv_fs_req_init(loop, req, UV_FS_UTIME, cb);
-
+ INIT(UV_FS_UTIME);
err = fs__capture_path(req, path, NULL, cb != NULL);
if (err) {
return uv_translate_sys_error(err);
@@ -2487,30 +2374,15 @@ int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime,
req->fs.time.atime = atime;
req->fs.time.mtime = mtime;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__utime(req);
- return req->result;
- }
+ POST;
}
int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file fd, double atime,
double mtime, uv_fs_cb cb) {
- uv_fs_req_init(loop, req, UV_FS_FUTIME, cb);
-
+ INIT(UV_FS_FUTIME);
req->file.fd = fd;
req->fs.time.atime = atime;
req->fs.time.mtime = mtime;
-
- if (cb) {
- QUEUE_FS_TP_JOB(loop, req);
- return 0;
- } else {
- fs__futime(req);
- return req->result;
- }
+ POST;
}
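
The fs.c hunks above replace the per-function queue/sync boilerplate with the `INIT`/`POST` macros (callback set: queue on the threadpool; no callback: run inline and return the result) and add `uv_fs_copyfile()`. A small usage sketch under the assumption of the default loop; the file names are placeholders:

```c
#include <stdio.h>
#include <uv.h>

static void copy_cb(uv_fs_t* req) {
  /* With a callback, the work was queued; the result arrives here. */
  if (req->result < 0)
    fprintf(stderr, "copy failed: %s\n", uv_strerror((int) req->result));
  uv_fs_req_cleanup(req);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_t sync_req;
  uv_fs_t async_req;
  int r;

  /* Synchronous: a NULL callback runs the copy inline and returns the
   * result. UV_FS_COPYFILE_EXCL makes it fail if the target exists. */
  r = uv_fs_copyfile(NULL, &sync_req, "src.txt", "dst.txt",
                     UV_FS_COPYFILE_EXCL, NULL);
  uv_fs_req_cleanup(&sync_req);

  /* Asynchronous: the copy runs on the threadpool, copy_cb fires later. */
  uv_fs_copyfile(loop, &async_req, "src.txt", "dst2.txt", 0, copy_cb);
  uv_run(loop, UV_RUN_DEFAULT);
  return r == 0 ? 0 : 1;
}
```
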
diff --git a/deps/uv/src/win/pipe.c b/deps/uv/src/win/pipe.c
index 9b10cc9fe22bf6..5c666788fd67f5 100644
--- a/deps/uv/src/win/pipe.c
+++ b/deps/uv/src/win/pipe.c
@@ -1913,6 +1913,7 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
if (os_handle == INVALID_HANDLE_VALUE)
return UV_EBADF;
+ uv__once_init();
/* In order to avoid closing a stdio file descriptor 0-2, duplicate the
* underlying OS handle and forget about the original fd.
* We could also opt to use the original OS handle and just never close it,
@@ -1986,6 +1987,7 @@ static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size)
unsigned int name_len;
int err;
+ uv__once_init();
name_info = NULL;
if (handle->handle == INVALID_HANDLE_VALUE) {
diff --git a/deps/uv/src/win/process.c b/deps/uv/src/win/process.c
index d141601607851b..97b67ca529582e 100644
--- a/deps/uv/src/win/process.c
+++ b/deps/uv/src/win/process.c
@@ -405,8 +405,15 @@ static WCHAR* search_path(const WCHAR *file,
/* Next slice starts just after where the previous one ended */
dir_start = dir_end;
+ /* If path is quoted, find quote end */
+ if (*dir_start == L'"' || *dir_start == L'\'') {
+ dir_end = wcschr(dir_start + 1, *dir_start);
+ if (dir_end == NULL) {
+ dir_end = wcschr(dir_start, L'\0');
+ }
+ }
/* Slice until the next ; or \0 is found */
- dir_end = wcschr(dir_start, L';');
+ dir_end = wcschr(dir_end, L';');
if (dir_end == NULL) {
dir_end = wcschr(dir_start, L'\0');
}
diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c
index 972539f4df49d0..e63a63e7712af1 100644
--- a/deps/uv/src/win/tcp.c
+++ b/deps/uv/src/win/tcp.c
@@ -1446,6 +1446,8 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
WSAPROTOCOL_INFOW protocol_info;
int opt_len;
int err;
+ struct sockaddr_storage saddr;
+ int saddr_len;
/* Detect the address family of the socket. */
opt_len = (int) sizeof protocol_info;
@@ -1466,6 +1468,19 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
return uv_translate_sys_error(err);
}
+ /* Support already active socket. */
+ saddr_len = sizeof(saddr);
+ if (!uv_tcp_getsockname(handle, (struct sockaddr*) &saddr, &saddr_len)) {
+ /* Socket is already bound. */
+ handle->flags |= UV_HANDLE_BOUND;
+ saddr_len = sizeof(saddr);
+ if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
+ /* Socket is already connected. */
+ uv_connection_init((uv_stream_t*) handle);
+ handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ }
+ }
+
return 0;
}
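
With the check above, `uv_tcp_open()` now recognizes a socket that is already bound or connected and flags the handle accordingly (the new `tcp_open_bound`/`tcp_open_connected` tests exercise this). A brief sketch of adopting an externally connected socket; the connect() step is assumed to have been done by the caller:

```c
#include <uv.h>

/* `sock` is assumed to already be connect()ed. After uv_tcp_open() the
 * handle is marked readable/writable, so uv_read_start()/uv_write() can
 * be used on it right away. */
int adopt_connected_socket(uv_loop_t* loop, uv_os_sock_t sock,
                           uv_tcp_t* handle) {
  int rc = uv_tcp_init(loop, handle);
  if (rc != 0)
    return rc;
  return uv_tcp_open(handle, sock);
}
```
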
diff --git a/deps/uv/src/win/tty.c b/deps/uv/src/win/tty.c
index a6f583956fe6b3..c4f99bdc7961b8 100644
--- a/deps/uv/src/win/tty.c
+++ b/deps/uv/src/win/tty.c
@@ -148,6 +148,7 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int readable) {
HANDLE handle;
CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
+ uv__once_init();
handle = (HANDLE) uv__get_osfhandle(fd);
if (handle == INVALID_HANDLE_VALUE)
return UV_EBADF;
diff --git a/deps/uv/src/win/winapi.h b/deps/uv/src/win/winapi.h
index 9401676fbdc3ad..6c699bfe170c25 100644
--- a/deps/uv/src/win/winapi.h
+++ b/deps/uv/src/win/winapi.h
@@ -4104,6 +4104,10 @@
# define JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE 0x00002000
#endif
+#ifndef SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE
+# define SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE 0x00000002
+#endif
+
/* from winternl.h */
typedef struct _UNICODE_STRING {
USHORT Length;
diff --git a/deps/uv/test/runner-win.c b/deps/uv/test/runner-win.c
index 0f1b56e777c6cb..d86fda3c5d7b8c 100644
--- a/deps/uv/test/runner-win.c
+++ b/deps/uv/test/runner-win.c
@@ -300,7 +300,6 @@ int process_reap(process_info_t *p) {
void process_cleanup(process_info_t *p) {
CloseHandle(p->process);
CloseHandle(p->stdio_in);
- CloseHandle(p->stdio_out);
}
diff --git a/deps/uv/test/test-dlerror.c b/deps/uv/test/test-dlerror.c
index 091200edbed591..8f7697b594129c 100644
--- a/deps/uv/test/test-dlerror.c
+++ b/deps/uv/test/test-dlerror.c
@@ -42,11 +42,13 @@ TEST_IMPL(dlerror) {
msg = uv_dlerror(&lib);
ASSERT(msg != NULL);
+ ASSERT(strstr(msg, path) != NULL);
ASSERT(strstr(msg, dlerror_no_error) == NULL);
/* Should return the same error twice in a row. */
msg = uv_dlerror(&lib);
ASSERT(msg != NULL);
+ ASSERT(strstr(msg, path) != NULL);
ASSERT(strstr(msg, dlerror_no_error) == NULL);
uv_dlclose(&lib);
diff --git a/deps/uv/test/test-fs-copyfile.c b/deps/uv/test/test-fs-copyfile.c
new file mode 100644
index 00000000000000..2d1f9079a5f915
--- /dev/null
+++ b/deps/uv/test/test-fs-copyfile.c
@@ -0,0 +1,150 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "task.h"
+
+#if defined(__unix__) || defined(__POSIX__) || \
+ defined(__APPLE__) || defined(_AIX) || defined(__MVS__)
+#include <unistd.h> /* unlink, etc. */
+#else
+# include
+# include
+# define unlink _unlink
+#endif
+
+static const char fixture[] = "test/fixtures/load_error.node";
+static const char dst[] = "test_file_dst";
+static int result_check_count;
+
+
+static void handle_result(uv_fs_t* req) {
+ uv_fs_t stat_req;
+ uint64_t size;
+ uint64_t mode;
+ int r;
+
+ ASSERT(req->fs_type == UV_FS_COPYFILE);
+ ASSERT(req->result == 0);
+
+ /* Verify that the file size and mode are the same. */
+ r = uv_fs_stat(NULL, &stat_req, req->path, NULL);
+ ASSERT(r == 0);
+ size = stat_req.statbuf.st_size;
+ mode = stat_req.statbuf.st_mode;
+ uv_fs_req_cleanup(&stat_req);
+ r = uv_fs_stat(NULL, &stat_req, dst, NULL);
+ ASSERT(r == 0);
+ ASSERT(stat_req.statbuf.st_size == size);
+ ASSERT(stat_req.statbuf.st_mode == mode);
+ uv_fs_req_cleanup(&stat_req);
+ uv_fs_req_cleanup(req);
+ result_check_count++;
+}
+
+
+static void touch_file(const char* name, unsigned int size) {
+ uv_file file;
+ uv_fs_t req;
+ uv_buf_t buf;
+ int r;
+ unsigned int i;
+
+ r = uv_fs_open(NULL, &req, name, O_WRONLY | O_CREAT, S_IWUSR | S_IRUSR, NULL);
+ uv_fs_req_cleanup(&req);
+ ASSERT(r >= 0);
+ file = r;
+
+ buf = uv_buf_init("a", 1);
+
+ /* Inefficient but simple. */
+ for (i = 0; i < size; i++) {
+ r = uv_fs_write(NULL, &req, file, &buf, 1, i, NULL);
+ uv_fs_req_cleanup(&req);
+ ASSERT(r >= 0);
+ }
+
+ r = uv_fs_close(NULL, &req, file, NULL);
+ uv_fs_req_cleanup(&req);
+ ASSERT(r == 0);
+}
+
+
+TEST_IMPL(fs_copyfile) {
+ const char src[] = "test_file_src";
+ uv_loop_t* loop;
+ uv_fs_t req;
+ int r;
+
+ loop = uv_default_loop();
+
+ /* Fails with EINVAL if bad flags are passed. */
+ r = uv_fs_copyfile(NULL, &req, src, dst, -1, NULL);
+ ASSERT(r == UV_EINVAL);
+ uv_fs_req_cleanup(&req);
+
+ /* Fails with ENOENT if source does not exist. */
+ unlink(src);
+ unlink(dst);
+ r = uv_fs_copyfile(NULL, &req, src, dst, 0, NULL);
+ ASSERT(req.result == UV_ENOENT);
+ ASSERT(r == UV_ENOENT);
+ uv_fs_req_cleanup(&req);
+ /* The destination should not exist. */
+ r = uv_fs_stat(NULL, &req, dst, NULL);
+ ASSERT(r != 0);
+ uv_fs_req_cleanup(&req);
+
+ /* Copies file synchronously. Creates new file. */
+ unlink(dst);
+ r = uv_fs_copyfile(NULL, &req, fixture, dst, 0, NULL);
+ ASSERT(r == 0);
+ handle_result(&req);
+
+ /* Copies file synchronously. Overwrites existing file. */
+ r = uv_fs_copyfile(NULL, &req, fixture, dst, 0, NULL);
+ ASSERT(r == 0);
+ handle_result(&req);
+
+ /* Fails to overwrite existing file. */
+ r = uv_fs_copyfile(NULL, &req, fixture, dst, UV_FS_COPYFILE_EXCL, NULL);
+ ASSERT(r == UV_EEXIST);
+ uv_fs_req_cleanup(&req);
+
+ /* Copies a larger file. */
+ unlink(dst);
+ touch_file(src, 4096 * 2);
+ r = uv_fs_copyfile(NULL, &req, src, dst, 0, NULL);
+ ASSERT(r == 0);
+ handle_result(&req);
+ unlink(src);
+
+ /* Copies file asynchronously */
+ unlink(dst);
+ r = uv_fs_copyfile(loop, &req, fixture, dst, 0, handle_result);
+ ASSERT(r == 0);
+ ASSERT(result_check_count == 3);
+ uv_run(loop, UV_RUN_DEFAULT);
+ ASSERT(result_check_count == 4);
+ unlink(dst); /* Cleanup */
+
+ return 0;
+}
diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c
index 404d0426f38aed..0000e563a73ffc 100644
--- a/deps/uv/test/test-fs.c
+++ b/deps/uv/test/test-fs.c
@@ -1492,12 +1492,14 @@ TEST_IMPL(fs_chown) {
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(chown_cb_count == 1);
+#ifndef __MVS__
/* chown to root (fail) */
chown_cb_count = 0;
r = uv_fs_chown(loop, &req, "test_file", 0, 0, chown_root_cb);
ASSERT(r == 0);
uv_run(loop, UV_RUN_DEFAULT);
ASSERT(chown_cb_count == 1);
+#endif
/* async fchown */
r = uv_fs_fchown(loop, &req, file, -1, -1, fchown_cb);
@@ -2749,19 +2751,23 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
TEST_IMPL(fs_read_write_null_arguments) {
int r;
- r = uv_fs_read(NULL, NULL, 0, NULL, 0, -1, NULL);
+ r = uv_fs_read(NULL, &read_req, 0, NULL, 0, -1, NULL);
ASSERT(r == UV_EINVAL);
+ uv_fs_req_cleanup(&read_req);
- r = uv_fs_write(NULL, NULL, 0, NULL, 0, -1, NULL);
+ r = uv_fs_write(NULL, &write_req, 0, NULL, 0, -1, NULL);
ASSERT(r == UV_EINVAL);
+ uv_fs_req_cleanup(&write_req);
iov = uv_buf_init(NULL, 0);
- r = uv_fs_read(NULL, NULL, 0, &iov, 0, -1, NULL);
+ r = uv_fs_read(NULL, &read_req, 0, &iov, 0, -1, NULL);
ASSERT(r == UV_EINVAL);
+ uv_fs_req_cleanup(&read_req);
iov = uv_buf_init(NULL, 0);
- r = uv_fs_write(NULL, NULL, 0, &iov, 0, -1, NULL);
+ r = uv_fs_write(NULL, &write_req, 0, &iov, 0, -1, NULL);
ASSERT(r == UV_EINVAL);
+ uv_fs_req_cleanup(&write_req);
return 0;
}
@@ -2844,3 +2850,100 @@ TEST_IMPL(fs_file_pos_after_op_with_offset) {
MAKE_VALGRIND_HAPPY();
return 0;
}
+
+TEST_IMPL(fs_null_req) {
+ /* Verify that all fs functions return UV_EINVAL when the request is NULL. */
+ int r;
+
+ r = uv_fs_open(NULL, NULL, NULL, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_close(NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_read(NULL, NULL, 0, NULL, 0, -1, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_write(NULL, NULL, 0, NULL, 0, -1, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_unlink(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_mkdir(NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_mkdtemp(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_rmdir(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_scandir(NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_link(NULL, NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_symlink(NULL, NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_readlink(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_realpath(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_chown(NULL, NULL, NULL, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_fchown(NULL, NULL, 0, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_stat(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_lstat(NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_fstat(NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_rename(NULL, NULL, NULL, NULL, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_fsync(NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_fdatasync(NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_ftruncate(NULL, NULL, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_copyfile(NULL, NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_sendfile(NULL, NULL, 0, 0, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_access(NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_chmod(NULL, NULL, NULL, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_fchmod(NULL, NULL, 0, 0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_utime(NULL, NULL, NULL, 0.0, 0.0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ r = uv_fs_futime(NULL, NULL, 0, 0.0, 0.0, NULL);
+ ASSERT(r == UV_EINVAL);
+
+ /* This should be a no-op. */
+ uv_fs_req_cleanup(NULL);
+
+ return 0;
+}
diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h
index 0c32d84d2038ba..6e84653e8b4f83 100644
--- a/deps/uv/test/test-list.h
+++ b/deps/uv/test/test-list.h
@@ -81,6 +81,8 @@ TEST_DECLARE (tcp_try_write)
TEST_DECLARE (tcp_write_queue_order)
TEST_DECLARE (tcp_open)
TEST_DECLARE (tcp_open_twice)
+TEST_DECLARE (tcp_open_bound)
+TEST_DECLARE (tcp_open_connected)
TEST_DECLARE (tcp_connect_error_after_write)
TEST_DECLARE (tcp_shutdown_after_write)
TEST_DECLARE (tcp_bind_error_addrinuse)
@@ -256,6 +258,7 @@ TEST_DECLARE (spawn_auto_unref)
TEST_DECLARE (spawn_closed_process_io)
TEST_DECLARE (spawn_reads_child_path)
TEST_DECLARE (spawn_inherit_streams)
+TEST_DECLARE (spawn_quoted_path)
TEST_DECLARE (fs_poll)
TEST_DECLARE (fs_poll_getpath)
TEST_DECLARE (kill)
@@ -271,6 +274,7 @@ TEST_DECLARE (fs_mkdtemp)
TEST_DECLARE (fs_fstat)
TEST_DECLARE (fs_access)
TEST_DECLARE (fs_chmod)
+TEST_DECLARE (fs_copyfile)
TEST_DECLARE (fs_unlink_readonly)
TEST_DECLARE (fs_chown)
TEST_DECLARE (fs_link)
@@ -312,6 +316,7 @@ TEST_DECLARE (get_osfhandle_valid_handle)
TEST_DECLARE (fs_write_alotof_bufs)
TEST_DECLARE (fs_write_alotof_bufs_with_offset)
TEST_DECLARE (fs_file_pos_after_op_with_offset)
+TEST_DECLARE (fs_null_req)
TEST_DECLARE (threadpool_queue_work_simple)
TEST_DECLARE (threadpool_queue_work_einval)
TEST_DECLARE (threadpool_multiple_event_loops)
@@ -328,6 +333,10 @@ TEST_DECLARE (thread_rwlock_trylock)
TEST_DECLARE (thread_create)
TEST_DECLARE (thread_equal)
TEST_DECLARE (dlerror)
+#if (defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))) && \
+ !defined(__sun)
+TEST_DECLARE (poll_oob)
+#endif
TEST_DECLARE (poll_duplex)
TEST_DECLARE (poll_unidirectional)
TEST_DECLARE (poll_close)
@@ -486,6 +495,9 @@ TASK_LIST_START
TEST_ENTRY (tcp_open)
TEST_HELPER (tcp_open, tcp4_echo_server)
TEST_ENTRY (tcp_open_twice)
+ TEST_ENTRY (tcp_open_bound)
+ TEST_ENTRY (tcp_open_connected)
+ TEST_HELPER (tcp_open_connected, tcp4_echo_server)
TEST_ENTRY (tcp_shutdown_after_write)
TEST_HELPER (tcp_shutdown_after_write, tcp4_echo_server)
@@ -681,6 +693,11 @@ TASK_LIST_START
TEST_ENTRY (poll_unidirectional)
TEST_ENTRY (poll_close)
TEST_ENTRY (poll_bad_fdtype)
+#if (defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))) && \
+ !defined(__sun)
+ TEST_ENTRY (poll_oob)
+#endif
+
#ifdef __linux__
TEST_ENTRY (poll_nested_epoll)
#endif
@@ -714,6 +731,7 @@ TASK_LIST_START
TEST_ENTRY (spawn_closed_process_io)
TEST_ENTRY (spawn_reads_child_path)
TEST_ENTRY (spawn_inherit_streams)
+ TEST_ENTRY (spawn_quoted_path)
TEST_ENTRY (fs_poll)
TEST_ENTRY (fs_poll_getpath)
TEST_ENTRY (kill)
@@ -762,6 +780,7 @@ TASK_LIST_START
TEST_ENTRY (fs_fstat)
TEST_ENTRY (fs_access)
TEST_ENTRY (fs_chmod)
+ TEST_ENTRY (fs_copyfile)
TEST_ENTRY (fs_unlink_readonly)
TEST_ENTRY (fs_chown)
TEST_ENTRY (fs_utime)
@@ -801,6 +820,7 @@ TASK_LIST_START
TEST_ENTRY (fs_write_alotof_bufs_with_offset)
TEST_ENTRY (fs_read_write_null_arguments)
TEST_ENTRY (fs_file_pos_after_op_with_offset)
+ TEST_ENTRY (fs_null_req)
TEST_ENTRY (get_osfhandle_valid_handle)
TEST_ENTRY (threadpool_queue_work_simple)
TEST_ENTRY (threadpool_queue_work_einval)
diff --git a/deps/uv/test/test-poll-oob.c b/deps/uv/test/test-poll-oob.c
new file mode 100644
index 00000000000000..2a6da843c616ad
--- /dev/null
+++ b/deps/uv/test/test-poll-oob.c
@@ -0,0 +1,205 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if !defined(_WIN32)
+
+#include "uv.h"
+#include "task.h"
+
+#include
+#include
+#include
+#include
+#include
+
+static uv_tcp_t server_handle;
+static uv_tcp_t client_handle;
+static uv_tcp_t peer_handle;
+static uv_poll_t poll_req[2];
+static uv_idle_t idle;
+static uv_os_fd_t client_fd;
+static uv_os_fd_t server_fd;
+static int ticks;
+static const int kMaxTicks = 10;
+static int cli_pr_check = 0;
+static int cli_rd_check = 0;
+static int srv_rd_check = 0;
+
+static int got_eagain(void) {
+ return errno == EAGAIN
+ || errno == EINPROGRESS
+#ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+#endif
+ ;
+}
+
+static void idle_cb(uv_idle_t* idle) {
+ uv_sleep(100);
+ if (++ticks < kMaxTicks)
+ return;
+
+ uv_poll_stop(&poll_req[0]);
+ uv_poll_stop(&poll_req[1]);
+ uv_close((uv_handle_t*) &server_handle, NULL);
+ uv_close((uv_handle_t*) &client_handle, NULL);
+ uv_close((uv_handle_t*) &peer_handle, NULL);
+ uv_close((uv_handle_t*) idle, NULL);
+}
+
+static void poll_cb(uv_poll_t* handle, int status, int events) {
+ char buffer[5];
+ int n;
+ int fd;
+
+ ASSERT(0 == uv_fileno((uv_handle_t*)handle, &fd));
+ memset(buffer, 0, 5);
+
+ if (events & UV_PRIORITIZED) {
+ do
+ n = recv(client_fd, &buffer, 5, MSG_OOB);
+ while (n == -1 && errno == EINTR);
+ ASSERT(n >= 0 || errno != EINVAL);
+ cli_pr_check = 1;
+ ASSERT(0 == uv_poll_stop(&poll_req[0]));
+ ASSERT(0 == uv_poll_start(&poll_req[0],
+ UV_READABLE | UV_WRITABLE,
+ poll_cb));
+ }
+ if (events & UV_READABLE) {
+ if (fd == client_fd) {
+ do
+ n = recv(client_fd, &buffer, 5, 0);
+ while (n == -1 && errno == EINTR);
+ ASSERT(n >= 0 || errno != EINVAL);
+ if (cli_rd_check == 1) {
+ ASSERT(strncmp(buffer, "world", n) == 0);
+ ASSERT(5 == n);
+ cli_rd_check = 2;
+ }
+ if (cli_rd_check == 0) {
+ ASSERT(n == 4);
+ ASSERT(strncmp(buffer, "hello", n) == 0);
+ cli_rd_check = 1;
+ do {
+ do
+ n = recv(server_fd, &buffer, 5, 0);
+ while (n == -1 && errno == EINTR);
+ if (n > 0) {
+ ASSERT(n == 5);
+ ASSERT(strncmp(buffer, "world", n) == 0);
+ cli_rd_check = 2;
+ }
+ } while (n > 0);
+
+ ASSERT(got_eagain());
+ }
+ }
+ if (fd == server_fd) {
+ do
+ n = recv(server_fd, &buffer, 3, 0);
+ while (n == -1 && errno == EINTR);
+ ASSERT(n >= 0 || errno != EINVAL);
+ ASSERT(3 == n);
+ ASSERT(strncmp(buffer, "foo", n) == 0);
+ srv_rd_check = 1;
+ uv_poll_stop(&poll_req[1]);
+ }
+ }
+ if (events & UV_WRITABLE) {
+ do {
+ n = send(client_fd, "foo", 3, 0);
+ } while (n < 0 && errno == EINTR);
+ ASSERT(3 == n);
+ }
+}
+
+static void connection_cb(uv_stream_t* handle, int status) {
+ int r;
+
+ ASSERT(0 == status);
+ ASSERT(0 == uv_accept(handle, (uv_stream_t*) &peer_handle));
+ ASSERT(0 == uv_fileno((uv_handle_t*) &peer_handle, &server_fd));
+ ASSERT(0 == uv_poll_init_socket(uv_default_loop(), &poll_req[0], client_fd));
+ ASSERT(0 == uv_poll_init_socket(uv_default_loop(), &poll_req[1], server_fd));
+ ASSERT(0 == uv_poll_start(&poll_req[0],
+ UV_PRIORITIZED | UV_READABLE | UV_WRITABLE,
+ poll_cb));
+ ASSERT(0 == uv_poll_start(&poll_req[1],
+ UV_READABLE,
+ poll_cb));
+ do {
+ r = send(server_fd, "hello", 5, MSG_OOB);
+ } while (r < 0 && errno == EINTR);
+ ASSERT(5 == r);
+
+ do {
+ r = send(server_fd, "world", 5, 0);
+ } while (r < 0 && errno == EINTR);
+ ASSERT(5 == r);
+
+ ASSERT(0 == uv_idle_start(&idle, idle_cb));
+}
+
+
+TEST_IMPL(poll_oob) {
+ struct sockaddr_in addr;
+ int r = 0;
+ uv_loop_t* loop;
+
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
+ loop = uv_default_loop();
+
+ ASSERT(0 == uv_tcp_init(loop, &server_handle));
+ ASSERT(0 == uv_tcp_init(loop, &client_handle));
+ ASSERT(0 == uv_tcp_init(loop, &peer_handle));
+ ASSERT(0 == uv_idle_init(loop, &idle));
+ ASSERT(0 == uv_tcp_bind(&server_handle, (const struct sockaddr*) &addr, 0));
+ ASSERT(0 == uv_listen((uv_stream_t*) &server_handle, 1, connection_cb));
+
+ /* Ensure two separate packets */
+ ASSERT(0 == uv_tcp_nodelay(&client_handle, 1));
+
+ client_fd = socket(PF_INET, SOCK_STREAM, 0);
+ ASSERT(client_fd >= 0);
+ do {
+ errno = 0;
+ r = connect(client_fd, (const struct sockaddr*)&addr, sizeof(addr));
+ } while (r == -1 && errno == EINTR);
+ ASSERT(r == 0);
+
+ ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
+
+ ASSERT(ticks == kMaxTicks);
+
+ /* Did client receive the POLLPRI message */
+ ASSERT(cli_pr_check == 1);
+ /* Did client receive the POLLIN message */
+ ASSERT(cli_rd_check == 2);
+ /* Could we write with POLLOUT and did the server receive our POLLOUT message
+ * through POLLIN.
+ */
+ ASSERT(srv_rd_check == 1);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
+#endif
diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c
index bb35e32b28a285..91d831e19b92fe 100644
--- a/deps/uv/test/test-spawn.c
+++ b/deps/uv/test/test-spawn.c
@@ -1,3 +1,4 @@
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -1350,10 +1351,14 @@ TEST_IMPL(spawn_setgid_fails) {
init_process_options("spawn_helper1", fail_cb);
options.flags |= UV_PROCESS_SETGID;
+#if defined(__MVS__)
+ options.gid = -1;
+#else
options.gid = 0;
+#endif
r = uv_spawn(uv_default_loop(), &process, &options);
-#if defined(__CYGWIN__)
+#if defined(__CYGWIN__) || defined(__MVS__)
ASSERT(r == UV_EINVAL);
#else
ASSERT(r == UV_EPERM);
@@ -1711,6 +1716,31 @@ TEST_IMPL(spawn_inherit_streams) {
return 0;
}
+TEST_IMPL(spawn_quoted_path) {
+#ifndef _WIN32
+ RETURN_SKIP("Test for Windows");
+#else
+ char* quoted_path_env[2];
+ options.file = "not_existing";
+ args[0] = options.file;
+ args[1] = NULL;
+ options.args = args;
+ options.exit_cb = exit_cb;
+ options.flags = 0;
+ /* We test if search_path works correctly with semicolons in quoted path. */
+ /* We will use invalid drive, so we are sure no executable is spawned */
+ quoted_path_env[0] = "PATH=\"xyz:\\test;\";xyz:\\other";
+ quoted_path_env[1] = NULL;
+ options.env = quoted_path_env;
+
+ /* We test if libuv will not segfault. */
+ uv_spawn(uv_default_loop(), &process, &options);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+#endif
+}
+
/* Helper for child process of spawn_inherit_streams */
#ifndef _WIN32
int spawn_stdin_stdout(void) {
diff --git a/deps/uv/test/test-tcp-oob.c b/deps/uv/test/test-tcp-oob.c
index fc011ee495f1f6..4f1397a82ffed7 100644
--- a/deps/uv/test/test-tcp-oob.c
+++ b/deps/uv/test/test-tcp-oob.c
@@ -56,8 +56,21 @@ static void idle_cb(uv_idle_t* idle) {
static void read_cb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
+#ifdef __MVS__
+ char lbuf[12];
+#endif
+ uv_os_fd_t fd;
+
ASSERT(nread > 0);
+ ASSERT(0 == uv_fileno((uv_handle_t*)handle, &fd));
ASSERT(0 == uv_idle_start(&idle, idle_cb));
+
+#ifdef __MVS__
+ /* Need to flush out the OOB data. Otherwise, this callback will get
+ * triggered on every poll with nread = 0.
+ */
+ ASSERT(-1 != recv(fd, lbuf, sizeof(lbuf), MSG_OOB));
+#endif
}
diff --git a/deps/uv/test/test-tcp-open.c b/deps/uv/test/test-tcp-open.c
index 6c8d43d0009e79..cb74c50e2c9929 100644
--- a/deps/uv/test/test-tcp-open.c
+++ b/deps/uv/test/test-tcp-open.c
@@ -218,3 +218,60 @@ TEST_IMPL(tcp_open_twice) {
MAKE_VALGRIND_HAPPY();
return 0;
}
+
+
+TEST_IMPL(tcp_open_bound) {
+ struct sockaddr_in addr;
+ uv_tcp_t server;
+ uv_os_sock_t sock;
+
+ startup();
+ sock = create_tcp_socket();
+
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
+
+ ASSERT(0 == uv_tcp_init(uv_default_loop(), &server));
+
+ ASSERT(0 == bind(sock, (struct sockaddr*) &addr, sizeof(addr)));
+
+ ASSERT(0 == uv_tcp_open(&server, sock));
+
+ ASSERT(0 == uv_listen((uv_stream_t*) &server, 128, NULL));
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
+
+
+TEST_IMPL(tcp_open_connected) {
+ struct sockaddr_in addr;
+ uv_tcp_t client;
+ uv_os_sock_t sock;
+ uv_buf_t buf = uv_buf_init("PING", 4);
+
+ ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
+
+ startup();
+ sock = create_tcp_socket();
+
+ ASSERT(0 == connect(sock, (struct sockaddr*) &addr, sizeof(addr)));
+
+ ASSERT(0 == uv_tcp_init(uv_default_loop(), &client));
+
+ ASSERT(0 == uv_tcp_open(&client, sock));
+
+ ASSERT(0 == uv_write(&write_req, (uv_stream_t*) &client, &buf, 1, write_cb));
+
+ ASSERT(0 == uv_shutdown(&shutdown_req, (uv_stream_t*) &client, shutdown_cb));
+
+ ASSERT(0 == uv_read_start((uv_stream_t*) &client, alloc_cb, read_cb));
+
+ uv_run(uv_default_loop(), UV_RUN_DEFAULT);
+
+ ASSERT(shutdown_cb_called == 1);
+ ASSERT(write_cb_called == 1);
+ ASSERT(close_cb_called == 1);
+
+ MAKE_VALGRIND_HAPPY();
+ return 0;
+}
diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp
index 6f61d725a90b99..cac6da819b2196 100644
--- a/deps/uv/uv.gyp
+++ b/deps/uv/uv.gyp
@@ -217,8 +217,7 @@
'sources': [
'src/unix/darwin.c',
'src/unix/fsevents.c',
- 'src/unix/darwin-proctitle.c',
- 'src/unix/pthread-barrier.c'
+ 'src/unix/darwin-proctitle.c'
],
'defines': [
'_DARWIN_USE_64_BIT_INODE=1',
@@ -253,7 +252,6 @@
'src/unix/linux-syscalls.h',
'src/unix/pthread-fixes.c',
'src/unix/android-ifaddrs.c',
- 'src/unix/pthread-barrier.c',
'src/unix/procfs-exepath.c',
'src/unix/sysinfo-loadavg.c',
'src/unix/sysinfo-memory.c',
@@ -322,7 +320,6 @@
['OS=="os390"', {
'sources': [
'src/unix/pthread-fixes.c',
- 'src/unix/pthread-barrier.c',
'src/unix/no-fsevents.c',
'src/unix/os390.c',
'src/unix/os390-syscalls.c'
@@ -362,6 +359,7 @@
'test/test-fail-always.c',
'test/test-fork.c',
'test/test-fs.c',
+ 'test/test-fs-copyfile.c',
'test/test-fs-event.c',
'test/test-get-currentexe.c',
'test/test-get-memory.c',
@@ -405,6 +403,7 @@
'test/test-poll-close.c',
'test/test-poll-close-doesnt-corrupt-stack.c',
'test/test-poll-closesocket.c',
+ 'test/test-poll-oob.c',
'test/test-process-title.c',
'test/test-queue-foreach-delete.c',
'test/test-ref.c',
diff --git a/deps/uv/vcbuild.bat b/deps/uv/vcbuild.bat
index e33573d108e16f..698044df490c02 100644
--- a/deps/uv/vcbuild.bat
+++ b/deps/uv/vcbuild.bat
@@ -43,6 +43,9 @@ shift
goto next-arg
:args-done
+if defined WindowsSDKDir goto select-target
+if defined VCINSTALLDIR goto select-target
+
@rem Look for Visual Studio 2017 only if explicitly requested.
if "%target_env%" NEQ "vs2017" goto vs-set-2015
echo Looking for Visual Studio 2017
@@ -168,9 +171,7 @@ echo Failed to create vc project files.
exit /b 1
:help
-
-echo "vcbuild.bat [debug/release] [test/bench] [clean] [noprojgen] [nobuild] [vs2017] [x86/x64] [static/shared]"
-
+echo vcbuild.bat [debug/release] [test/bench] [clean] [noprojgen] [nobuild] [vs2017] [x86/x64] [static/shared]
echo Examples:
echo vcbuild.bat : builds debug build
echo vcbuild.bat test : builds debug build and runs tests
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 2060cb19817059..b60425df45b469 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -2507,6 +2507,8 @@ v8_component("v8_libbase") {
if (is_posix) {
sources += [
+ "src/base/platform/platform-posix-time.cc",
+ "src/base/platform/platform-posix-time.h",
"src/base/platform/platform-posix.cc",
"src/base/platform/platform-posix.h",
]
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index e9450456299698..b615088300e444 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -30,12 +30,15 @@ enum class MessageLoopBehavior : bool {
* If |idle_task_support| is enabled then the platform will accept idle
* tasks (IdleTasksEnabled will return true) and will rely on the embedder
* calling v8::platform::RunIdleTasks to process the idle tasks.
+ * If |tracing_controller| is nullptr, the default platform will create a
+ * v8::platform::TracingController instance and use it.
*/
V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
InProcessStackDumping in_process_stack_dumping =
- InProcessStackDumping::kEnabled);
+ InProcessStackDumping::kEnabled,
+ v8::TracingController* tracing_controller = nullptr);
/**
* Pumps the message loop for the given isolate.
@@ -67,6 +70,8 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
* Attempts to set the tracing controller for the given platform.
*
* The |platform| has to be created using |CreateDefaultPlatform|.
+ *
+ * DEPRECATED: Will be removed soon.
*/
V8_PLATFORM_EXPORT void SetTracingController(
v8::Platform* platform,
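
The `CreateDefaultPlatform()` change above adds an optional `tracing_controller` argument and marks `SetTracingController()` as deprecated. A hedged sketch of the new embedder-side wiring; the include paths and the choice of the bundled `v8::platform::tracing::TracingController` are assumptions, and any `v8::TracingController` implementation would do:

```cpp
// Sketch of the new CreateDefaultPlatform() signature, not canonical setup.
#include "libplatform/libplatform.h"
#include "libplatform/v8-tracing.h"
#include "v8.h"

int main() {
  // The bundled controller still needs Initialize(TraceBuffer*) before
  // StartTracing() is called; see the d8.cc hunk later in this diff for the
  // full JSON-writer setup.
  auto* controller = new v8::platform::tracing::TracingController();

  // The platform takes ownership of the controller, so it is not deleted here.
  v8::Platform* platform = v8::platform::CreateDefaultPlatform(
      /*thread_pool_size=*/0, v8::platform::IdleTaskSupport::kDisabled,
      v8::platform::InProcessStackDumping::kEnabled,
      controller);  // new fourth parameter introduced by this change

  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();
  // ... create isolates and run scripts ...
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete platform;
  return 0;
}
```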
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 902f8ea93dbaa5..8c1febf7627ae7 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -209,7 +209,15 @@ class V8_PLATFORM_EXPORT TraceConfig {
void operator=(const TraceConfig&) = delete;
};
-class V8_PLATFORM_EXPORT TracingController {
+#if defined(_MSC_VER)
+#define V8_PLATFORM_NON_EXPORTED_BASE(code) \
+ __pragma(warning(suppress : 4275)) code
+#else
+#define V8_PLATFORM_NON_EXPORTED_BASE(code) code
+#endif // defined(_MSC_VER)
+
+class V8_PLATFORM_EXPORT TracingController
+ : public V8_PLATFORM_NON_EXPORTED_BASE(v8::TracingController) {
public:
enum Mode { DISABLED = 0, RECORDING_MODE };
@@ -227,25 +235,29 @@ class V8_PLATFORM_EXPORT TracingController {
};
TracingController();
- ~TracingController();
+ ~TracingController() override;
void Initialize(TraceBuffer* trace_buffer);
- const uint8_t* GetCategoryGroupEnabled(const char* category_group);
- static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
+
+ // v8::TracingController implementation.
+ const uint8_t* GetCategoryGroupEnabled(const char* category_group) override;
uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags);
+ unsigned int flags) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
- const char* name, uint64_t handle);
+ const char* name, uint64_t handle) override;
+ void AddTraceStateObserver(
+ v8::TracingController::TraceStateObserver* observer) override;
+ void RemoveTraceStateObserver(
+ v8::TracingController::TraceStateObserver* observer) override;
void StartTracing(TraceConfig* trace_config);
void StopTracing();
- void AddTraceStateObserver(Platform::TraceStateObserver* observer);
- void RemoveTraceStateObserver(Platform::TraceStateObserver* observer);
+ static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
private:
const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
@@ -255,7 +267,7 @@ class V8_PLATFORM_EXPORT TracingController {
std::unique_ptr<TraceBuffer> trace_buffer_;
std::unique_ptr<TraceConfig> trace_config_;
std::unique_ptr<base::Mutex> mutex_;
- std::unordered_set<Platform::TraceStateObserver*> observers_;
+ std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
Mode mode_ = DISABLED;
// Disallow copy and assign
@@ -263,6 +275,8 @@ class V8_PLATFORM_EXPORT TracingController {
void operator=(const TracingController&) = delete;
};
+#undef V8_PLATFORM_NON_EXPORTED_BASE
+
} // namespace tracing
} // namespace platform
} // namespace v8
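
`V8_PLATFORM_NON_EXPORTED_BASE` exists only to silence MSVC warning C4275, which fires when a DLL-exported class (here the platform's `TracingController`) derives from a base that has no DLL-interface annotation (the new header-only `v8::TracingController`). A standalone illustration of the idiom, with hypothetical `MY_*`, `Base`, and `Derived` names:

```cpp
// Illustration of the C4275-suppression idiom used above; names are made up.
#if defined(_MSC_VER)
#define MY_NON_EXPORTED_BASE(code) __pragma(warning(suppress : 4275)) code
#define MY_EXPORT __declspec(dllexport)
#else
#define MY_NON_EXPORTED_BASE(code) code
#define MY_EXPORT __attribute__((visibility("default")))
#endif

// Base carries no export annotation of its own (a header-only interface)...
class Base {
 public:
  virtual ~Base() = default;
  virtual int Value() const { return 0; }
};

// ...so exporting Derived from a DLL would normally trigger C4275 on MSVC.
// Wrapping the base specifier suppresses the warning for this declaration only.
class MY_EXPORT Derived : public MY_NON_EXPORTED_BASE(Base) {
 public:
  int Value() const override { return 42; }
};

#undef MY_NON_EXPORTED_BASE
```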
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 8f6d8042abd86a..3df78a81c0a837 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -52,6 +52,66 @@ class ConvertableToTraceFormat {
virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
+/**
+ * V8 Tracing controller.
+ *
+ * Can be implemented by an embedder to record trace events from V8.
+ */
+class TracingController {
+ public:
+ virtual ~TracingController() = default;
+
+ /**
+ * Called by TRACE_EVENT* macros, don't call this directly.
+ * The name parameter is a category group for example:
+ * TRACE_EVENT0("v8,parse", "V8.Parse")
+ * The pointer returned points to a value with zero or more of the bits
+ * defined in CategoryGroupEnabledFlags.
+ **/
+ virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ /**
+ * Adds a trace event to the platform tracing system. This function call is
+ * usually the result of a TRACE_* macro from trace_event_common.h when
+ * tracing and the category of the particular trace are enabled. It is not
+ * advisable to call this function on its own; it is really only meant to be
+ * used by the trace macros. The returned handle can be used by
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
+ */
+ virtual uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
+ return 0;
+ }
+
+ /**
+ * Sets the duration field of a COMPLETE trace event. It must be called with
+ * the handle returned from AddTraceEvent().
+ **/
+ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) {}
+
+ class TraceStateObserver {
+ public:
+ virtual ~TraceStateObserver() = default;
+ virtual void OnTraceEnabled() = 0;
+ virtual void OnTraceDisabled() = 0;
+ };
+
+ /** Adds tracing state change observer. */
+ virtual void AddTraceStateObserver(TraceStateObserver*) {}
+
+ /** Removes tracing state change observer. */
+ virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
+};
+
/**
* V8 Platform abstraction layer.
*
@@ -135,6 +195,20 @@ class Platform {
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
+ typedef void (*StackTracePrinter)();
+
+ /**
+ * Returns a function pointer that print a stack trace of the current stack
+ * on invocation. Disables printing of the stack trace if nullptr.
+ */
+ virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
+
+ /**
+ * Returns an instance of a v8::TracingController. This must be non-nullptr.
+ */
+ virtual TracingController* GetTracingController() = 0;
+
+ // DEPRECATED methods, use TracingController interface instead.
/**
* Called by TRACE_EVENT* macros, don't call this directly.
@@ -200,26 +274,13 @@ class Platform {
virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) {}
- class TraceStateObserver {
- public:
- virtual ~TraceStateObserver() = default;
- virtual void OnTraceEnabled() = 0;
- virtual void OnTraceDisabled() = 0;
- };
+ typedef v8::TracingController::TraceStateObserver TraceStateObserver;
/** Adds tracing state change observer. */
virtual void AddTraceStateObserver(TraceStateObserver*) {}
/** Removes tracing state change observer. */
virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
-
- typedef void (*StackTracePrinter)();
-
- /**
- * Returns a function pointer that print a stack trace of the current stack
- * on invocation. Disables printing of the stack trace if nullptr.
- */
- virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
};
} // namespace v8
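
Since every method on the new `v8::TracingController` has a safe no-op default, an embedder-supplied controller only needs to override the pieces it actually uses. A hedged sketch (the `CountingTracingController` name and the `"v8"` category check are illustrative, not part of this change) of a controller that could be returned from a custom `Platform::GetTracingController()` or passed to `CreateDefaultPlatform()`:

```cpp
// Minimal v8::TracingController built only on the interface added above.
#include <atomic>
#include <cstdint>
#include <cstring>
#include <memory>

#include "include/v8-platform.h"

class CountingTracingController : public v8::TracingController {
 public:
  // Report the "v8" category group as enabled, everything else as disabled.
  const uint8_t* GetCategoryGroupEnabled(const char* name) override {
    static uint8_t yes = 1;  // any non-zero bit means "enabled"
    static uint8_t no = 0;
    return std::strcmp(name, "v8") == 0 ? &yes : &no;
  }

  // Count events instead of recording them; the returned handle is unused.
  uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) override {
    ++events_;
    return 0;
  }

  uint64_t event_count() const { return events_.load(); }

 private:
  std::atomic<uint64_t> events_{0};
};
```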
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 0889459ca778da..db9369f649a9e5 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -10,8 +10,8 @@
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 0
-#define V8_BUILD_NUMBER 286
-#define V8_PATCH_LEVEL 52
+#define V8_BUILD_NUMBER 287
+#define V8_PATCH_LEVEL 53
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 5c5b8a0d3b04bb..9ab1601e7a81d9 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -29,13 +29,16 @@
#undef MAP_TYPE
#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
-TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access) {
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 483cdd49ca91d5..ba161b26c73b0c 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -44,6 +44,7 @@
#undef MAP_TYPE
#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
@@ -92,7 +93,9 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access) {
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 7d1a6d24719902..50ac55d880e89e 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -36,10 +36,10 @@
#undef MAP_TYPE
#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
-
namespace v8 {
namespace base {
@@ -97,7 +97,9 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
void OS::SignalCodeMovingGC() {
}
-TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 06040e2f404d8f..0056ad56d49765 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -27,13 +27,16 @@
#undef MAP_TYPE
#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
-TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access) {
diff --git a/deps/v8/src/base/platform/platform-posix-time.cc b/deps/v8/src/base/platform/platform-posix-time.cc
new file mode 100644
index 00000000000000..a960f7237eaa20
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-posix-time.cc
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "src/base/platform/platform-posix-time.h"
+
+namespace v8 {
+namespace base {
+
+const char* PosixDefaultTimezoneCache::LocalTimezone(double time) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
+ if (!t || !t->tm_zone) return "";
+ return t->tm_zone;
+}
+
+double PosixDefaultTimezoneCache::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix-time.h b/deps/v8/src/base/platform/platform-posix-time.h
new file mode 100644
index 00000000000000..945ea3b2e9eee3
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-posix-time.h
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/platform-posix.h"
+
+namespace v8 {
+namespace base {
+
+class PosixDefaultTimezoneCache : public PosixTimezoneCache {
+ public:
+ const char* LocalTimezone(double time_ms) override;
+ double LocalTimeOffset() override;
+
+ ~PosixDefaultTimezoneCache() override {}
+};
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 25c270ad28605b..621abbe6f17fa1 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -388,26 +388,6 @@ double OS::TimeCurrentMillis() {
return Time::Now().ToJsTime();
}
-#if !V8_OS_AIX && !V8_OS_SOLARIS && !V8_OS_CYGWIN
-const char* PosixTimezoneCache::LocalTimezone(double time) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
- struct tm tm;
- struct tm* t = localtime_r(&tv, &tm);
- if (!t || !t->tm_zone) return "";
- return t->tm_zone;
-}
-
-double PosixTimezoneCache::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm tm;
- struct tm* t = localtime_r(&tv, &tm);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-#endif
-
double PosixTimezoneCache::DaylightSavingsOffset(double time) {
if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index 9818f642476c29..b092bb526dfd86 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -13,9 +13,7 @@ namespace base {
class PosixTimezoneCache : public TimezoneCache {
public:
- const char* LocalTimezone(double time_ms) override;
double DaylightSavingsOffset(double time_ms) override;
- double LocalTimeOffset() override;
void Clear() override {}
~PosixTimezoneCache() override {}
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 7ce3de119d69b8..f151bba8bb7474 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -31,6 +31,7 @@
#undef MAP_TYPE
#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
@@ -84,7 +85,9 @@ bool OS::ArmUsingHardFloat() {
#endif // __arm__
-TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
void* OS::Allocate(const size_t requested, size_t* allocated,
OS::MemoryPermission access) {
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index b0387f4dc05b55..9e48fe7593cf03 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -29,18 +29,17 @@ Cancelable::~Cancelable() {
CancelableTaskManager::CancelableTaskManager()
: task_id_counter_(0), canceled_(false) {}
-uint32_t CancelableTaskManager::Register(Cancelable* task) {
+CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
- uint32_t id = ++task_id_counter_;
- // The loop below is just used when task_id_counter_ overflows.
- while (cancelable_tasks_.count(id) > 0) ++id;
+ CancelableTaskManager::Id id = ++task_id_counter_;
+ // Id overflows are not supported.
+ CHECK_NE(0, id);
CHECK(!canceled_);
cancelable_tasks_[id] = task;
return id;
}
-
-void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
+void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
@@ -49,7 +48,7 @@ void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
}
CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
- uint32_t id) {
+ CancelableTaskManager::Id id) {
base::LockGuard<base::Mutex> guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 5b1a5f1def2f8d..8a1ad325c8c6ac 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -5,7 +5,7 @@
#ifndef V8_CANCELABLE_TASK_H_
#define V8_CANCELABLE_TASK_H_
-#include <map>
+#include <unordered_map>
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
@@ -24,12 +24,14 @@ class Isolate;
// from any fore- and background task/thread.
class V8_EXPORT_PRIVATE CancelableTaskManager {
public:
+ using Id = uint64_t;
+
CancelableTaskManager();
// Registers a new cancelable {task}. Returns the unique {id} of the task that
// can be used to try to abort a task by calling {Abort}.
// Must not be called after CancelAndWait.
- uint32_t Register(Cancelable* task);
+ Id Register(Cancelable* task);
// Try to abort running a task identified by {id}. The possible outcomes are:
// (1) The task is already finished running or was canceled before and
@@ -39,7 +41,7 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// removed.
//
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
- TryAbortResult TryAbort(uint32_t id);
+ TryAbortResult TryAbort(Id id);
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
@@ -59,13 +61,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
private:
// Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed.
- void RemoveFinishedTask(uint32_t id);
+ void RemoveFinishedTask(Id id);
// To mitigate the ABA problem, the api refers to tasks through an id.
- uint32_t task_id_counter_;
+ Id task_id_counter_;
// A set of cancelable tasks that are currently registered.
- std::map<uint32_t, Cancelable*> cancelable_tasks_;
+ std::unordered_map<Id, Cancelable*> cancelable_tasks_;
// Mutex and condition variable enabling concurrent register and removing, as
// well as waiting for background tasks on {CancelAndWait}.
@@ -89,7 +91,7 @@ class V8_EXPORT_PRIVATE Cancelable {
// a platform. This step transfers ownership to the platform, which destroys
// the task after running it. Since the exact time is not known, we cannot
// access the object after handing it to a platform.
- uint32_t id() { return id_; }
+ CancelableTaskManager::Id id() { return id_; }
protected:
bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
@@ -120,7 +122,7 @@ class V8_EXPORT_PRIVATE Cancelable {
CancelableTaskManager* parent_;
base::AtomicValue<Status> status_;
- uint32_t id_;
+ CancelableTaskManager::Id id_;
// The counter is incremented for failing tries to cancel a task. This can be
// used by the task itself as an indication how often external entities tried
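
Task ids grow to 64 bits, id reuse is dropped (overflow is now a `CHECK` failure instead of a linear re-scan), and the id-to-task map becomes an `unordered_map`. The register/abort protocol that the sweeper and unmapper changes further down build on looks roughly like the following sketch; `MyTask` is a hypothetical `CancelableTask` subclass and the code is assumed to live in `v8::internal`:

```cpp
// Sketch of the Register/TryAbort protocol used by the heap changes below.
#include "src/cancelable-task.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

class MyTask : public CancelableTask {  // hypothetical task
 public:
  explicit MyTask(Isolate* isolate) : CancelableTask(isolate) {}

 private:
  void RunInternal() override {
    // ... background work ...
  }
};

void ScheduleAndLaterCancel(Isolate* isolate) {
  // Constructing the task registers it with the manager, which hands back a
  // 64-bit id; keep the id around to abort or to wait for the task later.
  MyTask* task = new MyTask(isolate);
  CancelableTaskManager::Id id = task->id();

  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      task, v8::Platform::kShortRunningTask);

  // Tear-down: abort if the task has not started yet; otherwise the caller
  // must wait for the task to finish (e.g. on a semaphore it signals).
  if (isolate->cancelable_task_manager()->TryAbort(id) !=
      CancelableTaskManager::kTaskAborted) {
    // wait for the completion signal here
  }
}

}  // namespace internal
}  // namespace v8
```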
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1691f1618fc1e7..33fe9095ce4cb0 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -734,7 +734,11 @@ class RepresentationSelector {
!GetUpperBound(node->InputAt(1))->Maybe(type);
}
- void ConvertInput(Node* node, int index, UseInfo use) {
+ // Converts input {index} of {node} according to given UseInfo {use},
+ // assuming the type of the input is {input_type}. If {input_type} is null,
+ // it takes the input from the input node {TypeOf(node->InputAt(index))}.
+ void ConvertInput(Node* node, int index, UseInfo use,
+ Type* input_type = nullptr) {
Node* input = node->InputAt(index);
// In the change phase, insert a change before the use if necessary.
if (use.representation() == MachineRepresentation::kNone)
@@ -752,8 +756,11 @@ class RepresentationSelector {
TRACE(" to ");
PrintUseInfo(use);
TRACE("\n");
+ if (input_type == nullptr) {
+ input_type = TypeOf(input);
+ }
Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), TypeOf(input), node, use);
+ input, input_info->representation(), input_type, node, use);
node->ReplaceInput(index, n);
}
}
@@ -2802,18 +2809,22 @@ class RepresentationSelector {
case IrOpcode::kObjectState:
return VisitObjectState(node);
case IrOpcode::kTypeGuard: {
- // We just get rid of the sigma here. In principle, it should be
- // possible to refine the truncation and representation based on
- // the sigma's type.
+ // We just get rid of the sigma here, choosing the best representation
+ // for the sigma's type.
+ Type* type = TypeOf(node);
MachineRepresentation representation =
- GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
-
- // For now, we just handle specially the impossible case.
- MachineRepresentation output = TypeOf(node)->IsInhabited()
- ? representation
- : MachineRepresentation::kNone;
+ GetOutputInfoForPhi(node, type, truncation);
- VisitUnop(node, UseInfo(representation, truncation), output);
+ // Here we pretend that the input has the sigma's type for the
+ // conversion.
+ UseInfo use(representation, truncation);
+ if (propagate()) {
+ EnqueueInput(node, 0, use);
+ } else if (lower()) {
+ ConvertInput(node, 0, use, type);
+ }
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, representation);
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index efa8dbc6331bc5..1bb5300ce98b6e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -191,72 +191,65 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
};
-
-// Predictable v8::Platform implementation. All background and foreground
-// tasks are run immediately, delayed tasks are not executed at all.
+// Predictable v8::Platform implementation. Background tasks and idle tasks are
+// disallowed, and the time reported by {MonotonicallyIncreasingTime} is
+// deterministic.
class PredictablePlatform : public Platform {
- public:
- PredictablePlatform() {}
-
- void CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) override {
- task->Run();
- delete task;
- }
-
- void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- task->Run();
- delete task;
- }
-
- void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
- double delay_in_seconds) override {
- delete task;
- }
-
- void CallIdleOnForegroundThread(v8::Isolate* isolate,
- IdleTask* task) override {
- UNREACHABLE();
- }
-
- bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
-
- double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
- }
-
- using Platform::AddTraceEvent;
- uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int numArgs, const char** argNames,
- const uint8_t* argTypes, const uint64_t* argValues,
- unsigned int flags) override {
- return 0;
- }
-
- void UpdateTraceEventDuration(const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t handle) override {}
-
- const uint8_t* GetCategoryGroupEnabled(const char* name) override {
- static uint8_t no = 0;
- return &no;
- }
-
- const char* GetCategoryGroupName(
- const uint8_t* categoryEnabledFlag) override {
- static const char* dummy = "dummy";
- return dummy;
- }
-
- private:
- double synthetic_time_in_sec_ = 0.0;
-
- DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+public:
+ explicit PredictablePlatform(std::unique_ptr<Platform> platform)
+ : platform_(std::move(platform)) {
+ DCHECK_NOT_NULL(platform_);
+ }
+
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {
+ // It's not defined when background tasks are being executed, so we can just
+ // execute them right away.
+ task->Run();
+ delete task;
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ platform_->CallOnForegroundThread(isolate, task);
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ platform_->CallDelayedOnForegroundThread(isolate, task, delay_in_seconds);
+ }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return false; }
+
+ double MonotonicallyIncreasingTime() override {
+ return synthetic_time_in_sec_ += 0.00001;
+ }
+
+ v8::TracingController* GetTracingController() override {
+ return platform_->GetTracingController();
+ }
+
+ Platform* platform() const { return platform_.get(); }
+
+private:
+ double synthetic_time_in_sec_ = 0.0;
+ std::unique_ptr<Platform> platform_;
+
+ DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
};
v8::Platform* g_platform = NULL;
+v8::Platform* GetDefaultPlatform() {
+ return i::FLAG_verify_predictable
+ ? static_cast<PredictablePlatform*>(g_platform)->platform()
+ : g_platform;
+}
+
static Local<Value> Throw(Isolate* isolate, const char* message) {
return isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -1385,8 +1378,6 @@ void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
const_cast<v8::FunctionCallbackInfo<v8::Value>*>(&args));
}
-// Note that both WaitUntilDone and NotifyDone are no-op when
-// --verify-predictable. See comment in Shell::EnsureEventLoopInitialized.
void Shell::WaitUntilDone(const v8::FunctionCallbackInfo<v8::Value>& args) {
SetWaitUntilDone(args.GetIsolate(), true);
}
@@ -2763,13 +2754,8 @@ void Shell::CollectGarbage(Isolate* isolate) {
}
void Shell::EnsureEventLoopInitialized(Isolate* isolate) {
- // When using PredictablePlatform (i.e. FLAG_verify_predictable),
- // we don't need event loop support, because tasks are completed
- // immediately - both background and foreground ones.
- if (!i::FLAG_verify_predictable) {
- v8::platform::EnsureEventLoopInitialized(g_platform, isolate);
- SetWaitUntilDone(isolate, false);
- }
+ v8::platform::EnsureEventLoopInitialized(GetDefaultPlatform(), isolate);
+ SetWaitUntilDone(isolate, false);
}
void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
@@ -2788,29 +2774,32 @@ bool Shell::IsWaitUntilDone(Isolate* isolate) {
}
void Shell::CompleteMessageLoop(Isolate* isolate) {
- // See comment in EnsureEventLoopInitialized.
- if (i::FLAG_verify_predictable) return;
+ Platform* platform = GetDefaultPlatform();
while (v8::platform::PumpMessageLoop(
- g_platform, isolate,
+ platform, isolate,
Shell::IsWaitUntilDone(isolate)
? platform::MessageLoopBehavior::kWaitForWork
: platform::MessageLoopBehavior::kDoNotWait)) {
isolate->RunMicrotasks();
}
- v8::platform::RunIdleTasks(g_platform, isolate,
- 50.0 / base::Time::kMillisecondsPerSecond);
+ if (platform->IdleTasksEnabled(isolate)) {
+ v8::platform::RunIdleTasks(platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
+ }
}
void Shell::EmptyMessageQueues(Isolate* isolate) {
- if (i::FLAG_verify_predictable) return;
+ Platform* platform = GetDefaultPlatform();
// Pump the message loop until it is empty.
while (v8::platform::PumpMessageLoop(
- g_platform, isolate, platform::MessageLoopBehavior::kDoNotWait)) {
+ platform, isolate, platform::MessageLoopBehavior::kDoNotWait)) {
isolate->RunMicrotasks();
}
// Run the idle tasks.
- v8::platform::RunIdleTasks(g_platform, isolate,
- 50.0 / base::Time::kMillisecondsPerSecond);
+ if (platform->IdleTasksEnabled(isolate)) {
+ v8::platform::RunIdleTasks(platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
+ }
}
class Serializer : public ValueSerializer::Delegate {
@@ -3068,14 +3057,14 @@ int Shell::Main(int argc, char* argv[]) {
? v8::platform::InProcessStackDumping::kDisabled
: v8::platform::InProcessStackDumping::kEnabled;
- g_platform = i::FLAG_verify_predictable
- ? new PredictablePlatform()
- : v8::platform::CreateDefaultPlatform(
- 0, v8::platform::IdleTaskSupport::kEnabled,
- in_process_stack_dumping);
+ g_platform = v8::platform::CreateDefaultPlatform(
+ 0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping);
+ if (i::FLAG_verify_predictable) {
+ g_platform = new PredictablePlatform(std::unique_ptr<Platform>(g_platform));
+ }
- platform::tracing::TracingController* tracing_controller;
- if (options.trace_enabled) {
+ platform::tracing::TracingController* tracing_controller = nullptr;
+ if (options.trace_enabled && !i::FLAG_verify_predictable) {
trace_file.open("v8_trace.json");
tracing_controller = new platform::tracing::TracingController();
platform::tracing::TraceBuffer* trace_buffer =
@@ -3083,9 +3072,7 @@ int Shell::Main(int argc, char* argv[]) {
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
tracing_controller->Initialize(trace_buffer);
- if (!i::FLAG_verify_predictable) {
- platform::SetTracingController(g_platform, tracing_controller);
- }
+ platform::SetTracingController(g_platform, tracing_controller);
}
v8::V8::InitializePlatform(g_platform);
@@ -3134,6 +3121,7 @@ int Shell::Main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::New(create_params);
+
D8Console console(isolate);
{
Isolate::Scope scope(isolate);
@@ -3203,9 +3191,6 @@ int Shell::Main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
delete g_platform;
- if (i::FLAG_verify_predictable) {
- delete tracing_controller;
- }
return result;
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2b26e0d1b9f0f7..fa47dc825b71d3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -161,6 +161,7 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
+ use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
@@ -5823,6 +5824,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
+ use_tasks_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index f18f3edd3f1231..7f213eff2724cf 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -1016,6 +1016,8 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
+ bool use_tasks() const { return use_tasks_; }
+
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -2375,6 +2377,8 @@ class Heap {
bool fast_promotion_mode_;
+ bool use_tasks_;
+
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index c3228403ff87ab..4c2b37e9e6ad1a 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -133,7 +133,8 @@ class ItemParallelJob {
const size_t num_tasks = tasks_.size();
const size_t num_items = items_.size();
const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- uint32_t* task_ids = new uint32_t[num_tasks];
+ CancelableTaskManager::Id* task_ids =
+ new CancelableTaskManager::Id[num_tasks];
size_t start_index = 0;
Task* main_task = nullptr;
Task* task = nullptr;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 6bb7d3e352a65b..d970e1a50e3f8a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -7,6 +7,7 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
+#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
@@ -546,12 +547,14 @@ void MarkCompactCollector::ClearMarkbits() {
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
-class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
+class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
public:
- SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
+ SweeperTask(Isolate* isolate, Sweeper* sweeper,
+ base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
- : sweeper_(sweeper),
+ : CancelableTask(isolate),
+ sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
@@ -559,8 +562,7 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
virtual ~SweeperTask() {}
private:
- // v8::Task overrides.
- void Run() override {
+ void RunInternal() final {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
@@ -575,9 +577,9 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
pending_sweeper_tasks_->Signal();
}
- Sweeper* sweeper_;
- base::Semaphore* pending_sweeper_tasks_;
- base::AtomicNumber<intptr_t>* num_sweeping_tasks_;
+ Sweeper* const sweeper_;
+ base::Semaphore* const pending_sweeper_tasks_;
+ base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
@@ -595,15 +597,19 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
}
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
+ DCHECK_EQ(0, num_tasks_);
+ DCHECK_EQ(0, num_sweeping_tasks_.Value());
if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
- semaphore_counter_++;
+ SweeperTask* task = new SweeperTask(heap_->isolate(), this,
+ &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space);
+ DCHECK_LT(num_tasks_, kMaxSweeperTasks);
+ task_ids_[num_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(this, &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space),
- v8::Platform::kShortRunningTask);
+ task, v8::Platform::kShortRunningTask);
});
}
}
@@ -646,10 +652,14 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
if (FLAG_concurrent_sweeping) {
- while (semaphore_counter_ > 0) {
- pending_sweeper_tasks_semaphore_.Wait();
- semaphore_counter_--;
+ for (int i = 0; i < num_tasks_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_sweeper_tasks_semaphore_.Wait();
+ }
}
+ num_tasks_ = 0;
+ num_sweeping_tasks_.SetValue(0);
}
ForAllSweepingSpaces([this](AllocationSpace space) {
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 24ec7043abe22d..e32ab4c6f19eba 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -408,8 +408,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class Sweeper {
public:
- class SweeperTask;
-
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
@@ -425,8 +423,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
explicit Sweeper(Heap* heap)
: heap_(heap),
+ num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
- semaphore_counter_(0),
sweeping_in_progress_(false),
num_sweeping_tasks_(0) {}
@@ -452,7 +450,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Page* GetSweptPageSafe(PagedSpace* space);
private:
+ class SweeperTask;
+
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+ static const int kMaxSweeperTasks = kAllocationSpaces;
static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
@@ -468,10 +469,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
- Heap* heap_;
+ Heap* const heap_;
+ int num_tasks_;
+ CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
base::Semaphore pending_sweeper_tasks_semaphore_;
- // Counter is only used for waiting on the semaphore.
- intptr_t semaphore_counter_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
index eb215efbb489fe..939bdb3b3b2925 100644
--- a/deps/v8/src/heap/page-parallel-job.h
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -69,7 +69,7 @@ class PageParallelJob {
void Run(int num_tasks, Callback per_task_data_callback) {
if (num_items_ == 0) return;
DCHECK_GE(num_tasks, 1);
- uint32_t task_ids[kMaxNumberOfTasks];
+ CancelableTaskManager::Id task_ids[kMaxNumberOfTasks];
const int max_num_tasks = Min(
kMaxNumberOfTasks,
static_cast<int>(
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 71e1b60be978ec..3e677888280e0c 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -300,7 +300,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)),
- unmapper_(this) {}
+ unmapper_(isolate->heap(), this) {}
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -332,40 +332,46 @@ void MemoryAllocator::TearDown() {
code_range_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
public:
- explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+ explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
+ : CancelableTask(isolate), unmapper_(unmapper) {}
private:
- // v8::Task overrides.
- void Run() override {
+ void RunInternal() override {
unmapper_->PerformFreeMemoryOnQueuedChunks();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
- Unmapper* unmapper_;
+ Unmapper* const unmapper_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks();
- if (FLAG_concurrent_sweeping) {
+ if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
+ if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+ // kMaxUnmapperTasks are already running. Avoid creating any more.
+ return;
+ }
+ UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
+ DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
+ task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
- concurrent_unmapping_tasks_active_++;
+ task, v8::Platform::kShortRunningTask);
} else {
PerformFreeMemoryOnQueuedChunks();
}
}
-bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
- bool waited = false;
- while (concurrent_unmapping_tasks_active_ > 0) {
- pending_unmapping_tasks_semaphore_.Wait();
- concurrent_unmapping_tasks_active_--;
- waited = true;
+void MemoryAllocator::Unmapper::WaitUntilCompleted() {
+ for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ }
+ concurrent_unmapping_tasks_active_ = 0;
}
- return waited;
}
template
@@ -392,7 +398,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- WaitUntilCompleted();
+ CHECK_EQ(0, concurrent_unmapping_tasks_active_);
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index dc49f3d4a03973..5c37482ac2161b 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -15,6 +15,7 @@
#include "src/base/bits.h"
#include "src/base/hashmap.h"
#include "src/base/platform/mutex.h"
+#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
@@ -1149,8 +1150,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
public:
class UnmapFreeMemoryTask;
- explicit Unmapper(MemoryAllocator* allocator)
- : allocator_(allocator),
+ Unmapper(Heap* heap, MemoryAllocator* allocator)
+ : heap_(heap),
+ allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
concurrent_unmapping_tasks_active_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
@@ -1184,13 +1186,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void FreeQueuedChunks();
- bool WaitUntilCompleted();
+ void WaitUntilCompleted();
void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private:
static const int kReservedQueueingSlots = 64;
+ static const int kMaxUnmapperTasks = 24;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1229,13 +1232,15 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template
void PerformFreeMemoryOnQueuedChunks();
+ Heap* const heap_;
+ MemoryAllocator* const allocator_;
base::Mutex mutex_;
- MemoryAllocator* allocator_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_;
+ CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index faa04848cff43c..366c14fb11dc2b 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -2455,6 +2455,7 @@ void Isolate::Deinit() {
}
heap_.mark_compact_collector()->EnsureSweepingCompleted();
+ heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
DumpAndResetStats();
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 66290726833b67..34cda33b4349cc 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -13,6 +13,8 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
+#include "src/libplatform/tracing/trace-buffer.h"
+#include "src/libplatform/tracing/trace-writer.h"
#include "src/libplatform/worker-thread.h"
namespace v8 {
@@ -29,13 +31,15 @@ void PrintStackTrace() {
} // namespace
-v8::Platform* CreateDefaultPlatform(
- int thread_pool_size, IdleTaskSupport idle_task_support,
- InProcessStackDumping in_process_stack_dumping) {
+v8::Platform* CreateDefaultPlatform(int thread_pool_size,
+ IdleTaskSupport idle_task_support,
+ InProcessStackDumping in_process_stack_dumping,
+ v8::TracingController* tracing_controller) {
if (in_process_stack_dumping == InProcessStackDumping::kEnabled) {
v8::base::debug::EnableInProcessStackDumping();
}
- DefaultPlatform* platform = new DefaultPlatform(idle_task_support);
+ DefaultPlatform* platform =
+ new DefaultPlatform(idle_task_support, tracing_controller);
platform->SetThreadPoolSize(thread_pool_size);
platform->EnsureInitialized();
return platform;
@@ -43,41 +47,48 @@ v8::Platform* CreateDefaultPlatform(
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
MessageLoopBehavior behavior) {
- return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(
- isolate, behavior);
+ return static_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate,
+ behavior);
}
void EnsureEventLoopInitialized(v8::Platform* platform, v8::Isolate* isolate) {
- return reinterpret_cast<DefaultPlatform*>(platform)
- ->EnsureEventLoopInitialized(isolate);
+ return static_cast<DefaultPlatform*>(platform)->EnsureEventLoopInitialized(
+ isolate);
}
void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
double idle_time_in_seconds) {
- reinterpret_cast<DefaultPlatform*>(platform)->RunIdleTasks(
- isolate, idle_time_in_seconds);
+ static_cast<DefaultPlatform*>(platform)->RunIdleTasks(isolate,
+ idle_time_in_seconds);
}
void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller) {
- return reinterpret_cast<DefaultPlatform*>(platform)->SetTracingController(
+ return static_cast<DefaultPlatform*>(platform)->SetTracingController(
tracing_controller);
}
const int DefaultPlatform::kMaxThreadPoolSize = 8;
-DefaultPlatform::DefaultPlatform(IdleTaskSupport idle_task_support)
+DefaultPlatform::DefaultPlatform(IdleTaskSupport idle_task_support,
+ v8::TracingController* tracing_controller)
: initialized_(false),
thread_pool_size_(0),
- idle_task_support_(idle_task_support) {}
-
-DefaultPlatform::~DefaultPlatform() {
- if (tracing_controller_) {
- tracing_controller_->StopTracing();
- tracing_controller_.reset();
+ idle_task_support_(idle_task_support) {
+ if (tracing_controller) {
+ tracing_controller_.reset(tracing_controller);
+ } else {
+ tracing::TraceWriter* writer = new tracing::NullTraceWriter();
+ tracing::TraceBuffer* ring_buffer =
+ new tracing::TraceBufferRingBuffer(1, writer);
+ tracing::TracingController* controller = new tracing::TracingController();
+ controller->Initialize(ring_buffer);
+ tracing_controller_.reset(controller);
}
+}
+DefaultPlatform::~DefaultPlatform() {
base::LockGuard<base::Mutex> guard(&lock_);
queue_.Terminate();
if (initialized_) {
@@ -272,47 +283,13 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
-uint64_t DefaultPlatform::AddTraceEvent(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags) {
- if (tracing_controller_) {
- return tracing_controller_->AddTraceEvent(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags);
- }
-
- return 0;
-}
-
-void DefaultPlatform::UpdateTraceEventDuration(
- const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {
- if (tracing_controller_) {
- tracing_controller_->UpdateTraceEventDuration(category_enabled_flag, name,
- handle);
- }
-}
-
-const uint8_t* DefaultPlatform::GetCategoryGroupEnabled(const char* name) {
- if (tracing_controller_) {
- return tracing_controller_->GetCategoryGroupEnabled(name);
- }
- static uint8_t no = 0;
- return &no;
-}
-
-
-const char* DefaultPlatform::GetCategoryGroupName(
- const uint8_t* category_enabled_flag) {
- static const char dummy[] = "dummy";
- return dummy;
+TracingController* DefaultPlatform::GetTracingController() {
+ return tracing_controller_.get();
}
void DefaultPlatform::SetTracingController(
- tracing::TracingController* tracing_controller) {
+ v8::TracingController* tracing_controller) {
+ DCHECK_NOT_NULL(tracing_controller);
tracing_controller_.reset(tracing_controller);
}
@@ -320,16 +297,6 @@ size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
return static_cast<size_t>(thread_pool_size_);
}
-void DefaultPlatform::AddTraceStateObserver(TraceStateObserver* observer) {
- if (!tracing_controller_) return;
- tracing_controller_->AddTraceStateObserver(observer);
-}
-
-void DefaultPlatform::RemoveTraceStateObserver(TraceStateObserver* observer) {
- if (!tracing_controller_) return;
- tracing_controller_->RemoveTraceStateObserver(observer);
-}
-
Platform::StackTracePrinter DefaultPlatform::GetStackTracePrinter() {
return PrintStackTrace;
}
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 40268647493673..a5fa7342181235 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -27,14 +27,11 @@ class TaskQueue;
class Thread;
class WorkerThread;
-namespace tracing {
-class TracingController;
-}
-
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
explicit DefaultPlatform(
- IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled);
+ IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
+ v8::TracingController* tracing_controller = nullptr);
virtual ~DefaultPlatform();
void SetThreadPoolSize(int thread_pool_size);
@@ -48,6 +45,8 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void RunIdleTasks(v8::Isolate* isolate, double idle_time_in_seconds);
+ void SetTracingController(v8::TracingController* tracing_controller);
+
// v8::Platform implementation.
size_t NumberOfAvailableBackgroundThreads() override;
void CallOnBackgroundThread(Task* task,
@@ -58,23 +57,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override;
bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
- const uint8_t* GetCategoryGroupEnabled(const char* name) override;
- const char* GetCategoryGroupName(
- const uint8_t* category_enabled_flag) override;
- using Platform::AddTraceEvent;
- uint64_t AddTraceEvent(
- char phase, const uint8_t* category_enabled_flag, const char* name,
- const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
- unsigned int flags) override;
- void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
- const char* name, uint64_t handle) override;
- void SetTracingController(tracing::TracingController* tracing_controller);
-
- void AddTraceStateObserver(TraceStateObserver* observer) override;
- void RemoveTraceStateObserver(TraceStateObserver* observer) override;
+ v8::TracingController* GetTracingController() override;
StackTracePrinter GetStackTracePrinter() override;
private:
@@ -102,7 +85,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
std::greater<DelayedEntry> > >
main_thread_delayed_queue_;
- std::unique_ptr<tracing::TracingController> tracing_controller_;
+ std::unique_ptr<v8::TracingController> tracing_controller_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index 43d7cb6a9063f8..67559f91fe559b 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -26,6 +26,14 @@ class JSONTraceWriter : public TraceWriter {
bool append_comma_ = false;
};
+class NullTraceWriter : public TraceWriter {
+ public:
+ NullTraceWriter() = default;
+ ~NullTraceWriter() = default;
+ void AppendTraceEvent(TraceObject*) override {}
+ void Flush() override {}
+};
+
} // namespace tracing
} // namespace platform
} // namespace v8
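
`NullTraceWriter` is the null-object half of the new guarantee that `Platform::GetTracingController()` never returns nullptr: when the embedder supplies no controller, `DefaultPlatform` wires one up to a writer that simply discards events. A sketch mirroring that constructor fallback; the helper name is hypothetical and the two `src/libplatform/tracing/...` headers are internal to V8:

```cpp
// Mirrors the fallback wiring in the DefaultPlatform constructor above:
// a TracingController whose output goes nowhere.
#include "include/libplatform/v8-tracing.h"
#include "src/libplatform/tracing/trace-buffer.h"
#include "src/libplatform/tracing/trace-writer.h"

v8::platform::tracing::TracingController* MakeNullTracingController() {
  using namespace v8::platform::tracing;
  // NullTraceWriter drops every event; wrap it in the smallest ring buffer.
  TraceWriter* writer = new NullTraceWriter();
  TraceBuffer* ring_buffer = new TraceBufferRingBuffer(1, writer);
  auto* controller = new TracingController();
  controller->Initialize(ring_buffer);  // controller takes ownership
  return controller;
}
```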
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index c1a4057c0555b4..4e71f432e84046 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -40,7 +40,7 @@ v8::base::AtomicWord g_category_index = g_num_builtin_categories;
TracingController::TracingController() {}
-TracingController::~TracingController() {}
+TracingController::~TracingController() { StopTracing(); }
void TracingController::Initialize(TraceBuffer* trace_buffer) {
trace_buffer_.reset(trace_buffer);
@@ -98,7 +98,7 @@ const char* TracingController::GetCategoryGroupName(
void TracingController::StartTracing(TraceConfig* trace_config) {
trace_config_.reset(trace_config);
- std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+ std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::LockGuard<base::Mutex> lock(mutex_.get());
mode_ = RECORDING_MODE;
@@ -111,9 +111,13 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
}
void TracingController::StopTracing() {
+ if (mode_ == DISABLED) {
+ return;
+ }
+ DCHECK(trace_buffer_);
mode_ = DISABLED;
UpdateCategoryGroupEnabledFlags();
- std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+ std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::LockGuard<base::Mutex> lock(mutex_.get());
observers_copy = observers_;
@@ -196,7 +200,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
}
void TracingController::AddTraceStateObserver(
- Platform::TraceStateObserver* observer) {
+ v8::TracingController::TraceStateObserver* observer) {
{
base::LockGuard<base::Mutex> lock(mutex_.get());
observers_.insert(observer);
@@ -207,7 +211,7 @@ void TracingController::AddTraceStateObserver(
}
void TracingController::RemoveTraceStateObserver(
- Platform::TraceStateObserver* observer) {
+ v8::TracingController::TraceStateObserver* observer) {
base::LockGuard<base::Mutex> lock(mutex_.get());
DCHECK(observers_.find(observer) != observers_.end());
observers_.erase(observer);
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index a9b84b66345c2e..601adf60ac32ea 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -25,12 +25,13 @@ TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
TRACE_EVENT_WARMUP_CATEGORY(
TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"));
- V8::GetCurrentPlatform()->AddTraceStateObserver(this);
+ V8::GetCurrentPlatform()->GetTracingController()->AddTraceStateObserver(this);
}
TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {
StopProfiling();
- V8::GetCurrentPlatform()->RemoveTraceStateObserver(this);
+ V8::GetCurrentPlatform()->GetTracingController()->RemoveTraceStateObserver(
+ this);
}
void TracingCpuProfilerImpl::OnTraceEnabled() {
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index a512a940f8578b..e654f2be9d645a 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -17,13 +17,14 @@ namespace internal {
class CpuProfiler;
class Isolate;
-class TracingCpuProfilerImpl final : public TracingCpuProfiler,
- private v8::Platform::TraceStateObserver {
+class TracingCpuProfilerImpl final
+ : public TracingCpuProfiler,
+ private v8::TracingController::TraceStateObserver {
public:
explicit TracingCpuProfilerImpl(Isolate*);
~TracingCpuProfilerImpl();
- // v8::Platform::TraceStateObserver
+ // v8::TracingController::TraceStateObserver
void OnTraceEnabled() final;
void OnTraceDisabled() final;
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 97da1de056ce1c..41c59269e82a45 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -15,8 +15,8 @@ namespace v8 {
namespace internal {
namespace tracing {
-v8::Platform* TraceEventHelper::GetCurrentPlatform() {
- return v8::internal::V8::GetCurrentPlatform();
+v8::TracingController* TraceEventHelper::GetTracingController() {
+ return v8::internal::V8::GetCurrentPlatform()->GetTracingController();
}
void CallStatsScopedTracer::AddEndTraceEvent() {
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 8fbd56f6b5de23..6550e3e6fa9c64 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -72,8 +72,8 @@ enum CategoryGroupEnabledFlags {
// for best performance when tracing is disabled.
// const uint8_t*
// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
-#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
- v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ v8::internal::tracing::TraceEventHelper::GetTracingController() \
->GetCategoryGroupEnabled
// Get the number of times traces have been recorded. This is used to implement
@@ -101,8 +101,8 @@ enum CategoryGroupEnabledFlags {
// const uint8_t* category_group_enabled,
// const char* name,
// uint64_t id)
-#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
- v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ v8::internal::tracing::TraceEventHelper::GetTracingController() \
->UpdateTraceEventDuration
// Defines atomic operations used internally by the tracing system.
@@ -277,7 +277,7 @@ const uint64_t kNoId = 0;
class TraceEventHelper {
public:
- static v8::Platform* GetCurrentPlatform();
+ static v8::TracingController* GetTracingController();
};
// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
@@ -424,11 +424,11 @@ static V8_INLINE uint64_t AddTraceEventImpl(
static_cast(arg_values[1])));
}
DCHECK(num_args <= 2);
- v8::Platform* platform =
- v8::internal::tracing::TraceEventHelper::GetCurrentPlatform();
- return platform->AddTraceEvent(phase, category_group_enabled, name, scope, id,
- bind_id, num_args, arg_names, arg_types,
- arg_values, arg_convertables, flags);
+ v8::TracingController* controller =
+ v8::internal::tracing::TraceEventHelper::GetTracingController();
+ return controller->AddTraceEvent(phase, category_group_enabled, name, scope,
+ id, bind_id, num_args, arg_names, arg_types,
+ arg_values, arg_convertables, flags);
}
// Define SetTraceValue for each allowed type. It stores the type and
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 6a3615874184f6..3e286620dc555b 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -15,8 +15,9 @@ TracingCategoryObserver* TracingCategoryObserver::instance_ = nullptr;
void TracingCategoryObserver::SetUp() {
TracingCategoryObserver::instance_ = new TracingCategoryObserver();
- v8::internal::V8::GetCurrentPlatform()->AddTraceStateObserver(
- TracingCategoryObserver::instance_);
+ v8::internal::V8::GetCurrentPlatform()
+ ->GetTracingController()
+ ->AddTraceStateObserver(TracingCategoryObserver::instance_);
TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));
TRACE_EVENT_WARMUP_CATEGORY(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
@@ -25,8 +26,9 @@ void TracingCategoryObserver::SetUp() {
}
void TracingCategoryObserver::TearDown() {
- v8::internal::V8::GetCurrentPlatform()->RemoveTraceStateObserver(
- TracingCategoryObserver::instance_);
+ v8::internal::V8::GetCurrentPlatform()
+ ->GetTracingController()
+ ->RemoveTraceStateObserver(TracingCategoryObserver::instance_);
delete TracingCategoryObserver::instance_;
}
diff --git a/deps/v8/src/tracing/tracing-category-observer.h b/deps/v8/src/tracing/tracing-category-observer.h
index 66dd2d78f11419..858bf0bdf81d53 100644
--- a/deps/v8/src/tracing/tracing-category-observer.h
+++ b/deps/v8/src/tracing/tracing-category-observer.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace tracing {
-class TracingCategoryObserver : public Platform::TraceStateObserver {
+class TracingCategoryObserver : public TracingController::TraceStateObserver {
public:
enum Mode {
ENABLED_BY_NATIVE = 1 << 0,
@@ -21,7 +21,7 @@ class TracingCategoryObserver : public Platform::TraceStateObserver {
static void SetUp();
static void TearDown();
- // v8::Platform::TraceStateObserver
+ // v8::TracingController::TraceStateObserver
void OnTraceEnabled() final;
void OnTraceDisabled() final;
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index 7b399f5eeac1be..d1b549a1701900 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -23,7 +23,14 @@ namespace v8 {
namespace internal {
namespace trap_handler {
-THREAD_LOCAL bool g_thread_in_wasm_code = false;
+// We declare this as int rather than bool as a workaround for a glibc bug, in
+// which the dynamic loader cannot handle executables whose TLS area is only
+// 1 byte in size; see https://sourceware.org/bugzilla/show_bug.cgi?id=14898.
+THREAD_LOCAL int g_thread_in_wasm_code = false;
+
+static_assert(sizeof(g_thread_in_wasm_code) > 1,
+ "sizeof(thread_local_var) must be > 1, see "
+ "https://sourceware.org/bugzilla/show_bug.cgi?id=14898");
size_t gNumCodeObjects = 0;
CodeProtectionInfoListEntry* gCodeObjects = nullptr;
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 5494c5fdb312f3..ed9459918b7b36 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -65,7 +65,7 @@ inline bool UseTrapHandler() {
return FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED;
}
-extern THREAD_LOCAL bool g_thread_in_wasm_code;
+extern THREAD_LOCAL int g_thread_in_wasm_code;
inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index 8015a6bda634cf..c269f245aaa65b 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -2063,6 +2063,8 @@
'base/platform/platform-linux.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
],
}
],
@@ -2071,6 +2073,8 @@
'base/debug/stack_trace_android.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
],
'link_settings': {
'target_conditions': [
@@ -2127,6 +2131,8 @@
'base/debug/stack_trace_posix.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
'base/qnx-math.h'
],
'target_conditions': [
@@ -2158,6 +2164,8 @@
'base/platform/platform-freebsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
],
}
],
@@ -2170,6 +2178,8 @@
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc'
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
],
}
],
@@ -2183,6 +2193,8 @@
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
],
}
],
@@ -2213,6 +2225,8 @@
'base/platform/platform-macos.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix-time.h',
+ 'base/platform/platform-posix-time.cc',
]},
],
['OS=="win"', {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index bd4495573568f8..5b8020b4f5cb08 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -2695,10 +2695,6 @@ void wasm::AsyncInstantiate(Isolate* isolate, Handle promise,
// foreground task. All other tasks (e.g. decoding and validating, the majority
// of the work of compilation) can be background tasks.
// TODO(wasm): factor out common parts of this with the synchronous pipeline.
-//
-// Note: In predictable mode, DoSync and DoAsync execute the referenced function
-// immediately before returning. Thus we handle the predictable mode specially,
-// e.g. when we synchronizing tasks or when we delete the AyncCompileJob.
class AsyncCompileJob {
// TODO(ahaas): Fix https://bugs.chromium.org/p/v8/issues/detail?id=6263 to
// make sure that d8 does not shut down before the AsyncCompileJob is
@@ -2761,14 +2757,14 @@ class AsyncCompileJob {
RejectPromise(isolate_, context_, thrower, module_promise_);
// The AsyncCompileJob is finished, we resolved the promise, we do not need
// the data anymore. We can delete the AsyncCompileJob object.
- if (!FLAG_verify_predictable) delete this;
+ delete this;
}
void AsyncCompileSucceeded(Handle result) {
ResolvePromise(isolate_, context_, module_promise_, result);
// The AsyncCompileJob is finished, we resolved the promise, we do not need
// the data anymore. We can delete the AsyncCompileJob object.
- if (!FLAG_verify_predictable) delete this;
+ delete this;
}
enum TaskType { SYNC, ASYNC };
@@ -2975,9 +2971,7 @@ class AsyncCompileJob {
// TODO(ahaas): Limit the number of outstanding compilation units to be
// finished to reduce memory overhead.
}
- // Special handling for predictable mode, see above.
- if (!FLAG_verify_predictable)
- job_->helper_->module_->pending_tasks.get()->Signal();
+ job_->helper_->module_->pending_tasks.get()->Signal();
}
};
@@ -3026,12 +3020,9 @@ class AsyncCompileJob {
// Bump next_unit_, such that background tasks stop processing the queue.
job_->helper_->next_unit_.SetValue(
job_->helper_->compilation_units_.size());
- // Special handling for predictable mode, see above.
- if (!FLAG_verify_predictable) {
- for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
- // We wait for it to finish.
- job_->helper_->module_->pending_tasks.get()->Wait();
- }
+ for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
+ // We wait for it to finish.
+ job_->helper_->module_->pending_tasks.get()->Wait();
}
if (thrower_.error()) {
job_->DoSync(std::move(thrower_));
@@ -3194,8 +3185,6 @@ void wasm::AsyncCompile(Isolate* isolate, Handle promise,
auto job = new AsyncCompileJob(isolate, std::move(copy), bytes.length(),
handle(isolate->context()), promise);
job->Start();
- // Special handling for predictable mode, see above.
- if (FLAG_verify_predictable) delete job;
}
Handle wasm::CompileLazy(Isolate* isolate) {
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index ce1fb349517f48..6e4aa04d255c4a 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -62,6 +62,10 @@ class MockPlatform : public v8::Platform {
bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
+ v8::TracingController* GetTracingController() override {
+ return platform_->GetTracingController();
+ }
+
bool PendingTask() { return task_ != nullptr; }
void PerformTask() {
@@ -71,29 +75,6 @@ class MockPlatform : public v8::Platform {
delete task;
}
- using Platform::AddTraceEvent;
- uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int numArgs, const char** argNames,
- const uint8_t* argTypes, const uint64_t* argValues,
- unsigned int flags) override {
- return 0;
- }
-
- void UpdateTraceEventDuration(const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t handle) override {}
-
- const uint8_t* GetCategoryGroupEnabled(const char* name) override {
- static uint8_t no = 0;
- return &no;
- }
-
- const char* GetCategoryGroupName(
- const uint8_t* categoryEnabledFlag) override {
- static const char* dummy = "dummy";
- return dummy;
- }
-
private:
v8::Platform* platform_;
Task* task_;
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 0d625ca408078b..e9d2045f6f4f29 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -370,6 +370,7 @@ TEST(NewSpace) {
}
new_space.TearDown();
+ memory_allocator->unmapper()->WaitUntilCompleted();
memory_allocator->TearDown();
delete memory_allocator;
}
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 5dc6b965f16c48..369d7bc7621a3f 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -4,6 +4,7 @@
#include
#include "include/libplatform/v8-tracing.h"
+#include "src/libplatform/default-platform.h"
#include "src/tracing/trace-event.h"
#include "test/cctest/cctest.h"
@@ -135,7 +136,8 @@ TEST(TestJSONTraceWriter) {
// Create a scope for the tracing controller to terminate the trace writer.
{
TracingController tracing_controller;
- platform::SetTracingController(default_platform, &tracing_controller);
+ static_cast(default_platform)
+ ->SetTracingController(&tracing_controller);
TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
TraceBuffer* ring_buffer =
@@ -178,7 +180,8 @@ TEST(TestTracingController) {
i::V8::SetPlatformForTesting(default_platform);
TracingController tracing_controller;
- platform::SetTracingController(default_platform, &tracing_controller);
+ static_cast(default_platform)
+ ->SetTracingController(&tracing_controller);
MockTraceWriter* writer = new MockTraceWriter();
TraceBuffer* ring_buffer =
@@ -244,7 +247,8 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
// Create a scope for the tracing controller to terminate the trace writer.
{
TracingController tracing_controller;
- platform::SetTracingController(default_platform, &tracing_controller);
+ static_cast(default_platform)
+ ->SetTracingController(&tracing_controller);
TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
TraceBuffer* ring_buffer =
@@ -339,7 +343,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
namespace {
-class TraceStateObserverImpl : public Platform::TraceStateObserver {
+class TraceStateObserverImpl : public TracingController::TraceStateObserver {
public:
void OnTraceEnabled() override { ++enabled_count; }
void OnTraceDisabled() override { ++disabled_count; }
@@ -356,7 +360,8 @@ TEST(TracingObservers) {
i::V8::SetPlatformForTesting(default_platform);
v8::platform::tracing::TracingController tracing_controller;
- v8::platform::SetTracingController(default_platform, &tracing_controller);
+ static_cast(default_platform)
+ ->SetTracingController(&tracing_controller);
MockTraceWriter* writer = new MockTraceWriter();
v8::platform::tracing::TraceBuffer* ring_buffer =
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
@@ -367,7 +372,7 @@ TEST(TracingObservers) {
trace_config->AddIncludedCategory("v8");
TraceStateObserverImpl observer;
- default_platform->AddTraceStateObserver(&observer);
+ tracing_controller.AddTraceStateObserver(&observer);
CHECK_EQ(0, observer.enabled_count);
CHECK_EQ(0, observer.disabled_count);
@@ -378,12 +383,12 @@ TEST(TracingObservers) {
CHECK_EQ(0, observer.disabled_count);
TraceStateObserverImpl observer2;
- default_platform->AddTraceStateObserver(&observer2);
+ tracing_controller.AddTraceStateObserver(&observer2);
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
- default_platform->RemoveTraceStateObserver(&observer2);
+ tracing_controller.RemoveTraceStateObserver(&observer2);
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
@@ -395,7 +400,7 @@ TEST(TracingObservers) {
CHECK_EQ(1, observer2.enabled_count);
CHECK_EQ(0, observer2.disabled_count);
- default_platform->RemoveTraceStateObserver(&observer);
+ tracing_controller.RemoveTraceStateObserver(&observer);
CHECK_EQ(1, observer.enabled_count);
CHECK_EQ(1, observer.disabled_count);
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 9ccc93f0f5b3f0..1ffb5dfcaf6256 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -33,6 +33,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/deoptimizer.h"
+#include "src/libplatform/default-platform.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
@@ -2152,7 +2153,8 @@ TEST(TracingCpuProfiler) {
i::V8::SetPlatformForTesting(default_platform);
v8::platform::tracing::TracingController tracing_controller;
- v8::platform::SetTracingController(default_platform, &tracing_controller);
+ static_cast(default_platform)
+ ->SetTracingController(&tracing_controller);
CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
TraceBuffer* ring_buffer =
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 88f295f30126f6..bc1684dfa59fa1 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -40,38 +40,16 @@ struct MockTraceObject {
typedef v8::internal::List MockTraceObjectList;
-class MockTracingPlatform : public v8::Platform {
+class MockTracingController : public v8::TracingController {
public:
- explicit MockTracingPlatform(v8::Platform* platform) {}
- virtual ~MockTracingPlatform() {
+ MockTracingController() = default;
+ ~MockTracingController() {
for (int i = 0; i < trace_object_list_.length(); ++i) {
delete trace_object_list_[i];
}
trace_object_list_.Clear();
}
- void CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) override {}
-
- void CallOnForegroundThread(Isolate* isolate, Task* task) override {}
-
- void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds) override {}
-
- double MonotonicallyIncreasingTime() override { return 0.0; }
-
- void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {}
-
- bool IdleTasksEnabled(Isolate* isolate) override { return false; }
- bool PendingIdleTask() { return false; }
-
- void PerformIdleTask(double idle_time_in_seconds) {}
-
- bool PendingDelayedTask() { return false; }
-
- void PerformDelayedTask() {}
-
- using Platform::AddTraceEvent;
uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@@ -98,16 +76,52 @@ class MockTracingPlatform : public v8::Platform {
}
}
- const char* GetCategoryGroupName(
- const uint8_t* category_enabled_flag) override {
- static const char dummy[] = "dummy";
- return dummy;
- }
-
MockTraceObjectList* GetMockTraceObjects() { return &trace_object_list_; }
private:
MockTraceObjectList trace_object_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTracingController);
+};
+
+class MockTracingPlatform : public v8::Platform {
+ public:
+ explicit MockTracingPlatform(v8::Platform* platform) {}
+ virtual ~MockTracingPlatform() {}
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {}
+
+ void CallOnForegroundThread(Isolate* isolate, Task* task) override {}
+
+ void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override {}
+
+ double MonotonicallyIncreasingTime() override { return 0.0; }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {}
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return false; }
+
+ v8::TracingController* GetTracingController() override {
+ return &tracing_controller_;
+ }
+
+ bool PendingIdleTask() { return false; }
+
+ void PerformIdleTask(double idle_time_in_seconds) {}
+
+ bool PendingDelayedTask() { return false; }
+
+ void PerformDelayedTask() {}
+
+ MockTraceObjectList* GetMockTraceObjects() {
+ return tracing_controller_.GetMockTraceObjects();
+ }
+
+ private:
+ MockTracingController tracing_controller_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTracingPlatform);
};
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 927bd31ef43e32..b1e85ede80a12f 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -93,3 +93,10 @@ v8::MaybeLocal IsolateData::ModuleResolveCallback(
IsolateData* data = IsolateData::FromContext(context);
return data->modules_[ToVector(specifier)].Get(data->isolate_);
}
+
+void IsolateData::FreeContext(v8::Local context) {
+ int context_group_id = GetContextGroupId(context);
+ auto it = contexts_.find(context_group_id);
+ if (it == contexts_.end()) return;
+ contexts_.erase(it);
+}
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index 34f0ae83086f23..52a3fb320633a1 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -39,6 +39,7 @@ class IsolateData {
void RegisterModule(v8::Local context,
v8::internal::Vector name,
v8::ScriptCompiler::Source* source);
+ void FreeContext(v8::Local context);
private:
struct VectorCompare {
diff --git a/deps/v8/test/mjsunit/compiler/regress-726554.js b/deps/v8/test/mjsunit/compiler/regress-726554.js
new file mode 100644
index 00000000000000..afd81936a5d149
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-726554.js
@@ -0,0 +1,27 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function h(a,b){
+ for(var i=0; iid();
+ CancelableTaskManager::Id id = task1->id();
EXPECT_EQ(id, 1u);
EXPECT_TRUE(manager.TryAbort(id));
runner1.Start();
@@ -195,7 +195,7 @@ TEST(CancelableTask, RemoveAfterCancelAndWait) {
ResultType result1 = 0;
TestTask* task1 = new TestTask(&manager, &result1);
ThreadedRunner runner1(task1);
- uint32_t id = task1->id();
+ CancelableTaskManager::Id id = task1->id();
EXPECT_EQ(id, 1u);
runner1.Start();
runner1.Join();
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 143b5d4ad58e3e..7766fb6a2146fd 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -103,7 +103,12 @@ namespace {
class MockPlatform : public v8::Platform {
public:
- MockPlatform() : time_(0.0), time_step_(0.0), idle_task_(nullptr), sem_(0) {}
+ explicit MockPlatform(v8::TracingController* tracing_controller)
+ : time_(0.0),
+ time_step_(0.0),
+ idle_task_(nullptr),
+ sem_(0),
+ tracing_controller_(tracing_controller) {}
~MockPlatform() override {
base::LockGuard lock(&mutex_);
EXPECT_TRUE(foreground_tasks_.empty());
@@ -143,6 +148,10 @@ class MockPlatform : public v8::Platform {
return time_;
}
+ v8::TracingController* GetTracingController() override {
+ return tracing_controller_;
+ }
+
void RunIdleTask(double deadline_in_seconds, double time_step) {
time_step_ = time_step;
IdleTask* task;
@@ -269,6 +278,8 @@ class MockPlatform : public v8::Platform {
base::Semaphore sem_;
+ v8::TracingController* tracing_controller_;
+
DISALLOW_COPY_AND_ASSIGN(MockPlatform);
};
@@ -277,12 +288,12 @@ const char test_script[] = "(x) { x*x; }";
} // namespace
TEST_F(CompilerDispatcherTest, Construct) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
}
TEST_F(CompilerDispatcherTest, IsEnqueued) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -300,7 +311,7 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
}
TEST_F(CompilerDispatcherTest, FinishNow) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -319,7 +330,7 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
}
TEST_F(CompilerDispatcherTest, FinishAllNow) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
constexpr int num_funcs = 2;
@@ -349,7 +360,7 @@ TEST_F(CompilerDispatcherTest, FinishAllNow) {
}
TEST_F(CompilerDispatcherTest, IdleTask) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -370,7 +381,7 @@ TEST_F(CompilerDispatcherTest, IdleTask) {
}
TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -409,7 +420,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
}
TEST_F(CompilerDispatcherTest, IdleTaskException) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
std::string func_name("f" STR(__LINE__));
@@ -436,7 +447,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
}
TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -480,7 +491,7 @@ TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
}
TEST_F(CompilerDispatcherTest, FinishNowWithBackgroundTask) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -520,7 +531,7 @@ TEST_F(CompilerDispatcherTest, FinishNowWithBackgroundTask) {
}
TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
@@ -549,7 +560,7 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
}
TEST_F(CompilerDispatcherTest, FinishNowException) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, 50);
std::string func_name("f" STR(__LINE__));
@@ -577,7 +588,7 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
}
TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -620,7 +631,7 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask) {
}
TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
@@ -702,7 +713,7 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask) {
}
TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -781,7 +792,7 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
}
TEST_F(CompilerDispatcherTest, MemoryPressure) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -829,7 +840,7 @@ class PressureNotificationTask : public CancelableTask {
} // namespace
TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -862,7 +873,7 @@ TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
}
TEST_F(CompilerDispatcherTest, EnqueueJob) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
Handle f =
@@ -881,7 +892,7 @@ TEST_F(CompilerDispatcherTest, EnqueueJob) {
}
TEST_F(CompilerDispatcherTest, EnqueueWithoutSFI) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
ASSERT_TRUE(dispatcher.jobs_.empty());
ASSERT_TRUE(dispatcher.shared_to_job_id_.empty());
@@ -906,7 +917,7 @@ TEST_F(CompilerDispatcherTest, EnqueueWithoutSFI) {
}
TEST_F(CompilerDispatcherTest, EnqueueAndStep) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script[] = TEST_SCRIPT();
@@ -928,7 +939,7 @@ TEST_F(CompilerDispatcherTest, EnqueueAndStep) {
}
TEST_F(CompilerDispatcherTest, EnqueueParsed) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char source[] = TEST_SCRIPT();
@@ -955,7 +966,7 @@ TEST_F(CompilerDispatcherTest, EnqueueParsed) {
}
TEST_F(CompilerDispatcherTest, EnqueueAndStepParsed) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char source[] = TEST_SCRIPT();
@@ -984,7 +995,7 @@ TEST_F(CompilerDispatcherTest, EnqueueAndStepParsed) {
}
TEST_F(CompilerDispatcherTest, CompileParsedOutOfScope) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char source[] = TEST_SCRIPT();
@@ -1046,7 +1057,7 @@ class MockNativeFunctionExtension : public Extension {
} // namespace
TEST_F(CompilerDispatcherTestWithoutContext, CompileExtensionWithoutContext) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
Local context = v8::Context::New(isolate());
@@ -1145,7 +1156,7 @@ TEST_F(CompilerDispatcherTest, CompileLazy2FinishesDispatcherJob) {
}
TEST_F(CompilerDispatcherTest, EnqueueAndStepTwice) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char source[] = TEST_SCRIPT();
@@ -1186,7 +1197,7 @@ TEST_F(CompilerDispatcherTest, EnqueueAndStepTwice) {
}
TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
- MockPlatform platform;
+ MockPlatform platform(V8::GetCurrentPlatform()->GetTracingController());
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
const char script1[] = TEST_SCRIPT();
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index e793a9186527dc..9e0b7c8a0cd017 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -72,6 +72,7 @@
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
+ { 'name': 'ThinStringTag', 'value': 'kThinStringTag' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
diff --git a/doc/api/_toc.md b/doc/api/_toc.md
index 6791e63f0c601a..b3987ed8e4e9f7 100644
--- a/doc/api/_toc.md
+++ b/doc/api/_toc.md
@@ -19,6 +19,7 @@
* [Deprecated APIs](deprecations.html)
* [DNS](dns.html)
* [Domain](domain.html)
+* [ECMAScript Modules](esm.html)
* [Errors](errors.html)
* [Events](events.html)
* [File System](fs.html)
@@ -32,6 +33,7 @@
* [Net](net.html)
* [OS](os.html)
* [Path](path.html)
+* [Performance Hooks](perf_hooks.html)
* [Process](process.html)
* [Punycode](punycode.html)
* [Query Strings](querystring.html)
diff --git a/doc/api/addons.md b/doc/api/addons.md
index f09b2e7ee60f3f..b031c273e7e66b 100644
--- a/doc/api/addons.md
+++ b/doc/api/addons.md
@@ -1,5 +1,7 @@
# C++ Addons
+
+
Node.js Addons are dynamically-linked shared objects, written in C++, that
can be loaded into Node.js using the [`require()`][require] function, and used
just as if they were an ordinary Node.js module. They are used primarily to
@@ -72,7 +74,7 @@ void init(Local exports) {
NODE_SET_METHOD(exports, "hello", Method);
}
-NODE_MODULE(addon, init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, init)
} // namespace demo
```
@@ -82,7 +84,7 @@ the pattern:
```cpp
void Initialize(Local exports);
-NODE_MODULE(module_name, Initialize)
+NODE_MODULE(NODE_GYP_MODULE_NAME, Initialize)
```
There is no semi-colon after `NODE_MODULE` as it's not a function (see
@@ -328,7 +330,7 @@ void Init(Local exports) {
NODE_SET_METHOD(exports, "add", Add);
}
-NODE_MODULE(addon, Init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, Init)
} // namespace demo
```
@@ -376,7 +378,7 @@ void Init(Local exports, Local module) {
NODE_SET_METHOD(module, "exports", RunCallback);
}
-NODE_MODULE(addon, Init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, Init)
} // namespace demo
```
@@ -432,7 +434,7 @@ void Init(Local exports, Local module) {
NODE_SET_METHOD(module, "exports", CreateObject);
}
-NODE_MODULE(addon, Init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, Init)
} // namespace demo
```
@@ -491,7 +493,7 @@ void Init(Local exports, Local module) {
NODE_SET_METHOD(module, "exports", CreateFunction);
}
-NODE_MODULE(addon, Init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, Init)
} // namespace demo
```
@@ -527,7 +529,7 @@ void InitAll(Local exports) {
MyObject::Init(exports);
}
-NODE_MODULE(addon, InitAll)
+NODE_MODULE(NODE_GYP_MODULE_NAME, InitAll)
} // namespace demo
```
@@ -711,7 +713,7 @@ void InitAll(Local exports, Local module) {
NODE_SET_METHOD(module, "exports", CreateObject);
}
-NODE_MODULE(addon, InitAll)
+NODE_MODULE(NODE_GYP_MODULE_NAME, InitAll)
} // namespace demo
```
@@ -924,7 +926,7 @@ void InitAll(Local exports) {
NODE_SET_METHOD(exports, "add", Add);
}
-NODE_MODULE(addon, InitAll)
+NODE_MODULE(NODE_GYP_MODULE_NAME, InitAll)
} // namespace demo
```
@@ -1115,7 +1117,7 @@ void init(Local exports) {
AtExit(sanity_check);
}
-NODE_MODULE(addon, init)
+NODE_MODULE(NODE_GYP_MODULE_NAME, init)
} // namespace demo
```
diff --git a/doc/api/all.md b/doc/api/all.md
index 425513e2568d03..b11661d2b7e916 100644
--- a/doc/api/all.md
+++ b/doc/api/all.md
@@ -14,6 +14,7 @@
@include deprecations
@include dns
@include domain
+@include esm
@include errors
@include events
@include fs
@@ -27,6 +28,7 @@
@include net
@include os
@include path
+@include perf_hooks
@include process
@include punycode
@include querystring
diff --git a/doc/api/assert.md b/doc/api/assert.md
index 2fc83307dfa78f..d019540ee6c9bf 100644
--- a/doc/api/assert.md
+++ b/doc/api/assert.md
@@ -1,5 +1,7 @@
# Assert
+
+
> Stability: 2 - Stable
The `assert` module provides a simple set of assertion tests that can be used to
@@ -45,12 +47,12 @@ Only [enumerable "own" properties][] are considered. The
non-enumerable properties — for such checks, consider using
[`assert.deepStrictEqual()`][] instead. This can lead to some
potentially surprising results. For example, the following example does not
-throw an `AssertionError` because the properties on the [`Error`][] object are
+throw an `AssertionError` because the properties on the [`RegExp`][] object are
not enumerable:
```js
// WARNING: This does not throw an AssertionError!
-assert.deepEqual(Error('a'), Error('b'));
+assert.deepEqual(/a/gi, new Date());
```
An exception is made for [`Map`][] and [`Set`][]. Maps and Sets have their
@@ -102,6 +104,9 @@ parameter is undefined, a default error message is assigned.
+
> Stability: 2 - Stable
Prior to the introduction of [`TypedArray`] in ECMAScript 2015 (ES6), the
@@ -907,7 +909,7 @@ const buf2 = Buffer.from('7468697320697320612074c3a97374', 'hex');
console.log(buf2.toString());
```
-A `TypeError` will be thrown if `str` is not a string.
+A `TypeError` will be thrown if `string` is not a string.
### Class Method: Buffer.from(object[, offsetOrEncoding[, length]])
+
> Stability: 2 - Stable
The `child_process` module provides the ability to spawn child processes in
@@ -685,8 +687,8 @@ does not exit, the parent process will still wait until the child process has
exited.
If the process times out, or has a non-zero exit code, this method ***will***
-throw. The [`Error`][] object will contain the entire result from
-[`child_process.spawnSync()`][]
+throw an [`Error`][] that will include the full result of the underlying
+[`child_process.spawnSync()`][].
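+For example, the thrown error can be inspected to recover the child's exit
+code and output (an illustrative sketch; the error carries the same properties
+as the [`child_process.spawnSync()`][] result):
+
+```js
+const { execFileSync } = require('child_process');
+
+try {
+  // Run a child Node.js process that exits with a non-zero code.
+  execFileSync(process.execPath, ['-e', 'process.exit(1)']);
+} catch (err) {
+  console.error(err.status);            // 1 (exit code of the child)
+  console.error(err.stdout.toString()); // captured stdout (empty here)
+  console.error(err.stderr.toString()); // captured stderr (empty here)
+}
+```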
### child_process.execSync(command[, options])
Node.js comes with a variety of CLI options. These options expose built-in
@@ -352,15 +353,15 @@ added: v7.5.0
-->
Use OpenSSL's default CA store or use bundled Mozilla CA store as supplied by
-current NodeJS version. The default store is selectable at build-time.
+current Node.js version. The default store is selectable at build-time.
Using OpenSSL store allows for external modifications of the store. For most
Linux and BSD distributions, this store is maintained by the distribution
maintainers and system administrators. OpenSSL CA store location is dependent on
configuration of the OpenSSL library but this can be altered at runtime using
-environmental variables.
+environment variables.
-The bundled CA store, as supplied by NodeJS, is a snapshot of Mozilla CA store
+The bundled CA store, as supplied by Node.js, is a snapshot of Mozilla CA store
that is fixed at release time. It is identical on all supported platforms.
See `SSL_CERT_DIR` and `SSL_CERT_FILE`.
@@ -542,7 +543,7 @@ If `--use-openssl-ca` is enabled, this overrides and sets OpenSSL's directory
containing trusted certificates.
*Note*: Be aware that unless the child environment is explicitly set, this
-evironment variable will be inherited by any child processes, and if they use
+environment variable will be inherited by any child processes, and if they use
OpenSSL, it may cause them to trust the same CAs as node.
### `SSL_CERT_FILE=file`
@@ -554,7 +555,7 @@ If `--use-openssl-ca` is enabled, this overrides and sets OpenSSL's file
containing trusted certificates.
*Note*: Be aware that unless the child environment is explicitly set, this
-evironment variable will be inherited by any child processes, and if they use
+environment variable will be inherited by any child processes, and if they use
OpenSSL, it may cause them to trust the same CAs as node.
### `NODE_REDIRECT_WARNINGS=file`
@@ -568,6 +569,30 @@ appended to if it does. If an error occurs while attempting to write the
warning to the file, the warning will be written to stderr instead. This is
equivalent to using the `--redirect-warnings=file` command-line flag.
+### `UV_THREADPOOL_SIZE=size`
+
+Set the number of threads used in libuv's threadpool to `size` threads.
+
+Asynchronous system APIs are used by Node.js whenever possible, but where they
+do not exist, libuv's threadpool is used to create asynchronous node APIs based
+on synchronous system APIs. Node.js APIs that use the threadpool are:
+
+- all `fs` APIs, other than the file watcher APIs and those that are explicitly
+ synchronous
+- `crypto.pbkdf2()`
+- `crypto.randomBytes()`, unless it is used without a callback
+- `crypto.randomFill()`
+- `dns.lookup()`
+- all `zlib` APIs, other than those that are explicitly synchronous
+
+Because libuv's threadpool has a fixed size, if any of these APIs takes a long
+time for whatever reason, other (seemingly unrelated) APIs that run in libuv's
+threadpool will experience degraded performance. To mitigate this issue, one
+potential solution is to increase the size of libuv's threadpool by setting the
+`'UV_THREADPOOL_SIZE'` environment variable to a value greater than `4` (its
+current default value). For more information, see the
+[libuv threadpool documentation][].
+
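+The effect is easy to observe by saturating the threadpool with slow work (an
+illustrative sketch; exact timings will vary by machine):
+
+```js
+const crypto = require('crypto');
+const fs = require('fs');
+
+// Four slow pbkdf2 jobs occupy the default four threadpool threads...
+for (let i = 0; i < 4; i++) {
+  crypto.pbkdf2('secret', 'salt', 1e6, 64, 'sha512', () => {});
+}
+
+// ...so this unrelated read must wait for a free thread. Running the script
+// with `UV_THREADPOOL_SIZE=8` leaves a thread free and removes the delay.
+const start = Date.now();
+fs.readFile(__filename, () => {
+  console.log(`readFile took ${Date.now() - start} ms`);
+});
+```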
[`--openssl-config`]: #cli_openssl_config_file
[Buffer]: buffer.html#buffer_buffer
[Chrome Debugging Protocol]: https://chromedevtools.github.io/debugger-protocol-viewer
@@ -575,3 +600,4 @@ equivalent to using the `--redirect-warnings=file` command-line flag.
[SlowBuffer]: buffer.html#buffer_class_slowbuffer
[debugger]: debugger.html
[emit_warning]: process.html#process_process_emitwarning_warning_type_code_ctor
+[libuv threadpool documentation]: http://docs.libuv.org/en/latest/threadpool.html
diff --git a/doc/api/cluster.md b/doc/api/cluster.md
index 278bd31e6fd8ad..4d56218b1b6b65 100644
--- a/doc/api/cluster.md
+++ b/doc/api/cluster.md
@@ -1,5 +1,7 @@
# Cluster
+
+
> Stability: 2 - Stable
A single instance of Node.js runs in a single thread. To take advantage of
diff --git a/doc/api/console.md b/doc/api/console.md
index 048948d31b0970..6c832fece6a226 100644
--- a/doc/api/console.md
+++ b/doc/api/console.md
@@ -1,5 +1,7 @@
# Console
+
+
> Stability: 2 - Stable
The `console` module provides a simple debugging console that is similar to the
@@ -286,6 +288,32 @@ If formatting elements (e.g. `%d`) are not found in the first string then
[`util.inspect()`][] is called on each argument and the resulting string
values are concatenated. See [`util.format()`][] for more information.
+### console.group([...label])
+
+
+* `label` {any}
+
+Increases indentation of subsequent lines by two spaces.
+
+If one or more `label`s are provided, those are printed first without the
+additional indentation.
+
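+For example (an illustrative sketch of the resulting output indentation):
+
+```js
+console.log('outer');
+console.group('inner');
+console.log('this line is indented by two extra spaces');
+console.groupEnd();
+console.log('back at the outer indentation level');
+```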
+### console.groupCollapsed()
+
+
+An alias for [`console.group()`][].
+
+### console.groupEnd()
+
+
+Decreases indentation of subsequent lines by two spaces.
+
### console.info([data][, ...args])
+
> Stability: 2 - Stable
The `crypto` module provides cryptographic functionality that includes a set of
@@ -1170,15 +1172,16 @@ added: v6.0.0
Property for checking and controlling whether a FIPS compliant crypto provider is
currently in use. Setting to true requires a FIPS build of Node.js.
-### crypto.createCipher(algorithm, password)
+### crypto.createCipher(algorithm, password[, options])
- `algorithm` {string}
- `password` {string | Buffer | TypedArray | DataView}
+- `options` {Object} [`stream.transform` options][]
Creates and returns a `Cipher` object that uses the given `algorithm` and
-`password`.
+`password`. Optional `options` argument controls stream behavior.
The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On
recent OpenSSL releases, `openssl list-cipher-algorithms` will display the
@@ -1198,15 +1201,20 @@ rapidly.
In line with OpenSSL's recommendation to use pbkdf2 instead of
[`EVP_BytesToKey`][] it is recommended that developers derive a key and IV on
their own using [`crypto.pbkdf2()`][] and to use [`crypto.createCipheriv()`][]
-to create the `Cipher` object.
+to create the `Cipher` object. Users should not use ciphers with counter mode
+(e.g. CTR, GCM or CCM) in `crypto.createCipher()`. A warning is emitted when
+they are used, to help avoid the risk of IV reuse, which causes
+vulnerabilities. For the case when an IV is reused in GCM, see
+[Nonce-Disrespecting Adversaries][] for details.
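+For example, a key and IV can be derived and supplied explicitly instead (an
+illustrative sketch; salt handling and iteration counts must be chosen to suit
+the application):
+
+```js
+const crypto = require('crypto');
+
+const password = 'a password';
+const salt = crypto.randomBytes(16);
+// aes192 (AES-192-CBC) uses a 24-byte key and a 16-byte IV.
+const key = crypto.pbkdf2Sync(password, salt, 100000, 24, 'sha512');
+const iv = crypto.randomBytes(16);
+
+const cipher = crypto.createCipheriv('aes192', key, iv);
+let encrypted = cipher.update('some clear text data', 'utf8', 'hex');
+encrypted += cipher.final('hex');
+```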
-### crypto.createCipheriv(algorithm, key, iv)
+### crypto.createCipheriv(algorithm, key, iv[, options])
- `algorithm` {string}
- `key` {string | Buffer | TypedArray | DataView}
- `iv` {string | Buffer | TypedArray | DataView}
+- `options` {Object} [`stream.transform` options][]
Creates and returns a `Cipher` object, with the given `algorithm`, `key` and
-initialization vector (`iv`).
+initialization vector (`iv`). Optional `options` argument controls stream behavior.
The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On
recent OpenSSL releases, `openssl list-cipher-algorithms` will display the
@@ -1234,15 +1242,16 @@ value.
Returns a `tls.SecureContext`, as-if [`tls.createSecureContext()`][] had been
called.
-### crypto.createDecipher(algorithm, password)
+### crypto.createDecipher(algorithm, password[, options])
- `algorithm` {string}
- `password` {string | Buffer | TypedArray | DataView}
+- `options` {Object} [`stream.transform` options][]
Creates and returns a `Decipher` object that uses the given `algorithm` and
-`password` (key).
+`password` (key). Optional `options` argument controls stream behavior.
The implementation of `crypto.createDecipher()` derives keys using the OpenSSL
function [`EVP_BytesToKey`][] with the digest algorithm set to MD5, one
@@ -1256,16 +1265,18 @@ In line with OpenSSL's recommendation to use pbkdf2 instead of
their own using [`crypto.pbkdf2()`][] and to use [`crypto.createDecipheriv()`][]
to create the `Decipher` object.
-### crypto.createDecipheriv(algorithm, key, iv)
+### crypto.createDecipheriv(algorithm, key, iv[, options])
- `algorithm` {string}
- `key` {string | Buffer | TypedArray | DataView}
- `iv` {string | Buffer | TypedArray | DataView}
+- `options` {Object} [`stream.transform` options][]
Creates and returns a `Decipher` object that uses the given `algorithm`, `key`
-and initialization vector (`iv`).
+and initialization vector (`iv`). Optional `options` argument controls stream
+behavior.
The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On
recent OpenSSL releases, `openssl list-cipher-algorithms` will display the
@@ -1333,14 +1344,16 @@ predefined curve specified by the `curveName` string. Use
OpenSSL releases, `openssl ecparam -list_curves` will also display the name
and description of each available elliptic curve.
-### crypto.createHash(algorithm)
+### crypto.createHash(algorithm[, options])
- `algorithm` {string}
+- `options` {Object} [`stream.transform` options][]
Creates and returns a `Hash` object that can be used to generate hash digests
-using the given `algorithm`.
+using the given `algorithm`. Optional `options` argument controls stream
+behavior.
The `algorithm` is dependent on the available algorithms supported by the
version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc.
@@ -1367,14 +1380,16 @@ input.on('readable', () => {
});
```
-### crypto.createHmac(algorithm, key)
+### crypto.createHmac(algorithm, key[, options])
- `algorithm` {string}
- `key` {string | Buffer | TypedArray | DataView}
+- `options` {Object} [`stream.transform` options][]
Creates and returns an `Hmac` object that uses the given `algorithm` and `key`.
+Optional `options` argument controls stream behavior.
The `algorithm` is dependent on the available algorithms supported by the
version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc.
@@ -1403,25 +1418,29 @@ input.on('readable', () => {
});
```
-### crypto.createSign(algorithm)
+### crypto.createSign(algorithm[, options])
- `algorithm` {string}
+- `options` {Object} [`stream.Writable` options][]
Creates and returns a `Sign` object that uses the given `algorithm`.
Use [`crypto.getHashes()`][] to obtain an array of names of the available
-signing algorithms.
+signing algorithms. Optional `options` argument controls the
+`stream.Writable` behavior.
-### crypto.createVerify(algorithm)
+### crypto.createVerify(algorithm[, options])
- `algorithm` {string}
+- `options` {Object} [`stream.Writable` options][]
Creates and returns a `Verify` object that uses the given algorithm.
Use [`crypto.getHashes()`][] to obtain an array of names of the available
-signing algorithms.
+signing algorithms. Optional `options` argument controls the
+`stream.Writable` behavior.
### crypto.getCiphers()
+
> Stability: 2 - Stable
diff --git a/doc/api/deprecations.md b/doc/api/deprecations.md
index d06e70a10c9e30..5e0b63b13ee13e 100644
--- a/doc/api/deprecations.md
+++ b/doc/api/deprecations.md
@@ -148,7 +148,7 @@ Type: Runtime
explicitly via error event handlers set on the domain instead.
-### DEP0013: fs async function without callback
+### DEP0013: fs asynchronous function without callback
Type: Runtime
diff --git a/doc/api/dgram.md b/doc/api/dgram.md
index 22fb2b5bbf1506..3f5664d8fc9090 100644
--- a/doc/api/dgram.md
+++ b/doc/api/dgram.md
@@ -1,5 +1,7 @@
# UDP / Datagram Sockets
+
+
> Stability: 2 - Stable
diff --git a/doc/api/dns.md b/doc/api/dns.md
index 0d191af15e867b..5e9fc97bbc48c2 100644
--- a/doc/api/dns.md
+++ b/doc/api/dns.md
@@ -1,5 +1,7 @@
# DNS
+
+
> Stability: 2 - Stable
The `dns` module contains functions belonging to two different categories:
@@ -140,6 +142,12 @@ changes:
flags may be passed by bitwise `OR`ing their values.
- `all` {boolean} When `true`, the callback returns all resolved addresses in
an array. Otherwise, returns a single address. Defaults to `false`.
+ - `verbatim` {boolean} When `true`, the callback receives IPv4 and IPv6
+ addresses in the order the DNS resolver returned them. When `false`,
+ IPv4 addresses are placed before IPv6 addresses.
+ Default: currently `false` (addresses are reordered) but this is expected
+ to change in the not too distant future.
+ New code should use `{ verbatim: true }`.
- `callback` {Function}
- `err` {Error}
- `address` {string} A string representation of an IPv4 or IPv6 address.
@@ -614,15 +622,16 @@ but note that changing these files will change the behavior of _all other
programs running on the same operating system_.
Though the call to `dns.lookup()` will be asynchronous from JavaScript's
-perspective, it is implemented as a synchronous call to getaddrinfo(3) that
-runs on libuv's threadpool. Because libuv's threadpool has a fixed size, it
-means that if for whatever reason the call to getaddrinfo(3) takes a long
-time, other operations that could run on libuv's threadpool (such as filesystem
-operations) will experience degraded performance. In order to mitigate this
-issue, one potential solution is to increase the size of libuv's threadpool by
-setting the `'UV_THREADPOOL_SIZE'` environment variable to a value greater than
-`4` (its current default value). For more information on libuv's threadpool, see
-[the official libuv documentation][].
+perspective, it is implemented as a synchronous call to getaddrinfo(3) that runs
+on libuv's threadpool. This can have surprising negative performance
+implications for some applications; see the [`UV_THREADPOOL_SIZE`][]
+documentation for more information.
+
+Note that various networking APIs will call `dns.lookup()` internally to resolve
+host names. If that is an issue, consider resolving the host name to an address
+with `dns.resolve()` and using the address instead of a host name. Also, some
+networking APIs (such as [`socket.connect()`][] and [`dgram.createSocket()`][])
+allow the default resolver, `dns.lookup()`, to be replaced.
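+For example (an illustrative sketch; `example.org` is a placeholder and error
+handling is minimal):
+
+```js
+const dns = require('dns');
+const net = require('net');
+
+// Resolve the host name once using the DNS protocol, then connect with the
+// returned address so the connection itself does not call dns.lookup().
+dns.resolve4('example.org', (err, addresses) => {
+  if (err) throw err;
+  const socket = net.connect({ host: addresses[0], port: 80 });
+  socket.on('connect', () => socket.end());
+});
+```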
### `dns.resolve()`, `dns.resolve*()` and `dns.reverse()`
@@ -638,10 +647,14 @@ They do not use the same set of configuration files than what [`dns.lookup()`][]
uses. For instance, _they do not use the configuration from `/etc/hosts`_.
[`Error`]: errors.html#errors_class_error
+[`UV_THREADPOOL_SIZE`]: cli.html#cli_uv_threadpool_size_size
+[`dgram.createSocket()`]: dgram.html#dgram_dgram_createsocket_options_callback
+[`dns.getServers()`]: #dns_dns_getservers
[`dns.lookup()`]: #dns_dns_lookup_hostname_options_callback
[`dns.resolve()`]: #dns_dns_resolve_hostname_rrtype_callback
[`dns.resolve4()`]: #dns_dns_resolve4_hostname_options_callback
[`dns.resolve6()`]: #dns_dns_resolve6_hostname_options_callback
+[`dns.resolveAny()`]: #dns_dns_resolveany_hostname_callback
[`dns.resolveCname()`]: #dns_dns_resolvecname_hostname_callback
[`dns.resolveMx()`]: #dns_dns_resolvemx_hostname_callback
[`dns.resolveNaptr()`]: #dns_dns_resolvenaptr_hostname_callback
@@ -650,13 +663,12 @@ uses. For instance, _they do not use the configuration from `/etc/hosts`_.
[`dns.resolveSoa()`]: #dns_dns_resolvesoa_hostname_callback
[`dns.resolveSrv()`]: #dns_dns_resolvesrv_hostname_callback
[`dns.resolveTxt()`]: #dns_dns_resolvetxt_hostname_callback
-[`dns.resolveAny()`]: #dns_dns_resolveany_hostname_callback
-[`dns.getServers()`]: #dns_dns_getservers
-[`dns.setServers()`]: #dns_dns_setservers_servers
[`dns.reverse()`]: #dns_dns_reverse_ip_callback
+[`dns.setServers()`]: #dns_dns_setservers_servers
+[`socket.connect()`]: net.html#net_socket_connect_options_connectlistener
+[`util.promisify()`]: util.html#util_util_promisify_original
[DNS error codes]: #dns_error_codes
[Implementation considerations section]: #dns_implementation_considerations
+[rfc5952]: https://tools.ietf.org/html/rfc5952#section-6
[supported `getaddrinfo` flags]: #dns_supported_getaddrinfo_flags
[the official libuv documentation]: http://docs.libuv.org/en/latest/threadpool.html
-[`util.promisify()`]: util.html#util_util_promisify_original
-[rfc5952]: https://tools.ietf.org/html/rfc5952#section-6
diff --git a/doc/api/documentation.md b/doc/api/documentation.md
index a12f00e1d63bb8..802bf3613f9b55 100644
--- a/doc/api/documentation.md
+++ b/doc/api/documentation.md
@@ -1,5 +1,6 @@
# About this Documentation
+
The goal of this documentation is to comprehensively explain the Node.js
diff --git a/doc/api/domain.md b/doc/api/domain.md
index 102ac8ec7c354d..a4a31d4fecd1f2 100644
--- a/doc/api/domain.md
+++ b/doc/api/domain.md
@@ -7,6 +7,8 @@ changes:
the first promise of a chain was created.
-->
+
+
> Stability: 0 - Deprecated
**This module is pending deprecation**. Once a replacement API has been
diff --git a/doc/api/errors.md b/doc/api/errors.md
index 93d6a25173cc17..0df38e69f71815 100644
--- a/doc/api/errors.md
+++ b/doc/api/errors.md
@@ -1,5 +1,6 @@
# Errors
+
Applications running in Node.js will generally experience four categories of
@@ -255,14 +256,14 @@ will affect any stack trace captured *after* the value has been changed.
If set to a non-number value, or set to a negative number, stack traces will
not capture any frames.
-#### error.code
+### error.code
* {string}
The `error.code` property is a string label that identifies the kind of error.
See [Node.js Error Codes][] for details about specific codes.
-#### error.message
+### error.message
* {string}
diff --git a/doc/api/esm.md b/doc/api/esm.md
new file mode 100644
index 00000000000000..108fd76336495d
--- /dev/null
+++ b/doc/api/esm.md
@@ -0,0 +1,88 @@
+# ECMAScript Modules
+
+
+
+> Stability: 1 - Experimental
+
+
+
+Node.js contains support for ES Modules based upon [the Node EP for ES Modules][].
+
+Not all features of the EP are complete; they will land as both VM support and the implementation become ready. Error messages are still being polished.
+
+## Enabling
+
+
+
+The `--experimental-modules` flag can be used to enable support for loading ES modules.
+
+Once this flag has been set, files ending in `.mjs` can be loaded as ES modules.
+
+```sh
+node --experimental-modules my-app.mjs
+```
+
+## Features
+
+
+
+### Supported
+
+Only the CLI argument for the main entry point to the program can be an entry point into an ESM graph. In the future, `import()` can be used to create entry points into ESM graphs at runtime.
+
+### Unsupported
+
+| Feature | Reason |
+| --- | --- |
+| `require('./foo.mjs')` | ES Modules have differing resolution and timing; use the language-standard `import()` instead |
+| `import()` | pending newer V8 release used in Node.js |
+| `import.meta` | pending V8 implementation |
+| Loader Hooks | pending Node.js EP creation/consensus |
+
+## Notable differences between `import` and `require`
+
+### No NODE_PATH
+
+`NODE_PATH` is not part of resolving `import` specifiers. Please use symlinks if this behavior is desired.
+
+### No `require.extensions`
+
+`require.extensions` is not used by `import`. The expectation is that loader hooks can provide this workflow in the future.
+
+### No `require.cache`
+
+`require.cache` is not used by `import`. It has a separate cache.
+
+### URL based paths
+
+ES modules are resolved and cached based upon [URL](https://url.spec.whatwg.org/) semantics. This means that files containing special characters such as `#` and `?` need to be escaped.
+
+A module will be loaded multiple times if the `import` specifiers used to resolve it have different queries or fragments.
+
+```js
+import './foo?query=1'; // loads ./foo with query of "?query=1"
+import './foo?query=2'; // loads ./foo with query of "?query=2"
+```
+
+For now, only modules using the `file:` protocol can be loaded.
+
+## Interop with existing modules
+
+All CommonJS, JSON, and C++ modules can be used with `import`.
+
+Modules loaded this way will only be loaded once, even if their query or fragment string differs between `import` statements.
+
+When loaded via `import` these modules will provide a single `default` export representing the value of `module.exports` at the time they finished evaluating.
+
+```js
+import fs from 'fs';
+fs.readFile('./foo.txt', (err, body) => {
+ if (err) {
+ console.error(err);
+ } else {
+ console.log(body);
+ }
+});
+```
+
+[the Node EP for ES Modules]: https://github.com/nodejs/node-eps/blob/master/002-es-modules.md
diff --git a/doc/api/events.md b/doc/api/events.md
index ff6fbe9bb9affb..99c2e1514b43a3 100644
--- a/doc/api/events.md
+++ b/doc/api/events.md
@@ -1,5 +1,7 @@
# Events
+
+
> Stability: 2 - Stable
diff --git a/doc/api/fs.md b/doc/api/fs.md
index 6aeee5d55e412f..0d9755d60900d3 100644
--- a/doc/api/fs.md
+++ b/doc/api/fs.md
@@ -1,5 +1,7 @@
# File System
+
+
> Stability: 2 - Stable
@@ -100,6 +102,13 @@ example `fs.readdirSync('c:\\')` can potentially return a different result than
`fs.readdirSync('c:')`. For more information, see
[this MSDN page][MSDN-Rel-Path].
+## Threadpool Usage
+
+Note that all file system APIs except `fs.FSWatcher()` and those that are
+explicitly synchronous use libuv's threadpool, which can have surprising and
+negative performance implications for some applications. See the
+[`UV_THREADPOOL_SIZE`][] documentation for more information.
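+
+As a rough sketch (assuming the default threadpool size of four threads), a
+burst of asynchronous calls such as the following will be serviced a few at a
+time while the remainder wait in libuv's queue:
+
+```js
+const fs = require('fs');
+
+// Each call is dispatched to libuv's threadpool; only a few run in parallel.
+for (let i = 0; i < 16; i++) {
+  fs.readFile(__filename, () => {});
+}
+```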
+
## WHATWG URL object support
+
+## fs.copyFile(src, dest[, flags], callback)
+
+* `src` {string|Buffer|URL} source filename to copy
+* `dest` {string|Buffer|URL} destination filename of the copy operation
+* `flags` {number} modifiers for copy operation. **Default:** `0`
+* `callback` {Function}
+
+Asynchronously copies `src` to `dest`. By default, `dest` is overwritten if it
+already exists. No arguments other than a possible exception are given to the
+callback function. Node.js makes no guarantees about the atomicity of the copy
+operation. If an error occurs after the destination file has been opened for
+writing, Node.js will attempt to remove the destination.
+
+`flags` is an optional integer that specifies the behavior
+of the copy operation. The only supported flag is `fs.constants.COPYFILE_EXCL`,
+which causes the copy operation to fail if `dest` already exists.
+
+Example:
+
+```js
+const fs = require('fs');
+
+// destination.txt will be created or overwritten by default.
+fs.copyFile('source.txt', 'destination.txt', (err) => {
+ if (err) throw err;
+ console.log('source.txt was copied to destination.txt');
+});
+```
+
+If the third argument is a number, then it specifies `flags`, as shown in the
+following example.
+
+```js
+const fs = require('fs');
+const { COPYFILE_EXCL } = fs.constants;
+
+// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
+fs.copyFile('source.txt', 'destination.txt', COPYFILE_EXCL, (err) => {
+  if (err) throw err;
+  console.log('source.txt was copied to destination.txt');
+});
+```
+
+## fs.copyFileSync(src, dest[, flags])
+
+
+* `src` {string|Buffer|URL} source filename to copy
+* `dest` {string|Buffer|URL} destination filename of the copy operation
+* `flags` {number} modifiers for copy operation. **Default:** `0`
+
+Synchronously copies `src` to `dest`. By default, `dest` is overwritten if it
+already exists. Returns `undefined`. Node.js makes no guarantees about the
+atomicity of the copy operation. If an error occurs after the destination file
+has been opened for writing, Node.js will attempt to remove the destination.
+
+`flags` is an optional integer that specifies the behavior
+of the copy operation. The only supported flag is `fs.constants.COPYFILE_EXCL`,
+which causes the copy operation to fail if `dest` already exists.
+
+Example:
+
+```js
+const fs = require('fs');
+
+// destination.txt will be created or overwritten by default.
+fs.copyFileSync('source.txt', 'destination.txt');
+console.log('source.txt was copied to destination.txt');
+```
+
+If the third argument is a number, then it specifies `flags`, as shown in the
+following example.
+
+```js
+const fs = require('fs');
+const { COPYFILE_EXCL } = fs.constants;
+
+// By using COPYFILE_EXCL, the operation will fail if destination.txt exists.
+fs.copyFileSync('source.txt', 'destination.txt', COPYFILE_EXCL);
+```
+
## fs.createReadStream(path[, options])
diff --git a/doc/api/http.md b/doc/api/http.md
index f7843fe2be180f..e646e291ac724c 100644
--- a/doc/api/http.md
+++ b/doc/api/http.md
@@ -1,5 +1,7 @@
# HTTP
+
+
> Stability: 2 - Stable
To use the HTTP server and client one must `require('http')`.
@@ -74,9 +76,9 @@ to keep the Node.js process running when there are no outstanding requests.
It is good practice, to [`destroy()`][] an `Agent` instance when it is no
longer in use, because unused sockets consume OS resources.
-Sockets are removed from an agent's pool when the socket emits either
+Sockets are removed from an agent when the socket emits either
a `'close'` event or an `'agentRemove'` event. When intending to keep one
-HTTP request open for a long time without keeping it in the pool, something
+HTTP request open for a long time without keeping it in the agent, something
like the following may be done:
```js
@@ -168,8 +170,9 @@ Called when `socket` is detached from a request and could be persisted by the
Agent. Default behavior is to:
```js
+socket.setKeepAlive(true, this.keepAliveMsecs);
socket.unref();
-socket.setKeepAlive(agent.keepAliveMsecs);
+return true;
```
This method can be overridden by a particular `Agent` subclass. If this
@@ -226,13 +229,14 @@ added: v0.11.4
* `port` {number} Port of remote server
* `localAddress` {string} Local interface to bind for network connections
when issuing the request
+ * `family` {integer} Must be 4 or 6 if it is not `undefined`.
* Returns: {string}
Get a unique name for a set of request options, to determine whether a
-connection can be reused. For an HTTP agent, this returns
-`host:port:localAddress`. For an HTTPS agent, the name includes the
-CA, cert, ciphers, and other HTTPS/TLS-specific options that determine
-socket reusability.
+connection can be reused. For an HTTP agent, this returns
+`host:port:localAddress` or `host:port:localAddress:family`. For an HTTPS agent,
+the name includes the CA, cert, ciphers, and other HTTPS/TLS-specific options
+that determine socket reusability.
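+
+For example, a sketch (the exact separator layout of the returned name may
+differ between Node.js versions):
+
+```js
+const http = require('http');
+
+const name = http.globalAgent.getName({ host: 'example.org', port: 80 });
+console.log(name); // something like 'example.org:80:'
+```
+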
### agent.maxFreeSockets
-* `request` {http.ClientRequest}
+* `request` {http.IncomingMessage}
* `response` {http.ServerResponse}
Emitted each time a request with an HTTP `Expect` header is received, where the
@@ -1224,8 +1233,8 @@ Example:
```js
const http = require('http');
const server = http.createServer((req, res) => {
- const ip = req.socket.remoteAddress;
- const port = req.socket.remotePort;
+ const ip = res.socket.remoteAddress;
+ const port = res.socket.remotePort;
res.end(`Your IP address is ${ip} and your source port is ${port}.`);
}).listen(3000);
```
@@ -1686,8 +1695,8 @@ changes:
Since most requests are GET requests without bodies, Node.js provides this
convenience method. The only difference between this method and
[`http.request()`][] is that it sets the method to GET and calls `req.end()`
-automatically. Note that response data must be consumed in the callback
-for reasons stated in [`http.ClientRequest`][] section.
+automatically. Note that the callback must take care to consume the response
+data for reasons stated in [`http.ClientRequest`][] section.
The `callback` is invoked with a single argument that is an instance of
[`http.IncomingMessage`][]
@@ -1883,6 +1892,7 @@ const req = http.request(options, (res) => {
[`TypeError`]: errors.html#errors_class_typeerror
[`URL`]: url.html#url_the_whatwg_url_api
[`agent.createConnection()`]: #http_agent_createconnection_options_callback
+[`agent.getName()`]: #http_agent_getname_options
[`destroy()`]: #http_agent_destroy
[`http.Agent`]: #http_class_http_agent
[`http.ClientRequest`]: #http_class_http_clientrequest
@@ -1898,6 +1908,7 @@ const req = http.request(options, (res) => {
[`net.Server`]: net.html#net_class_net_server
[`net.Socket`]: net.html#net_class_net_socket
[`net.createConnection()`]: net.html#net_net_createconnection_options_connectlistener
+[`request.end()`]: #http_request_end_data_encoding_callback
[`request.socket`]: #http_request_socket
[`request.socket.getPeerCertificate()`]: tls.html#tls_tlssocket_getpeercertificate_detailed
[`request.write(data, encoding)`]: #http_request_write_chunk_encoding_callback
diff --git a/doc/api/http2.md b/doc/api/http2.md
old mode 100755
new mode 100644
index eaf88483cd0bd4..43813feffdfb80
--- a/doc/api/http2.md
+++ b/doc/api/http2.md
@@ -192,7 +192,7 @@ added: v8.4.0
-->
The `'remoteSettings'` event is emitted when a new SETTINGS frame is received
-from the connected peer. When invoked, the handle function will receive a copy
+from the connected peer. When invoked, the handler function will receive a copy
of the remote settings.
```js
@@ -757,7 +757,7 @@ added: v8.4.0
Shortcut for `http2stream.rstStream()` using error code `0x00` (No Error).
-#### http2stream.rstWithProtocolError() {
+#### http2stream.rstWithProtocolError()
@@ -766,7 +766,7 @@ added: v8.4.0
Shortcut for `http2stream.rstStream()` using error code `0x01` (Protocol Error).
-#### http2stream.rstWithCancel() {
+#### http2stream.rstWithCancel()
@@ -775,7 +775,7 @@ added: v8.4.0
Shortcut for `http2stream.rstStream()` using error code `0x08` (Cancel).
-#### http2stream.rstWithRefuse() {
+#### http2stream.rstWithRefuse()
@@ -784,7 +784,7 @@ added: v8.4.0
Shortcut for `http2stream.rstStream()` using error code `0x07` (Refused Stream).
-#### http2stream.rstWithInternalError() {
+#### http2stream.rstWithInternalError()
@@ -849,6 +849,15 @@ used exclusively on HTTP/2 Clients. `Http2Stream` instances on the client
provide events such as `'response'` and `'push'` that are only relevant on
the client.
+#### Event: 'continue'
+
+
+Emitted when the server sends a `100 Continue` status, usually because
+the request contained `Expect: 100-continue`. This is an instruction that
+the client should send the request body.
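+
+For example, a client-side sketch (this assumes an HTTP/2 server listening on
+`localhost:8000` that honors `Expect: 100-continue`, and that setting the
+`expect` header as shown triggers the exchange):
+
+```js
+const http2 = require('http2');
+
+const client = http2.connect('http://localhost:8000');
+const req = client.request({
+  ':method': 'POST',
+  ':path': '/',
+  'expect': '100-continue'
+});
+
+req.on('continue', () => {
+  // The server sent 100 Continue; it is now safe to send the body.
+  req.end('request body');
+});
+```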
+
#### Event: 'headers'
+
+The `'socketError'` event is emitted when a `'socketError'` event is emitted by
+an `Http2Session` associated with the server.
+
#### Event: 'sessionError'
-The `'socketError'` event is emitted when a `'socketError'` event is emitted by
-an `Http2Session` associated with the server.
+* `socket` {http2.ServerHttp2Stream}
+
+If a `ServerHttp2Stream` emits an `'error'` event, it will be forwarded here.
+The stream will already be destroyed when this event is triggered.
#### Event: 'stream'
+
+#### Event: 'checkContinue'
+
+* `request` {http2.Http2ServerRequest}
+* `response` {http2.Http2ServerResponse}
+
+If a [`'request'`][] listener is registered or [`'http2.createServer()'`][] is
+supplied a callback function, the `'checkContinue'` event is emitted each time
+a request with an HTTP `Expect: 100-continue` is received. If this event is
+not listened for, the server will automatically respond with a status
+`100 Continue` as appropriate.
+
+Handling this event involves calling [`response.writeContinue()`][] if the client
+should continue to send the request body, or generating an appropriate HTTP
+response (e.g. 400 Bad Request) if the client should not continue to send the
+request body.
+
+Note that when this event is emitted and handled, the [`'request'`][] event will
+not be emitted.
+
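+For example, a minimal sketch of handling `'checkContinue'` on an
+`Http2Server` (it relies only on the `response.writeContinue()` behavior
+described below):
+
+```js
+const http2 = require('http2');
+
+const server = http2.createServer();
+
+server.on('checkContinue', (request, response) => {
+  // Tell the client to go ahead and send the request body...
+  response.writeContinue();
+  // ...and finish the response once it has been handled.
+  response.end('received');
+});
+
+server.on('request', (request, response) => {
+  // Requests without Expect: 100-continue still arrive here.
+  response.end('ok');
+});
+
+server.listen(8000);
+```
+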
### Class: Http2SecureServer
+#### Event: 'checkContinue'
+
+
+* `request` {http2.Http2ServerRequest}
+* `response` {http2.Http2ServerResponse}
+
+If a [`'request'`][] listener is registered or [`'http2.createSecureServer()'`][]
+is supplied a callback function, the `'checkContinue'` event is emitted each
+time a request with an HTTP `Expect: 100-continue` is received. If this event
+is not listened for, the server will automatically respond with a status
+`100 Continue` as appropriate.
+
+Handling this event involves calling [`response.writeContinue()`][] if the client
+should continue to send the request body, or generating an appropriate HTTP
+response (e.g. 400 Bad Request) if the client should not continue to send the
+request body.
+
+Note that when this event is emitted and handled, the [`'request'`][] event will
+not be emitted.
+
### http2.createServer(options[, onRequestHandler])
-Throws an error as the `'continue'` flow is not current implemented. Added for
-parity with [HTTP/1]().
+Sends a status `100 Continue` to the client, indicating that the request body
+should be sent. See the [`'checkContinue'`][] event on `Http2Server` and
+`Http2SecureServer`.
### response.writeHead(statusCode[, statusMessage][, headers])
+
> Stability: 2 - Stable
HTTPS is the HTTP protocol over TLS/SSL. In Node.js this is implemented as a
diff --git a/doc/api/intl.md b/doc/api/intl.md
index ad7b670fae38d2..c7ac7fd2b68cd2 100644
--- a/doc/api/intl.md
+++ b/doc/api/intl.md
@@ -112,7 +112,7 @@ at runtime so that the JS methods would work for all ICU locales. Assuming the
data file is stored at `/some/directory`, it can be made available to ICU
through either:
-* The [`NODE_ICU_DATA`][] environmental variable:
+* The [`NODE_ICU_DATA`][] environment variable:
```shell
env NODE_ICU_DATA=/some/directory node
diff --git a/doc/api/modules.md b/doc/api/modules.md
index 13e3731cae749d..afddbc14c4f387 100644
--- a/doc/api/modules.md
+++ b/doc/api/modules.md
@@ -1,5 +1,7 @@
# Modules
+
+
> Stability: 2 - Stable
diff --git a/doc/api/n-api.md b/doc/api/n-api.md
index 109674a4bf4b06..e3d7dcce32256b 100644
--- a/doc/api/n-api.md
+++ b/doc/api/n-api.md
@@ -42,6 +42,8 @@ The documentation for N-API is structured as follows:
* [Working with JavaScript Functions][]
* [Object Wrap][]
* [Asynchronous Operations][]
+* [Promises][]
+* [Script Execution][]
The N-API is a C API that ensures ABI stability across Node.js versions
and different compiler levels. However, we also understand that a C++
@@ -876,7 +878,7 @@ except that instead of using the `NODE_MODULE` macro the following
is used:
```C
-NAPI_MODULE(addon, Init)
+NAPI_MODULE(NODE_GYP_MODULE_NAME, Init)
```
The next difference is the signature for the `Init` method. For a N-API
@@ -1462,51 +1464,51 @@ The JavaScript Number type is described in
[Section 6.1.6](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-number-type)
of the ECMAScript Language Specification.
-#### *napi_create_string_utf16*
+#### *napi_create_string_latin1*
```C
-napi_status napi_create_string_utf16(napi_env env,
- const char16_t* str,
- size_t length,
- napi_value* result)
+NAPI_EXTERN napi_status napi_create_string_latin1(napi_env env,
+ const char* str,
+ size_t length,
+ napi_value* result);
```
- `[in] env`: The environment that the API is invoked under.
-- `[in] str`: Character buffer representing a UTF16-LE-encoded string.
-- `[in] length`: The length of the string in two-byte code units, or -1 if
-it is null-terminated.
+- `[in] str`: Character buffer representing a ISO-8859-1-encoded string.
+- `[in] length`: The length of the string in bytes, or -1 if it is
+null-terminated.
- `[out] result`: A `napi_value` representing a JavaScript String.
Returns `napi_ok` if the API succeeded.
-This API creates a JavaScript String object from a UTF16-LE-encoded C string
+This API creates a JavaScript String object from an ISO-8859-1-encoded C string.
The JavaScript String type is described in
[Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type)
of the ECMAScript Language Specification.
-#### *napi_create_string_latin1*
+#### *napi_create_string_utf16*
```C
-NAPI_EXTERN napi_status napi_create_string_latin1(napi_env env,
- const char* str,
- size_t length,
- napi_value* result);
+napi_status napi_create_string_utf16(napi_env env,
+ const char16_t* str,
+ size_t length,
+ napi_value* result)
```
- `[in] env`: The environment that the API is invoked under.
-- `[in] str`: Character buffer representing a latin1-encoded string.
-- `[in] length`: The length of the string in bytes, or -1 if it is
-null-terminated.
+- `[in] str`: Character buffer representing a UTF16-LE-encoded string.
+- `[in] length`: The length of the string in two-byte code units, or -1 if
+it is null-terminated.
- `[out] result`: A `napi_value` representing a JavaScript String.
Returns `napi_ok` if the API succeeded.
-This API creates a JavaScript String object from a latin1-encoded C string.
+This API creates a JavaScript String object from a UTF16-LE-encoded C string.
The JavaScript String type is described in
[Section 6.1.4](https://tc39.github.io/ecma262/#sec-ecmascript-language-types-string-type)
@@ -1795,6 +1797,33 @@ is passed in it returns `napi_number_expected`.
This API returns the C int64 primitive equivalent of the given
JavaScript Number
+#### *napi_get_value_string_latin1*
+
+```C
+NAPI_EXTERN napi_status napi_get_value_string_latin1(napi_env env,
+ napi_value value,
+ char* buf,
+ size_t bufsize,
+ size_t* result)
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] value`: `napi_value` representing JavaScript string.
+- `[in] buf`: Buffer to write the ISO-8859-1-encoded string into. If NULL is
+passed in, the length of the string (in bytes) is returned.
+- `[in] bufsize`: Size of the destination buffer. When this value is
+insufficient, the returned string will be truncated.
+- `[out] result`: Number of bytes copied into the buffer, excluding the null
+terminator.
+
+Returns `napi_ok` if the API succeeded. If a non-String `napi_value`
+is passed in it returns `napi_string_expected`.
+
+This API returns the ISO-8859-1-encoded string corresponding to the value passed
+in.
+
#### *napi_get_value_string_utf8*
+```C
+napi_status napi_remove_wrap(napi_env env,
+ napi_value js_object,
+ void** result);
+```
+
+ - `[in] env`: The environment that the API is invoked under.
+ - `[in] js_object`: The object associated with the native instance.
+ - `[out] result`: Pointer to the wrapped native instance.
+
+Returns `napi_ok` if the API succeeded.
+
+Retrieves a native instance that was previously wrapped in the JavaScript
+object `js_object` using `napi_wrap()` and removes the wrapping, thereby
+restoring the JavaScript object's prototype chain. If a finalize callback was
+associated with the wrapping, it will no longer be called when the JavaScript
+object becomes garbage-collected.
+
## Asynchronous Operations
Addon modules often need to leverage async helpers from libuv as part of their
@@ -3342,8 +3395,188 @@ support it:
* If the function is not available, provide an alternate implementation
that does not use the function.
+## Memory Management
+
+### napi_adjust_external_memory
+
+```C
+NAPI_EXTERN napi_status napi_adjust_external_memory(napi_env env,
+ int64_t change_in_bytes,
+ int64_t* result);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] change_in_bytes`: The change in externally allocated memory that is
+kept alive by JavaScript objects.
+- `[out] result`: The adjusted value
+
+Returns `napi_ok` if the API succeeded.
+
+This function gives V8 an indication of the amount of externally allocated
+memory that is kept alive by JavaScript objects (i.e. a JavaScript object
+that points to its own memory allocated by a native module). Registering
+externally allocated memory will trigger global garbage collections more
+often than it would otherwise.
+
+## Promises
+
+N-API provides facilities for creating `Promise` objects as described in
+[Section 25.4][] of the ECMA specification. It implements promises as a pair of
+objects. When a promise is created by `napi_create_promise()`, a "deferred"
+object is created and returned alongside the `Promise`. The deferred object is
+bound to the created `Promise` and is the only means to resolve or reject the
+`Promise` using `napi_resolve_deferred()` or `napi_reject_deferred()`. The
+deferred object that is created by `napi_create_promise()` is freed by
+`napi_resolve_deferred()` or `napi_reject_deferred()`. The `Promise` object may
+be returned to JavaScript where it can be used in the usual fashion.
+
+For example, to create a promise and pass it to an asynchronous worker:
+```c
+napi_deferred deferred;
+napi_value promise;
+napi_status status;
+
+// Create the promise.
+status = napi_create_promise(env, &deferred, &promise);
+if (status != napi_ok) return NULL;
+
+// Pass the deferred to a function that performs an asynchronous action.
+do_something_asynchronous(deferred);
+
+// Return the promise to JS
+return promise;
+```
+
+The above function `do_something_asynchronous()` would perform its asynchronous
+action and then it would resolve or reject the deferred, thereby concluding the
+promise and freeing the deferred:
+```c
+napi_deferred deferred;
+napi_value undefined;
+napi_status status;
+
+// Create a value with which to conclude the deferred.
+status = napi_get_undefined(env, &undefined);
+if (status != napi_ok) return NULL;
+
+// Resolve or reject the promise associated with the deferred depending on
+// whether the asynchronous action succeeded.
+if (asynchronous_action_succeeded) {
+ status = napi_resolve_deferred(env, deferred, undefined);
+} else {
+ status = napi_reject_deferred(env, deferred, undefined);
+}
+if (status != napi_ok) return NULL;
+
+// At this point the deferred has been freed, so we should assign NULL to it.
+deferred = NULL;
+```
+
+### napi_create_promise
+
+```C
+NAPI_EXTERN napi_status napi_create_promise(napi_env env,
+ napi_deferred* deferred,
+ napi_value* promise);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[out] deferred`: A newly created deferred object which can later be passed to
+`napi_resolve_deferred()` or `napi_reject_deferred()` to resolve or reject,
+respectively, the associated promise.
+- `[out] promise`: The JavaScript promise associated with the deferred object.
+
+Returns `napi_ok` if the API succeeded.
+
+This API creates a deferred object and a JavaScript promise.
+
+### napi_resolve_deferred
+
+```C
+NAPI_EXTERN napi_status napi_resolve_deferred(napi_env env,
+ napi_deferred deferred,
+ napi_value resolution);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] deferred`: The deferred object whose associated promise to resolve.
+- `[in] resolution`: The value with which to resolve the promise.
+
+This API resolves a JavaScript promise by way of the deferred object
+with which it is associated. Thus, it can only be used to resolve JavaScript
+promises for which the corresponding deferred object is available. This
+effectively means that the promise must have been created using
+`napi_create_promise()` and the deferred object returned from that call must
+have been retained in order to be passed to this API.
+
+The deferred object is freed upon successful completion.
+
+### napi_reject_deferred
+
+```C
+NAPI_EXTERN napi_status napi_reject_deferred(napi_env env,
+ napi_deferred deferred,
+ napi_value rejection);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] deferred`: The deferred object whose associated promise to reject.
+- `[in] rejection`: The value with which to reject the promise.
+
+This API rejects a JavaScript promise by way of the deferred object
+with which it is associated. Thus, it can only be used to reject JavaScript
+promises for which the corresponding deferred object is available. This
+effectively means that the promise must have been created using
+`napi_create_promise()` and the deferred object returned from that call must
+have been retained in order to be passed to this API.
+
+The deferred object is freed upon successful completion.
+
+### napi_is_promise
+
+```C
+NAPI_EXTERN napi_status napi_is_promise(napi_env env,
+ napi_value promise,
+ bool* is_promise);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] promise`: The promise to examine.
+- `[out] is_promise`: Flag indicating whether `promise` is a native promise
+object - that is, a promise object created by the underlying engine.
+
+## Script execution
+
+N-API provides an API for executing a string containing JavaScript using the
+underlying JavaScript engine.
+
+### napi_run_script
+
+```C
+NAPI_EXTERN napi_status napi_run_script(napi_env env,
+ napi_value script,
+ napi_value* result);
+```
+
+- `[in] env`: The environment that the API is invoked under.
+- `[in] script`: A JavaScript string containing the script to execute.
+- `[out] result`: The value resulting from having executed the script.
+
+[Promises]: #n_api_promises
[Asynchronous Operations]: #n_api_asynchronous_operations
[Basic N-API Data Types]: #n_api_basic_n_api_data_types
[ECMAScript Language Specification]: https://tc39.github.io/ecma262/
@@ -3352,9 +3585,11 @@ support it:
[Native Abstractions for Node.js]: https://github.com/nodejs/nan
[Object Lifetime Management]: #n_api_object_lifetime_management
[Object Wrap]: #n_api_object_wrap
+[Script Execution]: #n_api_script_execution
[Section 9.1.6]: https://tc39.github.io/ecma262/#sec-ordinary-object-internal-methods-and-internal-slots-defineownproperty-p-desc
[Section 12.5.5]: https://tc39.github.io/ecma262/#sec-typeof-operator
[Section 24.3]: https://tc39.github.io/ecma262/#sec-dataview-objects
+[Section 25.4]: https://tc39.github.io/ecma262/#sec-promise-objects
[Working with JavaScript Functions]: #n_api_working_with_javascript_functions
[Working with JavaScript Properties]: #n_api_working_with_javascript_properties
[Working with JavaScript Values]: #n_api_working_with_javascript_values
diff --git a/doc/api/net.md b/doc/api/net.md
index e744e5c8be4454..36280c06493350 100644
--- a/doc/api/net.md
+++ b/doc/api/net.md
@@ -1,5 +1,7 @@
# Net
+
+
> Stability: 2 - Stable
The `net` module provides an asynchronous network API for creating stream-based
@@ -50,7 +52,7 @@ added: v0.1.90
This class is used to create a TCP or [IPC][] server.
-## new net.Server([options][, connectionListener])
+### new net.Server([options][, connectionListener])
* Returns: {net.Server}
diff --git a/doc/api/os.md b/doc/api/os.md
index 3b4b9526cb48cc..28eff6a13f0b60 100644
--- a/doc/api/os.md
+++ b/doc/api/os.md
@@ -1,5 +1,7 @@
# OS
+
+
> Stability: 2 - Stable
The `os` module provides a number of operating system-related utility methods.
@@ -46,7 +48,7 @@ added: v6.3.0
Returns an object containing commonly used operating system specific constants
for error codes, process signals, and so on. The specific constants currently
-defined are described in [OS Constants][].
+defined are described in [OS Constants](#os_os_constants_1).
## os.cpus()
```js
@@ -263,14 +268,16 @@ The properties available on the assigned network address object include:
netmask: '255.0.0.0',
family: 'IPv4',
mac: '00:00:00:00:00:00',
- internal: true
+ internal: true,
+ cidr: '127.0.0.1/8'
},
{
address: '::1',
netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff',
family: 'IPv6',
mac: '00:00:00:00:00:00',
- internal: true
+ internal: true,
+ cidr: '::1/128'
}
],
eth0: [
@@ -279,14 +286,16 @@ The properties available on the assigned network address object include:
netmask: '255.255.255.0',
family: 'IPv4',
mac: '01:02:03:0a:0b:0c',
- internal: false
+ internal: false,
+ cidr: '192.168.1.108/24'
},
{
address: 'fe80::a00:27ff:fe4e:66a1',
netmask: 'ffff:ffff:ffff:ffff::',
family: 'IPv6',
mac: '01:02:03:0a:0b:0c',
- internal: false
+ internal: false,
+ cidr: 'fe80::a00:27ff:fe4e:66a1/64'
}
]
}
diff --git a/doc/api/path.md b/doc/api/path.md
index f951a4ab8a5b1a..f2015db47048d7 100644
--- a/doc/api/path.md
+++ b/doc/api/path.md
@@ -1,5 +1,7 @@
# Path
+
+
> Stability: 2 - Stable
The `path` module provides utilities for working with file and directory paths.
diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md
new file mode 100644
index 00000000000000..bccc99c18e8447
--- /dev/null
+++ b/doc/api/perf_hooks.md
@@ -0,0 +1,658 @@
+# Performance Timing API
+
+
+> Stability: 1 - Experimental
+
+The Performance Timing API provides an implementation of the
+[W3C Performance Timeline][] specification. The purpose of the API
+is to support collection of high resolution performance metrics.
+This is the same Performance API as implemented in modern Web browsers.
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('A');
+doSomeLongRunningProcess(() => {
+ performance.mark('B');
+ performance.measure('A to B', 'A', 'B');
+ const measure = performance.getEntriesByName('A to B')[0];
+ console.log(measure.duration);
+ // Prints the number of milliseconds between Mark 'A' and Mark 'B'
+});
+```
+
+## Class: Performance
+
+
+The `Performance` class provides access to performance metric data. A single
+instance of this class is provided via the `performance` property.
+
+### performance.clearFunctions([name])
+
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceFunction` objects from the
+Performance Timeline. If `name` is provided, removes only the named entries.
+
+### performance.clearMarks([name])
+
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceMark` objects from the
+Performance Timeline. If `name` is provided, removes only the named mark.
+
+### performance.clearMeasures([name])
+
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceMeasure` objects from the
+Performance Timeline. If `name` is provided, removes only objects whose
+`performanceEntry.name` matches `name`.
+
+### performance.getEntries()
+
+
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime`.
+
+### performance.getEntriesByName(name[, type])
+
+
+* `name` {string}
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.name` is
+equal to `name`, and optionally, whose `performanceEntry.entryType` is equal to
+`type`.
+
+### performance.getEntriesByType(type)
+
+
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.entryType`
+is equal to `type`.
+
+### performance.mark([name])
+
+
+* `name` {string}
+
+Creates a new `PerformanceMark` entry in the Performance Timeline. A
+`PerformanceMark` is a subclass of `PerformanceEntry` whose
+`performanceEntry.entryType` is always `'mark'`, and whose
+`performanceEntry.duration` is always `0`. Performance marks are used
+to mark specific significant moments in the Performance Timeline.
+
+### performance.measure(name, startMark, endMark)
+
+
+* `name` {string}
+* `startMark` {string}
+* `endMark` {string}
+
+Creates a new `PerformanceMeasure` entry in the Performance Timeline. A
+`PerformanceMeasure` is a subclass of `PerformanceEntry` whose
+`performanceEntry.entryType` is always `'measure'`, and whose
+`performanceEntry.duration` measures the number of milliseconds elapsed between
+`startMark` and `endMark`.
+
+The `startMark` argument may identify any *existing* `PerformanceMark` in the
+Performance Timeline, or any of the timestamp properties provided by the
+`PerformanceNodeTiming` class. If the named `startMark` does not exist, then
+`startMark` is set to [`timeOrigin`][] by default.
+
+The `endMark` argument must identify any *existing* `PerformanceMark` in the
+Performance Timeline or any of the timestamp properties provided by the
+`PerformanceNodeTiming` class. If the named `endMark` does not exist, an
+error will be thrown.
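+
+For example, a sketch based on the behavior described above, measuring from a
+`PerformanceNodeTiming` milestone to a user-defined mark:
+
+```js
+const { performance } = require('perf_hooks');
+
+performance.mark('app-ready');
+performance.measure('bootstrap to ready', 'bootstrapComplete', 'app-ready');
+
+const [measure] = performance.getEntriesByName('bootstrap to ready');
+console.log(measure.duration);
+```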
+
+### performance.nodeFrame
+
+
+* {PerformanceFrame}
+
+An instance of the `PerformanceFrame` class that provides performance metrics
+for the event loop.
+
+### performance.nodeTiming
+
+
+* {PerformanceNodeTiming}
+
+An instance of the `PerformanceNodeTiming` class that provides performance
+metrics for specific Node.js operational milestones.
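+
+For example, a sketch that logs a few of the milestones documented below:
+
+```js
+const { performance } = require('perf_hooks');
+
+const timing = performance.nodeTiming;
+console.log(timing.bootstrapComplete);
+console.log(timing.loopStart);
+```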
+
+### performance.now()
+
+
+* Returns: {number}
+
+Returns the current high resolution millisecond timestamp.
+
+### performance.timeOrigin
+
+
+* {number}
+
+The [`timeOrigin`][] specifies the high resolution millisecond timestamp from
+which all performance metric durations are measured.
+
+### performance.timerify(fn)
+
+
+* `fn` {Function}
+
+Wraps a function within a new function that measures the running time of the
+wrapped function. A `PerformanceObserver` must be subscribed to the `'function'`
+event type in order for the timing details to be accessed.
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+function someFunction() {
+ console.log('hello world');
+}
+
+const wrapped = performance.timerify(someFunction);
+
+const obs = new PerformanceObserver((list) => {
+ console.log(list.getEntries()[0].duration);
+ obs.disconnect();
+ performance.clearFunctions();
+});
+obs.observe({ entryTypes: ['function'] });
+
+// A performance timeline entry will be created
+wrapped();
+```
+
+## Class: PerformanceEntry
+
+
+### performanceEntry.duration
+
+
+* {number}
+
+The total number of milliseconds elapsed for this entry. This value will not
+be meaningful for all Performance Entry types.
+
+### performanceEntry.name
+
+
+* {string}
+
+The name of the performance entry.
+
+### performanceEntry.startTime
+
+
+* {number}
+
+The high resolution millisecond timestamp marking the starting time of the
+Performance Entry.
+
+### performanceEntry.entryType
+
+
+* {string}
+
+The type of the performance entry. Currently it may be one of: `'node'`,
+`'mark'`, `'measure'`, `'gc'`, or `'function'`.
+
+### performanceEntry.kind
+
+
+* {number}
+
+When `performanceEntry.entryType` is equal to `'gc'`, the `performanceEntry.kind`
+property identifies the type of garbage collection operation that occurred.
+The value may be one of:
+
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_MAJOR`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_MINOR`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_INCREMENTAL`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_WEAKCB`
+
+## Class: PerformanceNodeFrame extends PerformanceEntry
+
+
+Provides timing details for the Node.js event loop.
+
+### performanceNodeFrame.frameCheck
+
+The high resolution timestamp when `uv_check_t` processing occurred on the
+current loop.
+
+### performanceNodeFrame.frameCount
+
+The total number of event loop iterations (counted when `uv_idle_t`
+processing occurs).
+
+### performanceNodeFrame.frameIdle
+
+The high resolution timestamp when `uv_idle_t` processing occurred on the
+current loop.
+
+### performanceNodeFrame.framesPerSecond
+
+The number of event loop iterations per second.
+
+### performanceNodeFrame.framePrepare
+
+The high resolution timestamp when `uv_prepare_t` processing occurred on the
+current loop.
+
+## Class: PerformanceNodeTiming extends PerformanceEntry
+
+
+Provides timing details for Node.js itself.
+
+### performanceNodeTiming.bootstrapComplete
+
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js process
+completed bootstrap.
+
+### performanceNodeTiming.clusterSetupEnd
+
+
+* {number}
+
+The high resolution millisecond timestamp at which cluster processing ended.
+
+### performanceNodeTiming.clusterSetupStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which cluster processing started.
+
+### performanceNodeTiming.loopExit
+
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js event loop
+exited.
+
+### performanceNodeTiming.loopStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js event loop
+started.
+
+### performanceNodeTiming.moduleLoadEnd
+
+
+* {number}
+
+The high resolution millisecond timestamp at which main module load ended.
+
+### performanceNodeTiming.moduleLoadStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which main module load started.
+
+### performanceNodeTiming.nodeStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js process was
+initialized.
+
+### performanceNodeTiming.preloadModuleLoadEnd
+
+
+* {number}
+
+The high resolution millisecond timestamp at which preload module load ended.
+
+### performanceNodeTiming.preloadModuleLoadStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which preload module load started.
+
+### performanceNodeTiming.thirdPartyMainEnd
+
+
+* {number}
+
+The high resolution millisecond timestamp at which third_party_main processing
+ended.
+
+### performanceNodeTiming.thirdPartyMainStart
+
+
+* {number}
+
+The high resolution millisecond timestamp at which third_party_main processing
+started.
+
+### performanceNodeTiming.v8Start
+
+
+* {number}
+
+The high resolution millisecond timestamp at which the V8 platform was
+initialized.
+
+
+## Class: PerformanceObserver(callback)
+
+
+* `callback` {Function} A `PerformanceObserverCallback` callback function.
+
+`PerformanceObserver` objects provide notifications when new
+`PerformanceEntry` instances have been added to the Performance Timeline.
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ console.log(list.getEntries());
+ observer.disconnect();
+});
+obs.observe({ entryTypes: ['mark'], buffered: true });
+
+performance.mark('test');
+```
+
+Because `PerformanceObserver` instances introduce their own additional
+performance overhead, instances should not be left subscribed to notifications
+indefinitely. Users should disconnect observers as soon as they are no
+longer needed.
+
+### Callback: PerformanceObserverCallback(list, observer)
+
+
+* `list` {PerformanceObserverEntryList}
+* `observer` {PerformanceObserver}
+
+The `PerformanceObserverCallback` is invoked when a `PerformanceObserver` is
+notified about new `PerformanceEntry` instances. The callback receives a
+`PerformanceObserverEntryList` instance and a reference to the
+`PerformanceObserver`.
+
+### Class: PerformanceObserverEntryList
+
+
+The `PerformanceObserverEntryList` class is used to provide access to the
+`PerformanceEntry` instances passed to a `PerformanceObserver`.
+
+#### performanceObserverEntryList.getEntries()
+
+
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime`.
+
+#### performanceObserverEntryList.getEntriesByName(name[, type])
+
+
+* `name` {string}
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.name` is
+equal to `name`, and optionally, whose `performanceEntry.entryType` is equal to
+`type`.
+
+#### performanceObserverEntryList.getEntriesByType(type)
+
+
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.entryType`
+is equal to `type`.
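+
+For example, a sketch that filters the received entries inside the observer
+callback:
+
+```js
+const {
+  performance,
+  PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+  console.log(list.getEntriesByName('meow').length); // 1
+  console.log(list.getEntriesByType('mark').length); // 2
+  observer.disconnect();
+});
+obs.observe({ entryTypes: ['mark'], buffered: true });
+
+performance.mark('meow');
+performance.mark('purr');
+```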
+
+### performanceObserver.disconnect()
+
+Disconnects the `PerformanceObserver` instance from all notifications.
+
+### performanceObserver.observe(options)
+
+* `options` {Object}
+ * `entryTypes` {Array} An array of strings identifying the types of
+ `PerformanceEntry` instances the observer is interested in. If not
+ provided, an error will be thrown.
+ * `buffered` {boolean} If true, the notification callback will be
+ called using `setImmediate()` and multiple `PerformanceEntry` instance
+ notifications will be buffered internally. If `false`, notifications will
+ be immediate and synchronous. Defaults to `false`.
+
+Subscribes the `PerformanceObserver` instance to notifications of new
+`PerformanceEntry` instances identified by `options.entryTypes`.
+
+When `options.buffered` is `false`, the `callback` will be invoked once for
+every `PerformanceEntry` instance:
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ // called three times synchronously. list contains one item
+});
+obs.observe({ entryTypes: ['mark'] });
+
+for (let n = 0; n < 3; n++)
+ performance.mark(`test${n}`);
+```
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ // called once. list contains three items
+});
+obs.observe({ entryTypes: ['mark'], buffered: true });
+
+for (let n = 0; n < 3; n++)
+ performance.mark(`test${n}`);
+```
+
+## Examples
+
+### Measuring the duration of async operations
+
+The following example uses the [Async Hooks][] and Performance APIs to measure
+the actual duration of a Timeout operation (including the amount of time it
+to execute the callback).
+
+```js
+'use strict';
+const async_hooks = require('async_hooks');
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const set = new Set();
+const hook = async_hooks.createHook({
+ init(id, type) {
+ if (type === 'Timeout') {
+ performance.mark(`Timeout-${id}-Init`);
+ set.add(id);
+ }
+ },
+ destroy(id) {
+ if (set.has(id)) {
+ set.delete(id);
+ performance.mark(`Timeout-${id}-Destroy`);
+ performance.measure(`Timeout-${id}`,
+ `Timeout-${id}-Init`,
+ `Timeout-${id}-Destroy`);
+ }
+ }
+});
+hook.enable();
+
+const obs = new PerformanceObserver((list, observer) => {
+ console.log(list.getEntries()[0]);
+ performance.clearMarks();
+ performance.clearMeasures();
+ observer.disconnect();
+});
+obs.observe({ entryTypes: ['measure'], buffered: true });
+
+setTimeout(() => {}, 1000);
+```
+
+### Measuring how long it takes to load dependencies
+
+The following example measures the duration of `require()` operations to load
+dependencies:
+
+
+```js
+'use strict';
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+const mod = require('module');
+
+// Monkey patch the require function
+mod.Module.prototype.require =
+ performance.timerify(mod.Module.prototype.require);
+require = performance.timerify(require);
+
+// Activate the observer
+const obs = new PerformanceObserver((list) => {
+ const entries = list.getEntries();
+ entries.forEach((entry) => {
+ console.log(`require('${entry[0]}')`, entry.duration);
+ });
+ obs.disconnect();
+ // Free memory
+ performance.clearFunctions();
+});
+obs.observe({ entryTypes: ['function'], buffered: true });
+
+require('some-module');
+```
+
+[`timeOrigin`]: https://w3c.github.io/hr-time/#dom-performance-timeorigin
+[W3C Performance Timeline]: https://w3c.github.io/performance-timeline/
diff --git a/doc/api/process.md b/doc/api/process.md
index 2a85a551c9e032..80498aac0766af 100644
--- a/doc/api/process.md
+++ b/doc/api/process.md
@@ -1,5 +1,6 @@
# Process
+
The `process` object is a `global` that provides information about, and control
@@ -1850,11 +1851,11 @@ cases:
[Cluster]: cluster.html
[Duplex]: stream.html#stream_duplex_and_transform_streams
[LTS]: https://github.com/nodejs/LTS/
+[note on process I/O]: process.html#process_a_note_on_process_i_o
+[process_emit_warning]: #process_process_emitwarning_warning_type_code_ctor
+[process_warning]: #process_event_warning
[Readable]: stream.html#stream_readable_streams
[Signal Events]: #process_signal_events
[Stream compatibility]: stream.html#stream_compatibility_with_older_node_js_versions
[TTY]: tty.html#tty_tty
[Writable]: stream.html#stream_writable_streams
-[note on process I/O]: process.html#process_a_note_on_process_i_o
-[process_emit_warning]: #process_process_emitwarning_warning_type_code_ctor
-[process_warning]: #process_event_warning
diff --git a/doc/api/punycode.md b/doc/api/punycode.md
index b88a89832641ea..03ee3d62ebfd67 100644
--- a/doc/api/punycode.md
+++ b/doc/api/punycode.md
@@ -6,6 +6,8 @@ changes:
description: Accessing this module will now emit a deprecation warning.
-->
+
+
> Stability: 0 - Deprecated
**The version of the punycode module bundled in Node.js is being deprecated**.
diff --git a/doc/api/querystring.md b/doc/api/querystring.md
index c6b89235c14d43..5bd4f1cce192a7 100644
--- a/doc/api/querystring.md
+++ b/doc/api/querystring.md
@@ -1,5 +1,7 @@
# Query String
+
+
> Stability: 2 - Stable
diff --git a/doc/api/readline.md b/doc/api/readline.md
index 085ac885401c89..603a5ec1888394 100644
--- a/doc/api/readline.md
+++ b/doc/api/readline.md
@@ -1,5 +1,7 @@
# Readline
+
+
> Stability: 2 - Stable
The `readline` module provides an interface for reading data from a [Readable][]
diff --git a/doc/api/repl.md b/doc/api/repl.md
index f276b965bdfeb2..04e885a99dbfe1 100644
--- a/doc/api/repl.md
+++ b/doc/api/repl.md
@@ -1,5 +1,7 @@
# REPL
+
+
> Stability: 2 - Stable
The `repl` module provides a Read-Eval-Print-Loop (REPL) implementation that
@@ -499,7 +501,7 @@ by the `NODE_REPL_HISTORY` variable, as documented in the
### Using the Node.js REPL with advanced line-editors
-For advanced line-editors, start Node.js with the environmental variable
+For advanced line-editors, start Node.js with the environment variable
`NODE_NO_READLINE=1`. This will start the main and debugger REPL in canonical
terminal settings, which will allow use with `rlwrap`.
diff --git a/doc/api/stream.md b/doc/api/stream.md
index 3663fe2d780830..f635558707b1cf 100644
--- a/doc/api/stream.md
+++ b/doc/api/stream.md
@@ -1,5 +1,7 @@
# Stream
+
+
> Stability: 2 - Stable
A stream is an abstract interface for working with streaming data in Node.js.
@@ -66,8 +68,8 @@ buffer that can be retrieved using `writable._writableState.getBuffer()` or
The amount of data potentially buffered depends on the `highWaterMark` option
passed into the streams constructor. For normal streams, the `highWaterMark`
-option specifies a total number of bytes. For streams operating in object mode,
-the `highWaterMark` specifies a total number of objects.
+option specifies a [total number of bytes][hwm-gotcha]. For streams operating
+in object mode, the `highWaterMark` specifies a total number of objects.
Data is buffered in Readable streams when the implementation calls
[`stream.push(chunk)`][stream-push]. If the consumer of the Stream does not
@@ -894,7 +896,7 @@ in object mode.
The optional `size` argument specifies a specific number of bytes to read. If
`size` bytes are not available to be read, `null` will be returned *unless*
the stream has ended, in which case all of the data remaining in the internal
-buffer will be returned (*even if it exceeds `size` bytes*).
+buffer will be returned.
If the `size` argument is not specified, all of the data contained in the
internal buffer will be returned.
@@ -1383,7 +1385,7 @@ resource.
[`writable._write()`][stream-_write].
*Note*: This function MUST NOT be called by application code directly. It
-should be implemented by child classes, and called only by the internal Writable
+should be implemented by child classes, and called by the internal Writable
class methods only.
The `callback` method must be called to signal either that the write completed
@@ -1418,7 +1420,7 @@ user programs.
argument) to be invoked when processing is complete for the supplied chunks.
*Note*: This function MUST NOT be called by application code directly. It
-should be implemented by child classes, and called only by the internal Writable
+should be implemented by child classes, and called by the internal Writable
class methods only.
The `writable._writev()` method may be implemented in addition to
@@ -1445,9 +1447,9 @@ added: v8.0.0
-->
* `callback` {Function} Call this function (optionally with an error
- argument) when you are done writing any remaining data.
+ argument) when finished writing any remaining data.
-Note: `_final()` **must not** be called directly. It MAY be implemented
+The `_final()` method **must not** be called directly. It may be implemented
by child classes, and if so, will be called by the internal Writable
class methods only.
@@ -1515,9 +1517,9 @@ constructor and implement the `readable._read()` method.
#### new stream.Readable([options])
* `options` {Object}
- * `highWaterMark` {number} The maximum number of bytes to store in
- the internal buffer before ceasing to read from the underlying
- resource. Defaults to `16384` (16kb), or `16` for `objectMode` streams
+ * `highWaterMark` {number} The maximum [number of bytes][hwm-gotcha] to store
+ in the internal buffer before ceasing to read from the underlying resource.
+ Defaults to `16384` (16kb), or `16` for `objectMode` streams
* `encoding` {string} If specified, then buffers will be decoded to
strings using the specified encoding. Defaults to `null`
* `objectMode` {boolean} Whether this stream should behave
@@ -1573,7 +1575,7 @@ const myReadable = new Readable({
* `size` {number} Number of bytes to read asynchronously
*Note*: This function MUST NOT be called by application code directly. It
-should be implemented by child classes, and called only by the internal Readable
+should be implemented by child classes, and called by the internal Readable
class methods only.
All Readable stream implementations must provide an implementation of the
@@ -1974,7 +1976,7 @@ after all data has been output, which occurs after the callback in
argument and data) to be called when remaining data has been flushed.
*Note*: This function MUST NOT be called by application code directly. It
-should be implemented by child classes, and called only by the internal Readable
+should be implemented by child classes, and called by the internal Readable
class methods only.
In some cases, a transform operation may need to emit an additional bit of
@@ -2009,7 +2011,7 @@ user programs.
processed.
*Note*: This function MUST NOT be called by application code directly. It
-should be implemented by child classes, and called only by the internal Readable
+should be implemented by child classes, and called by the internal Readable
class methods only.
All Transform stream implementations must provide a `_transform()`
@@ -2155,6 +2157,19 @@ object mode has an interesting side effect. Because it *is* a call to
However, because the argument is an empty string, no data is added to the
readable buffer so there is nothing for a user to consume.
+### `highWaterMark` discrepancy after calling `readable.setEncoding()`
+
+The use of `readable.setEncoding()` will change the behavior of how the
+`highWaterMark` operates in non-object mode.
+
+Typically, the size of the current buffer is measured against the
+`highWaterMark` in _bytes_. However, after `setEncoding()` is called, the
+comparison function will begin to measure the buffer's size in _characters_.
+
+This is not a problem in common cases with `latin1` or `ascii`, but be
+mindful of this behavior when working with strings that could contain
+multi-byte characters.
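+
+For example, a sketch (the '€' character is simply an arbitrary multi-byte
+example):
+
+```js
+const { Readable } = require('stream');
+
+// highWaterMark of 4: four *characters* once setEncoding() is in effect,
+// even though '€' occupies three bytes in UTF-8.
+const readable = new Readable({ read() {}, highWaterMark: 4 });
+readable.setEncoding('utf8');
+
+readable.push(Buffer.from('€€€€')); // 12 bytes, but counted as 4 characters
+```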
+
[`'data'`]: #stream_event_data
[`'drain'`]: #stream_event_drain
[`'end'`]: #stream_event_end
@@ -2193,6 +2208,8 @@ readable buffer so there is nothing for a user to consume.
[fs write streams]: fs.html#fs_class_fs_writestream
[http-incoming-message]: http.html#http_class_http_incomingmessage
[zlib]: zlib.html
+[hwm-gotcha]: #stream_highwatermark_discrepancy_after_calling_readable_setencoding
+[Readable]: #stream_class_stream_readable
[stream-_flush]: #stream_transform_flush_callback
[stream-_read]: #stream_readable_read_size_1
[stream-_transform]: #stream_transform_transform_chunk_encoding_callback
diff --git a/doc/api/string_decoder.md b/doc/api/string_decoder.md
index 5757ba6e2b3440..cde81e6ae5ec2b 100644
--- a/doc/api/string_decoder.md
+++ b/doc/api/string_decoder.md
@@ -1,5 +1,7 @@
# String Decoder
+
+
> Stability: 2 - Stable
The `string_decoder` module provides an API for decoding `Buffer` objects into
diff --git a/doc/api/synopsis.md b/doc/api/synopsis.md
index e8fa77eee47e9e..3d680c33b554ba 100644
--- a/doc/api/synopsis.md
+++ b/doc/api/synopsis.md
@@ -1,5 +1,6 @@
# Usage
+
`node [options] [v8 options] [script.js | -e "script" | - ] [arguments]`
diff --git a/doc/api/timers.md b/doc/api/timers.md
index 8abcdcb5cb6890..09502dee1003c8 100644
--- a/doc/api/timers.md
+++ b/doc/api/timers.md
@@ -1,5 +1,7 @@
# Timers
+
+
> Stability: 2 - Stable
The `timer` module exposes a global API for scheduling functions to
diff --git a/doc/api/tls.md b/doc/api/tls.md
index 5df8c6af5e6b79..ebcf85438fdeb5 100644
--- a/doc/api/tls.md
+++ b/doc/api/tls.md
@@ -1,5 +1,7 @@
# TLS (SSL)
+
+
> Stability: 2 - Stable
The `tls` module provides an implementation of the Transport Layer Security
@@ -932,10 +934,14 @@ changes:
-->
* `options` {Object}
- * `pfx` {string|Buffer} Optional PFX or PKCS12 encoded private key and
- certificate chain. `pfx` is an alternative to providing `key` and `cert`
- individually. PFX is usually encrypted, if it is, `passphrase` will be used
- to decrypt it.
+ * `pfx` {string|string[]|Buffer|Buffer[]|Object[]} Optional PFX or PKCS12
+ encoded private key and certificate chain. `pfx` is an alternative to
+ providing `key` and `cert` individually. PFX is usually encrypted, if it is,
+ `passphrase` will be used to decrypt it. Multiple PFX can be provided either
+ as an array of unencrypted PFX buffers, or an array of objects in the form
+ `{buf: <string|buffer>[, passphrase: <string>]}`. The object form can only
+ occur in an array. `object.passphrase` is optional. Encrypted PFX will be
+ decrypted with `object.passphrase` if provided, or `options.passphrase` if it is not.
* `key` {string|string[]|Buffer|Buffer[]|Object[]} Optional private keys in
PEM format. PEM allows the option of private keys being encrypted. Encrypted
keys will be decrypted with `options.passphrase`. Multiple keys using
diff --git a/doc/api/tty.md b/doc/api/tty.md
index 963de892cbc0fd..2950eb6db1a396 100644
--- a/doc/api/tty.md
+++ b/doc/api/tty.md
@@ -1,5 +1,7 @@
# TTY
+
+
> Stability: 2 - Stable
The `tty` module provides the `tty.ReadStream` and `tty.WriteStream` classes.
diff --git a/doc/api/url.md b/doc/api/url.md
index cb2a3965f5eee4..632eef82e4435e 100644
--- a/doc/api/url.md
+++ b/doc/api/url.md
@@ -1,5 +1,7 @@
# URL
+
+
> Stability: 2 - Stable
The `url` module provides utilities for URL resolution and parsing. It can be
diff --git a/doc/api/util.md b/doc/api/util.md
index 076fbc479dc695..ce56c50104dbc4 100644
--- a/doc/api/util.md
+++ b/doc/api/util.md
@@ -1,5 +1,7 @@
# Util
+
+
> Stability: 2 - Stable
The `util` module is primarily designed to support the needs of Node.js' own
diff --git a/doc/api/v8.md b/doc/api/v8.md
index 3a3e5f664a14e8..634d3199a1a012 100644
--- a/doc/api/v8.md
+++ b/doc/api/v8.md
@@ -1,5 +1,7 @@
# V8
+
+
The `v8` module exposes APIs that are specific to the version of [V8][]
built into the Node.js binary. It can be accessed using:
diff --git a/doc/api/vm.md b/doc/api/vm.md
index 42046e01191b51..dff10b17cf20fd 100644
--- a/doc/api/vm.md
+++ b/doc/api/vm.md
@@ -1,5 +1,7 @@
# VM (Executing JavaScript)
+
+
> Stability: 2 - Stable
diff --git a/doc/api/zlib.md b/doc/api/zlib.md
index 4f9423585ff236..f480306d2584e9 100644
--- a/doc/api/zlib.md
+++ b/doc/api/zlib.md
@@ -1,5 +1,7 @@
# Zlib
+
+
> Stability: 2 - Stable
The `zlib` module provides compression functionality implemented using Gzip and
@@ -43,6 +45,13 @@ zlib.unzip(buffer, (err, buffer) => {
});
```
+## Threadpool Usage
+
+Note that all zlib APIs except those that are explicitly synchronous use
+libuv's threadpool, which can have surprising and negative performance
+implications for some applications. See the [`UV_THREADPOOL_SIZE`][]
+documentation for more information.
+
## Compressing HTTP requests and responses
The `zlib` module can be used to implement support for the `gzip` and `deflate`
diff --git a/doc/api_assets/style.css b/doc/api_assets/style.css
index 20845970fbce37..6d764fd88916c7 100644
--- a/doc/api_assets/style.css
+++ b/doc/api_assets/style.css
@@ -81,6 +81,61 @@ em code {
#gtoc {
font-size: .8em;
+ margin-bottom: 1em;
+}
+
+#gtoc ul {
+ list-style: none;
+ margin-left: 0;
+}
+
+#gtoc li {
+ display: inline;
+}
+
+li.version-picker {
+ position: relative;
+}
+
+li.version-picker:hover > ol {
+ display: block;
+}
+
+li.version-picker a span {
+ font-size: .7em;
+}
+
+ol.version-picker {
+ background: #fff;
+ border: 1px #43853d solid;
+ border-radius: 2px;
+ display: none;
+ list-style: none;
+ position: absolute;
+ right: -2px;
+ width: 101%;
+}
+
+#gtoc ol.version-picker li {
+ display: block;
+}
+
+ol.version-picker li a {
+ border-radius: 0;
+ display: block;
+ margin: 0;
+ padding: .1em;
+ padding-left: 1em;
+}
+
+ol.version-picker li:first-child a {
+ border-top-right-radius: 1px;
+ border-top-left-radius: 1px;
+}
+
+ol.version-picker li:last-child a {
+ border-bottom-right-radius: 1px;
+ border-bottom-left-radius: 1px;
}
.line {
@@ -93,7 +148,8 @@ em code {
color: white !important;
margin: 0 0 1em 0;
font-family: "Lato", "Lucida Grande", "Lucida Sans Unicode", "Lucida Sans", Verdana, Tahoma, sans-serif;
- font-weight: 700;
+ padding: 1em;
+ line-height: 1.5;
}
.api_stability * {
@@ -506,6 +562,9 @@ th > *:last-child, td > *:last-child {
#content {
font-size: 3.5em;
}
+ #gtoc {
+ font-size: 0.6em;
+ }
}
@media print {
diff --git a/doc/changelogs/CHANGELOG_V8.md b/doc/changelogs/CHANGELOG_V8.md
index 2413d874778933..02cf749e3cbf76 100644
--- a/doc/changelogs/CHANGELOG_V8.md
+++ b/doc/changelogs/CHANGELOG_V8.md
@@ -6,6 +6,7 @@
+8.5.0
8.4.0
8.3.0
8.2.1
@@ -30,6 +31,289 @@
* [io.js](CHANGELOG_IOJS.md)
* [Archive](CHANGELOG_ARCHIVE.md)
+
+## 2017-09-12, Version 8.5.0 (Current), @MylesBorins
+
+### Notable Changes
+
+* **build**
+ * Snapshots are now re-enabled in V8
+ [#14875](https://github.com/nodejs/node/pull/14875)
+* **console**
+ * Implement minimal `console.group()`.
+ [#14910](https://github.com/nodejs/node/pull/14910)
+* **deps**
+ * upgrade libuv to 1.14.1
+ [#14866](https://github.com/nodejs/node/pull/14866)
+ * update nghttp2 to v1.25.0
+ [#14955](https://github.com/nodejs/node/pull/14955)
+* **dns**
+ * Add `verbatim` option to dns.lookup(). When true, results from the DNS
+ resolver are passed on as-is, without the reshuffling that Node.js
+ otherwise performs to put IPv4 addresses before IPv6 addresses (see the
+ sketch after this list).
+ [#14731](https://github.com/nodejs/node/pull/14731)
+* **fs**
+ * add fs.copyFile and fs.copyFileSync, which allow for more efficient
+ copying of files (see the sketch after this list).
+ [#15034](https://github.com/nodejs/node/pull/15034)
+* **inspector**
+ * Enable async stack traces [#13870](https://github.com/nodejs/node/pull/13870)
+* **module**
+ * Add support for ESM. This is currently behind the `--experimental-modules` flag
+ and requires the .mjs extension.
+ `node --experimental-modules index.mjs`
+ [#14369](https://github.com/nodejs/node/pull/14369)
+* **napi**
+ * implement promise
+ [#14365](https://github.com/nodejs/node/pull/14365)
+* **os**
+ * Add support for CIDR notation to the output of the networkInterfaces() method.
+ [#14307](https://github.com/nodejs/node/pull/14307)
+* **perf_hooks**
+ * An initial implementation of the Performance Timing API for Node.js. This is the
+ same Performance Timing API implemented by modern browsers, with a number of
+ Node.js-specific properties. The User Timing mark() and measure() APIs are
+ implemented, as is a Node.js-specific flavor of Frame Timing for measuring
+ event loop duration.
+ [#14680](https://github.com/nodejs/node/pull/14680)
+* **tls**
+ * multiple PFX in createSecureContext
+ [#14793](https://github.com/nodejs/node/pull/14793)
+* **Added new collaborators**
+ * [BridgeAR](https://github.com/BridgeAR) – Ruben Bridgewater
+
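As referenced in the `dns` and `fs` items above, a minimal sketch of the two new calls; the host name and file names are placeholders.

```js
const dns = require('dns');
const fs = require('fs');

// dns.lookup() with the new `verbatim` option: addresses come back in the
// order the resolver returned them, without IPv4 being moved ahead of IPv6.
dns.lookup('example.org', { all: true, verbatim: true }, (err, addresses) => {
  if (err) throw err;
  console.log(addresses);
});

// fs.copyFile(): copy a file without a manual read/write pipeline.
fs.copyFile('source.txt', 'destination.txt', (err) => {
  if (err) throw err;
  console.log('source.txt was copied to destination.txt');
});
```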
+### Commits
+
+* [[`87c3e1d7de`](https://github.com/nodejs/node/commit/87c3e1d7de)] - fix --prof-process --preprocess flag (davidmarkclements) [#14966](https://github.com/nodejs/node/pull/14966)
+* [[`bcf0e5d676`](https://github.com/nodejs/node/commit/bcf0e5d676)] - **assert**: handle errors properly with deep*Equal (Ruben Bridgewater) [#15001](https://github.com/nodejs/node/pull/15001)
+* [[`7174dc2e8a`](https://github.com/nodejs/node/commit/7174dc2e8a)] - **assert**: handle sparse arrays in deepStrictEqual (Ruben Bridgewater) [#15027](https://github.com/nodejs/node/pull/15027)
+* [[`b40105df3b`](https://github.com/nodejs/node/commit/b40105df3b)] - **async_hooks**: don't abort unnecessarily (Trevor Norris) [#14722](https://github.com/nodejs/node/pull/14722)
+* [[`3e73ea8745`](https://github.com/nodejs/node/commit/3e73ea8745)] - **async_hooks**: improve comments and function names (Trevor Norris) [#14722](https://github.com/nodejs/node/pull/14722)
+* [[`700d576962`](https://github.com/nodejs/node/commit/700d576962)] - **async_hooks**: emitAfter correctly on fatalException (Trevor Norris) [#14914](https://github.com/nodejs/node/pull/14914)
+* [[`78a36e0dd1`](https://github.com/nodejs/node/commit/78a36e0dd1)] - **async_wrap**: unroll unnecessarily DRY code (Trevor Norris) [#14722](https://github.com/nodejs/node/pull/14722)
+* [[`fadccbaa17`](https://github.com/nodejs/node/commit/fadccbaa17)] - **async_wrap**: return undefined if domain is disposed (Trevor Norris) [#14722](https://github.com/nodejs/node/pull/14722)
+* [[`8d11220e0b`](https://github.com/nodejs/node/commit/8d11220e0b)] - **benchmark**: add default configs to buffer benchmark (Rich Trott) [#15175](https://github.com/nodejs/node/pull/15175)
+* [[`7feb99455a`](https://github.com/nodejs/node/commit/7feb99455a)] - **benchmark**: fix issues in dns benchmark (Ian Perkins) [#14936](https://github.com/nodejs/node/pull/14936)
+* [[`978889f8c0`](https://github.com/nodejs/node/commit/978889f8c0)] - **benchmark**: fix dgram/bind-params.js benchmark (Rich Trott) [#14948](https://github.com/nodejs/node/pull/14948)
+* [[`7f1ea7c3af`](https://github.com/nodejs/node/commit/7f1ea7c3af)] - **benchmark**: removed unused arguments from callbacks (Abhishek Raj) [#14919](https://github.com/nodejs/node/pull/14919)
+* [[`ca3ec90285`](https://github.com/nodejs/node/commit/ca3ec90285)] - **benchmark**: convert var to es6 const (Sebastian Murphy) [#12886](https://github.com/nodejs/node/pull/12886)
+* [[`bda5585012`](https://github.com/nodejs/node/commit/bda5585012)] - **buffer**: fix MAX_LENGTH constant export (Anna Henningsen) [#14821](https://github.com/nodejs/node/pull/14821)
+* [[`b9e1f60333`](https://github.com/nodejs/node/commit/b9e1f60333)] - **buffer**: increase coverage by removing dead code (Marcelo Gobelli) [#15100](https://github.com/nodejs/node/pull/15100)
+* [[`5b8fa29649`](https://github.com/nodejs/node/commit/5b8fa29649)] - **build**: display HTTP2 configure --help options (Daniel Bevenius) [#15198](https://github.com/nodejs/node/pull/15198)
+* [[`6de4e10c7a`](https://github.com/nodejs/node/commit/6de4e10c7a)] - **build**: add NetBSD support to opensslconf.h (Roy Marples) [#14313](https://github.com/nodejs/node/pull/14313)
+* [[`ebb3c2ce6f`](https://github.com/nodejs/node/commit/ebb3c2ce6f)] - **build**: add npx to zip and 7z packages (Richard Lau) [#15033](https://github.com/nodejs/node/pull/15033)
+* [[`b946693f4b`](https://github.com/nodejs/node/commit/b946693f4b)] - **build**: fix indentation in node.gyp (Alexey Orlenko) [#15051](https://github.com/nodejs/node/pull/15051)
+* [[`c8be90cabf`](https://github.com/nodejs/node/commit/c8be90cabf)] - **build**: for --enable-static, run only cctest (Daniel Bevenius) [#14892](https://github.com/nodejs/node/pull/14892)
+* [[`77dfa73cf2`](https://github.com/nodejs/node/commit/77dfa73cf2)] - **build**: better support for python3 systems (Ben Noordhuis) [#14737](https://github.com/nodejs/node/pull/14737)
+* [[`8f3537f66a`](https://github.com/nodejs/node/commit/8f3537f66a)] - **build**: allow proper generation of html docs (Jon Moss) [#14932](https://github.com/nodejs/node/pull/14932)
+* [[`838d3fef72`](https://github.com/nodejs/node/commit/838d3fef72)] - **build**: don't add libraries when --enable-static (Daniel Bevenius) [#14912](https://github.com/nodejs/node/pull/14912)
+* [[`9d373981f4`](https://github.com/nodejs/node/commit/9d373981f4)] - **build**: remove duplicated code (Ruslan Bekenev) [#13482](https://github.com/nodejs/node/pull/13482)
+* [[`e12a9c567c`](https://github.com/nodejs/node/commit/e12a9c567c)] - **build**: re-enable snapshots in v8.x (Myles Borins) [#14875](https://github.com/nodejs/node/pull/14875)
+* [[`3a68b0bb98`](https://github.com/nodejs/node/commit/3a68b0bb98)] - **console**: improve console.group() (Rich Trott) [#14999](https://github.com/nodejs/node/pull/14999)
+* [[`a46e59d52d`](https://github.com/nodejs/node/commit/a46e59d52d)] - **(SEMVER-MINOR)** **console**: implement minimal `console.group()` (Rich Trott) [#14910](https://github.com/nodejs/node/pull/14910)
+* [[`78a71aa123`](https://github.com/nodejs/node/commit/78a71aa123)] - **crypto**: fix error of createCipher in wrap mode (Shigeki Ohtsu) [#15037](https://github.com/nodejs/node/pull/15037)
+* [[`41bf40e209`](https://github.com/nodejs/node/commit/41bf40e209)] - **crypto**: warn if counter mode used in createCipher (Shigeki Ohtsu) [#13821](https://github.com/nodejs/node/pull/13821)
+* [[`ba5a697bdb`](https://github.com/nodejs/node/commit/ba5a697bdb)] - **deps**: cherry-pick 5005faed5 from V8 upstream (Miguel Martins) [#15177](https://github.com/nodejs/node/pull/15177)
+* [[`d18bb3d1dd`](https://github.com/nodejs/node/commit/d18bb3d1dd)] - **deps**: cherry-pick 1aead19 from upstream V8 (Ben Noordhuis) [#15184](https://github.com/nodejs/node/pull/15184)
+* [[`acf9650730`](https://github.com/nodejs/node/commit/acf9650730)] - **deps**: upgrade libuv to 1.14.1 (cjihrig) [#14866](https://github.com/nodejs/node/pull/14866)
+* [[`296729c41e`](https://github.com/nodejs/node/commit/296729c41e)] - **deps**: cherry-pick 0ef4a0c64b6 from c-ares upstream (Anna Henningsen) [#15023](https://github.com/nodejs/node/pull/15023)
+* [[`3f7bdc5ab7`](https://github.com/nodejs/node/commit/3f7bdc5ab7)] - **deps**: cherry-pick e020aae394 from V8 upstream (Ben Noordhuis) [#14913](https://github.com/nodejs/node/pull/14913)
+* [[`c46e7e1988`](https://github.com/nodejs/node/commit/c46e7e1988)] - **deps**: fixup nghttp2 version number (Anna Henningsen) [#14955](https://github.com/nodejs/node/pull/14955)
+* [[`4eb907f26b`](https://github.com/nodejs/node/commit/4eb907f26b)] - **deps**: update nghttp2 to v1.25.0 (Anna Henningsen) [#14955](https://github.com/nodejs/node/pull/14955)
+* [[`9f46bde440`](https://github.com/nodejs/node/commit/9f46bde440)] - **deps**: backport d727680 from V8 upstream (Matt Loring) [#14947](https://github.com/nodejs/node/pull/14947)
+* [[`56bb199ef0`](https://github.com/nodejs/node/commit/56bb199ef0)] - **deps**: cherry-pick eb306f463e from nghttp2 upstream (Anna Henningsen) [#14808](https://github.com/nodejs/node/pull/14808)
+* [[`55eed604a9`](https://github.com/nodejs/node/commit/55eed604a9)] - **deps**: backport f9c4b7a from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`b7f7d67677`](https://github.com/nodejs/node/commit/b7f7d67677)] - **deps**: backport bca8409 from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`a67e7f9b35`](https://github.com/nodejs/node/commit/a67e7f9b35)] - **deps**: backport 6e9e2e5 from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`6e2f62262d`](https://github.com/nodejs/node/commit/6e2f62262d)] - **deps**: backport 3d8e87a from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`6cb718b87a`](https://github.com/nodejs/node/commit/6cb718b87a)] - **deps**: backport 5152d97 from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`c6e2b8adf7`](https://github.com/nodejs/node/commit/c6e2b8adf7)] - **deps**: backport c4852ea from upstream V8 (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`bfb97b71b6`](https://github.com/nodejs/node/commit/bfb97b71b6)] - **deps**: cherry-pick fa4ec9f from V8 upstream (Jaideep Bajwa) [#14608](https://github.com/nodejs/node/pull/14608)
+* [[`1a2f749e16`](https://github.com/nodejs/node/commit/1a2f749e16)] - **deps**: fix inspector v8 test (Eugene Ostroukhov) [#14827](https://github.com/nodejs/node/pull/14827)
+* [[`13577d4ada`](https://github.com/nodejs/node/commit/13577d4ada)] - **dns**: add `verbatim` option to dns.lookup() (Ben Noordhuis) [#14731](https://github.com/nodejs/node/pull/14731)
+* [[`ffed33710c`](https://github.com/nodejs/node/commit/ffed33710c)] - **doc**: add ESM doc to _toc.md and all.md (Vse Mozhet Byt) [#15248](https://github.com/nodejs/node/pull/15248)
+* [[`1b51287603`](https://github.com/nodejs/node/commit/1b51287603)] - **doc**: fix Error property markdown level (Sam Roberts) [#15247](https://github.com/nodejs/node/pull/15247)
+* [[`af3b173e82`](https://github.com/nodejs/node/commit/af3b173e82)] - **doc**: add missing space in test/README.md (Vse Mozhet Byt) [#15278](https://github.com/nodejs/node/pull/15278)
+* [[`c90c68e8a0`](https://github.com/nodejs/node/commit/c90c68e8a0)] - **doc**: document bytes to chars after setEncoding (Jessica Quynh Tran) [#13442](https://github.com/nodejs/node/pull/13442)
+* [[`ea86cb59b9`](https://github.com/nodejs/node/commit/ea86cb59b9)] - **doc**: describe what security issues are (Sam Roberts) [#14485](https://github.com/nodejs/node/pull/14485)
+* [[`ddbcc9e59d`](https://github.com/nodejs/node/commit/ddbcc9e59d)] - **doc**: add options argument to crypto docs (Adina Shanholtz) [#14846](https://github.com/nodejs/node/pull/14846)
+* [[`da5e6d33d5`](https://github.com/nodejs/node/commit/da5e6d33d5)] - **doc**: instructions for generating coverage reports (Simon Brewster) [#15190](https://github.com/nodejs/node/pull/15190)
+* [[`286111a2b0`](https://github.com/nodejs/node/commit/286111a2b0)] - **doc**: clarify async/asynchronous in deprecations.md (Rich Trott) [#15172](https://github.com/nodejs/node/pull/15172)
+* [[`9542844feb`](https://github.com/nodejs/node/commit/9542844feb)] - **doc**: `readFileSync` instead of `fs.readFileSync` (Piotr Mionskowski) [#15137](https://github.com/nodejs/node/pull/15137)
+* [[`959b270fe1`](https://github.com/nodejs/node/commit/959b270fe1)] - **doc**: /s/SHASUM256/SHASUMS256 (Jon Moss) [#15101](https://github.com/nodejs/node/pull/15101)
+* [[`3697cd86c4`](https://github.com/nodejs/node/commit/3697cd86c4)] - **doc**: fix comment about http2.createSecureServer (creeperyang) [#15085](https://github.com/nodejs/node/pull/15085)
+* [[`76780445b3`](https://github.com/nodejs/node/commit/76780445b3)] - **doc**: remove braces which shouldn't be there (Jan Schär) [#15094](https://github.com/nodejs/node/pull/15094)
+* [[`2610ae326f`](https://github.com/nodejs/node/commit/2610ae326f)] - **doc**: clarify http.get data consumption requirement (AJ Jordan) [#15049](https://github.com/nodejs/node/pull/15049)
+* [[`e7838d7077`](https://github.com/nodejs/node/commit/e7838d7077)] - **doc**: add 8.4.0 link to CHANGELOG.md (Ruslan Iusupov) [#15064](https://github.com/nodejs/node/pull/15064)
+* [[`feeff48d5c`](https://github.com/nodejs/node/commit/feeff48d5c)] - **doc**: add links to alternative versions of doc (Chris Young) [#10958](https://github.com/nodejs/node/pull/10958)
+* [[`a5242851b9`](https://github.com/nodejs/node/commit/a5242851b9)] - **doc**: update configure to require g++ 4.9.4 (Dave Olszewski) [#14204](https://github.com/nodejs/node/pull/14204)
+* [[`87ff86b2d8`](https://github.com/nodejs/node/commit/87ff86b2d8)] - **doc**: building - note on Windows SDK 15063 (Refael Ackermann) [#14394](https://github.com/nodejs/node/pull/14394)
+* [[`449549bc4f`](https://github.com/nodejs/node/commit/449549bc4f)] - **doc**: threadpool size, and APIs using the pool (Sam Roberts) [#14995](https://github.com/nodejs/node/pull/14995)
+* [[`6bb8133638`](https://github.com/nodejs/node/commit/6bb8133638)] - **doc**: sort bottom-of-file dns markdown links (Sam Roberts) [#14992](https://github.com/nodejs/node/pull/14992)
+* [[`a06d1295c5`](https://github.com/nodejs/node/commit/a06d1295c5)] - **doc**: crypto.randomBytes does not block when async (Sam Roberts) [#14993](https://github.com/nodejs/node/pull/14993)
+* [[`83ba2aa46b`](https://github.com/nodejs/node/commit/83ba2aa46b)] - **doc**: environmental-\>environment & NodeJS-\>Node.js (Rod Vagg) [#14974](https://github.com/nodejs/node/pull/14974)
+* [[`f1bc168ad5`](https://github.com/nodejs/node/commit/f1bc168ad5)] - **doc**: fix typo in Buffer.from(string, \[encoding\]) (Michał Wadas) [#15013](https://github.com/nodejs/node/pull/15013)
+* [[`9b9e7b4044`](https://github.com/nodejs/node/commit/9b9e7b4044)] - **doc**: add note for Windows build path (Kyle Lamse) [#14354](https://github.com/nodejs/node/pull/14354)
+* [[`57c7eae1df`](https://github.com/nodejs/node/commit/57c7eae1df)] - **doc**: rephrase text of child_process.execSync() (hafiz) [#14953](https://github.com/nodejs/node/pull/14953)
+* [[`188713ca46`](https://github.com/nodejs/node/commit/188713ca46)] - **doc**: beautify net.md formats (sevenryze) [#14987](https://github.com/nodejs/node/pull/14987)
+* [[`a8648e287c`](https://github.com/nodejs/node/commit/a8648e287c)] - **doc**: link to correct "OS Constants" heading in docs (James Kyle) [#14969](https://github.com/nodejs/node/pull/14969)
+* [[`e187c98186`](https://github.com/nodejs/node/commit/e187c98186)] - **doc**: remove misterdjules from the CTC members list (Julien Gilli) [#1498](https://github.com/nodejs/node/pull/1498)
+* [[`78b2bc77f2`](https://github.com/nodejs/node/commit/78b2bc77f2)] - **doc**: update http2.md example code (RefinedSoftwareLLC) [#14979](https://github.com/nodejs/node/pull/14979)
+* [[`6179c2764a`](https://github.com/nodejs/node/commit/6179c2764a)] - **doc**: fix doc for napi_get_value_string_utf8 (Daniel Taveras) [#14529](https://github.com/nodejs/node/pull/14529)
+* [[`daae6bc652`](https://github.com/nodejs/node/commit/daae6bc652)] - **doc**: fixed link definitions in http2.md footer (sharababy) [#14946](https://github.com/nodejs/node/pull/14946)
+* [[`6c93d01fba`](https://github.com/nodejs/node/commit/6c93d01fba)] - **doc**: remove `you` and fixup note in stream.md (James M Snell) [#14938](https://github.com/nodejs/node/pull/14938)
+* [[`96d95d4fed`](https://github.com/nodejs/node/commit/96d95d4fed)] - **doc**: minor fixes to http/2 docs (Anand Suresh) [#14877](https://github.com/nodejs/node/pull/14877)
+* [[`bfa3cbe158`](https://github.com/nodejs/node/commit/bfa3cbe158)] - **doc**: remove redundant only from doc/api/stream.md (George Sapkin) [#14858](https://github.com/nodejs/node/pull/14858)
+* [[`c5380c83c6`](https://github.com/nodejs/node/commit/c5380c83c6)] - **doc**: add missing word (Jon Moss) [#14924](https://github.com/nodejs/node/pull/14924)
+* [[`abe014834e`](https://github.com/nodejs/node/commit/abe014834e)] - **doc**: fix http api document (陈刚) [#14625](https://github.com/nodejs/node/pull/14625)
+* [[`050a2249c1`](https://github.com/nodejs/node/commit/050a2249c1)] - **doc**: explain what to do if git push is rejected (Rich Trott) [#14848](https://github.com/nodejs/node/pull/14848)
+* [[`3d621393bd`](https://github.com/nodejs/node/commit/3d621393bd)] - **doc**: add BridgeAR to collaborators (Ruben Bridgewater) [#14862](https://github.com/nodejs/node/pull/14862)
+* [[`c8f0e5ab82`](https://github.com/nodejs/node/commit/c8f0e5ab82)] - **doc**: fix typo in cli.md (hsmtkk) [#14855](https://github.com/nodejs/node/pull/14855)
+* [[`0dc9d284a4`](https://github.com/nodejs/node/commit/0dc9d284a4)] - **doc**: added napi_get_value_string_latin1 (Kyle Farnung) [#14678](https://github.com/nodejs/node/pull/14678)
+* [[`72cc2caf78`](https://github.com/nodejs/node/commit/72cc2caf78)] - **doc**: fix word wrapping for api stability boxes (Saad Quadri) [#14809](https://github.com/nodejs/node/pull/14809)
+* [[`205d5f674a`](https://github.com/nodejs/node/commit/205d5f674a)] - **doc,fs**: rename defaultEncoding option to encoding (Aleh Zasypkin) [#14867](https://github.com/nodejs/node/pull/14867)
+* [[`aaf55db95b`](https://github.com/nodejs/node/commit/aaf55db95b)] - **doc,lib,src,test**: strip executable bits off files (Anna Henningsen) [#15132](https://github.com/nodejs/node/pull/15132)
+* [[`7f62378e76`](https://github.com/nodejs/node/commit/7f62378e76)] - **doc,stream**: remove wrong remark on readable.read (Jan Schär) [#15014](https://github.com/nodejs/node/pull/15014)
+* [[`ea2b5760d5`](https://github.com/nodejs/node/commit/ea2b5760d5)] - **errors**: remove duplicated ERR_HTTP_INVALID_STATUS_CODE error (Jon Moss) [#15003](https://github.com/nodejs/node/pull/15003)
+* [[`71f90c6f80`](https://github.com/nodejs/node/commit/71f90c6f80)] - **(SEMVER-MINOR)** **fs**: add fs.copyFile{Sync} (cjihrig) [#15034](https://github.com/nodejs/node/pull/15034)
+* [[`3d9ad82729`](https://github.com/nodejs/node/commit/3d9ad82729)] - **gyp**: fix ninja build failure (GYP patch) (Daniel Bevenius) [#12484](https://github.com/nodejs/node/pull/12484)
+* [[`12191f6ed8`](https://github.com/nodejs/node/commit/12191f6ed8)] - **gyp**: enable cctest to use objects (gyp part) (Daniel Bevenius) [#12450](https://github.com/nodejs/node/pull/12450)
+* [[`538894978b`](https://github.com/nodejs/node/commit/538894978b)] - **gyp**: add compile_commands.json gyp generator (Ben Noordhuis) [#12450](https://github.com/nodejs/node/pull/12450)
+* [[`7eb3679eea`](https://github.com/nodejs/node/commit/7eb3679eea)] - **gyp**: inherit parent for `*.host` (Johan Bergström) [#6173](https://github.com/nodejs/node/pull/6173)
+* [[`5fb252a5a2`](https://github.com/nodejs/node/commit/5fb252a5a2)] - **gyp**: fix gyp to work on MacOSX without XCode (Shigeki Ohtsu) [iojs/io.js#1325](https://github.com/iojs/io.js/pull/1325)
+* [[`0343eceda4`](https://github.com/nodejs/node/commit/0343eceda4)] - **http2**: fix refs to status 205, add tests (Anatoli Papirovski) [#15153](https://github.com/nodejs/node/pull/15153)
+* [[`d8ff550528`](https://github.com/nodejs/node/commit/d8ff550528)] - **http2**: store headersSent after stream destroyed (Anatoli Papirovski) [#15232](https://github.com/nodejs/node/pull/15232)
+* [[`4882f079f1`](https://github.com/nodejs/node/commit/4882f079f1)] - **http2**: set decodeStrings to false, test (Anatoli Papirovski) [#15140](https://github.com/nodejs/node/pull/15140)
+* [[`93a4cf60ff`](https://github.com/nodejs/node/commit/93a4cf60ff)] - **http2**: use session not socket timeout, tests (Anatoli Papirovski) [#15188](https://github.com/nodejs/node/pull/15188)
+* [[`764213cc7b`](https://github.com/nodejs/node/commit/764213cc7b)] - **http2**: add compat trailers, adjust multi-headers (Anatoli Papirovski) [#15193](https://github.com/nodejs/node/pull/15193)
+* [[`cc82f541e5`](https://github.com/nodejs/node/commit/cc82f541e5)] - **http2**: fix closedCode NaN, increase test coverage (Anatoli Papirovski) [#15154](https://github.com/nodejs/node/pull/15154)
+* [[`afa72dfdf3`](https://github.com/nodejs/node/commit/afa72dfdf3)] - **http2**: guard against destroyed session, timeouts (James M Snell) [#15106](https://github.com/nodejs/node/pull/15106)
+* [[`f6c51888db`](https://github.com/nodejs/node/commit/f6c51888db)] - **http2**: correct emit error in onConnect, full tests (Anatoli Papirovski) [#15080](https://github.com/nodejs/node/pull/15080)
+* [[`fd51cb8ca3`](https://github.com/nodejs/node/commit/fd51cb8ca3)] - **http2**: adjust error types, test coverage (Anatoli Papirovski) [#15109](https://github.com/nodejs/node/pull/15109)
+* [[`f612a6dd5c`](https://github.com/nodejs/node/commit/f612a6dd5c)] - **http2**: handle 100-continue flow & writeContinue (Anatoli Papirovski) [#15039](https://github.com/nodejs/node/pull/15039)
+* [[`989dfaf930`](https://github.com/nodejs/node/commit/989dfaf930)] - **http2**: refactor error handling (Matteo Collina) [#14991](https://github.com/nodejs/node/pull/14991)
+* [[`d231ef645e`](https://github.com/nodejs/node/commit/d231ef645e)] - **http2**: ignore invalid headers explicitly (Anna Henningsen) [#14955](https://github.com/nodejs/node/pull/14955)
+* [[`1b57c375aa`](https://github.com/nodejs/node/commit/1b57c375aa)] - **http2**: minor refactor of passing headers to JS (Anna Henningsen) [#14808](https://github.com/nodejs/node/pull/14808)
+* [[`80fe40aabf`](https://github.com/nodejs/node/commit/80fe40aabf)] - **http2**: handful of http/2 src cleanups (James M Snell) [#14825](https://github.com/nodejs/node/pull/14825)
+* [[`9589641c5c`](https://github.com/nodejs/node/commit/9589641c5c)] - **http2**: Expose Http2ServerRequest/Response (Pini Houri) [#14690](https://github.com/nodejs/node/pull/14690)
+* [[`8c61b72f90`](https://github.com/nodejs/node/commit/8c61b72f90)] - **(SEMVER-MINOR)** **inspector**: enable async stack traces (Miroslav Bajtoš) [#13870](https://github.com/nodejs/node/pull/13870)
+* [[`e2ae08b48d`](https://github.com/nodejs/node/commit/e2ae08b48d)] - **inspector**: rewrite inspector test helper (Eugene Ostroukhov) [#14797](https://github.com/nodejs/node/pull/14797)
+* [[`105acf4af7`](https://github.com/nodejs/node/commit/105acf4af7)] - **inspector**: log exceptions in message handlers (Eugene Ostroukhov) [#14980](https://github.com/nodejs/node/pull/14980)
+* [[`d5a376ab7a`](https://github.com/nodejs/node/commit/d5a376ab7a)] - **lib**: remove circular reference (Ruben Bridgewater) [#14885](https://github.com/nodejs/node/pull/14885)
+* [[`605d625e62`](https://github.com/nodejs/node/commit/605d625e62)] - **lib**: simplify the readonly properties of icu (Jackson Tian) [#13221](https://github.com/nodejs/node/pull/13221)
+* [[`ea0a882041`](https://github.com/nodejs/node/commit/ea0a882041)] - **lib**: remove the invalid command line options (Jackson Tian) [#13764](https://github.com/nodejs/node/pull/13764)
+* [[`9129057e03`](https://github.com/nodejs/node/commit/9129057e03)] - **lib**: clean up usage of threw (Jackson Tian) [#10534](https://github.com/nodejs/node/pull/10534)
+* [[`f34e0f97e7`](https://github.com/nodejs/node/commit/f34e0f97e7)] - **lib**: instantiate console methods eagerly (Ben Noordhuis) [#14791](https://github.com/nodejs/node/pull/14791)
+* [[`01846a06c2`](https://github.com/nodejs/node/commit/01846a06c2)] - **meta**: merge TSC and CTC back into a single body (James M Snell) [#14973](https://github.com/nodejs/node/pull/14973)
+* [[`859abe5169`](https://github.com/nodejs/node/commit/859abe5169)] - **meta**: considerations for new core modules (James M Snell) [#15022](https://github.com/nodejs/node/pull/15022)
+* [[`cc72118e71`](https://github.com/nodejs/node/commit/cc72118e71)] - **meta**: improve definition of a collaborator (James M Snell) [#14981](https://github.com/nodejs/node/pull/14981)
+* [[`865a3c3daf`](https://github.com/nodejs/node/commit/865a3c3daf)] - **(SEMVER-MINOR)** **module**: Allow runMain to be ESM (Bradley Farias) [#14369](https://github.com/nodejs/node/pull/14369)
+* [[`4bf0d4e133`](https://github.com/nodejs/node/commit/4bf0d4e133)] - **n-api**: implement napi_run_script (Gabriel Schulhof) [#15216](https://github.com/nodejs/node/pull/15216)
+* [[`3a18df0750`](https://github.com/nodejs/node/commit/3a18df0750)] - **n-api**: adds function to adjust external memory (Chris Young) [#14310](https://github.com/nodejs/node/pull/14310)
+* [[`503370e2d3`](https://github.com/nodejs/node/commit/503370e2d3)] - **(SEMVER-MINOR)** **n-api**: implement promise (Gabriel Schulhof) [#14365](https://github.com/nodejs/node/pull/14365)
+* [[`a6344d5a83`](https://github.com/nodejs/node/commit/a6344d5a83)] - **(SEMVER-MINOR)** **n-api**: add ability to remove a wrapping (Gabriel Schulhof) [#14658](https://github.com/nodejs/node/pull/14658)
+* [[`67fde146e0`](https://github.com/nodejs/node/commit/67fde146e0)] - **net**: check EADDRINUSE after binding localPort (Joyee Cheung) [#15097](https://github.com/nodejs/node/pull/15097)
+* [[`b4e8850576`](https://github.com/nodejs/node/commit/b4e8850576)] - **net**: move debug statement (Brian White) [#12616](https://github.com/nodejs/node/pull/12616)
+* [[`136eea4bcb`](https://github.com/nodejs/node/commit/136eea4bcb)] - **(SEMVER-MINOR)** **os**: add CIDR support (Mudit Ameta) [#14307](https://github.com/nodejs/node/pull/14307)
+* [[`29f9101a0f`](https://github.com/nodejs/node/commit/29f9101a0f)] - **path**: fix normalize on directories with two dots (Michaël Zasso) [#14107](https://github.com/nodejs/node/pull/14107)
+* [[`e3f5c58423`](https://github.com/nodejs/node/commit/e3f5c58423)] - **perf_hooks**: fix presumed typo in node_perf.cc (Anna Henningsen) [#15019](https://github.com/nodejs/node/pull/15019)
+* [[`69e3bc64cc`](https://github.com/nodejs/node/commit/69e3bc64cc)] - **perf_hooks**: mark as experimental (James M Snell) [#14997](https://github.com/nodejs/node/pull/14997)
+* [[`f75faddb1f`](https://github.com/nodejs/node/commit/f75faddb1f)] - **(SEMVER-MINOR)** **perf_hooks**: implementation of the perf timing API (James M Snell) [#14680](https://github.com/nodejs/node/pull/14680)
+* [[`4d2aa16d33`](https://github.com/nodejs/node/commit/4d2aa16d33)] - **process**: keep process prototype in inheritance chain (Jimmy Thomson) [#14715](https://github.com/nodejs/node/pull/14715)
+* [[`ae85d5f024`](https://github.com/nodejs/node/commit/ae85d5f024)] - **promises**: more robust stringification (Timothy Gu) [#13784](https://github.com/nodejs/node/pull/13784)
+* [[`eee2aa693b`](https://github.com/nodejs/node/commit/eee2aa693b)] - **repl**: force editorMode in .load (Lance Ball) [#14861](https://github.com/nodejs/node/pull/14861)
+* [[`f81812b1ff`](https://github.com/nodejs/node/commit/f81812b1ff)] - **src**: turn key length exception into CHECK (Ben Noordhuis) [#15183](https://github.com/nodejs/node/pull/15183)
+* [[`f113d7332f`](https://github.com/nodejs/node/commit/f113d7332f)] - **src**: fix compiler warnings in node_perf.cc (Daniel Bevenius) [#15112](https://github.com/nodejs/node/pull/15112)
+* [[`a83d427091`](https://github.com/nodejs/node/commit/a83d427091)] - **src**: remove unused persistent properties from env (Anna Henningsen) [#15096](https://github.com/nodejs/node/pull/15096)
+* [[`391855c252`](https://github.com/nodejs/node/commit/391855c252)] - **src**: fix build on certain platforms (Anna Henningsen) [#14996](https://github.com/nodejs/node/pull/14996)
+* [[`8cee5d66bd`](https://github.com/nodejs/node/commit/8cee5d66bd)] - **src**: reduce code duplication (James M Snell) [#14937](https://github.com/nodejs/node/pull/14937)
+* [[`5a05dfe0a7`](https://github.com/nodejs/node/commit/5a05dfe0a7)] - **src**: fixup strings, reduce duplication (James M Snell) [#14937](https://github.com/nodejs/node/pull/14937)
+* [[`1c3cb49f00`](https://github.com/nodejs/node/commit/1c3cb49f00)] - **src**: miscellaneous cleanups for node_config (James M Snell) [#14868](https://github.com/nodejs/node/pull/14868)
+* [[`7213be9f59`](https://github.com/nodejs/node/commit/7213be9f59)] - **src**: fix DEBUG_HTTP2 type arguments (Daniel Bevenius) [#15197](https://github.com/nodejs/node/pull/15197)
+* [[`ffe572addd`](https://github.com/nodejs/node/commit/ffe572addd)] - **src**: replace assert() with CHECK() (Ben Noordhuis) [#14663](https://github.com/nodejs/node/pull/14663)
+* [[`abc5cdc923`](https://github.com/nodejs/node/commit/abc5cdc923)] - **src**: remove unnecessary helper function (Brian White) [#14959](https://github.com/nodejs/node/pull/14959)
+* [[`992d1dd956`](https://github.com/nodejs/node/commit/992d1dd956)] - **src**: detect nul bytes in InternalModuleReadFile() (Ben Noordhuis) [#14854](https://github.com/nodejs/node/pull/14854)
+* [[`4570fa16c7`](https://github.com/nodejs/node/commit/4570fa16c7)] - **src**: remove extra copy from Copy() in node_url.cc (Anna Henningsen) [#14907](https://github.com/nodejs/node/pull/14907)
+* [[`081c3e107d`](https://github.com/nodejs/node/commit/081c3e107d)] - **src**: minor cleanup for node_revert (James M Snell) [#14864](https://github.com/nodejs/node/pull/14864)
+* [[`dcd7817fbc`](https://github.com/nodejs/node/commit/dcd7817fbc)] - **src**: use `unordered_set` instead of custom rb tree (Anna Henningsen) [#14826](https://github.com/nodejs/node/pull/14826)
+* [[`fadcbab617`](https://github.com/nodejs/node/commit/fadcbab617)] - **src**: Node implementation of v8::Platform (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`c861462faa`](https://github.com/nodejs/node/commit/c861462faa)] - **stream**: fix Writable instanceof for subclasses (Anna Henningsen) [#14945](https://github.com/nodejs/node/pull/14945)
+* [[`2adabe6777`](https://github.com/nodejs/node/commit/2adabe6777)] - **test**: fix single test runner regression (Timothy Gu) [#15329](https://github.com/nodejs/node/pull/15329)
+* [[`e3d0ff901b`](https://github.com/nodejs/node/commit/e3d0ff901b)] - **test**: split test-cli-node-options (Refael Ackermann) [#14195](https://github.com/nodejs/node/pull/14195)
+* [[`e87cb32db2`](https://github.com/nodejs/node/commit/e87cb32db2)] - **test**: remove envPlus, use Object.assign everywhere (Gibson Fahnestock) [#14845](https://github.com/nodejs/node/pull/14845)
+* [[`dea959e841`](https://github.com/nodejs/node/commit/dea959e841)] - **test**: fix flaky test-readline-interface (Rich Trott) [#15066](https://github.com/nodejs/node/pull/15066)
+* [[`ae91b1efc0`](https://github.com/nodejs/node/commit/ae91b1efc0)] - **test**: continue normalizing fixtures use (Miguel Angel Asencio Hurtado) [#14716](https://github.com/nodejs/node/pull/14716)
+* [[`77bc72ad54`](https://github.com/nodejs/node/commit/77bc72ad54)] - **(SEMVER-MINOR)** **test**: fix inspector helper port sniffing (Timothy Gu) [#13870](https://github.com/nodejs/node/pull/13870)
+* [[`7facfaab66`](https://github.com/nodejs/node/commit/7facfaab66)] - **test**: preserve env in test cases (Beth Griggs) [#14822](https://github.com/nodejs/node/pull/14822)
+* [[`2310cfcea1`](https://github.com/nodejs/node/commit/2310cfcea1)] - **test**: exclude write-coverage from coverage report (Benjamin Coe) [#15194](https://github.com/nodejs/node/pull/15194)
+* [[`6fa05e671c`](https://github.com/nodejs/node/commit/6fa05e671c)] - **test**: use no-save and no-package-lock flags (Simon Brewster) [#15196](https://github.com/nodejs/node/pull/15196)
+* [[`ac71d99253`](https://github.com/nodejs/node/commit/ac71d99253)] - **test**: add http2 compat setTimeout tests (Anatoli Papirovski) [#15156](https://github.com/nodejs/node/pull/15156)
+* [[`7106734773`](https://github.com/nodejs/node/commit/7106734773)] - **test**: add test-benchmark-buffer (Rich Trott) [#15175](https://github.com/nodejs/node/pull/15175)
+* [[`0b9fde4d4a`](https://github.com/nodejs/node/commit/0b9fde4d4a)] - **test**: refactor test-fs-readfile-unlink (Rich Trott) [#15173](https://github.com/nodejs/node/pull/15173)
+* [[`9f79bd8fba`](https://github.com/nodejs/node/commit/9f79bd8fba)] - **test**: http2 test coverage for NghttpError (James M Snell) [#15105](https://github.com/nodejs/node/pull/15105)
+* [[`c0dba0f3f4`](https://github.com/nodejs/node/commit/c0dba0f3f4)] - **test**: http2 test coverage for assertValidPseudoHeader (James M Snell) [#15105](https://github.com/nodejs/node/pull/15105)
+* [[`837c29c73b`](https://github.com/nodejs/node/commit/837c29c73b)] - **test**: http2 test coverage for updateOptionsBuffer (James M Snell) [#15105](https://github.com/nodejs/node/pull/15105)
+* [[`e3e9e5039d`](https://github.com/nodejs/node/commit/e3e9e5039d)] - **test**: increase Http2ServerResponse test coverage (Anatoli Papirovski) [#15074](https://github.com/nodejs/node/pull/15074)
+* [[`72aae0417c`](https://github.com/nodejs/node/commit/72aae0417c)] - **test**: split path tests into multiple files (Michaël Zasso) [#15093](https://github.com/nodejs/node/pull/15093)
+* [[`d176a18547`](https://github.com/nodejs/node/commit/d176a18547)] - **test**: add a test for Expect & checkExpectation (Anatoli Papirovski) [#15040](https://github.com/nodejs/node/pull/15040)
+* [[`cfbf5057d6`](https://github.com/nodejs/node/commit/cfbf5057d6)] - **test**: add http2 test for method CONNECT (Anatoli Papirovski) [#15052](https://github.com/nodejs/node/pull/15052)
+* [[`5b13add028`](https://github.com/nodejs/node/commit/5b13add028)] - **test**: remove unused param in test-graph.pipe (Simon Brewster) [#15007](https://github.com/nodejs/node/pull/15007)
+* [[`5cb6500de9`](https://github.com/nodejs/node/commit/5cb6500de9)] - **test**: increase coverage for http2 response headers (Anatoli Papirovski) [#15035](https://github.com/nodejs/node/pull/15035)
+* [[`7050608593`](https://github.com/nodejs/node/commit/7050608593)] - **test**: fix hijackStdout behavior in console (XadillaX) [#14647](https://github.com/nodejs/node/pull/14647)
+* [[`458b8ab5df`](https://github.com/nodejs/node/commit/458b8ab5df)] - **test**: add regression test for 14814 (Anna Henningsen) [#15023](https://github.com/nodejs/node/pull/15023)
+* [[`f89ef77144`](https://github.com/nodejs/node/commit/f89ef77144)] - **test**: run abort tests (Rich Trott) [#14013](https://github.com/nodejs/node/pull/14013)
+* [[`a91a3fe6c4`](https://github.com/nodejs/node/commit/a91a3fe6c4)] - **test**: improve test-abort-backtrace (Rich Trott) [#14013](https://github.com/nodejs/node/pull/14013)
+* [[`b85a73407b`](https://github.com/nodejs/node/commit/b85a73407b)] - **test**: improve test-abort-uncaught-exception (Rich Trott) [#14013](https://github.com/nodejs/node/pull/14013)
+* [[`f694ea6f2b`](https://github.com/nodejs/node/commit/f694ea6f2b)] - **test**: pipe some error output if npm fails (Jeremiah Senkpiel) [#12490](https://github.com/nodejs/node/pull/12490)
+* [[`f1284d32a5`](https://github.com/nodejs/node/commit/f1284d32a5)] - **test**: simplify test-tls-client-default-ciphers (Jon Moss) [#14928](https://github.com/nodejs/node/pull/14928)
+* [[`d4c2eba376`](https://github.com/nodejs/node/commit/d4c2eba376)] - **test**: remove unused function args (Mohd Maqbool Alam) [#14971](https://github.com/nodejs/node/pull/14971)
+* [[`9c7f27b91b`](https://github.com/nodejs/node/commit/9c7f27b91b)] - **test**: extend async addon test (Anna Henningsen) [#14922](https://github.com/nodejs/node/pull/14922)
+* [[`8c927dd71f`](https://github.com/nodejs/node/commit/8c927dd71f)] - **test**: fix async-hooks tests (Bartosz Sosnowski) [#14865](https://github.com/nodejs/node/pull/14865)
+* [[`1849c519ca`](https://github.com/nodejs/node/commit/1849c519ca)] - **test**: add test-benchmark-process (Rich Trott) [#14951](https://github.com/nodejs/node/pull/14951)
+* [[`b480b20e02`](https://github.com/nodejs/node/commit/b480b20e02)] - **test**: add test-benchmark-path (Rich Trott) [#14951](https://github.com/nodejs/node/pull/14951)
+* [[`2e3e136519`](https://github.com/nodejs/node/commit/2e3e136519)] - **test**: add test-benchmark-os (Rich Trott) [#14951](https://github.com/nodejs/node/pull/14951)
+* [[`7e541d6a97`](https://github.com/nodejs/node/commit/7e541d6a97)] - **test**: add test-benchmark-events (Rich Trott) [#14951](https://github.com/nodejs/node/pull/14951)
+* [[`981ef464e2`](https://github.com/nodejs/node/commit/981ef464e2)] - **test**: add test-benchmark-domain (Rich Trott) [#14951](https://github.com/nodejs/node/pull/14951)
+* [[`34d1a779b1`](https://github.com/nodejs/node/commit/34d1a779b1)] - **test**: add known issue for vm module (Franziska Hinkelmann) [#14661](https://github.com/nodejs/node/pull/14661)
+* [[`ae27cb8ea3`](https://github.com/nodejs/node/commit/ae27cb8ea3)] - **test**: do not modify fixtures in test-fs-chmod (Rich Trott) [#14926](https://github.com/nodejs/node/pull/14926)
+* [[`eb46609622`](https://github.com/nodejs/node/commit/eb46609622)] - **test**: improve assertion fail messages (Refael Ackermann) [#14949](https://github.com/nodejs/node/pull/14949)
+* [[`36b8b46443`](https://github.com/nodejs/node/commit/36b8b46443)] - **test**: remove unused parameters (Daniil Shakir) [#14968](https://github.com/nodejs/node/pull/14968)
+* [[`6421a9cb9a`](https://github.com/nodejs/node/commit/6421a9cb9a)] - **test**: remove unused arguments from function (Ankit Parashar) [#14931](https://github.com/nodejs/node/pull/14931)
+* [[`e244f8433e`](https://github.com/nodejs/node/commit/e244f8433e)] - **test**: update windows module load error message (cjihrig) [#14950](https://github.com/nodejs/node/pull/14950)
+* [[`8f61bf2cda`](https://github.com/nodejs/node/commit/8f61bf2cda)] - **test**: increase coverage for http2.connect (Michael Albert) [#14832](https://github.com/nodejs/node/pull/14832)
+* [[`c0312dc781`](https://github.com/nodejs/node/commit/c0312dc781)] - **test**: make timers-blocking-callback more reliable (Rich Trott) [#14831](https://github.com/nodejs/node/pull/14831)
+* [[`762155578a`](https://github.com/nodejs/node/commit/762155578a)] - **test**: remove erroneous assert message from test (Beth Griggs) [#14918](https://github.com/nodejs/node/pull/14918)
+* [[`1217b1a556`](https://github.com/nodejs/node/commit/1217b1a556)] - **test**: add test for cluster benchmarks (Rich Trott) [#14812](https://github.com/nodejs/node/pull/14812)
+* [[`03fd38c1bb`](https://github.com/nodejs/node/commit/03fd38c1bb)] - **test**: Mark test-stop-profile-after-done flaky (Eugene Ostroukhov)
+* [[`4f49ae52f8`](https://github.com/nodejs/node/commit/4f49ae52f8)] - **test**: check util.inspect circular Set and Map refs (Ruben Bridgewater) [#14790](https://github.com/nodejs/node/pull/14790)
+* [[`4dd095c982`](https://github.com/nodejs/node/commit/4dd095c982)] - **test**: refactor async-hooks/test-httparser tests (Runite618) [#14818](https://github.com/nodejs/node/pull/14818)
+* [[`27ec693a53`](https://github.com/nodejs/node/commit/27ec693a53)] - **test**: add missing console.error to exec-maxBuffer (Beth Griggs) [#14796](https://github.com/nodejs/node/pull/14796)
+* [[`7f02c36c4f`](https://github.com/nodejs/node/commit/7f02c36c4f)] - **test**: fix test-cluster-send-handle-large-payload (Rich Trott) [#14780](https://github.com/nodejs/node/pull/14780)
+* [[`4205648216`](https://github.com/nodejs/node/commit/4205648216)] - **test**: invoke callback with common.mustCall() (Griffith Tchenpan) [#8597](https://github.com/nodejs/node/pull/8597)
+* [[`a3feb54c7f`](https://github.com/nodejs/node/commit/a3feb54c7f)] - **test**: make test-tls-alert-handling more strict (Rich Trott) [#14650](https://github.com/nodejs/node/pull/14650)
+* [[`d4f2a52953`](https://github.com/nodejs/node/commit/d4f2a52953)] - **test**: check crypto before requiring tls module (Daniel Bevenius) [#14708](https://github.com/nodejs/node/pull/14708)
+* [[`868b441f3e`](https://github.com/nodejs/node/commit/868b441f3e)] - **test**: begin normalizing fixtures use (James M Snell) [#14332](https://github.com/nodejs/node/pull/14332)
+* [[`c76ec7130e`](https://github.com/nodejs/node/commit/c76ec7130e)] - **test**: improve multiple zlib tests (James M Snell) [#14455](https://github.com/nodejs/node/pull/14455)
+* [[`8fb0895176`](https://github.com/nodejs/node/commit/8fb0895176)] - **test**: improve multiple vm tests (James M Snell) [#14458](https://github.com/nodejs/node/pull/14458)
+* [[`4d6da3f770`](https://github.com/nodejs/node/commit/4d6da3f770)] - **test, win**: fix IPv6 detection on Windows (Bartosz Sosnowski) [#14865](https://github.com/nodejs/node/pull/14865)
+* [[`02260eab98`](https://github.com/nodejs/node/commit/02260eab98)] - **test,doc**: make module name match gyp target name (Gabriel Schulhof) [#15209](https://github.com/nodejs/node/pull/15209)
+* [[`dae86e4cf5`](https://github.com/nodejs/node/commit/dae86e4cf5)] - **timers**: fix outdated comment (Tim Costa) [#14314](https://github.com/nodejs/node/pull/14314)
+* [[`d6ad9d72f7`](https://github.com/nodejs/node/commit/d6ad9d72f7)] - **(SEMVER-MINOR)** **tls**: multiple PFX in createSecureContext (Yury Popov) [#14793](https://github.com/nodejs/node/pull/14793)
+* [[`97908ea4d0`](https://github.com/nodejs/node/commit/97908ea4d0)] - **tools**: bump vswhere helper to 2.0.0 (Refael Ackermann) [#14557](https://github.com/nodejs/node/pull/14557)
+* [[`87e44d8651`](https://github.com/nodejs/node/commit/87e44d8651)] - **tools**: add eslint rule for inspector checking (Daniel Bevenius) [#13813](https://github.com/nodejs/node/pull/13813)
+* [[`1d97ff4800`](https://github.com/nodejs/node/commit/1d97ff4800)] - **tools**: add eslint rule for hasCrypto checking (Daniel Bevenius) [#13813](https://github.com/nodejs/node/pull/13813)
+* [[`bc250a1e38`](https://github.com/nodejs/node/commit/bc250a1e38)] - **tools**: fix linter error in html.js (Michaël Zasso) [#15063](https://github.com/nodejs/node/pull/15063)
+* [[`5ee4e86efc`](https://github.com/nodejs/node/commit/5ee4e86efc)] - **tools**: add custom private key option (Ruslan Bekenev) [#14401](https://github.com/nodejs/node/pull/14401)
+* [[`8f34b834b7`](https://github.com/nodejs/node/commit/8f34b834b7)] - **tools**: update GYP to 324dd166 (Refael Ackermann) [#14718](https://github.com/nodejs/node/pull/14718)
+* [[`e4ea45412e`](https://github.com/nodejs/node/commit/e4ea45412e)] - **tools**: remove stray package-lock.json file (Rich Trott) [#14873](https://github.com/nodejs/node/pull/14873)
+* [[`37c43ede43`](https://github.com/nodejs/node/commit/37c43ede43)] - **tools**: fix update-eslint.sh (Myles Borins) [#14850](https://github.com/nodejs/node/pull/14850)
+* [[`b0f4539ce5`](https://github.com/nodejs/node/commit/b0f4539ce5)] - **tools**: delete an unused argument (phisixersai) [#14251](https://github.com/nodejs/node/pull/14251)
+* [[`9da6c1056c`](https://github.com/nodejs/node/commit/9da6c1056c)] - **tools**: checkout for unassigned DEP00XX codes (James M Snell) [#14702](https://github.com/nodejs/node/pull/14702)
+* [[`bd40cc6ef8`](https://github.com/nodejs/node/commit/bd40cc6ef8)] - **tracing**: Update to use new Platform tracing apis (Matt Loring) [#14001](https://github.com/nodejs/node/pull/14001)
+* [[`a4fc43202e`](https://github.com/nodejs/node/commit/a4fc43202e)] - **url**: remove unused code from autoEscapeStr (Cyril Lakech) [#15086](https://github.com/nodejs/node/pull/15086)
+* [[`2aec977fa2`](https://github.com/nodejs/node/commit/2aec977fa2)] - **util**: remove duplicate code in format (Anatoli Papirovski) [#15098](https://github.com/nodejs/node/pull/15098)
+* [[`de10c0f515`](https://github.com/nodejs/node/commit/de10c0f515)] - **util**: fix inspect array w. negative maxArrayLength (Ruben Bridgewater) [#14880](https://github.com/nodejs/node/pull/14880)
+* [[`c3c6cb1c13`](https://github.com/nodejs/node/commit/c3c6cb1c13)] - **util**: use proper circular reference checking (Anna Henningsen) [#14790](https://github.com/nodejs/node/pull/14790)
+
## 2017-08-15, Version 8.4.0 (Current), @addaleax
diff --git a/doc/node.1 b/doc/node.1
index cf79ce33f929df..36c44d6b2cf8c9 100644
--- a/doc/node.1
+++ b/doc/node.1
@@ -223,15 +223,15 @@ used to enable FIPS-compliant crypto if Node.js is built with
.TP
.BR \-\-use\-openssl\-ca,\-\-use\-bundled\-ca
Use OpenSSL's default CA store or use bundled Mozilla CA store as supplied by
-current NodeJS version. The default store is selectable at build-time.
+current Node.js version. The default store is selectable at build-time.
Using OpenSSL store allows for external modifications of the store. For most
Linux and BSD distributions, this store is maintained by the distribution
maintainers and system administrators. OpenSSL CA store location is dependent on
configuration of the OpenSSL library but this can be altered at runtime using
-environmental variables.
+environment variables.
-The bundled CA store, as supplied by NodeJS, is a snapshot of Mozilla CA store
+The bundled CA store, as supplied by Node.js, is a snapshot of the Mozilla CA store
that is fixed at release time. It is identical on all supported platforms.
See \fBSSL_CERT_DIR\fR and \fBSSL_CERT_FILE\fR.
diff --git a/doc/onboarding.md b/doc/onboarding.md
index e1e10d88b30243..2702c9b993768d 100644
--- a/doc/onboarding.md
+++ b/doc/onboarding.md
@@ -43,7 +43,7 @@ onboarding session.
* Use [https://github.com/notifications](https://github.com/notifications) or set up email
* Watching the main repo will flood your inbox (several hundred notifications on typical weekdays), so be prepared
- * `#node-dev` on [webchat.freenode.net](https://webchat.freenode.net/) is the best place to interact with the CTC / other Collaborators
+ * `#node-dev` on [webchat.freenode.net](https://webchat.freenode.net/) is the best place to interact with the TSC / other Collaborators
* If there are any questions after the session, a good place to ask is there!
* Presence is not mandatory, but please drop a note there if force-pushing to `master`
@@ -67,7 +67,7 @@ onboarding session.
* [**See "Labels"**](./onboarding-extras.md#labels)
* There is [a bot](https://github.com/nodejs-github-bot/github-bot) that applies subsystem labels (for example, `doc`, `test`, `assert`, or `buffer`) so that we know what parts of the code base the pull request modifies. It is not perfect, of course. Feel free to apply relevant labels and remove irrelevant labels from pull requests and issues.
- * Use the `ctc-review` label if a topic is controversial or isn't coming to
+ * Use the `tsc-review` label if a topic is controversial or isn't coming to
a conclusion after an extended time.
* `semver-{minor,major}`:
* If a change has the remote *chance* of breaking something, use the `semver-major` label
@@ -166,7 +166,7 @@ onboarding session.
* Almost any mistake you could make can be fixed or reverted.
* The existing Collaborators trust you and are grateful for your help!
* Other repositories:
- * [https://github.com/nodejs/CTC](https://github.com/nodejs/CTC)
+ * [https://github.com/nodejs/TSC](https://github.com/nodejs/TSC)
* [https://github.com/nodejs/build](https://github.com/nodejs/build)
* [https://github.com/nodejs/nodejs.org](https://github.com/nodejs/nodejs.org)
* [https://github.com/nodejs/readable-stream](https://github.com/nodejs/readable-stream)
diff --git a/doc/releases.md b/doc/releases.md
index d2dd317fdb416b..d1eeb05876085c 100644
--- a/doc/releases.md
+++ b/doc/releases.md
@@ -144,7 +144,7 @@ is shown in **bold** in the index. When updating the index, please make sure
to update the display accordingly by removing the bold styling from the previous
release.
-#### Step 3: Update any REPLACEME tags in the docs
+#### Step 3: Update any REPLACEME and DEP00XX tags in the docs
If this release includes new APIs then it is necessary to document that they
were first added in this version. The relevant commits should already include
@@ -154,6 +154,13 @@ were first added in this version. The relevant commits should already include
`sed -i "s/REPLACEME/$VERSION/g" doc/api/*.md` or
`perl -pi -e "s/REPLACEME/$VERSION/g" doc/api/*.md`.
+If this release includes any new deprecations, it is necessary to ensure that
+those are assigned a proper static deprecation code. These are listed in the
+docs (see `doc/api/deprecations.md`) and in the source as `DEP00XX`. The code
+must be assigned a number (e.g. `DEP0012`). Note that this assignment should
+occur when the PR is landed, but a check will be made when the release build
+is run.
+
### 4. Create Release Commit
The `CHANGELOG.md`, `doc/changelogs/CHANGELOG_*.md`, `src/node_version.h`, and
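As a hedged illustration (not the official tooling check referenced above), unassigned deprecation codes can be spotted with a simple search before cutting the release build:

```console
$ grep -rn "DEP00XX" doc/api/deprecations.md lib/ src/
```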
diff --git a/doc/template.html b/doc/template.html
index 572197beff44fe..d65b56ca5e80ad 100644
--- a/doc/template.html
+++ b/doc/template.html
@@ -23,11 +23,21 @@
diff --git a/lib/_stream_writable.js b/lib/_stream_writable.js
index 6e0eaf45b5464d..7de77958d56b4c 100644
--- a/lib/_stream_writable.js
+++ b/lib/_stream_writable.js
@@ -179,6 +179,8 @@ if (typeof Symbol === 'function' && Symbol.hasInstance) {
value: function(object) {
if (realHasInstance.call(this, object))
return true;
+ if (this !== Writable)
+ return false;
return object && object._writableState instanceof WritableState;
}
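To show what the `this !== Writable` guard above changes in practice, a minimal sketch (the subclass name is made up):

```js
const { Writable, Duplex } = require('stream');

class MyWritable extends Writable {}

const duplex = new Duplex();

// The duck-typing fallback (checking for a _writableState) still applies when
// the right-hand side is the base Writable class...
console.log(duplex instanceof Writable);    // true

// ...but not when it is an arbitrary subclass, which previously also reported
// true for any stream carrying a _writableState.
console.log(duplex instanceof MyWritable);  // false
```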
diff --git a/lib/_tls_common.js b/lib/_tls_common.js
index 36b2ebdad68d0b..3d191b8244871e 100644
--- a/lib/_tls_common.js
+++ b/lib/_tls_common.js
@@ -137,20 +137,29 @@ exports.createSecureContext = function createSecureContext(options, context) {
}
if (options.pfx) {
- var pfx = options.pfx;
- var passphrase = options.passphrase;
-
if (!crypto)
crypto = require('crypto');
- pfx = crypto._toBuf(pfx);
- if (passphrase)
- passphrase = crypto._toBuf(passphrase);
-
- if (passphrase) {
- c.context.loadPKCS12(pfx, passphrase);
+ if (Array.isArray(options.pfx)) {
+ for (i = 0; i < options.pfx.length; i++) {
+ const pfx = options.pfx[i];
+ const raw = pfx.buf ? pfx.buf : pfx;
+ const buf = crypto._toBuf(raw);
+ const passphrase = pfx.passphrase || options.passphrase;
+ if (passphrase) {
+ c.context.loadPKCS12(buf, crypto._toBuf(passphrase));
+ } else {
+ c.context.loadPKCS12(buf);
+ }
+ }
} else {
- c.context.loadPKCS12(pfx);
+ const buf = crypto._toBuf(options.pfx);
+ const passphrase = options.passphrase;
+ if (passphrase) {
+ c.context.loadPKCS12(buf, crypto._toBuf(passphrase));
+ } else {
+ c.context.loadPKCS12(buf);
+ }
}
}
diff --git a/lib/assert.js b/lib/assert.js
index 6e4b9effbe5549..6f340d8ef4e46b 100644
--- a/lib/assert.js
+++ b/lib/assert.js
@@ -21,10 +21,8 @@
'use strict';
const { compare } = process.binding('buffer');
-const util = require('util');
-const { isSet, isMap } = process.binding('util');
+const { isSet, isMap, isDate, isRegExp } = process.binding('util');
const { objectToString } = require('internal/util');
-const { Buffer } = require('buffer');
const errors = require('internal/errors');
// The assert module provides functions that throw
@@ -108,8 +106,8 @@ function areSimilarRegExps(a, b) {
}
// For small buffers it's faster to compare the buffer in a loop. The c++
-// barrier including the Buffer.from operation takes the advantage of the faster
-// compare otherwise. 300 was the number after which compare became faster.
+// barrier including the Uint8Array operation takes the advantage of the faster
+// binary compare otherwise. The break even point was at about 300 characters.
function areSimilarTypedArrays(a, b) {
const len = a.byteLength;
if (len !== b.byteLength) {
@@ -123,12 +121,8 @@ function areSimilarTypedArrays(a, b) {
}
return true;
}
- return compare(Buffer.from(a.buffer,
- a.byteOffset,
- len),
- Buffer.from(b.buffer,
- b.byteOffset,
- b.byteLength)) === 0;
+ return compare(new Uint8Array(a.buffer, a.byteOffset, len),
+ new Uint8Array(b.buffer, b.byteOffset, b.byteLength)) === 0;
}
function isFloatTypedArrayTag(tag) {
@@ -178,18 +172,32 @@ function strictDeepEqual(actual, expected) {
if (Object.getPrototypeOf(actual) !== Object.getPrototypeOf(expected)) {
return false;
}
- if (isObjectOrArrayTag(actualTag)) {
+ if (actualTag === '[object Array]') {
+ // Check for sparse arrays and general fast path
+ if (actual.length !== expected.length)
+ return false;
+ // Skip testing the part below and continue in the callee function.
+ return;
+ }
+ if (actualTag === '[object Object]') {
// Skip testing the part below and continue in the callee function.
return;
}
- if (util.isDate(actual)) {
+ if (isDate(actual)) {
if (actual.getTime() !== expected.getTime()) {
return false;
}
- } else if (util.isRegExp(actual)) {
+ } else if (isRegExp(actual)) {
if (!areSimilarRegExps(actual, expected)) {
return false;
}
+ } else if (actualTag === '[object Error]') {
+ // Do not compare the stack as it might differ even though the error itself
+ // is otherwise identical. The non-enumerable name should be identical as
+ // the prototype is also identical. Otherwise this is caught later on.
+ if (actual.message !== expected.message) {
+ return false;
+ }
} else if (!isFloatTypedArrayTag(actualTag) && ArrayBuffer.isView(actual)) {
if (!areSimilarTypedArrays(actual, expected)) {
return false;
@@ -215,12 +223,16 @@ function looseDeepEqual(actual, expected) {
if (expected === null || typeof expected !== 'object') {
return false;
}
- if (util.isDate(actual) && util.isDate(expected)) {
+ if (isDate(actual) && isDate(expected)) {
return actual.getTime() === expected.getTime();
}
- if (util.isRegExp(actual) && util.isRegExp(expected)) {
+ if (isRegExp(actual) && isRegExp(expected)) {
return areSimilarRegExps(actual, expected);
}
+ if (actual instanceof Error && expected instanceof Error) {
+ if (actual.message !== expected.message || actual.name !== expected.name)
+ return false;
+ }
const actualTag = objectToString(actual);
const expectedTag = objectToString(expected);
if (actualTag === expectedTag) {
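
The new `[object Error]` and array-length branches change what `deepStrictEqual()` accepts; a short sketch of the observable behavior, assuming the errors carry no other own enumerable properties:

```js
const assert = require('assert');

// Error messages (but not stacks) are now part of the comparison.
assert.deepStrictEqual(new Error('boom'), new Error('boom'));  // passes
assert.throws(() => assert.deepStrictEqual(new Error('a'), new Error('b')));

// Arrays now fail fast on differing lengths, which also catches sparse arrays
// whose enumerable keys happen to match.
const a = [1, 2, 3];
const b = [1, 2, 3];
b.length = 4;  // b is now sparse: [1, 2, 3, <1 empty item>]
assert.throws(() => assert.deepStrictEqual(a, b));
```
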
diff --git a/lib/async_hooks.js b/lib/async_hooks.js
index 51039e9f36d24d..67e81ecaecb28a 100644
--- a/lib/async_hooks.js
+++ b/lib/async_hooks.js
@@ -2,39 +2,59 @@
const internalUtil = require('internal/util');
const async_wrap = process.binding('async_wrap');
-/* Both these arrays are used to communicate between JS and C++ with as little
- * overhead as possible.
+const errors = require('internal/errors');
+/* async_hook_fields is a Uint32Array wrapping the uint32_t array of
+ * Environment::AsyncHooks::fields_[]. Each index tracks the number of active
+ * hooks for each type.
*
- * async_hook_fields is a Uint32Array() that communicates the number of each
- * type of active hooks of each type and wraps the uin32_t array of
- * node::Environment::AsyncHooks::fields_.
- *
- * async_uid_fields is a Float64Array() that contains the async/trigger ids for
- * several operations. These fields are as follows:
- * kCurrentAsyncId: The async id of the current execution stack.
- * kCurrentTriggerId: The trigger id of the current execution stack.
- * kAsyncUidCntr: Counter that tracks the unique ids given to new resources.
- * kInitTriggerId: Written to just before creating a new resource, so the
- * constructor knows what other resource is responsible for its init().
- * Used this way so the trigger id doesn't need to be passed to every
- * resource's constructor.
+ * async_uid_fields is a Float64Array wrapping the double array of
+ * Environment::AsyncHooks::uid_fields_[]. Each index contains the ids for the
+ * various asynchronous states of the application. These are:
+ * kCurrentAsyncId: The async_id assigned to the resource responsible for the
+ * current execution stack.
+ * kCurrentTriggerId: The trigger_async_id of the resource responsible for the
+ * current execution stack.
+ * kAsyncUidCntr: Incremental counter tracking the next assigned async_id.
+ * kInitTriggerId: Written immediately before a resource's constructor runs so
+ * that init() receives the correct triggerAsyncId. The triggerAsyncId is
+ * resolved in this order: value passed directly to the constructor -> value
+ * set in kInitTriggerId -> executionAsyncId of the current resource.
*/
const { async_hook_fields, async_uid_fields } = async_wrap;
-// Used to change the state of the async id stack.
+// pushAsyncIds() and popAsyncIds() store the executionAsyncId/triggerAsyncId
+// pair on a std::stack (Environment::AsyncHooks::ids_stack_) that tracks the
+// resource responsible for the current execution stack. The stack is unwound
+// as each resource exits. In the case of a fatal exception it is emptied after
+// calling each hook's after() callback.
const { pushAsyncIds, popAsyncIds } = async_wrap;
-// Array of all AsyncHooks that will be iterated whenever an async event fires.
-// Using var instead of (preferably const) in order to assign
-// tmp_active_hooks_array if a hook is enabled/disabled during hook execution.
-var active_hooks_array = [];
-// Use a counter to track whether a hook callback is currently being processed.
-// Used to make sure active_hooks_array isn't altered in mid execution if
-// another hook is added or removed. A counter is used to track nested calls.
-var processing_hook = 0;
-// Use to temporarily store and updated active_hooks_array if the user enables
-// or disables a hook while hooks are being processed.
-var tmp_active_hooks_array = null;
-// Keep track of the field counts held in tmp_active_hooks_array.
-var tmp_async_hook_fields = null;
+// For performance reasons, only track Promises when a hook is enabled.
+const { enablePromiseHook, disablePromiseHook } = async_wrap;
+// Properties in active_hooks are used to keep track of the set of hooks being
+// executed in case another hook is enabled/disabled. The new set of hooks is
+// then restored once the active set of hooks is finished executing.
+const active_hooks = {
+ // Array of all AsyncHooks that will be iterated whenever an async event
+ // fires. Stored as a property (rather than a const binding) so it can be
+ // replaced with active_hooks.tmp_array if a hook is enabled/disabled during
+ // hook execution.
+ array: [],
+ // Use a counter to track nested calls of async hook callbacks and make sure
+ // the active_hooks.array isn't altered mid execution.
+ call_depth: 0,
+ // Used to temporarily store an updated active_hooks.array if the user
+ // enables or disables a hook while hooks are being processed. If a hook is
+ // enabled() or disabled() during hook execution then the current set of
+ // active hooks is duplicated and set equal to active_hooks.tmp_array. Any
+ // subsequent changes are on the duplicated array. When all hooks have
+ // completed executing active_hooks.tmp_array is assigned to
+ // active_hooks.array.
+ tmp_array: null,
+ // Keep track of the field counts held in active_hooks.tmp_array. Because the
+ // async_hook_fields can't be reassigned, store each uint32 in an array that
+ // is written back to async_hook_fields when active_hooks.array is restored.
+ tmp_fields: null
+};
+
// Each constant tracks how many callbacks there are for any given step of
// async execution. These are tracked so if the user didn't include callbacks
@@ -43,6 +63,9 @@ const { kInit, kBefore, kAfter, kDestroy, kTotals, kCurrentAsyncId,
kCurrentTriggerId, kAsyncUidCntr,
kInitTriggerId } = async_wrap.constants;
+// Symbols used to store the respective ids on both AsyncResource instances and
+// internal resources. They will also be assigned to arbitrary objects passed
+// in by the user that take the place of internally constructed objects.
const { async_id_symbol, trigger_id_symbol } = async_wrap;
// Used in AsyncHook and AsyncResource.
@@ -54,6 +77,9 @@ const emitBeforeNative = emitHookFactory(before_symbol, 'emitBeforeNative');
const emitAfterNative = emitHookFactory(after_symbol, 'emitAfterNative');
const emitDestroyNative = emitHookFactory(destroy_symbol, 'emitDestroyNative');
+// TODO(refack): move to node-config.cc
+const abort_regex = /^--abort[_-]on[_-]uncaught[_-]exception$/;
+
// Setup the callbacks that node::AsyncWrap will call when there are hooks to
// process. They use the same functions as the JS embedder API. These callbacks
// are setup immediately to prevent async_wrap.setupHooks() from being hijacked
@@ -72,7 +98,7 @@ function fatalError(e) {
Error.captureStackTrace(o, fatalError);
process._rawDebug(o.stack);
}
- if (process.execArgv.some((e) => /^--abort[_-]on[_-]uncaught[_-]exception$/.test(e))) {
+ if (process.execArgv.some((e) => abort_regex.test(e))) {
process.abort();
}
process.exit(1);
@@ -84,13 +110,13 @@ function fatalError(e) {
class AsyncHook {
constructor({ init, before, after, destroy }) {
if (init !== undefined && typeof init !== 'function')
- throw new TypeError('init must be a function');
+ throw new errors.TypeError('ERR_ASYNC_CALLBACK', 'init');
if (before !== undefined && typeof before !== 'function')
- throw new TypeError('before must be a function');
+ throw new errors.TypeError('ERR_ASYNC_CALLBACK', 'before');
if (after !== undefined && typeof after !== 'function')
- throw new TypeError('after must be a function');
+ throw new errors.TypeError('ERR_ASYNC_CALLBACK', 'after');
if (destroy !== undefined && typeof destroy !== 'function')
- throw new TypeError('destroy must be a function');
+ throw new errors.TypeError('ERR_ASYNC_CALLBACK', 'destroy');
this[init_symbol] = init;
this[before_symbol] = before;
@@ -122,7 +148,7 @@ class AsyncHook {
hooks_array.push(this);
if (prev_kTotals === 0 && hook_fields[kTotals] > 0)
- async_wrap.enablePromiseHook();
+ enablePromiseHook();
return this;
}
@@ -144,7 +170,7 @@ class AsyncHook {
hooks_array.splice(index, 1);
if (prev_kTotals > 0 && hook_fields[kTotals] === 0)
- async_wrap.disablePromiseHook();
+ disablePromiseHook();
return this;
}
@@ -152,41 +178,41 @@ class AsyncHook {
function getHookArrays() {
- if (processing_hook === 0)
- return [active_hooks_array, async_hook_fields];
+ if (active_hooks.call_depth === 0)
+ return [active_hooks.array, async_hook_fields];
// If this hook is being enabled while in the middle of processing the array
// of currently active hooks then duplicate the current set of active hooks
// and store this there. This shouldn't fire until the next time hooks are
// processed.
- if (tmp_active_hooks_array === null)
+ if (active_hooks.tmp_array === null)
storeActiveHooks();
- return [tmp_active_hooks_array, tmp_async_hook_fields];
+ return [active_hooks.tmp_array, active_hooks.tmp_fields];
}
function storeActiveHooks() {
- tmp_active_hooks_array = active_hooks_array.slice();
+ active_hooks.tmp_array = active_hooks.array.slice();
// Don't want to make the assumption that kInit to kDestroy are indexes 0 to
// 4. So do this the long way.
- tmp_async_hook_fields = [];
- tmp_async_hook_fields[kInit] = async_hook_fields[kInit];
- tmp_async_hook_fields[kBefore] = async_hook_fields[kBefore];
- tmp_async_hook_fields[kAfter] = async_hook_fields[kAfter];
- tmp_async_hook_fields[kDestroy] = async_hook_fields[kDestroy];
+ active_hooks.tmp_fields = [];
+ active_hooks.tmp_fields[kInit] = async_hook_fields[kInit];
+ active_hooks.tmp_fields[kBefore] = async_hook_fields[kBefore];
+ active_hooks.tmp_fields[kAfter] = async_hook_fields[kAfter];
+ active_hooks.tmp_fields[kDestroy] = async_hook_fields[kDestroy];
}
// Then restore the correct hooks array in case any hooks were added/removed
// during hook callback execution.
-function restoreTmpHooks() {
- active_hooks_array = tmp_active_hooks_array;
- async_hook_fields[kInit] = tmp_async_hook_fields[kInit];
- async_hook_fields[kBefore] = tmp_async_hook_fields[kBefore];
- async_hook_fields[kAfter] = tmp_async_hook_fields[kAfter];
- async_hook_fields[kDestroy] = tmp_async_hook_fields[kDestroy];
-
- tmp_active_hooks_array = null;
- tmp_async_hook_fields = null;
+function restoreActiveHooks() {
+ active_hooks.array = active_hooks.tmp_array;
+ async_hook_fields[kInit] = active_hooks.tmp_fields[kInit];
+ async_hook_fields[kBefore] = active_hooks.tmp_fields[kBefore];
+ async_hook_fields[kAfter] = active_hooks.tmp_fields[kAfter];
+ async_hook_fields[kDestroy] = active_hooks.tmp_fields[kDestroy];
+
+ active_hooks.tmp_array = null;
+ active_hooks.tmp_fields = null;
}
@@ -211,8 +237,11 @@ class AsyncResource {
constructor(type, triggerAsyncId = initTriggerId()) {
// Unlike emitInitScript, AsyncResource doesn't supports null as the
// triggerAsyncId.
- if (!Number.isSafeInteger(triggerAsyncId) || triggerAsyncId < 0)
- throw new RangeError('triggerAsyncId must be an unsigned integer');
+ if (!Number.isSafeInteger(triggerAsyncId) || triggerAsyncId < -1) {
+ throw new errors.RangeError('ERR_INVALID_ASYNC_ID',
+ 'triggerAsyncId',
+ triggerAsyncId);
+ }
this[async_id_symbol] = ++async_uid_fields[kAsyncUidCntr];
this[trigger_id_symbol] = triggerAsyncId;
@@ -317,14 +346,17 @@ function emitInitScript(asyncId, type, triggerAsyncId, resource) {
async_uid_fields[kInitTriggerId] = 0;
}
- // TODO(trevnorris): I'd prefer allowing these checks to not exist, or only
- // throw in a debug build, in order to improve performance.
- if (!Number.isSafeInteger(asyncId) || asyncId < 0)
- throw new RangeError('asyncId must be an unsigned integer');
- if (typeof type !== 'string' || type.length <= 0)
- throw new TypeError('type must be a string with length > 0');
- if (!Number.isSafeInteger(triggerAsyncId) || triggerAsyncId < 0)
- throw new RangeError('triggerAsyncId must be an unsigned integer');
+ if (!Number.isSafeInteger(asyncId) || asyncId < -1) {
+ throw new errors.RangeError('ERR_INVALID_ASYNC_ID', 'asyncId', asyncId);
+ }
+ if (!Number.isSafeInteger(triggerAsyncId) || triggerAsyncId < -1) {
+ throw new errors.RangeError('ERR_INVALID_ASYNC_ID',
+ 'triggerAsyncId',
+ triggerAsyncId);
+ }
+ if (typeof type !== 'string' || type.length <= 0) {
+ throw new errors.TypeError('ERR_ASYNC_TYPE', type);
+ }
emitInitNative(asyncId, type, triggerAsyncId, resource);
}
@@ -334,25 +366,30 @@ function emitHookFactory(symbol, name) {
// before this is called.
// eslint-disable-next-line func-style
const fn = function(asyncId) {
- processing_hook += 1;
+ active_hooks.call_depth += 1;
// Use a single try/catch for all hook to avoid setting up one per
// iteration.
try {
- for (var i = 0; i < active_hooks_array.length; i++) {
- if (typeof active_hooks_array[i][symbol] === 'function') {
- active_hooks_array[i][symbol](asyncId);
+ for (var i = 0; i < active_hooks.array.length; i++) {
+ if (typeof active_hooks.array[i][symbol] === 'function') {
+ active_hooks.array[i][symbol](asyncId);
}
}
} catch (e) {
fatalError(e);
} finally {
- processing_hook -= 1;
+ active_hooks.call_depth -= 1;
}
- if (processing_hook === 0 && tmp_active_hooks_array !== null) {
- restoreTmpHooks();
+ // Hooks can only be restored if there have been no recursive hook calls.
+ // Also the active hooks do not need to be restored if enable()/disable()
+ // weren't called during hook execution, in which case
+ // active_hooks.tmp_array will be null.
+ if (active_hooks.call_depth === 0 && active_hooks.tmp_array !== null) {
+ restoreActiveHooks();
}
};
+
// Set the name property of the anonymous function as it looks good in the
// stack trace.
Object.defineProperty(fn, 'name', {
@@ -363,13 +400,17 @@ function emitHookFactory(symbol, name) {
function emitBeforeScript(asyncId, triggerAsyncId) {
- // CHECK(Number.isSafeInteger(asyncId) && asyncId > 0)
- // CHECK(Number.isSafeInteger(triggerAsyncId) && triggerAsyncId > 0)
-
- // Validate the ids.
- if (asyncId < 0 || triggerAsyncId < 0) {
- fatalError('before(): asyncId or triggerAsyncId is less than zero ' +
- `(asyncId: ${asyncId}, triggerAsyncId: ${triggerAsyncId})`);
+ // Validate the ids. An id of -1 means it was never set and is visible on the
+ // call graph. An id < -1 should never happen in any circumstance. Throw
+ // on user calls because async state should still be recoverable.
+ if (!Number.isSafeInteger(asyncId) || asyncId < -1) {
+ fatalError(
+ new errors.RangeError('ERR_INVALID_ASYNC_ID', 'asyncId', asyncId));
+ }
+ if (!Number.isSafeInteger(triggerAsyncId) || triggerAsyncId < -1) {
+ fatalError(new errors.RangeError('ERR_INVALID_ASYNC_ID',
+ 'triggerAsyncId',
+ triggerAsyncId));
}
pushAsyncIds(asyncId, triggerAsyncId);
@@ -379,10 +420,12 @@ function emitBeforeScript(asyncId, triggerAsyncId) {
}
-// TODO(trevnorris): Calling emitBefore/emitAfter from native can't adjust the
-// kIdStackIndex. But what happens if the user doesn't have both before and
-// after callbacks.
function emitAfterScript(asyncId) {
+ if (!Number.isSafeInteger(asyncId) || asyncId < -1) {
+ fatalError(
+ new errors.RangeError('ERR_INVALID_ASYNC_ID', 'asyncId', asyncId));
+ }
+
if (async_hook_fields[kAfter] > 0)
emitAfterNative(asyncId);
@@ -391,34 +434,28 @@ function emitAfterScript(asyncId) {
function emitDestroyScript(asyncId) {
- // Return early if there are no destroy callbacks, or on attempt to emit
- // destroy on the void.
- if (async_hook_fields[kDestroy] === 0 || asyncId === 0)
+ if (!Number.isSafeInteger(asyncId) || asyncId < -1) {
+ fatalError(
+ new errors.RangeError('ERR_INVALID_ASYNC_ID', 'asyncId', asyncId));
+ }
+
+ // Return early if there are no destroy callbacks, or invalid asyncId.
+ if (async_hook_fields[kDestroy] === 0 || asyncId <= 0)
return;
async_wrap.addIdToDestroyList(asyncId);
}
-// Emit callbacks for native calls. Since some state can be setup directly from
-// C++ there's no need to perform all the work here.
-
-// This should only be called if hooks_array has kInit > 0. There are no global
-// values to setup. Though hooks_array will be cloned if C++ needed to call
-// init().
-// TODO(trevnorris): Perhaps have MakeCallback call a single JS function that
-// does the before/callback/after calls to remove two additional calls to JS.
-
-// Force the application to shutdown if one of the callbacks throws. This may
-// change in the future depending on whether it can be determined if there's a
-// slim chance of the application remaining stable after handling one of these
-// exceptions.
+// Used by C++ to call all init() callbacks. Because some state can be setup
+// from C++ there's no need to perform all the same operations as in
+// emitInitScript.
function emitInitNative(asyncId, type, triggerAsyncId, resource) {
- processing_hook += 1;
+ active_hooks.call_depth += 1;
// Use a single try/catch for all hook to avoid setting up one per iteration.
try {
- for (var i = 0; i < active_hooks_array.length; i++) {
- if (typeof active_hooks_array[i][init_symbol] === 'function') {
- active_hooks_array[i][init_symbol](
+ for (var i = 0; i < active_hooks.array.length; i++) {
+ if (typeof active_hooks.array[i][init_symbol] === 'function') {
+ active_hooks.array[i][init_symbol](
asyncId, type, triggerAsyncId,
resource
);
@@ -427,18 +464,15 @@ function emitInitNative(asyncId, type, triggerAsyncId, resource) {
} catch (e) {
fatalError(e);
} finally {
- processing_hook -= 1;
+ active_hooks.call_depth -= 1;
}
- // * `tmp_active_hooks_array` is null if no hooks were added/removed while
- // the hooks were running. In that case no restoration is needed.
- // * In the case where another hook was added/removed while the hooks were
- // running and a handle was created causing the `init` hooks to fire again,
- // then `restoreTmpHooks` should not be called for the nested `hooks`.
- // Otherwise `active_hooks_array` can change during execution of the
- // `hooks`.
- if (processing_hook === 0 && tmp_active_hooks_array !== null) {
- restoreTmpHooks();
+ // Hooks can only be restored if there have been no recursive hook calls.
+ // Also the active hooks do not need to be restored if enable()/disable()
+ // weren't called during hook execution, in which case active_hooks.tmp_array
+ // will be null.
+ if (active_hooks.call_depth === 0 && active_hooks.tmp_array !== null) {
+ restoreActiveHooks();
}
}
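
A minimal sketch of how the validation above surfaces through the public `async_hooks` API: non-function callbacks are rejected with `ERR_ASYNC_CALLBACK`, and explicitly supplied trigger ids must be integers >= -1.

```js
const fs = require('fs');
const async_hooks = require('async_hooks');
const { AsyncResource } = async_hooks;

// fs.writeSync() is safe inside hook callbacks; console.log() is not, since
// it is itself asynchronous and would trigger more hooks.
const hook = async_hooks.createHook({
  init(asyncId, type, triggerAsyncId, resource) {
    fs.writeSync(1, `init ${type} ${asyncId} (trigger ${triggerAsyncId})\n`);
  }
}).enable();

// Non-function callbacks are now rejected with a coded TypeError.
try {
  async_hooks.createHook({ init: 'not a function' });
} catch (err) {
  // err.code === 'ERR_ASYNC_CALLBACK'
}

// A user-provided resource with an explicit (valid) triggerAsyncId.
const resource = new AsyncResource('EXAMPLE', async_hooks.executionAsyncId());
resource.emitDestroy();
hook.disable();
```
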
diff --git a/lib/buffer.js b/lib/buffer.js
index babd63e06f03ba..ac4f61bf5d0849 100644
--- a/lib/buffer.js
+++ b/lib/buffer.js
@@ -47,7 +47,7 @@ exports.kMaxLength = binding.kMaxLength;
const constants = Object.defineProperties({}, {
MAX_LENGTH: {
- value: binding.kStringMaxLength,
+ value: binding.kMaxLength,
writable: false,
enumerable: true
},
@@ -374,17 +374,15 @@ function fromObject(obj) {
return b;
}
- if (obj != null) {
- if (obj.length !== undefined || isAnyArrayBuffer(obj.buffer)) {
- if (typeof obj.length !== 'number' || obj.length !== obj.length) {
- return new FastBuffer();
- }
- return fromArrayLike(obj);
+ if (obj.length !== undefined || isAnyArrayBuffer(obj.buffer)) {
+ if (typeof obj.length !== 'number' || obj.length !== obj.length) {
+ return new FastBuffer();
}
+ return fromArrayLike(obj);
+ }
- if (obj.type === 'Buffer' && Array.isArray(obj.data)) {
- return fromArrayLike(obj.data);
- }
+ if (obj.type === 'Buffer' && Array.isArray(obj.data)) {
+ return fromArrayLike(obj.data);
}
}
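
The constant swap above means `buffer.constants.MAX_LENGTH` now reports the maximum allowed `Buffer` size rather than the maximum string length; a quick check:

```js
const { constants, kMaxLength } = require('buffer');

console.log(constants.MAX_LENGTH === kMaxLength);  // true after this change
// MAX_STRING_LENGTH remains the separate (smaller) V8 string limit.
console.log(constants.MAX_STRING_LENGTH <= constants.MAX_LENGTH);
```
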
diff --git a/lib/console.js b/lib/console.js
index 21c667e5010bc6..8783047bbbc0a2 100644
--- a/lib/console.js
+++ b/lib/console.js
@@ -24,6 +24,9 @@
const util = require('util');
const kCounts = Symbol('counts');
+// Track amount of indentation required via `console.group()`.
+const kGroupIndent = Symbol('groupIndent');
+
function Console(stdout, stderr, ignoreErrors = true) {
if (!(this instanceof Console)) {
return new Console(stdout, stderr, ignoreErrors);
@@ -57,6 +60,9 @@ function Console(stdout, stderr, ignoreErrors = true) {
this[kCounts] = new Map();
+ Object.defineProperty(this, kGroupIndent, { writable: true });
+ this[kGroupIndent] = '';
+
// bind the prototype functions to this Console instance
var keys = Object.keys(Console.prototype);
for (var v = 0; v < keys.length; v++) {
@@ -81,7 +87,15 @@ function createWriteErrorHandler(stream) {
};
}
-function write(ignoreErrors, stream, string, errorhandler) {
+function write(ignoreErrors, stream, string, errorhandler, groupIndent) {
+ if (groupIndent.length !== 0) {
+ if (string.indexOf('\n') !== -1) {
+ string = string.replace(/\n/g, `\n${groupIndent}`);
+ }
+ string = groupIndent + string;
+ }
+ string += '\n';
+
if (!ignoreErrors) return stream.write(string);
// There may be an error occurring synchronously (e.g. for files or TTYs
@@ -110,8 +124,9 @@ function write(ignoreErrors, stream, string, errorhandler) {
Console.prototype.log = function log(...args) {
write(this._ignoreErrors,
this._stdout,
- `${util.format.apply(null, args)}\n`,
- this._stdoutErrorHandler);
+ util.format.apply(null, args),
+ this._stdoutErrorHandler,
+ this[kGroupIndent]);
};
@@ -121,8 +136,9 @@ Console.prototype.info = Console.prototype.log;
Console.prototype.warn = function warn(...args) {
write(this._ignoreErrors,
this._stderr,
- `${util.format.apply(null, args)}\n`,
- this._stderrErrorHandler);
+ util.format.apply(null, args),
+ this._stderrErrorHandler,
+ this[kGroupIndent]);
};
@@ -133,8 +149,9 @@ Console.prototype.dir = function dir(object, options) {
options = Object.assign({customInspect: false}, options);
write(this._ignoreErrors,
this._stdout,
- `${util.inspect(object, options)}\n`,
- this._stdoutErrorHandler);
+ util.inspect(object, options),
+ this._stdoutErrorHandler,
+ this[kGroupIndent]);
};
@@ -209,6 +226,20 @@ Console.prototype.countReset = function countReset(label = 'default') {
counts.delete(`${label}`);
};
+Console.prototype.group = function group(...data) {
+ if (data.length > 0) {
+ this.log(...data);
+ }
+ this[kGroupIndent] += '  ';
+};
+
+Console.prototype.groupCollapsed = Console.prototype.group;
+
+Console.prototype.groupEnd = function groupEnd() {
+ this[kGroupIndent] =
+ this[kGroupIndent].slice(0, this[kGroupIndent].length - 2);
+};
+
module.exports = new Console(process.stdout, process.stderr);
module.exports.Console = Console;
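
A short sketch of the `console.group()` / `console.groupEnd()` behavior added above: each open group indents subsequent output (including embedded newlines) by one level on both stdout and stderr.

```js
console.log('start');
console.group('outer');          // logs 'outer', then indents what follows
console.log('one level deep');
console.group();                 // no label, just another level
console.warn('two levels deep\nstill indented after the newline');
console.groupEnd();
console.groupEnd();
console.log('back at column 0');
```
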
diff --git a/lib/dns.js b/lib/dns.js
index 6946380ae673ce..7a3d3336e3aa1d 100644
--- a/lib/dns.js
+++ b/lib/dns.js
@@ -126,6 +126,7 @@ function lookup(hostname, options, callback) {
var hints = 0;
var family = -1;
var all = false;
+ var verbatim = false;
// Parse arguments
if (hostname && typeof hostname !== 'string') {
@@ -140,6 +141,7 @@ function lookup(hostname, options, callback) {
hints = options.hints >>> 0;
family = options.family >>> 0;
all = options.all === true;
+ verbatim = options.verbatim === true;
if (hints !== 0 &&
hints !== cares.AI_ADDRCONFIG &&
@@ -180,7 +182,7 @@ function lookup(hostname, options, callback) {
req.hostname = hostname;
req.oncomplete = all ? onlookupall : onlookup;
- var err = cares.getaddrinfo(req, hostname, family, hints);
+ var err = cares.getaddrinfo(req, hostname, family, hints, verbatim);
if (err) {
process.nextTick(callback, errnoException(err, 'getaddrinfo', hostname));
return {};
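
The new `verbatim` option is passed straight through to `cares.getaddrinfo()` above; when true, addresses are returned in the order the resolver produced them instead of IPv4 entries being moved ahead of IPv6. A hedged sketch:

```js
const dns = require('dns');

dns.lookup('localhost', { all: true, verbatim: true }, (err, addresses) => {
  if (err) throw err;
  // Order is whatever the resolver returned, e.g.:
  // [ { address: '::1', family: 6 }, { address: '127.0.0.1', family: 4 } ]
  console.log(addresses);
});
```
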
diff --git a/lib/fs.js b/lib/fs.js
index a5403d50f91f3c..17e92f6ee1bba4 100644
--- a/lib/fs.js
+++ b/lib/fs.js
@@ -33,6 +33,7 @@ const { isUint8Array, createPromise, promiseResolve } = process.binding('util');
const binding = process.binding('fs');
const fs = exports;
const Buffer = require('buffer').Buffer;
+const errors = require('internal/errors');
const Stream = require('stream').Stream;
const EventEmitter = require('events');
const FSReqWrap = binding.FSReqWrap;
@@ -1864,6 +1865,61 @@ fs.mkdtempSync = function(prefix, options) {
};
+// Define copyFile() flags.
+Object.defineProperties(fs.constants, {
+ COPYFILE_EXCL: { enumerable: true, value: constants.UV_FS_COPYFILE_EXCL }
+});
+
+
+fs.copyFile = function(src, dest, flags, callback) {
+ if (typeof flags === 'function') {
+ callback = flags;
+ flags = 0;
+ } else if (typeof callback !== 'function') {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'callback', 'function');
+ }
+
+ src = getPathFromURL(src);
+
+ if (handleError(src, callback))
+ return;
+
+ if (!nullCheck(src, callback))
+ return;
+
+ dest = getPathFromURL(dest);
+
+ if (handleError(dest, callback))
+ return;
+
+ if (!nullCheck(dest, callback))
+ return;
+
+ src = pathModule._makeLong(src);
+ dest = pathModule._makeLong(dest);
+ flags = flags | 0;
+ const req = new FSReqWrap();
+ req.oncomplete = makeCallback(callback);
+ binding.copyFile(src, dest, flags, req);
+};
+
+
+fs.copyFileSync = function(src, dest, flags) {
+ src = getPathFromURL(src);
+ handleError(src);
+ nullCheck(src);
+
+ dest = getPathFromURL(dest);
+ handleError(dest);
+ nullCheck(dest);
+
+ src = pathModule._makeLong(src);
+ dest = pathModule._makeLong(dest);
+ flags = flags | 0;
+ binding.copyFile(src, dest, flags);
+};
+
+
var pool;
function allocNewPool(poolSize) {
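
A sketch of the new `fs.copyFile()` / `fs.copyFileSync()` entry points defined above; the file names are hypothetical, and `COPYFILE_EXCL` makes the copy fail if the destination already exists.

```js
const fs = require('fs');
const { COPYFILE_EXCL } = fs.constants;

// Asynchronous copy; `flags` defaults to 0 when omitted.
fs.copyFile('source.txt', 'dest.txt', (err) => {
  if (err) throw err;
  console.log('source.txt was copied to dest.txt');
});

// Synchronous copy that refuses to overwrite an existing destination.
try {
  fs.copyFileSync('source.txt', 'dest.txt', COPYFILE_EXCL);
} catch (err) {
  // err.code === 'EEXIST' if dest.txt already exists
}
```
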
diff --git a/lib/http2.js b/lib/http2.js
index e964abf589d0eb..de06de1cc414cb 100644
--- a/lib/http2.js
+++ b/lib/http2.js
@@ -13,7 +13,9 @@ const {
getUnpackedSettings,
createServer,
createSecureServer,
- connect
+ connect,
+ Http2ServerRequest,
+ Http2ServerResponse
} = require('internal/http2/core');
module.exports = {
@@ -23,5 +25,7 @@ module.exports = {
getUnpackedSettings,
createServer,
createSecureServer,
- connect
+ connect,
+ Http2ServerResponse,
+ Http2ServerRequest
};
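
Re-exporting the compat classes makes them reachable from the top-level module, which is mainly useful for `instanceof` checks and subclassing; note that at this point the `http2` module may still need to be enabled with `--expose-http2`.

```js
const http2 = require('http2');

const server = http2.createServer((req, res) => {
  console.log(req instanceof http2.Http2ServerRequest);   // true
  console.log(res instanceof http2.Http2ServerResponse);  // true
  res.end('ok');
});

server.listen(0);
```
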
diff --git a/lib/inspector.js b/lib/inspector.js
index 6a80c36d528a1d..abde3b38cbca41 100644
--- a/lib/inspector.js
+++ b/lib/inspector.js
@@ -29,14 +29,18 @@ class Session extends EventEmitter {
[onMessageSymbol](message) {
const parsed = JSON.parse(message);
- if (parsed.id) {
- const callback = this[messageCallbacksSymbol].get(parsed.id);
- this[messageCallbacksSymbol].delete(parsed.id);
- if (callback)
- callback(parsed.error || null, parsed.result || null);
- } else {
- this.emit(parsed.method, parsed);
- this.emit('inspectorNotification', parsed);
+ try {
+ if (parsed.id) {
+ const callback = this[messageCallbacksSymbol].get(parsed.id);
+ this[messageCallbacksSymbol].delete(parsed.id);
+ if (callback)
+ callback(parsed.error || null, parsed.result || null);
+ } else {
+ this.emit(parsed.method, parsed);
+ this.emit('inspectorNotification', parsed);
+ }
+ } catch (error) {
+ process.emitWarning(error);
}
}
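
The try/catch added above keeps a throwing user callback from breaking inspector message dispatch; the error is re-surfaced as a process warning instead. A small sketch (requires a build with inspector support):

```js
const inspector = require('inspector');

const session = new inspector.Session();
session.connect();

process.on('warning', (warning) => {
  // The error thrown from the post() callback below lands here.
  console.error('warning:', warning.message);
});

session.post('Runtime.evaluate', { expression: '6 * 7' }, (err, result) => {
  throw new Error('callback failure');  // no longer kills dispatch
});
```
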
diff --git a/lib/internal/bootstrap_node.js b/lib/internal/bootstrap_node.js
index 01a16a9f0c0936..8fdf94a7cbfd86 100644
--- a/lib/internal/bootstrap_node.js
+++ b/lib/internal/bootstrap_node.js
@@ -14,9 +14,7 @@
process._eventsCount = 0;
const origProcProto = Object.getPrototypeOf(process);
- Object.setPrototypeOf(process, Object.create(EventEmitter.prototype, {
- constructor: Object.getOwnPropertyDescriptor(origProcProto, 'constructor')
- }));
+ Object.setPrototypeOf(origProcProto, EventEmitter.prototype);
EventEmitter.call(process);
@@ -28,25 +26,57 @@
setupProcessICUVersions();
setupGlobalVariables();
- if (!process._noBrowserGlobals) {
+ const browserGlobals = !process._noBrowserGlobals;
+ if (browserGlobals) {
setupGlobalTimeouts();
setupGlobalConsole();
}
const _process = NativeModule.require('internal/process');
+ const perf = process.binding('performance');
+ const {
+ NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END
+ } = perf.constants;
_process.setup_hrtime();
+ _process.setup_performance();
_process.setup_cpuUsage();
_process.setupMemoryUsage();
_process.setupConfig(NativeModule._source);
NativeModule.require('internal/process/warning').setup();
NativeModule.require('internal/process/next_tick').setup();
NativeModule.require('internal/process/stdio').setup();
+ if (browserGlobals) {
+ // Instantiate eagerly in case the first call is under stack overflow
+ // conditions where instantiation doesn't work.
+ const console = global.console;
+ console.assert;
+ console.clear;
+ console.count;
+ console.countReset;
+ console.dir;
+ console.error;
+ console.log;
+ console.time;
+ console.timeEnd;
+ console.trace;
+ console.warn;
+ }
_process.setupKillAndExit();
_process.setupSignalHandlers();
if (global.__coverage__)
NativeModule.require('internal/process/write-coverage').setup();
+ NativeModule.require('internal/inspector_async_hook').setup();
+
// Do not initialize channel in debugger agent, it deletes env variable
// and the main thread won't see it.
if (process.argv[1] !== '--debug-agent')
@@ -79,6 +109,13 @@
'DeprecationWarning', 'DEP0062', startup, true);
}
+ if (process.binding('config').experimentalModules) {
+ process.emitWarning(
+ 'The ESM module loader is experimental.',
+ 'ExperimentalWarning', undefined);
+ }
+
+
// There are various modes that Node can run in. The most common two
// are running from a script and running the REPL - but there are a few
// others like the debugger or running --eval arguments. Here we decide
@@ -89,7 +126,9 @@
// one to drop a file lib/_third_party_main.js into the build
// directory which will be executed instead of Node's normal loading.
process.nextTick(function() {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START);
NativeModule.require('_third_party_main');
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END);
});
} else if (process.argv[1] === 'inspect' || process.argv[1] === 'debug') {
@@ -104,14 +143,6 @@
NativeModule.require('node-inspect/lib/_inspect').start();
});
- } else if (process.argv[1] === '--remote_debugging_server') {
- // Start the debugging server
- NativeModule.require('internal/inspector/remote_debugging_server');
-
- } else if (process.argv[1] === '--debug-agent') {
- // Start the debugger agent
- NativeModule.require('_debug_agent').start();
-
} else if (process.profProcess) {
NativeModule.require('internal/v8_prof_processor');
@@ -122,22 +153,30 @@
// channel. This needs to be done before any user code gets executed
// (including preload modules).
if (process.argv[1] && process.env.NODE_UNIQUE_ID) {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START);
const cluster = NativeModule.require('cluster');
cluster._setupWorker();
-
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END);
// Make sure it's not accidentally inherited by child processes.
delete process.env.NODE_UNIQUE_ID;
}
if (process._eval != null && !process._forceRepl) {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
// User passed '-e' or '--eval' arguments to Node without '-i' or
// '--interactive'
+
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
const internalModule = NativeModule.require('internal/module');
internalModule.addBuiltinLibsToObject(global);
evalScript('[eval]');
} else if (process.argv[1] && process.argv[1] !== '-') {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
// make process.argv[1] into a full path
const path = NativeModule.require('path');
process.argv[1] = path.resolve(process.argv[1]);
@@ -153,11 +192,21 @@
checkScriptSyntax(source, filename);
process.exit(0);
}
-
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
Module.runMain();
} else {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
// If -i or --interactive were passed, or stdin is a TTY.
if (process._forceRepl || NativeModule.require('tty').isatty(0)) {
// REPL
@@ -201,6 +250,7 @@
}
}
}
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE);
}
function setupProcessObject() {
@@ -332,7 +382,7 @@
// Arrays containing hook flags and ids for async_hook calls.
const { async_hook_fields, async_uid_fields } = async_wrap;
// Internal functions needed to manipulate the stack.
- const { clearIdStack, popAsyncIds } = async_wrap;
+ const { clearIdStack, asyncIdStackSize } = async_wrap;
const { kAfter, kCurrentAsyncId, kInitTriggerId } = async_wrap.constants;
process._fatalException = function(er) {
@@ -369,8 +419,7 @@
do {
NativeModule.require('async_hooks').emitAfter(
async_uid_fields[kCurrentAsyncId]);
- // popAsyncIds() returns true if there are more ids on the stack.
- } while (popAsyncIds(async_uid_fields[kCurrentAsyncId]));
+ } while (asyncIdStackSize() > 0);
// Or completely empty the id stack.
} else {
clearIdStack();
@@ -389,47 +438,26 @@
// of possible types.
const versionTypes = icu.getVersion().split(',');
- function makeGetter(name) {
- return () => {
- // With an argument, getVersion(type) returns
- // the actual version string.
- const version = icu.getVersion(name);
- // Replace the current getter with a new property.
- delete process.versions[name];
- Object.defineProperty(process.versions, name, {
- value: version,
- writable: false,
- enumerable: true
- });
- return version;
- };
- }
-
for (var n = 0; n < versionTypes.length; n++) {
var name = versionTypes[n];
+ const version = icu.getVersion(name);
Object.defineProperty(process.versions, name, {
- configurable: true,
+ writable: false,
enumerable: true,
- get: makeGetter(name)
+ value: version
});
}
}
function tryGetCwd(path) {
- var threw = true;
- var cwd;
try {
- cwd = process.cwd();
- threw = false;
- } finally {
- if (threw) {
- // getcwd(3) can fail if the current working directory has been deleted.
- // Fall back to the directory name of the (absolute) executable path.
- // It's not really correct but what are the alternatives?
- return path.dirname(process.execPath);
- }
+ return process.cwd();
+ } catch (ex) {
+ // getcwd(3) can fail if the current working directory has been deleted.
+ // Fall back to the directory name of the (absolute) executable path.
+ // It's not really correct but what are the alternatives?
+ return path.dirname(process.execPath);
}
- return cwd;
}
function evalScript(name) {
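
The milestones marked above become observable through the `perf_hooks` module that ships alongside this change; a hedged sketch, assuming the `performance.nodeTiming` getters mirror the `NODE_PERFORMANCE_MILESTONE_*` names:

```js
const { performance } = require('perf_hooks');

// Millisecond timestamps recorded for the bootstrap phases.
const t = performance.nodeTiming;
console.log({
  bootstrapComplete: t.bootstrapComplete,
  moduleLoadStart: t.moduleLoadStart,
  moduleLoadEnd: t.moduleLoadEnd,
  preloadModuleLoadStart: t.preloadModuleLoadStart,
  preloadModuleLoadEnd: t.preloadModuleLoadEnd
});
```
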
diff --git a/lib/internal/errors.js b/lib/internal/errors.js
index b7dd509070731d..3afbc68d963841 100644
--- a/lib/internal/errors.js
+++ b/lib/internal/errors.js
@@ -109,6 +109,8 @@ module.exports = exports = {
// Note: Please try to keep these in alphabetical order
E('ERR_ARG_NOT_ITERABLE', '%s must be iterable');
E('ERR_ASSERTION', (msg) => msg);
+E('ERR_ASYNC_CALLBACK', (name) => `${name} must be a function`);
+E('ERR_ASYNC_TYPE', (s) => `Invalid name for async "type": ${s}`);
E('ERR_ENCODING_INVALID_ENCODED_DATA',
(enc) => `The encoded data was not valid for encoding ${enc}`);
E('ERR_ENCODING_NOT_SUPPORTED',
@@ -116,7 +118,6 @@ E('ERR_ENCODING_NOT_SUPPORTED',
E('ERR_FALSY_VALUE_REJECTION', 'Promise was rejected with falsy value');
E('ERR_HTTP_HEADERS_SENT',
'Cannot render headers after they are sent to the client');
-E('ERR_HTTP_INVALID_STATUS_CODE', 'Invalid status code: %s');
E('ERR_HTTP_TRAILER_INVALID',
'Trailers are invalid with this transfer encoding');
E('ERR_HTTP_INVALID_CHAR', 'Invalid character in statusMessage.');
@@ -184,6 +185,7 @@ E('ERR_HTTP2_UNSUPPORTED_PROTOCOL',
(protocol) => `protocol "${protocol}" is unsupported.`);
E('ERR_INDEX_OUT_OF_RANGE', 'Index out of range');
E('ERR_INVALID_ARG_TYPE', invalidArgType);
+E('ERR_INVALID_ASYNC_ID', (type, id) => `Invalid ${type} value: ${id}`);
E('ERR_INVALID_CALLBACK', 'callback must be a function');
E('ERR_INVALID_FD', (fd) => `"fd" must be a positive integer: ${fd}`);
E('ERR_INVALID_FILE_URL_HOST', 'File URL host %s');
@@ -193,6 +195,7 @@ E('ERR_INVALID_OPT_VALUE',
(name, value) => {
return `The value "${String(value)}" is invalid for option "${name}"`;
});
+E('ERR_INVALID_PERFORMANCE_MARK', 'The "%s" performance mark has not been set');
E('ERR_INVALID_SYNC_FORK_INPUT',
(value) => {
return 'Asynchronous forks do not support Buffer, Uint8Array or string' +
@@ -208,11 +211,15 @@ E('ERR_IPC_DISCONNECTED', 'IPC channel is already disconnected');
E('ERR_IPC_ONE_PIPE', 'Child process can have only one IPC pipe');
E('ERR_IPC_SYNC_FORK', 'IPC cannot be used with synchronous forks');
E('ERR_MISSING_ARGS', missingArgs);
+E('ERR_MISSING_MODULE', 'Cannot find module %s');
+E('ERR_MODULE_RESOLUTION_LEGACY', '%s not found by import in %s.' +
+ ' Legacy behavior in require would have found it at %s');
E('ERR_NAPI_CONS_FUNCTION', 'Constructor must be a function');
E('ERR_NAPI_CONS_PROTOTYPE_OBJECT', 'Constructor.prototype must be an object');
E('ERR_NO_CRYPTO', 'Node.js is not compiled with OpenSSL crypto support');
E('ERR_NO_ICU', '%s is not supported on Node.js compiled without ICU');
E('ERR_PARSE_HISTORY_DATA', 'Could not parse history data in %s');
+E('ERR_REQUIRE_ESM', 'Must use import to load ES Module: %s');
E('ERR_SOCKET_ALREADY_BOUND', 'Socket is already bound');
E('ERR_SOCKET_BAD_TYPE',
'Bad socket type specified. Valid types are: udp4, udp6');
@@ -226,6 +233,8 @@ E('ERR_UNKNOWN_BUILTIN_MODULE', (id) => `No such built-in module: ${id}`);
E('ERR_UNKNOWN_SIGNAL', (signal) => `Unknown signal: ${signal}`);
E('ERR_UNKNOWN_STDIN_TYPE', 'Unknown stdin file type');
E('ERR_UNKNOWN_STREAM_TYPE', 'Unknown stream file type');
+E('ERR_VALID_PERFORMANCE_ENTRY_TYPE',
+ 'At least one valid performance entry type is required');
// Add new errors from here...
function invalidArgType(name, expected, actual) {
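
The new codes registered above reach user code as the `code` property of the thrown error; for example, the `ERR_INVALID_ASYNC_ID` entry pairs with the async_hooks validation elsewhere in this change:

```js
const { AsyncResource } = require('async_hooks');

try {
  new AsyncResource('EXAMPLE', -2);  // triggerAsyncId < -1 is rejected
} catch (err) {
  console.log(err instanceof RangeError);  // true
  console.log(err.code);                   // 'ERR_INVALID_ASYNC_ID'
  console.log(err.message);                // 'Invalid triggerAsyncId value: -2'
}
```
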
diff --git a/lib/internal/http2/compat.js b/lib/internal/http2/compat.js
index 100e08a25df92b..fa565e9bd67386 100644
--- a/lib/internal/http2/compat.js
+++ b/lib/internal/http2/compat.js
@@ -13,7 +13,9 @@ const kStream = Symbol('stream');
const kRequest = Symbol('request');
const kResponse = Symbol('response');
const kHeaders = Symbol('headers');
+const kRawHeaders = Symbol('rawHeaders');
const kTrailers = Symbol('trailers');
+const kRawTrailers = Symbol('rawTrailers');
let statusMessageWarned = false;
@@ -45,12 +47,28 @@ function isPseudoHeader(name) {
}
}
+function statusMessageWarn() {
+ if (statusMessageWarned === false) {
+ process.emitWarning(
+ 'Status message is not supported by HTTP/2 (RFC7540 8.1.2.4)',
+ 'UnsupportedWarning'
+ );
+ statusMessageWarned = true;
+ }
+}
+
function onStreamData(chunk) {
const request = this[kRequest];
if (!request.push(chunk))
this.pause();
}
+function onStreamTrailers(trailers, flags, rawTrailers) {
+ const request = this[kRequest];
+ Object.assign(request[kTrailers], trailers);
+ request[kRawTrailers].push(...rawTrailers);
+}
+
function onStreamEnd() {
// Cause the request stream to end as well.
const request = this[kRequest];
@@ -58,8 +76,13 @@ function onStreamEnd() {
}
function onStreamError(error) {
- const request = this[kRequest];
- request.emit('error', error);
+ // This is purposefully left blank.
+ //
+ // Errors in compatibility mode are not forwarded to the request and
+ // response objects. However, they are forwarded to 'streamError' on the
+ // server by Http2Stream.
}
function onRequestPause() {
@@ -82,11 +105,6 @@ function onStreamResponseDrain() {
response.emit('drain');
}
-function onStreamResponseError(error) {
- const response = this[kResponse];
- response.emit('error', error);
-}
-
function onStreamClosedRequest() {
const req = this[kRequest];
req.push(null);
@@ -106,7 +124,7 @@ function onAborted(hadError, code) {
}
class Http2ServerRequest extends Readable {
- constructor(stream, headers, options) {
+ constructor(stream, headers, options, rawHeaders) {
super(options);
this[kState] = {
statusCode: null,
@@ -114,12 +132,16 @@ class Http2ServerRequest extends Readable {
closedCode: constants.NGHTTP2_NO_ERROR
};
this[kHeaders] = headers;
+ this[kRawHeaders] = rawHeaders;
+ this[kTrailers] = {};
+ this[kRawTrailers] = [];
this[kStream] = stream;
stream[kRequest] = this;
// Pause the stream..
stream.pause();
stream.on('data', onStreamData);
+ stream.on('trailers', onStreamTrailers);
stream.on('end', onStreamEnd);
stream.on('error', onStreamError);
stream.on('close', onStreamClosedRequest);
@@ -155,18 +177,17 @@ class Http2ServerRequest extends Readable {
}
get rawHeaders() {
- const headers = this[kHeaders];
- if (headers === undefined)
- return [];
- const tuples = Object.entries(headers);
- const flattened = Array.prototype.concat.apply([], tuples);
- return flattened.map(String);
+ return this[kRawHeaders];
}
get trailers() {
return this[kTrailers];
}
+ get rawTrailers() {
+ return this[kRawTrailers];
+ }
+
get httpVersionMajor() {
return 2;
}
@@ -249,7 +270,8 @@ class Http2ServerRequest extends Readable {
const state = this[kState];
if (state.closed)
return;
- state.closedCode = code;
+ if (code !== undefined)
+ state.closedCode = code;
state.closed = true;
this.push(null);
this[kStream] = undefined;
@@ -271,9 +293,7 @@ class Http2ServerResponse extends Stream {
stream[kResponse] = this;
this.writable = true;
stream.on('drain', onStreamResponseDrain);
- stream.on('error', onStreamResponseError);
stream.on('close', onStreamClosedResponse);
- stream.on('aborted', onAborted.bind(this));
const onfinish = this[kFinish].bind(this);
stream.on('streamClosed', onfinish);
stream.on('finish', onfinish);
@@ -300,7 +320,7 @@ class Http2ServerResponse extends Stream {
get headersSent() {
const stream = this[kStream];
- return stream.headersSent;
+ return stream !== undefined ? stream.headersSent : this[kState].headersSent;
}
get sendDate() {
@@ -383,30 +403,25 @@ class Http2ServerResponse extends Stream {
}
get statusMessage() {
- if (statusMessageWarned === false) {
- process.emitWarning(
- 'Status message is not supported by HTTP/2 (RFC7540 8.1.2.4)',
- 'UnsupportedWarning'
- );
- statusMessageWarned = true;
- }
+ statusMessageWarn();
return '';
}
+ set statusMessage(msg) {
+ statusMessageWarn();
+ }
+
flushHeaders() {
if (this[kStream].headersSent === false)
this[kBeginSend]();
}
writeHead(statusCode, statusMessage, headers) {
- if (typeof statusMessage === 'string' && statusMessageWarned === false) {
- process.emitWarning(
- 'Status message is not supported by HTTP/2 (RFC7540 8.1.2.4)',
- 'UnsupportedWarning'
- );
- statusMessageWarned = true;
+ if (typeof statusMessage === 'string') {
+ statusMessageWarn();
}
+
if (headers === undefined && typeof statusMessage === 'object') {
headers = statusMessage;
}
@@ -524,22 +539,30 @@ class Http2ServerResponse extends Stream {
const state = this[kState];
if (state.closed)
return;
- state.closedCode = code;
+ if (code !== undefined)
+ state.closedCode = code;
state.closed = true;
+ state.headersSent = this[kStream].headersSent;
this.end();
this[kStream] = undefined;
this.emit('finish');
}
+ // TODO doesn't support callbacks
writeContinue() {
- // TODO mcollina check what is the continue flow
- throw new Error('not implemented yet');
+ const stream = this[kStream];
+ if (stream === undefined) return false;
+ this[kStream].additionalHeaders({
+ [constants.HTTP2_HEADER_STATUS]: constants.HTTP_STATUS_CONTINUE
+ });
+ return true;
}
}
-function onServerStream(stream, headers, flags) {
+function onServerStream(stream, headers, flags, rawHeaders) {
const server = this;
- const request = new Http2ServerRequest(stream, headers);
+ const request = new Http2ServerRequest(stream, headers, undefined,
+ rawHeaders);
const response = new Http2ServerResponse(stream);
// Check for the CONNECT method
@@ -558,7 +581,7 @@ function onServerStream(stream, headers, flags) {
if (server.listenerCount('checkContinue')) {
server.emit('checkContinue', request, response);
} else {
- response.sendContinue();
+ response.writeContinue();
server.emit('request', request, response);
}
} else if (server.listenerCount('checkExpectation')) {
@@ -573,4 +596,8 @@ function onServerStream(stream, headers, flags) {
server.emit('request', request, response);
}
-module.exports = { onServerStream };
+module.exports = {
+ onServerStream,
+ Http2ServerRequest,
+ Http2ServerResponse,
+};
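
A sketch of the reworked 100-continue path above: `writeContinue()` now sends an additional `:status 100` header block and reports whether the stream was still available, and assigning `statusMessage` only emits the unsupported-status-message warning (again, `http2` may still sit behind `--expose-http2` here).

```js
const http2 = require('http2');

const server = http2.createServer();

server.on('checkContinue', (req, res) => {
  res.writeContinue();            // true while the underlying stream exists
  res.statusMessage = 'ignored';  // only triggers an UnsupportedWarning
  res.end('done');
});

server.on('request', (req, res) => res.end('hello'));

server.listen(0);
```
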
diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js
old mode 100755
new mode 100644
index c3681383f87e80..12842e1f170a6e
--- a/lib/internal/http2/core.js
+++ b/lib/internal/http2/core.js
@@ -15,7 +15,10 @@ const fs = require('fs');
const errors = require('internal/errors');
const { Duplex } = require('stream');
const { URL } = require('url');
-const { onServerStream } = require('internal/http2/compat');
+const { onServerStream,
+ Http2ServerRequest,
+ Http2ServerResponse,
+} = require('internal/http2/compat');
const { utcDate } = require('internal/http');
const { _connectionListener: httpConnectionListener } = require('http');
const { isUint8Array } = process.binding('util');
@@ -117,7 +120,8 @@ const {
HTTP2_METHOD_HEAD,
HTTP2_METHOD_CONNECT,
- HTTP_STATUS_CONTENT_RESET,
+ HTTP_STATUS_CONTINUE,
+ HTTP_STATUS_RESET_CONTENT,
HTTP_STATUS_OK,
HTTP_STATUS_NO_CONTENT,
HTTP_STATUS_NOT_MODIFIED,
@@ -146,8 +150,8 @@ function emit() {
// event. If the stream is not new, emit the 'headers' event to pass
// the block of headers on.
function onSessionHeaders(id, cat, flags, headers) {
- _unrefActive(this);
const owner = this[kOwner];
+ _unrefActive(owner);
debug(`[${sessionName(owner[kType])}] headers were received on ` +
`stream ${id}: ${cat}`);
const streams = owner[kState].streams;
@@ -181,7 +185,7 @@ function onSessionHeaders(id, cat, flags, headers) {
'report this as a bug in Node.js');
}
streams.set(id, stream);
- process.nextTick(emit.bind(owner, 'stream', stream, obj, flags));
+ process.nextTick(emit.bind(owner, 'stream', stream, obj, flags, headers));
} else {
let event;
let status;
@@ -214,7 +218,10 @@ function onSessionHeaders(id, cat, flags, headers) {
'report this as a bug in Node.js');
}
debug(`[${sessionName(owner[kType])}] emitting stream '${event}' event`);
- process.nextTick(emit.bind(stream, event, obj, flags));
+ process.nextTick(emit.bind(stream, event, obj, flags, headers));
+ }
+ if (endOfStream) {
+ stream.push(null);
}
}
@@ -258,7 +265,7 @@ function onSessionStreamClose(id, code) {
const stream = owner[kState].streams.get(id);
if (stream === undefined)
return;
- _unrefActive(this);
+ _unrefActive(owner);
// Set the rst state for the stream
abort(stream);
const state = stream[kState];
@@ -280,14 +287,16 @@ function afterFDClose(err) {
// Called when an error event needs to be triggered
function onSessionError(error) {
- _unrefActive(this);
- process.nextTick(() => this[kOwner].emit('error', error));
+ const owner = this[kOwner];
+ _unrefActive(owner);
+ process.nextTick(() => owner.emit('error', error));
}
// Receives a chunk of data for a given stream and forwards it on
// to the Http2Stream Duplex for processing.
function onSessionRead(nread, buf, handle) {
- const streams = this[kOwner][kState].streams;
+ const owner = this[kOwner];
+ const streams = owner[kState].streams;
const id = handle.id;
const stream = streams.get(id);
// It should not be possible for the stream to not exist at this point.
@@ -296,7 +305,7 @@ function onSessionRead(nread, buf, handle) {
'Internal HTTP/2 Failure. Stream does not exist. Please ' +
'report this as a bug in Node.js');
const state = stream[kState];
- _unrefActive(this); // Reset the session timeout timer
+ _unrefActive(owner); // Reset the session timeout timer
_unrefActive(stream); // Reset the stream timeout timer
if (nread >= 0 && !stream.destroyed) {
@@ -315,7 +324,7 @@ function onSessionRead(nread, buf, handle) {
function onSettings(ack) {
const owner = this[kOwner];
debug(`[${sessionName(owner[kType])}] new settings received`);
- _unrefActive(this);
+ _unrefActive(owner);
let event = 'remoteSettings';
if (ack) {
if (owner[kState].pendingAck > 0)
@@ -341,7 +350,7 @@ function onPriority(id, parent, weight, exclusive) {
debug(`[${sessionName(owner[kType])}] priority advisement for stream ` +
`${id}: \n parent: ${parent},\n weight: ${weight},\n` +
` exclusive: ${exclusive}`);
- _unrefActive(this);
+ _unrefActive(owner);
const streams = owner[kState].streams;
const stream = streams.get(id);
const emitter = stream === undefined ? owner : stream;
@@ -363,7 +372,7 @@ function onFrameError(id, type, code) {
const owner = this[kOwner];
debug(`[${sessionName(owner[kType])}] error sending frame type ` +
`${type} on stream ${id}, code: ${code}`);
- _unrefActive(this);
+ _unrefActive(owner);
const streams = owner[kState].streams;
const stream = streams.get(id);
const emitter = stream !== undefined ? stream : owner;
@@ -373,6 +382,8 @@ function onFrameError(id, type, code) {
function emitGoaway(state, code, lastStreamID, buf) {
this.emit('goaway', code, lastStreamID, buf);
// Tear down the session or destroy
+ if (state.destroying || state.destroyed)
+ return;
if (!state.shuttingDown && !state.shutdown) {
this.shutdown({}, this.destroy.bind(this));
} else {
@@ -458,7 +469,7 @@ function requestOnConnect(headers, options) {
break;
case NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE:
err = new errors.Error('ERR_HTTP2_OUT_OF_STREAMS');
- process.nextTick(() => this.emit('error', err));
+ process.nextTick(() => session.emit('error', err));
break;
case NGHTTP2_ERR_INVALID_ARGUMENT:
err = new errors.Error('ERR_HTTP2_STREAM_SELF_DEPENDENCY');
@@ -481,9 +492,9 @@ function validatePriorityOptions(options) {
if (options.weight === undefined) {
options.weight = NGHTTP2_DEFAULT_WEIGHT;
} else if (typeof options.weight !== 'number') {
- const err = new errors.RangeError('ERR_INVALID_OPT_VALUE',
- 'weight',
- options.weight);
+ const err = new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'weight',
+ options.weight);
Error.captureStackTrace(err, validatePriorityOptions);
throw err;
}
@@ -491,9 +502,9 @@ function validatePriorityOptions(options) {
if (options.parent === undefined) {
options.parent = 0;
} else if (typeof options.parent !== 'number' || options.parent < 0) {
- const err = new errors.RangeError('ERR_INVALID_OPT_VALUE',
- 'parent',
- options.parent);
+ const err = new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'parent',
+ options.parent);
Error.captureStackTrace(err, validatePriorityOptions);
throw err;
}
@@ -501,9 +512,9 @@ function validatePriorityOptions(options) {
if (options.exclusive === undefined) {
options.exclusive = false;
} else if (typeof options.exclusive !== 'boolean') {
- const err = new errors.RangeError('ERR_INVALID_OPT_VALUE',
- 'exclusive',
- options.exclusive);
+ const err = new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'exclusive',
+ options.exclusive);
Error.captureStackTrace(err, validatePriorityOptions);
throw err;
}
@@ -511,9 +522,9 @@ function validatePriorityOptions(options) {
if (options.silent === undefined) {
options.silent = false;
} else if (typeof options.silent !== 'boolean') {
- const err = new errors.RangeError('ERR_INVALID_OPT_VALUE',
- 'silent',
- options.silent);
+ const err = new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'silent',
+ options.silent);
Error.captureStackTrace(err, validatePriorityOptions);
throw err;
}
@@ -966,7 +977,7 @@ class Http2Session extends EventEmitter {
state.destroying = true;
// Unenroll the timer
- unenroll(this);
+ this.setTimeout(0, sessionOnTimeout);
// Shut down any still open streams
const streams = state.streams;
@@ -1115,9 +1126,9 @@ class ClientHttp2Session extends Http2Session {
// preference.
options.endStream = isPayloadMeaningless(headers[HTTP2_HEADER_METHOD]);
} else if (typeof options.endStream !== 'boolean') {
- throw new errors.RangeError('ERR_INVALID_OPT_VALUE',
- 'endStream',
- options.endStream);
+ throw new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'endStream',
+ options.endStream);
}
if (options.getTrailers !== undefined &&
@@ -1150,11 +1161,6 @@ class ClientHttp2Session extends Http2Session {
function createWriteReq(req, handle, data, encoding) {
switch (encoding) {
- case 'latin1':
- case 'binary':
- return handle.writeLatin1String(req, data);
- case 'buffer':
- return handle.writeBuffer(req, data);
case 'utf8':
case 'utf-8':
return handle.writeUtf8String(req, data);
@@ -1165,6 +1171,11 @@ function createWriteReq(req, handle, data, encoding) {
case 'utf16le':
case 'utf-16le':
return handle.writeUcs2String(req, data);
+ case 'latin1':
+ case 'binary':
+ return handle.writeLatin1String(req, data);
+ case 'buffer':
+ return handle.writeBuffer(req, data);
default:
return handle.writeBuffer(req, Buffer.from(data, encoding));
}
@@ -1276,6 +1287,7 @@ function abort(stream) {
class Http2Stream extends Duplex {
constructor(session, options) {
options.allowHalfOpen = true;
+ options.decodeStrings = false;
super(options);
this.cork();
this[kSession] = session;
@@ -1503,6 +1515,13 @@ class Http2Stream extends Duplex {
this.once('ready', this._destroy.bind(this, err, callback));
return;
}
+
+ const server = session[kServer];
+
+ if (err && server) {
+ server.emit('streamError', err, this);
+ }
+
process.nextTick(() => {
debug(`[${sessionName(session[kType])}] destroying stream ${this[kID]}`);
@@ -1519,16 +1538,15 @@ class Http2Stream extends Duplex {
session.removeListener('close', this[kState].closeHandler);
// Unenroll the timer
- unenroll(this);
+ this.setTimeout(0);
setImmediate(finishStreamDestroy.bind(this, handle));
// All done
const rst = this[kState].rst;
const code = rst ? this[kState].rstCode : NGHTTP2_NO_ERROR;
- if (code !== NGHTTP2_NO_ERROR) {
- const err = new errors.Error('ERR_HTTP2_STREAM_ERROR', code);
- process.nextTick(() => this.emit('error', err));
+ if (!err && code !== NGHTTP2_NO_ERROR) {
+ err = new errors.Error('ERR_HTTP2_STREAM_ERROR', code);
}
process.nextTick(emit.bind(this, 'streamClosed', code));
debug(`[${sessionName(session[kType])}] stream ${this[kID]} destroyed`);
@@ -1631,13 +1649,24 @@ function doSendFileFD(session, options, fd, headers, getTrailers, err, stat) {
abort(this);
return;
}
+ const onError = options.onError;
+
if (err) {
- process.nextTick(() => this.emit('error', err));
+ if (onError) {
+ onError(err);
+ } else {
+ this.destroy(err);
+ }
return;
}
+
if (!stat.isFile()) {
err = new errors.Error('ERR_HTTP2_SEND_FILE');
- process.nextTick(() => this.emit('error', err));
+ if (onError) {
+ onError(err);
+ } else {
+ this.destroy(err);
+ }
return;
}
@@ -1674,12 +1703,17 @@ function doSendFileFD(session, options, fd, headers, getTrailers, err, stat) {
function afterOpen(session, options, headers, getTrailers, err, fd) {
const state = this[kState];
+ const onError = options.onError;
if (this.destroyed || session.destroyed) {
abort(this);
return;
}
if (err) {
- process.nextTick(() => this.emit('error', err));
+ if (onError) {
+ onError(err);
+ } else {
+ this.destroy(err);
+ }
return;
}
state.fd = fd;
@@ -1688,6 +1722,12 @@ function afterOpen(session, options, headers, getTrailers, err, fd) {
doSendFileFD.bind(this, session, options, fd, headers, getTrailers));
}
+function streamOnError(err) {
+ // We swallow the error for parity with HTTP/1.
+ // All of the errors that end up here are non-critical for the application.
+ debug('ServerHttp2Stream errored, avoiding uncaughtException', err);
+}
+
class ServerHttp2Stream extends Http2Stream {
constructor(session, id, options, headers) {
@@ -1695,6 +1735,7 @@ class ServerHttp2Stream extends Http2Stream {
this[kInit](id);
this[kProtocol] = headers[HTTP2_HEADER_SCHEME];
this[kAuthority] = headers[HTTP2_HEADER_AUTHORITY];
+ this.on('error', streamOnError);
debug(`[${sessionName(session[kType])}] created serverhttp2stream`);
}
@@ -1838,7 +1879,7 @@ class ServerHttp2Stream extends Http2Stream {
// the options.endStream option to true so that the underlying
// bits do not attempt to send any.
if (statusCode === HTTP_STATUS_NO_CONTENT ||
- statusCode === HTTP_STATUS_CONTENT_RESET ||
+ statusCode === HTTP_STATUS_RESET_CONTENT ||
statusCode === HTTP_STATUS_NOT_MODIFIED ||
state.headRequest === true) {
options.endStream = true;
@@ -1932,7 +1973,7 @@ class ServerHttp2Stream extends Http2Stream {
const statusCode = headers[HTTP2_HEADER_STATUS] |= 0;
// Payload/DATA frames are not permitted in these cases
if (statusCode === HTTP_STATUS_NO_CONTENT ||
- statusCode === HTTP_STATUS_CONTENT_RESET ||
+ statusCode === HTTP_STATUS_RESET_CONTENT ||
statusCode === HTTP_STATUS_NOT_MODIFIED) {
throw new errors.Error('ERR_HTTP2_PAYLOAD_FORBIDDEN', statusCode);
}
@@ -2009,7 +2050,7 @@ class ServerHttp2Stream extends Http2Stream {
const statusCode = headers[HTTP2_HEADER_STATUS] |= 0;
// Payload/DATA frames are not permitted in these cases
if (statusCode === HTTP_STATUS_NO_CONTENT ||
- statusCode === HTTP_STATUS_CONTENT_RESET ||
+ statusCode === HTTP_STATUS_RESET_CONTENT ||
statusCode === HTTP_STATUS_NOT_MODIFIED) {
throw new errors.Error('ERR_HTTP2_PAYLOAD_FORBIDDEN', statusCode);
}
@@ -2081,10 +2122,17 @@ class ClientHttp2Stream extends Http2Stream {
this[kState].headersSent = true;
if (id !== undefined)
this[kInit](id);
+ this.on('headers', handleHeaderContinue);
debug(`[${sessionName(session[kType])}] clienthttp2stream created`);
}
}
+function handleHeaderContinue(headers) {
+ if (headers[HTTP2_HEADER_STATUS] === HTTP_STATUS_CONTINUE) {
+ this.emit('continue');
+ }
+}
+
const setTimeout = {
configurable: true,
enumerable: true,
@@ -2140,7 +2188,6 @@ function socketDestroy(error) {
const type = this[kSession][kType];
debug(`[${sessionName(type)}] socket destroy called`);
delete this[kServer];
- this.removeListener('timeout', socketOnTimeout);
// destroy the session first so that it will stop trying to
// send data while we close the socket.
this[kSession].destroy();
@@ -2202,33 +2249,11 @@ function socketOnError(error) {
this.destroy(error);
}
-// When the socket times out on the server, attempt a graceful shutdown
-// of the session.
-function socketOnTimeout() {
- debug('socket timeout');
- process.nextTick(() => {
- const server = this[kServer];
- const session = this[kSession];
- // If server or session are undefined, then we're already in the process of
- // shutting down, do nothing.
- if (server === undefined || session === undefined)
- return;
- if (!server.emit('timeout', session, this)) {
- session.shutdown(
- {
- graceful: true,
- errorCode: NGHTTP2_NO_ERROR
- },
- this.destroy.bind(this));
- }
- });
-}
-
// Handles the on('stream') event for a session and forwards
// it on to the server object.
-function sessionOnStream(stream, headers, flags) {
+function sessionOnStream(stream, headers, flags, rawHeaders) {
debug(`[${sessionName(this[kType])}] emit server stream event`);
- this[kServer].emit('stream', stream, headers, flags);
+ this[kServer].emit('stream', stream, headers, flags, rawHeaders);
}
function sessionOnPriority(stream, parent, weight, exclusive) {
@@ -2241,15 +2266,34 @@ function sessionOnSocketError(error, socket) {
this[kServer].emit('socketError', error, socket, this);
}
+// When the session times out on the server, attempt a graceful shutdown
+function sessionOnTimeout() {
+ debug('session timeout');
+ process.nextTick(() => {
+    // If destroyed or destroying, do nothing.
+ if (this[kState].destroyed || this[kState].destroying)
+ return;
+ const server = this[kServer];
+ const socket = this[kSocket];
+ // If server or socket are undefined, then we're already in the process of
+ // shutting down, do nothing.
+ if (server === undefined || socket === undefined)
+ return;
+ if (!server.emit('timeout', this)) {
+ this.shutdown(
+ {
+ graceful: true,
+ errorCode: NGHTTP2_NO_ERROR
+ },
+ socket.destroy.bind(socket));
+ }
+ });
+}
+
function connectionListener(socket) {
debug('[server] received a connection');
const options = this[kOptions] || {};
- if (this.timeout) {
- socket.setTimeout(this.timeout);
- socket.on('timeout', socketOnTimeout);
- }
-
if (socket.alpnProtocol === false || socket.alpnProtocol === 'http/1.1') {
if (options.allowHTTP1 === true) {
// Fallback to HTTP/1.1
@@ -2277,6 +2321,11 @@ function connectionListener(socket) {
session.on('priority', sessionOnPriority);
session.on('socketError', sessionOnSocketError);
+ if (this.timeout) {
+ session.setTimeout(this.timeout);
+ session.on('timeout', sessionOnTimeout);
+ }
+
socket[kServer] = this;
process.nextTick(emit.bind(this, 'session', session));
@@ -2552,7 +2601,11 @@ module.exports = {
getUnpackedSettings,
createServer,
createSecureServer,
- connect
+ connect,
+ Http2Session,
+ Http2Stream,
+ Http2ServerRequest,
+ Http2ServerResponse
};
/* eslint-enable no-use-before-define */
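For context on the server-side error handling changes above: errors on destroyed server streams are now re-emitted on the server as 'streamError', ServerHttp2Stream installs a default 'error' listener that only debug-logs (for parity with HTTP/1), and respondWithFile() accepts an options.onError callback instead of emitting 'error' on the stream. A minimal usage sketch follows; the port, file path and handler bodies are hypothetical, and the http2 module is assumed to still sit behind its experimental flag (--expose-http2) in this release.

```js
// Illustrative sketch only (hypothetical port, path and handlers).
const http2 = require('http2');

const server = http2.createServer();

// Stream errors that previously surfaced as 'error' on the stream are now
// re-emitted on the server as 'streamError' so they can be handled centrally.
server.on('streamError', (err, stream) => {
  console.error('stream errored:', err.message);
});

server.on('stream', (stream, headers) => {
  // respondWithFile() takes an onError callback; without it, the stream is
  // destroyed with the error rather than emitting an unhandled 'error'.
  stream.respondWithFile('/hypothetical/index.html',
                         { 'content-type': 'text/html' },
                         {
                           onError(err) {
                             stream.respond({ ':status': 404 });
                             stream.end();
                           }
                         });
});

server.listen(8000);
```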
diff --git a/lib/internal/http2/util.js b/lib/internal/http2/util.js
index 09f55fdc65e309..17f4c22252fd28 100644
--- a/lib/internal/http2/util.js
+++ b/lib/internal/http2/util.js
@@ -35,6 +35,7 @@ const {
HTTP2_HEADER_RANGE,
HTTP2_HEADER_REFERER,
HTTP2_HEADER_RETRY_AFTER,
+ HTTP2_HEADER_SET_COOKIE,
HTTP2_HEADER_USER_AGENT,
HTTP2_HEADER_CONNECTION,
@@ -474,18 +475,36 @@ function toHeaderObject(headers) {
if (existing === undefined) {
obj[name] = value;
} else if (!kSingleValueHeaders.has(name)) {
- if (name === HTTP2_HEADER_COOKIE) {
- // https://tools.ietf.org/html/rfc7540#section-8.1.2.5
- // "...If there are multiple Cookie header fields after decompression,
- // these MUST be concatenated into a single octet string using the
- // two-octet delimiter of 0x3B, 0x20 (the ASCII string "; ") before
- // being passed into a non-HTTP/2 context."
- obj[name] = `${existing}; ${value}`;
- } else {
- if (Array.isArray(existing))
- existing.push(value);
- else
- obj[name] = [existing, value];
+ switch (name) {
+ case HTTP2_HEADER_COOKIE:
+ // https://tools.ietf.org/html/rfc7540#section-8.1.2.5
+ // "...If there are multiple Cookie header fields after decompression,
+ // these MUST be concatenated into a single octet string using the
+ // two-octet delimiter of 0x3B, 0x20 (the ASCII string "; ") before
+ // being passed into a non-HTTP/2 context."
+ obj[name] = `${existing}; ${value}`;
+ break;
+ case HTTP2_HEADER_SET_COOKIE:
+ // https://tools.ietf.org/html/rfc7230#section-3.2.2
+ // "Note: In practice, the "Set-Cookie" header field ([RFC6265]) often
+ // appears multiple times in a response message and does not use the
+ // list syntax, violating the above requirements on multiple header
+ // fields with the same name. Since it cannot be combined into a
+ // single field-value, recipients ought to handle "Set-Cookie" as a
+ // special case while processing header fields."
+ if (Array.isArray(existing))
+ existing.push(value);
+ else
+ obj[name] = [existing, value];
+ break;
+ default:
+ // https://tools.ietf.org/html/rfc7230#section-3.2.2
+ // "A recipient MAY combine multiple header fields with the same field
+ // name into one "field-name: field-value" pair, without changing the
+ // semantics of the message, by appending each subsequent field value
+ // to the combined field value in order, separated by a comma."
+ obj[name] = `${existing}, ${value}`;
+ break;
}
}
}
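The switch above applies three folding rules to repeated headers: Cookie values are re-joined with '; ' (RFC 7540 section 8.1.2.5), Set-Cookie is collected into an array because it cannot be safely comma-joined (RFC 7230 section 3.2.2), and all other repeatable headers are comma-joined. A standalone sketch of the same decision table (not the internal implementation; the kSingleValueHeaders check is omitted and header names are assumed lower-cased):

```js
// Standalone sketch of the folding rules above.
function foldHeader(obj, name, value) {
  const existing = obj[name];
  if (existing === undefined) {
    obj[name] = value;
  } else if (name === 'cookie') {
    obj[name] = `${existing}; ${value}`;        // RFC 7540 section 8.1.2.5
  } else if (name === 'set-cookie') {
    obj[name] = Array.isArray(existing) ?       // RFC 7230 special case
      existing.concat(value) : [existing, value];
  } else {
    obj[name] = `${existing}, ${value}`;        // generic comma folding
  }
  return obj;
}

const headers = {};
foldHeader(headers, 'set-cookie', 'a=1');
foldHeader(headers, 'set-cookie', 'b=2');
foldHeader(headers, 'accept', 'text/html');
foldHeader(headers, 'accept', 'application/json');
// headers['set-cookie'] => [ 'a=1', 'b=2' ]
// headers['accept']     => 'text/html, application/json'
```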
diff --git a/lib/internal/inspector_async_hook.js b/lib/internal/inspector_async_hook.js
new file mode 100644
index 00000000000000..e32a026cd69155
--- /dev/null
+++ b/lib/internal/inspector_async_hook.js
@@ -0,0 +1,64 @@
+'use strict';
+
+const { createHook } = require('async_hooks');
+const inspector = process.binding('inspector');
+const config = process.binding('config');
+
+if (!inspector || !inspector.asyncTaskScheduled) {
+ exports.setup = function() {};
+ return;
+}
+
+const hook = createHook({
+ init(asyncId, type, triggerAsyncId, resource) {
+ // It's difficult to tell which tasks will be recurring and which won't,
+ // therefore we mark all tasks as recurring. Based on the discussion
+ // in https://github.com/nodejs/node/pull/13870#discussion_r124515293,
+ // this should be fine as long as we call asyncTaskCanceled() too.
+ const recurring = true;
+ inspector.asyncTaskScheduled(type, asyncId, recurring);
+ },
+
+ before(asyncId) {
+ inspector.asyncTaskStarted(asyncId);
+ },
+
+ after(asyncId) {
+ inspector.asyncTaskFinished(asyncId);
+ },
+
+ destroy(asyncId) {
+ inspector.asyncTaskCanceled(asyncId);
+ },
+});
+
+function enable() {
+ if (config.bits < 64) {
+ // V8 Inspector stores task ids as (void*) pointers.
+ // async_hooks store ids as 64bit numbers.
+ // As a result, we cannot reliably translate async_hook ids to V8 async_task
+ // ids on 32bit platforms.
+ process.emitWarning(
+ 'Warning: Async stack traces in debugger are not available ' +
+ `on ${config.bits}bit platforms. The feature is disabled.`,
+ {
+ code: 'INSPECTOR_ASYNC_STACK_TRACES_NOT_AVAILABLE',
+ });
+ } else {
+ hook.enable();
+ }
+}
+
+function disable() {
+ hook.disable();
+}
+
+exports.setup = function() {
+ inspector.registerAsyncHook(enable, disable);
+
+ if (inspector.isEnabled()) {
+ // If the inspector was already enabled via --inspect or --inspect-brk,
+    // then we need to enable the async hook immediately at startup.
+ enable();
+ }
+};
diff --git a/lib/internal/loader/Loader.js b/lib/internal/loader/Loader.js
new file mode 100644
index 00000000000000..a409d397f85dd6
--- /dev/null
+++ b/lib/internal/loader/Loader.js
@@ -0,0 +1,75 @@
+'use strict';
+
+const { URL } = require('url');
+const { getURLFromFilePath } = require('internal/url');
+
+const {
+ getNamespaceOfModuleWrap
+} = require('internal/loader/ModuleWrap');
+
+const ModuleMap = require('internal/loader/ModuleMap');
+const ModuleJob = require('internal/loader/ModuleJob');
+const resolveRequestUrl = require('internal/loader/resolveRequestUrl');
+const errors = require('internal/errors');
+
+function getBase() {
+ try {
+ return getURLFromFilePath(`${process.cwd()}/`);
+ } catch (e) {
+ e.stack;
+ // If the current working directory no longer exists.
+ if (e.code === 'ENOENT') {
+ return undefined;
+ }
+ throw e;
+ }
+}
+
+class Loader {
+ constructor(base = getBase()) {
+ this.moduleMap = new ModuleMap();
+ if (typeof base !== 'undefined' && base instanceof URL !== true) {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'base', 'URL');
+ }
+ this.base = base;
+ }
+
+ async resolve(specifier) {
+ const request = resolveRequestUrl(this.base, specifier);
+ if (request.url.protocol !== 'file:') {
+ throw new errors.Error('ERR_INVALID_PROTOCOL',
+ request.url.protocol, 'file:');
+ }
+ return request.url;
+ }
+
+ async getModuleJob(dependentJob, specifier) {
+ if (!this.moduleMap.has(dependentJob.url)) {
+ throw new errors.Error('ERR_MISSING_MODULE', dependentJob.url);
+ }
+ const request = await resolveRequestUrl(dependentJob.url, specifier);
+ const url = `${request.url}`;
+ if (this.moduleMap.has(url)) {
+ return this.moduleMap.get(url);
+ }
+ const dependencyJob = new ModuleJob(this, request);
+ this.moduleMap.set(url, dependencyJob);
+ return dependencyJob;
+ }
+
+ async import(specifier) {
+ const request = await resolveRequestUrl(this.base, specifier);
+ const url = `${request.url}`;
+ let job;
+ if (this.moduleMap.has(url)) {
+ job = this.moduleMap.get(url);
+ } else {
+ job = new ModuleJob(this, request);
+ this.moduleMap.set(url, job);
+ }
+ const module = await job.run();
+ return getNamespaceOfModuleWrap(module);
+ }
+}
+Object.setPrototypeOf(Loader.prototype, null);
+module.exports = Loader;
diff --git a/lib/internal/loader/ModuleJob.js b/lib/internal/loader/ModuleJob.js
new file mode 100644
index 00000000000000..db4cb6ae5c5031
--- /dev/null
+++ b/lib/internal/loader/ModuleJob.js
@@ -0,0 +1,116 @@
+'use strict';
+
+const { SafeSet, SafePromise } = require('internal/safe_globals');
+const resolvedPromise = SafePromise.resolve();
+const resolvedArrayPromise = SafePromise.resolve([]);
+const { ModuleWrap } = require('internal/loader/ModuleWrap');
+
+const NOOP = () => { /* No-op */ };
+class ModuleJob {
+ /**
+ * @param {module: ModuleWrap?, compiled: Promise} moduleProvider
+ */
+ constructor(loader, moduleProvider, url) {
+ this.url = `${moduleProvider.url}`;
+ this.moduleProvider = moduleProvider;
+ this.loader = loader;
+ this.error = null;
+ this.hadError = false;
+
+ if (moduleProvider instanceof ModuleWrap !== true) {
+ // linked == promise for dependency jobs, with module populated,
+ // module wrapper linked
+ this.modulePromise = this.moduleProvider.createModule();
+ this.module = undefined;
+ const linked = async () => {
+ const dependencyJobs = [];
+ this.module = await this.modulePromise;
+ this.module.link(async (dependencySpecifier) => {
+ const dependencyJobPromise =
+ this.loader.getModuleJob(this, dependencySpecifier);
+ dependencyJobs.push(dependencyJobPromise);
+ const dependencyJob = await dependencyJobPromise;
+ return dependencyJob.modulePromise;
+ });
+ return SafePromise.all(dependencyJobs);
+ };
+ this.linked = linked();
+
+      // instantiated == deep dependency job wrappers instantiated,
+      // module wrapper instantiated
+ this.instantiated = undefined;
+ } else {
+ const getModuleProvider = async () => moduleProvider;
+ this.modulePromise = getModuleProvider();
+ this.moduleProvider = { finish: NOOP };
+ this.module = moduleProvider;
+ this.linked = resolvedArrayPromise;
+ this.instantiated = this.modulePromise;
+ }
+ }
+
+ instantiate() {
+ if (this.instantiated) {
+ return this.instantiated;
+ }
+ return this.instantiated = new Promise(async (resolve, reject) => {
+ const jobsInGraph = new SafeSet();
+ let jobsReadyToInstantiate = 0;
+ // (this must be sync for counter to work)
+ const queueJob = (moduleJob) => {
+ if (jobsInGraph.has(moduleJob)) {
+ return;
+ }
+ jobsInGraph.add(moduleJob);
+ moduleJob.linked.then((dependencyJobs) => {
+ for (const dependencyJob of dependencyJobs) {
+ queueJob(dependencyJob);
+ }
+ checkComplete();
+ }, (e) => {
+ if (!this.hadError) {
+ this.error = e;
+ this.hadError = true;
+ }
+ checkComplete();
+ });
+ };
+ const checkComplete = () => {
+ if (++jobsReadyToInstantiate === jobsInGraph.size) {
+ // I believe we only throw once the whole tree is finished loading?
+ // or should the error bail early, leaving entire tree to still load?
+ if (this.hadError) {
+ reject(this.error);
+ } else {
+ try {
+ this.module.instantiate();
+ for (const dependencyJob of jobsInGraph) {
+ dependencyJob.instantiated = resolvedPromise;
+ }
+ resolve(this.module);
+ } catch (e) {
+ e.stack;
+ reject(e);
+ }
+ }
+ }
+ };
+ queueJob(this);
+ });
+ }
+
+ async run() {
+ const module = await this.instantiate();
+ try {
+ module.evaluate();
+ } catch (e) {
+ e.stack;
+ this.hadError = true;
+ this.error = e;
+ throw e;
+ }
+ return module;
+ }
+}
+Object.setPrototypeOf(ModuleJob.prototype, null);
+module.exports = ModuleJob;
diff --git a/lib/internal/loader/ModuleMap.js b/lib/internal/loader/ModuleMap.js
new file mode 100644
index 00000000000000..aa238afbaedc05
--- /dev/null
+++ b/lib/internal/loader/ModuleMap.js
@@ -0,0 +1,33 @@
+'use strict';
+
+const ModuleJob = require('internal/loader/ModuleJob');
+const { SafeMap } = require('internal/safe_globals');
+const debug = require('util').debuglog('esm');
+const errors = require('internal/errors');
+
+// Tracks the state of the loader-level module cache
+class ModuleMap extends SafeMap {
+ get(url) {
+ if (typeof url !== 'string') {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'url', 'string');
+ }
+ return super.get(url);
+ }
+ set(url, job) {
+ if (typeof url !== 'string') {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'url', 'string');
+ }
+ if (job instanceof ModuleJob !== true) {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'job', 'ModuleJob');
+ }
+ debug(`Storing ${url} in ModuleMap`);
+ return super.set(url, job);
+ }
+ has(url) {
+ if (typeof url !== 'string') {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'url', 'string');
+ }
+ return super.has(url);
+ }
+}
+module.exports = ModuleMap;
diff --git a/lib/internal/loader/ModuleWrap.js b/lib/internal/loader/ModuleWrap.js
new file mode 100644
index 00000000000000..4d35356ec2433e
--- /dev/null
+++ b/lib/internal/loader/ModuleWrap.js
@@ -0,0 +1,61 @@
+'use strict';
+
+const { ModuleWrap } = process.binding('module_wrap');
+const debug = require('util').debuglog('esm');
+const ArrayJoin = Function.call.bind(Array.prototype.join);
+const ArrayMap = Function.call.bind(Array.prototype.map);
+
+const getNamespaceOfModuleWrap = (m) => {
+ const tmp = new ModuleWrap('import * as _ from "";_;', '');
+ tmp.link(async () => m);
+ tmp.instantiate();
+ return tmp.evaluate();
+};
+
+const createDynamicModule = (exports, url = '', evaluate) => {
+ debug(
+ `creating ESM facade for ${url} with exports: ${ArrayJoin(exports, ', ')}`
+ );
+ const names = ArrayMap(exports, (name) => `${name}`);
+ // sanitized ESM for reflection purposes
+ const src = `export let executor;
+ ${ArrayJoin(ArrayMap(names, (name) => `export let $${name}`), ';\n')}
+ ;(() => [
+ fn => executor = fn,
+ { exports: { ${
+ ArrayJoin(ArrayMap(names, (name) => `${name}: {
+ get: () => $${name},
+ set: v => $${name} = v
+ }`), ',\n')
+} } }
+ ]);
+ `;
+ const reflectiveModule = new ModuleWrap(src, `cjs-facade:${url}`);
+ reflectiveModule.instantiate();
+ const [setExecutor, reflect] = reflectiveModule.evaluate()();
+ // public exposed ESM
+ const reexports = `import { executor,
+ ${ArrayMap(names, (name) => `$${name}`)}
+ } from "";
+ export {
+ ${ArrayJoin(ArrayMap(names, (name) => `$${name} as ${name}`), ', ')}
+ }
+ // add await to this later if top level await comes along
+ typeof executor === "function" ? executor() : void 0;`;
+ if (typeof evaluate === 'function') {
+ setExecutor(() => evaluate(reflect));
+ }
+ const runner = new ModuleWrap(reexports, `${url}`);
+ runner.link(async () => reflectiveModule);
+ runner.instantiate();
+ return {
+ module: runner,
+ reflect
+ };
+};
+
+module.exports = {
+ createDynamicModule,
+ getNamespaceOfModuleWrap,
+ ModuleWrap
+};
diff --git a/lib/internal/loader/resolveRequestUrl.js b/lib/internal/loader/resolveRequestUrl.js
new file mode 100644
index 00000000000000..2245064bfe4ba8
--- /dev/null
+++ b/lib/internal/loader/resolveRequestUrl.js
@@ -0,0 +1,104 @@
+'use strict';
+
+const { URL } = require('url');
+const internalCJSModule = require('internal/module');
+const internalURLModule = require('internal/url');
+const internalFS = require('internal/fs');
+const NativeModule = require('native_module');
+const { extname } = require('path');
+const { realpathSync } = require('fs');
+const preserveSymlinks = !!process.binding('config').preserveSymlinks;
+const {
+ ModuleWrap,
+ createDynamicModule
+} = require('internal/loader/ModuleWrap');
+const errors = require('internal/errors');
+
+const search = require('internal/loader/search');
+const asyncReadFile = require('util').promisify(require('fs').readFile);
+const debug = require('util').debuglog('esm');
+
+const realpathCache = new Map();
+
+class ModuleRequest {
+ constructor(url) {
+ this.url = url;
+ }
+}
+Object.setPrototypeOf(ModuleRequest.prototype, null);
+
+// Strategy for loading a standard JavaScript module
+class StandardModuleRequest extends ModuleRequest {
+ async createModule() {
+ const source = `${await asyncReadFile(this.url)}`;
+ debug(`Loading StandardModule ${this.url}`);
+ return new ModuleWrap(internalCJSModule.stripShebang(source),
+ `${this.url}`);
+ }
+}
+
+// Strategy for loading a node-style CommonJS module
+class CJSModuleRequest extends ModuleRequest {
+ async createModule() {
+ const ctx = createDynamicModule(['default'], this.url, (reflect) => {
+ debug(`Loading CJSModule ${this.url.pathname}`);
+ const CJSModule = require('module');
+ const pathname = internalURLModule.getPathFromURL(this.url);
+ CJSModule._load(pathname);
+ });
+ this.finish = (module) => {
+ ctx.reflect.exports.default.set(module.exports);
+ };
+ return ctx.module;
+ }
+}
+
+// Strategy for loading a node builtin CommonJS module that isn't
+// through normal resolution
+class NativeModuleRequest extends CJSModuleRequest {
+ async createModule() {
+ const ctx = createDynamicModule(['default'], this.url, (reflect) => {
+ debug(`Loading NativeModule ${this.url.pathname}`);
+ const exports = require(this.url.pathname);
+ reflect.exports.default.set(exports);
+ });
+ return ctx.module;
+ }
+}
+
+const normalizeBaseURL = (baseURLOrString) => {
+ if (baseURLOrString instanceof URL) return baseURLOrString;
+ if (typeof baseURLOrString === 'string') return new URL(baseURLOrString);
+ return undefined;
+};
+
+const resolveRequestUrl = (baseURLOrString, specifier) => {
+ if (NativeModule.nonInternalExists(specifier)) {
+ return new NativeModuleRequest(new URL(`node:${specifier}`));
+ }
+
+ const baseURL = normalizeBaseURL(baseURLOrString);
+ let url = search(specifier, baseURL);
+
+ if (url.protocol !== 'file:') {
+ throw new errors.Error('ERR_INVALID_PROTOCOL', url.protocol, 'file:');
+ }
+
+ if (!preserveSymlinks) {
+ const real = realpathSync(internalURLModule.getPathFromURL(url), {
+ [internalFS.realpathCacheKey]: realpathCache
+ });
+ const old = url;
+ url = internalURLModule.getURLFromFilePath(real);
+ url.search = old.search;
+ url.hash = old.hash;
+ }
+
+ const ext = extname(url.pathname);
+ if (ext === '.mjs') {
+ return new StandardModuleRequest(url);
+ }
+
+ return new CJSModuleRequest(url);
+};
+module.exports = resolveRequestUrl;
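As the ['default'] argument to createDynamicModule() above suggests, CommonJS and built-in modules reached through this resolver are presented to ES modules as a facade with only a default export. A hypothetical usage sketch, assuming the --experimental-modules flag wired up later in this patch and a sibling CommonJS file named math.js:

```js
// main.mjs (hypothetical); run with: node --experimental-modules main.mjs
// Assumes a sibling CommonJS file math.js containing:
//   module.exports = { add: (a, b) => a + b };
//
// The CJS module is wrapped by the dynamic facade created above, so only a
// default export is projected into ESM; named exports are not available.
import math from './math.js';

console.log(math.add(2, 3)); // 5
```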
diff --git a/lib/internal/loader/search.js b/lib/internal/loader/search.js
new file mode 100644
index 00000000000000..f0ec34ae4e77c2
--- /dev/null
+++ b/lib/internal/loader/search.js
@@ -0,0 +1,33 @@
+'use strict';
+
+const { URL } = require('url');
+const CJSmodule = require('module');
+const errors = require('internal/errors');
+const { resolve } = process.binding('module_wrap');
+
+module.exports = (target, base) => {
+ target = `${target}`;
+ if (base === undefined) {
+ // We cannot search without a base.
+ throw new errors.Error('ERR_MISSING_MODULE', target);
+ }
+ base = `${base}`;
+ try {
+ return resolve(target, base);
+ } catch (e) {
+ e.stack; // cause V8 to generate stack before rethrow
+ let error = e;
+ try {
+ const questionedBase = new URL(base);
+ const tmpMod = new CJSmodule(questionedBase.pathname, null);
+ tmpMod.paths = CJSmodule._nodeModulePaths(
+ new URL('./', questionedBase).pathname);
+ const found = CJSmodule._resolveFilename(target, tmpMod);
+ error = new errors.Error('ERR_MODULE_RESOLUTION_LEGACY', target,
+ base, found);
+ } catch (problemChecking) {
+ // ignore
+ }
+ throw error;
+ }
+};
diff --git a/lib/internal/module.js b/lib/internal/module.js
index cf994b51c0675f..a6da58a8d73663 100644
--- a/lib/internal/module.js
+++ b/lib/internal/module.js
@@ -79,8 +79,8 @@ function stripShebang(content) {
const builtinLibs = [
'assert', 'async_hooks', 'buffer', 'child_process', 'cluster', 'crypto',
'dgram', 'dns', 'domain', 'events', 'fs', 'http', 'https', 'net',
- 'os', 'path', 'punycode', 'querystring', 'readline', 'repl', 'stream',
- 'string_decoder', 'tls', 'tty', 'url', 'util', 'v8', 'vm', 'zlib'
+ 'os', 'path', 'perf_hooks', 'punycode', 'querystring', 'readline', 'repl',
+ 'stream', 'string_decoder', 'tls', 'tty', 'url', 'util', 'v8', 'vm', 'zlib'
];
const { exposeHTTP2 } = process.binding('config');
diff --git a/lib/internal/os.js b/lib/internal/os.js
new file mode 100644
index 00000000000000..74ed6e767ee16d
--- /dev/null
+++ b/lib/internal/os.js
@@ -0,0 +1,41 @@
+'use strict';
+
+function getCIDRSuffix(mask, protocol = 'ipv4') {
+ const isV6 = protocol === 'ipv6';
+ const bitsString = mask
+ .split(isV6 ? ':' : '.')
+ .filter((v) => !!v)
+ .map((v) => pad(parseInt(v, isV6 ? 16 : 10).toString(2), isV6))
+ .join('');
+
+ if (isValidMask(bitsString)) {
+ return countOnes(bitsString);
+ } else {
+ return null;
+ }
+}
+
+function pad(binaryString, isV6) {
+ const groupLength = isV6 ? 16 : 8;
+ const binLen = binaryString.length;
+
+ return binLen < groupLength ?
+ `${'0'.repeat(groupLength - binLen)}${binaryString}` : binaryString;
+}
+
+function isValidMask(bitsString) {
+ const firstIndexOfZero = bitsString.indexOf(0);
+ const lastIndexOfOne = bitsString.lastIndexOf(1);
+
+ return firstIndexOfZero < 0 || firstIndexOfZero > lastIndexOfOne;
+}
+
+function countOnes(bitsString) {
+ return bitsString
+ .split('')
+ .reduce((acc, bit) => acc += parseInt(bit, 10), 0);
+}
+
+module.exports = {
+ getCIDRSuffix
+};
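getCIDRSuffix() turns a dotted (or colon-separated) netmask into a CIDR prefix length by concatenating the binary form of each group, checking that the set bits are contiguous, and counting them. A simplified worked example of the IPv4 case (not the internal helpers themselves):

```js
// Worked example of the algorithm above for an IPv4 netmask.
const mask = '255.255.255.0';

const bits = mask
  .split('.')
  .map((v) => parseInt(v, 10).toString(2).padStart(8, '0'))
  .join('');
// bits === '11111111111111111111111100000000'

// A mask is valid only when no 0 bit is followed by a 1 bit.
const valid = !/01/.test(bits);
const suffix = valid ? (bits.match(/1/g) || []).length : null;

console.log(suffix); // 24, so an address would be reported as e.g. 'x.x.x.x/24'
// A non-contiguous mask such as 255.0.255.0 contains '01' and yields null.
```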
diff --git a/lib/internal/process.js b/lib/internal/process.js
index 2e11249cb2dd30..92be0b674056fd 100644
--- a/lib/internal/process.js
+++ b/lib/internal/process.js
@@ -16,6 +16,10 @@ const assert = process.assert = function(x, msg) {
};
+function setup_performance() {
+ require('perf_hooks');
+}
+
// Set up the process.cpuUsage() function.
function setup_cpuUsage() {
// Get the native function, which will be replaced with a JS version.
@@ -259,6 +263,7 @@ function setupRawDebug() {
}
module.exports = {
+ setup_performance,
setup_cpuUsage,
setup_hrtime,
setupMemoryUsage,
diff --git a/lib/internal/process/promises.js b/lib/internal/process/promises.js
index b4a4582c3095a7..663e6f5fec6985 100644
--- a/lib/internal/process/promises.js
+++ b/lib/internal/process/promises.js
@@ -1,5 +1,7 @@
'use strict';
+const { safeToString } = process.binding('util');
+
const promiseRejectEvent = process._promiseRejectEvent;
const hasBeenNotifiedProperty = new WeakMap();
const promiseToGuidProperty = new WeakMap();
@@ -58,12 +60,17 @@ function setupPromises(scheduleMicrotasks) {
}
function emitWarning(uid, reason) {
- const warning = new Error('Unhandled promise rejection ' +
- `(rejection id: ${uid}): ${String(reason)}`);
+ const warning = new Error(
+ `Unhandled promise rejection (rejection id: ${uid}): ` +
+ safeToString(reason));
warning.name = 'UnhandledPromiseRejectionWarning';
warning.id = uid;
- if (reason instanceof Error) {
- warning.stack = reason.stack;
+ try {
+ if (reason instanceof Error) {
+ warning.stack = reason.stack;
+ }
+ } catch (err) {
+ // ignored
}
process.emitWarning(warning);
if (!deprecationWarned) {
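The move to safeToString(), together with the try/catch around the instanceof check, guards the warning path against rejection reasons that throw when stringified or introspected. A hedged sketch of the kind of value this change is meant to tolerate:

```js
// Sketch of a rejection reason whose stringification throws; the old code
// called String(reason) directly and could fail while reporting the
// unhandled rejection.
const hostile = {
  toString() { throw new Error('no string for you'); }
};

Promise.reject(hostile);
// With this change, the UnhandledPromiseRejectionWarning is still expected
// to be emitted instead of the warning path itself throwing.
```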
diff --git a/lib/internal/safe_globals.js b/lib/internal/safe_globals.js
new file mode 100644
index 00000000000000..ad58fa662b53ef
--- /dev/null
+++ b/lib/internal/safe_globals.js
@@ -0,0 +1,26 @@
+'use strict';
+
+const copyProps = (unsafe, safe) => {
+ for (const key of [...Object.getOwnPropertyNames(unsafe),
+ ...Object.getOwnPropertySymbols(unsafe)
+ ]) {
+ if (!Object.getOwnPropertyDescriptor(safe, key)) {
+ Object.defineProperty(
+ safe,
+ key,
+ Object.getOwnPropertyDescriptor(unsafe, key));
+ }
+ }
+};
+const makeSafe = (unsafe, safe) => {
+ copyProps(unsafe.prototype, safe.prototype);
+ copyProps(unsafe, safe);
+ Object.setPrototypeOf(safe.prototype, null);
+ Object.freeze(safe.prototype);
+ Object.freeze(safe);
+ return safe;
+};
+
+exports.SafeMap = makeSafe(Map, class SafeMap extends Map {});
+exports.SafeSet = makeSafe(Set, class SafeSet extends Set {});
+exports.SafePromise = makeSafe(Promise, class SafePromise extends Promise {});
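makeSafe() copies every own property (string and symbol keyed) of a built-in class onto a frozen subclass whose prototype chain is detached from the mutable global, so later tampering with, say, Map.prototype cannot affect loader internals. A small self-contained sketch of the guarantee this is meant to provide (SafeMap below is a stand-in built the same way):

```js
// Stand-in built like safe_globals.js, to show the intended guarantee.
const copyProps = (unsafe, safe) => {
  for (const key of [...Object.getOwnPropertyNames(unsafe),
                     ...Object.getOwnPropertySymbols(unsafe)]) {
    if (!Object.getOwnPropertyDescriptor(safe, key)) {
      Object.defineProperty(safe, key,
                            Object.getOwnPropertyDescriptor(unsafe, key));
    }
  }
};

class SafeMap extends Map {}
copyProps(Map.prototype, SafeMap.prototype);
copyProps(Map, SafeMap);
Object.setPrototypeOf(SafeMap.prototype, null);
Object.freeze(SafeMap.prototype);
Object.freeze(SafeMap);

// User code mutating the global prototype...
Map.prototype.get = () => 'tampered';

// ...does not reach the safe copy: its methods were captured up front and
// its prototype chain no longer includes Map.prototype.
const m = new SafeMap([['k', 'v']]);
console.log(m.get('k'));                      // 'v'
console.log(new Map([['k', 'v']]).get('k'));  // 'tampered'
```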
diff --git a/lib/internal/url.js b/lib/internal/url.js
index 10df37d8c25d7b..54a23e02515b8f 100644
--- a/lib/internal/url.js
+++ b/lib/internal/url.js
@@ -1378,6 +1378,12 @@ function getPathFromURL(path) {
return isWindows ? getPathFromURLWin32(path) : getPathFromURLPosix(path);
}
+function getURLFromFilePath(filepath) {
+ const tmp = new URL('file://');
+ tmp.pathname = filepath;
+ return tmp;
+}
+
function NativeURL(ctx) {
this[context] = ctx;
}
@@ -1406,6 +1412,7 @@ setURLConstructor(constructUrl);
module.exports = {
toUSVString,
getPathFromURL,
+ getURLFromFilePath,
URL,
URLSearchParams,
domainToASCII,
diff --git a/lib/internal/v8_prof_processor.js b/lib/internal/v8_prof_processor.js
index f0bcff7482d724..01b81c6ba56492 100644
--- a/lib/internal/v8_prof_processor.js
+++ b/lib/internal/v8_prof_processor.js
@@ -32,6 +32,7 @@ if (process.platform === 'darwin') {
tickArguments.push.apply(tickArguments, process.argv.slice(1));
script = `(function() {
arguments = ${JSON.stringify(tickArguments)};
+ function write (s) { process.stdout.write(s) }
${script}
})()`;
eval(script);
diff --git a/lib/module.js b/lib/module.js
index 339a228da91bdf..d02a676c90270f 100644
--- a/lib/module.js
+++ b/lib/module.js
@@ -24,6 +24,7 @@
const NativeModule = require('native_module');
const util = require('util');
const internalModule = require('internal/module');
+const { getURLFromFilePath } = require('internal/url');
const vm = require('vm');
const assert = require('assert').ok;
const fs = require('fs');
@@ -32,6 +33,14 @@ const path = require('path');
const internalModuleReadFile = process.binding('fs').internalModuleReadFile;
const internalModuleStat = process.binding('fs').internalModuleStat;
const preserveSymlinks = !!process.binding('config').preserveSymlinks;
+const experimentalModules = !!process.binding('config').experimentalModules;
+
+const errors = require('internal/errors');
+
+const Loader = require('internal/loader/Loader');
+const ModuleJob = require('internal/loader/ModuleJob');
+const { createDynamicModule } = require('internal/loader/ModuleWrap');
+const ESMLoader = new Loader();
function stat(filename) {
filename = path._makeLong(filename);
@@ -436,7 +445,36 @@ Module._load = function(request, parent, isMain) {
debug('Module._load REQUEST %s parent: %s', request, parent.id);
}
- var filename = Module._resolveFilename(request, parent, isMain);
+ var filename = null;
+
+ if (isMain) {
+ let err;
+ try {
+ filename = Module._resolveFilename(request, parent, isMain);
+ } catch (e) {
+ // try to keep stack
+ e.stack;
+ err = e;
+ }
+ if (experimentalModules) {
+ if (filename === null || /\.mjs$/.test(filename)) {
+ try {
+ ESMLoader.import(request).catch((e) => {
+ console.error(e);
+ process.exit(1);
+ });
+ return;
+ } catch (e) {
+ // well, it isn't ESM
+ }
+ }
+ }
+ if (err) {
+ throw err;
+ }
+ } else {
+ filename = Module._resolveFilename(request, parent, isMain);
+ }
var cachedModule = Module._cache[filename];
if (cachedModule) {
@@ -506,6 +544,19 @@ Module.prototype.load = function(filename) {
if (!Module._extensions[extension]) extension = '.js';
Module._extensions[extension](this, filename);
this.loaded = true;
+
+ if (experimentalModules) {
+ const url = getURLFromFilePath(filename);
+ if (ESMLoader.moduleMap.has(`${url}`) !== true) {
+ const ctx = createDynamicModule(['default'], url);
+ ctx.reflect.exports.default.set(this.exports);
+ ESMLoader.moduleMap.set(`${url}`,
+ new ModuleJob(ESMLoader, ctx.module));
+ } else {
+ ESMLoader.moduleMap.get(`${url}`).moduleProvider.finish(
+ Module._cache[filename]);
+ }
+ }
};
@@ -602,6 +653,11 @@ Module._extensions['.node'] = function(module, filename) {
return process.dlopen(module, path._makeLong(filename));
};
+if (experimentalModules) {
+ Module._extensions['.mjs'] = function(module, filename) {
+ throw new errors.Error('ERR_REQUIRE_ESM', filename);
+ };
+}
// bootstrap main module.
Module.runMain = function() {
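The Module._load and Module._extensions changes above gate the new loader behind the config binding: with --experimental-modules, a main entry that resolves to an .mjs file (or fails CommonJS resolution) is handed to ESMLoader.import(), while require() of an .mjs file throws ERR_REQUIRE_ESM. A hypothetical entry point to make the intended flow concrete:

```js
// entry.mjs (hypothetical); run with:
//   node --experimental-modules entry.mjs
// The main module bypasses the CommonJS pipeline and is imported through
// ESMLoader.import() as wired up in Module._load above. Built-ins are
// exposed through the native-module facade, so only a default import works.
import fs from 'fs';

console.log(fs.readFileSync('package.json', 'utf8').length);

// In contrast, from a CommonJS file (still under --experimental-modules):
//   require('./entry.mjs');
// throws ERR_REQUIRE_ESM, per the '.mjs' extension handler added above.
```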
diff --git a/lib/net.js b/lib/net.js
index 220847ac11cbda..80fd1f82e5375c 100644
--- a/lib/net.js
+++ b/lib/net.js
@@ -871,6 +871,27 @@ function afterWrite(status, handle, req, err) {
}
+function checkBindError(err, port, handle) {
+ // EADDRINUSE may not be reported until we call listen() or connect().
+ // To complicate matters, a failed bind() followed by listen() or connect()
+ // will implicitly bind to a random port. Ergo, check that the socket is
+ // bound to the expected port before calling listen() or connect().
+ //
+ // FIXME(bnoordhuis) Doesn't work for pipe handles, they don't have a
+ // getsockname() method. Non-issue for now, the cluster module doesn't
+ // really support pipes anyway.
+ if (err === 0 && port > 0 && handle.getsockname) {
+ var out = {};
+ err = handle.getsockname(out);
+ if (err === 0 && port !== out.port) {
+ debug(`checkBindError, bound to ${out.port} instead of ${port}`);
+ err = uv.UV_EADDRINUSE;
+ }
+ }
+ return err;
+}
+
+
function internalConnect(
self, address, port, addressType, localAddress, localPort) {
// TODO return promise from Socket.prototype.connect which
@@ -881,9 +902,6 @@ function internalConnect(
var err;
if (localAddress || localPort) {
- debug('binding to localAddress: %s and localPort: %d (addressType: %d)',
- localAddress, localPort, addressType);
-
if (addressType === 4) {
localAddress = localAddress || '0.0.0.0';
err = self._handle.bind(localAddress, localPort);
@@ -894,7 +912,10 @@ function internalConnect(
self.destroy(new TypeError('Invalid addressType: ' + addressType));
return;
}
+ debug('binding to localAddress: %s and localPort: %d (addressType: %d)',
+ localAddress, localPort, addressType);
+ err = checkBindError(err, localPort, self._handle);
if (err) {
const ex = exceptionWithHostPort(err, 'bind', localAddress, localPort);
self.destroy(ex);
@@ -1383,20 +1404,7 @@ function listenInCluster(server, address, port, addressType,
cluster._getServer(server, serverQuery, listenOnMasterHandle);
function listenOnMasterHandle(err, handle) {
- // EADDRINUSE may not be reported until we call listen(). To complicate
- // matters, a failed bind() followed by listen() will implicitly bind to
- // a random port. Ergo, check that the socket is bound to the expected
- // port before calling listen().
- //
- // FIXME(bnoordhuis) Doesn't work for pipe handles, they don't have a
- // getsockname() method. Non-issue for now, the cluster module doesn't
- // really support pipes anyway.
- if (err === 0 && port > 0 && handle.getsockname) {
- var out = {};
- err = handle.getsockname(out);
- if (err === 0 && port !== out.port)
- err = uv.UV_EADDRINUSE;
- }
+ err = checkBindError(err, port, handle);
if (err) {
var ex = exceptionWithHostPort(err, 'bind', address, port);
diff --git a/lib/os.js b/lib/os.js
index 4a99cab81e3e34..078dba3fcca071 100644
--- a/lib/os.js
+++ b/lib/os.js
@@ -24,6 +24,7 @@
const pushValToArrayMax = process.binding('util').pushValToArrayMax;
const constants = process.binding('constants').os;
const deprecate = require('internal/util').deprecate;
+const getCIDRSuffix = require('internal/os').getCIDRSuffix;
const isWindows = process.platform === 'win32';
const {
@@ -121,6 +122,21 @@ function endianness() {
}
endianness[Symbol.toPrimitive] = () => kEndianness;
+function networkInterfaces() {
+ const interfaceAddresses = getInterfaceAddresses();
+
+ return Object.entries(interfaceAddresses).reduce((acc, [key, val]) => {
+ acc[key] = val.map((v) => {
+ const protocol = v.family.toLowerCase();
+ const suffix = getCIDRSuffix(v.netmask, protocol);
+ const cidr = suffix ? `${v.address}/${suffix}` : null;
+
+ return Object.assign({}, v, { cidr });
+ });
+ return acc;
+ }, {});
+}
+
module.exports = exports = {
arch,
cpus,
@@ -130,7 +146,7 @@ module.exports = exports = {
homedir: getHomeDirectory,
hostname: getHostname,
loadavg,
- networkInterfaces: getInterfaceAddresses,
+ networkInterfaces,
platform,
release: getOSRelease,
tmpdir,
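With the wrapper above, each entry returned by os.networkInterfaces() gains a cidr property combining the address with the prefix length derived from its netmask, or null when the netmask is not contiguous. A usage sketch; the interface name and addresses shown are hypothetical:

```js
const os = require('os');

console.log(os.networkInterfaces());
// Hypothetical output shape:
// {
//   eth0: [
//     {
//       address: '192.168.1.100',
//       netmask: '255.255.255.0',
//       family: 'IPv4',
//       mac: '00:00:00:00:00:00',
//       internal: false,
//       cidr: '192.168.1.100/24'   // <- new in this patch
//     }
//   ]
// }
```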
diff --git a/lib/path.js b/lib/path.js
index 3fab7f79d746ec..56a7c4f36e35ca 100644
--- a/lib/path.js
+++ b/lib/path.js
@@ -35,6 +35,7 @@ function normalizeStringWin32(path, allowAboveRoot) {
var lastSlash = -1;
var dots = 0;
var code;
+ var isAboveRoot = false;
for (var i = 0; i <= path.length; ++i) {
if (i < path.length)
code = path.charCodeAt(i);
@@ -46,7 +47,7 @@ function normalizeStringWin32(path, allowAboveRoot) {
if (lastSlash === i - 1 || dots === 1) {
// NOOP
} else if (lastSlash !== i - 1 && dots === 2) {
- if (res.length < 2 ||
+ if (res.length < 2 || !isAboveRoot ||
res.charCodeAt(res.length - 1) !== 46/*.*/ ||
res.charCodeAt(res.length - 2) !== 46/*.*/) {
if (res.length > 2) {
@@ -63,12 +64,14 @@ function normalizeStringWin32(path, allowAboveRoot) {
res = res.slice(0, j);
lastSlash = i;
dots = 0;
+ isAboveRoot = false;
continue;
}
} else if (res.length === 2 || res.length === 1) {
res = '';
lastSlash = i;
dots = 0;
+ isAboveRoot = false;
continue;
}
}
@@ -77,12 +80,14 @@ function normalizeStringWin32(path, allowAboveRoot) {
res += '\\..';
else
res = '..';
+ isAboveRoot = true;
}
} else {
if (res.length > 0)
res += '\\' + path.slice(lastSlash + 1, i);
else
res = path.slice(lastSlash + 1, i);
+ isAboveRoot = false;
}
lastSlash = i;
dots = 0;
@@ -101,6 +106,7 @@ function normalizeStringPosix(path, allowAboveRoot) {
var lastSlash = -1;
var dots = 0;
var code;
+ var isAboveRoot = false;
for (var i = 0; i <= path.length; ++i) {
if (i < path.length)
code = path.charCodeAt(i);
@@ -112,7 +118,7 @@ function normalizeStringPosix(path, allowAboveRoot) {
if (lastSlash === i - 1 || dots === 1) {
// NOOP
} else if (lastSlash !== i - 1 && dots === 2) {
- if (res.length < 2 ||
+ if (res.length < 2 || !isAboveRoot ||
res.charCodeAt(res.length - 1) !== 46/*.*/ ||
res.charCodeAt(res.length - 2) !== 46/*.*/) {
if (res.length > 2) {
@@ -129,12 +135,14 @@ function normalizeStringPosix(path, allowAboveRoot) {
res = res.slice(0, j);
lastSlash = i;
dots = 0;
+ isAboveRoot = false;
continue;
}
} else if (res.length === 2 || res.length === 1) {
res = '';
lastSlash = i;
dots = 0;
+ isAboveRoot = false;
continue;
}
}
@@ -143,12 +151,14 @@ function normalizeStringPosix(path, allowAboveRoot) {
res += '/..';
else
res = '..';
+ isAboveRoot = true;
}
} else {
if (res.length > 0)
res += '/' + path.slice(lastSlash + 1, i);
else
res = path.slice(lastSlash + 1, i);
+ isAboveRoot = false;
}
lastSlash = i;
dots = 0;
diff --git a/lib/perf_hooks.js b/lib/perf_hooks.js
new file mode 100644
index 00000000000000..4e7a0de7eb37be
--- /dev/null
+++ b/lib/perf_hooks.js
@@ -0,0 +1,553 @@
+'use strict';
+
+const {
+ PerformanceEntry,
+ mark: _mark,
+ measure: _measure,
+ milestones,
+ observerCounts,
+ setupObservers,
+ timeOrigin,
+ timerify,
+ constants
+} = process.binding('performance');
+
+const {
+ NODE_PERFORMANCE_ENTRY_TYPE_NODE,
+ NODE_PERFORMANCE_ENTRY_TYPE_MARK,
+ NODE_PERFORMANCE_ENTRY_TYPE_MEASURE,
+ NODE_PERFORMANCE_ENTRY_TYPE_GC,
+ NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION,
+
+ NODE_PERFORMANCE_MILESTONE_NODE_START,
+ NODE_PERFORMANCE_MILESTONE_V8_START,
+ NODE_PERFORMANCE_MILESTONE_LOOP_START,
+ NODE_PERFORMANCE_MILESTONE_LOOP_EXIT,
+ NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE,
+ NODE_PERFORMANCE_MILESTONE_ENVIRONMENT,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END
+} = constants;
+
+const L = require('internal/linkedlist');
+const kInspect = require('internal/util').customInspectSymbol;
+const { inherits } = require('util');
+
+const kCallback = Symbol('callback');
+const kTypes = Symbol('types');
+const kEntries = Symbol('entries');
+const kBuffer = Symbol('buffer');
+const kBuffering = Symbol('buffering');
+const kQueued = Symbol('queued');
+const kTimerified = Symbol('timerified');
+const kInsertEntry = Symbol('insert-entry');
+const kIndexEntry = Symbol('index-entry');
+const kClearEntry = Symbol('clear-entry');
+const kGetEntries = Symbol('get-entries');
+const kIndex = Symbol('index');
+const kMarks = Symbol('marks');
+
+observerCounts[NODE_PERFORMANCE_ENTRY_TYPE_MARK] = 1;
+observerCounts[NODE_PERFORMANCE_ENTRY_TYPE_MEASURE] = 1;
+const observers = {};
+const observerableTypes = [
+ 'node',
+ 'mark',
+ 'measure',
+ 'gc',
+ 'function'
+];
+
+let errors;
+function lazyErrors() {
+ if (errors === undefined)
+ errors = require('internal/errors');
+ return errors;
+}
+
+function now() {
+ const hr = process.hrtime();
+ return hr[0] * 1000 + hr[1] / 1e6;
+}
+
+class PerformanceNodeTiming {
+ constructor() {}
+
+ get name() {
+ return 'node';
+ }
+
+ get entryType() {
+ return 'node';
+ }
+
+ get startTime() {
+ return timeOrigin;
+ }
+
+ get duration() {
+ return now() - timeOrigin;
+ }
+
+ get nodeStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_NODE_START];
+ }
+
+ get v8Start() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_V8_START];
+ }
+
+ get environment() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_ENVIRONMENT];
+ }
+
+ get loopStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_LOOP_START];
+ }
+
+ get loopExit() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_LOOP_EXIT];
+ }
+
+ get bootstrapComplete() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE];
+ }
+
+ get thirdPartyMainStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START];
+ }
+
+ get thirdPartyMainEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END];
+ }
+
+ get clusterSetupStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START];
+ }
+
+ get clusterSetupEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END];
+ }
+
+ get moduleLoadStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START];
+ }
+
+ get moduleLoadEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END];
+ }
+
+ get preloadModuleLoadStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START];
+ }
+
+ get preloadModuleLoadEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END];
+ }
+
+ [kInspect]() {
+ return {
+ name: 'node',
+ entryType: 'node',
+ startTime: this.startTime,
+ duration: this.duration,
+ nodeStart: this.nodeStart,
+ v8Start: this.v8Start,
+ bootstrapComplete: this.bootstrapComplete,
+ environment: this.environment,
+ loopStart: this.loopStart,
+ loopExit: this.loopExit,
+ thirdPartyMainStart: this.thirdPartyMainStart,
+ thirdPartyMainEnd: this.thirdPartyMainEnd,
+ clusterSetupStart: this.clusterSetupStart,
+ clusterSetupEnd: this.clusterSetupEnd,
+ moduleLoadStart: this.moduleLoadStart,
+ moduleLoadEnd: this.moduleLoadEnd,
+ preloadModuleLoadStart: this.preloadModuleLoadStart,
+ preloadModuleLoadEnd: this.preloadModuleLoadEnd
+ };
+ }
+}
+// Use inherits() instead of the extends keyword because we want
+// PerformanceEntry in the prototype chain but do not want to invoke the
+// PerformanceEntry constructor for this.
+inherits(PerformanceNodeTiming, PerformanceEntry);
+
+const nodeTiming = new PerformanceNodeTiming();
+
+// Maintains a list of entries as a linked list stored in insertion order.
+class PerformanceObserverEntryList {
+ constructor() {
+ Object.defineProperty(this, kEntries, {
+ writable: true,
+ enumerable: false,
+ value: {}
+ });
+ L.init(this[kEntries]);
+ }
+
+ [kInsertEntry](entry) {
+ const item = { entry };
+ L.append(this[kEntries], item);
+ this[kIndexEntry](item);
+ }
+
+ [kIndexEntry](entry) {
+ // Default implementation does nothing
+ }
+
+ [kGetEntries](name, type) {
+ const ret = [];
+ const list = this[kEntries];
+ if (!L.isEmpty(list)) {
+ let item = L.peek(list);
+ while (item && item !== list) {
+ const entry = item.entry;
+ if ((name && entry.name !== name) ||
+ (type && entry.entryType !== type)) {
+ item = item._idlePrev;
+ continue;
+ }
+ sortedInsert(ret, entry);
+ item = item._idlePrev;
+ }
+ }
+ return ret;
+ }
+
+ // While the items are stored in insertion order, getEntries() is
+ // required to return items sorted by startTime.
+ getEntries() {
+ return this[kGetEntries]();
+ }
+
+ getEntriesByType(type) {
+ return this[kGetEntries](undefined, `${type}`);
+ }
+
+ getEntriesByName(name, type) {
+ return this[kGetEntries](`${name}`, type !== undefined ? `${type}` : type);
+ }
+}
+
+class PerformanceObserver {
+ constructor(callback) {
+ if (typeof callback !== 'function') {
+ const errors = lazyErrors();
+ throw new errors.TypeError('ERR_INVALID_CALLBACK');
+ }
+ Object.defineProperties(this, {
+ [kTypes]: {
+ enumerable: false,
+ writable: true,
+ value: {}
+ },
+ [kCallback]: {
+ enumerable: false,
+ writable: true,
+ value: callback
+ },
+ [kBuffer]: {
+ enumerable: false,
+ writable: true,
+ value: new PerformanceObserverEntryList()
+ },
+ [kBuffering]: {
+ enumerable: false,
+ writable: true,
+ value: false
+ },
+ [kQueued]: {
+ enumerable: false,
+ writable: true,
+ value: false
+ }
+ });
+ }
+
+ disconnect() {
+ const types = this[kTypes];
+ const keys = Object.keys(types);
+ for (var n = 0; n < keys.length; n++) {
+ const item = types[keys[n]];
+ if (item) {
+ L.remove(item);
+ observerCounts[keys[n]]--;
+ }
+ }
+ this[kTypes] = {};
+ }
+
+ observe(options) {
+ const errors = lazyErrors();
+ if (typeof options !== 'object' || options == null) {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'options', 'Object');
+ }
+ if (!Array.isArray(options.entryTypes)) {
+ throw new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'entryTypes', options);
+ }
+ const entryTypes = options.entryTypes.filter(filterTypes).map(mapTypes);
+ if (entryTypes.length === 0) {
+ throw new errors.Error('ERR_VALID_PERFORMANCE_ENTRY_TYPE');
+ }
+ this.disconnect();
+ this[kBuffer][kEntries] = [];
+ L.init(this[kBuffer][kEntries]);
+ this[kBuffering] = Boolean(options.buffered);
+ for (var n = 0; n < entryTypes.length; n++) {
+ const entryType = entryTypes[n];
+ const list = getObserversList(entryType);
+ const item = { obs: this };
+ this[kTypes][entryType] = item;
+ L.append(list, item);
+ observerCounts[entryType]++;
+ }
+ }
+}
+
+class Performance extends PerformanceObserverEntryList {
+ constructor() {
+ super();
+ this[kIndex] = {
+ [kMarks]: new Set()
+ };
+ this[kInsertEntry](nodeTiming);
+ }
+
+ [kIndexEntry](item) {
+ const index = this[kIndex];
+ const type = item.entry.entryType;
+ let items = index[type];
+ if (!items) {
+ items = index[type] = {};
+ L.init(items);
+ }
+ const entry = item.entry;
+ L.append(items, { entry, item });
+ }
+
+ [kClearEntry](type, name) {
+ const index = this[kIndex];
+ const items = index[type];
+ if (!items) return;
+ let item = L.peek(items);
+ while (item && item !== items) {
+ const entry = item.entry;
+ const next = item._idlePrev;
+ if (name !== undefined) {
+ if (entry.name === `${name}`) {
+ L.remove(item); // remove from the index
+ L.remove(item.item); // remove from the master
+ }
+ } else {
+ L.remove(item); // remove from the index
+ L.remove(item.item); // remove from the master
+ }
+ item = next;
+ }
+ }
+
+ get nodeTiming() {
+ return nodeTiming;
+ }
+
+ get timeOrigin() {
+ return timeOrigin;
+ }
+
+ now() {
+ return now();
+ }
+
+ mark(name) {
+ name = `${name}`;
+ _mark(name);
+ this[kIndex][kMarks].add(name);
+ }
+
+ measure(name, startMark, endMark) {
+ name = `${name}`;
+ endMark = `${endMark}`;
+ startMark = startMark !== undefined ? `${startMark}` : '';
+ const marks = this[kIndex][kMarks];
+ if (!marks.has(endMark) && !(endMark in nodeTiming)) {
+ const errors = lazyErrors();
+ throw new errors.Error('ERR_INVALID_PERFORMANCE_MARK', endMark);
+ }
+ _measure(name, startMark, endMark);
+ }
+
+ clearMarks(name) {
+ name = name !== undefined ? `${name}` : name;
+ this[kClearEntry]('mark', name);
+ if (name !== undefined)
+ this[kIndex][kMarks].delete(name);
+ else
+ this[kIndex][kMarks].clear();
+ }
+
+ clearMeasures(name) {
+ this[kClearEntry]('measure', name);
+ }
+
+ clearGC() {
+ this[kClearEntry]('gc');
+ }
+
+ clearFunctions(name) {
+ this[kClearEntry]('function', name);
+ }
+
+ timerify(fn) {
+ if (typeof fn !== 'function') {
+ const errors = lazyErrors();
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'fn', 'Function');
+ }
+ if (fn[kTimerified])
+ return fn[kTimerified];
+ const ret = timerify(fn, fn.length);
+ Object.defineProperty(fn, kTimerified, {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: ret
+ });
+ Object.defineProperties(ret, {
+ [kTimerified]: {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: ret
+ },
+ name: {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: `timerified ${fn.name}`
+ }
+ });
+ return ret;
+ }
+
+ [kInspect]() {
+ return {
+ timeOrigin,
+ nodeTiming
+ };
+ }
+}
+
+const performance = new Performance();
+
+function getObserversList(type) {
+ let list = observers[type];
+ if (list === undefined) {
+ list = observers[type] = {};
+ L.init(list);
+ }
+ return list;
+}
+
+function doNotify() {
+ this[kQueued] = false;
+ this[kCallback](this[kBuffer], this);
+ this[kBuffer][kEntries] = [];
+ L.init(this[kBuffer][kEntries]);
+}
+
+// Set up the callback used to receive PerformanceObserver notifications
+function observersCallback(entry) {
+ const type = mapTypes(entry.entryType);
+ performance[kInsertEntry](entry);
+ const list = getObserversList(type);
+
+ let current = L.peek(list);
+
+ while (current && current.obs) {
+ const observer = current.obs;
+ // First, add the item to the observers buffer
+ const buffer = observer[kBuffer];
+ buffer[kInsertEntry](entry);
+ // Second, check to see if we're buffering
+ if (observer[kBuffering]) {
+ // If we are, schedule a setImmediate call if one hasn't already
+ if (!observer[kQueued]) {
+ observer[kQueued] = true;
+ // Use setImmediate instead of nextTick to give more time
+ // for multiple entries to collect.
+ setImmediate(doNotify.bind(observer));
+ }
+ } else {
+ // If not buffering, notify immediately
+ doNotify.call(observer);
+ }
+ current = current._idlePrev;
+ }
+}
+setupObservers(observersCallback);
+
+function filterTypes(i) {
+ return observerableTypes.indexOf(`${i}`) >= 0;
+}
+
+function mapTypes(i) {
+ switch (i) {
+ case 'node': return NODE_PERFORMANCE_ENTRY_TYPE_NODE;
+ case 'mark': return NODE_PERFORMANCE_ENTRY_TYPE_MARK;
+ case 'measure': return NODE_PERFORMANCE_ENTRY_TYPE_MEASURE;
+ case 'gc': return NODE_PERFORMANCE_ENTRY_TYPE_GC;
+ case 'function': return NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION;
+ }
+}
+
+// The specification requires that PerformanceEntry instances are sorted
+// according to startTime. Unfortunately, they are not necessarily created
+// in that same order, and can be reported to the JS layer in any order,
+// which means we need to keep the list sorted as we insert.
+function getInsertLocation(list, entryStartTime) {
+ let start = 0;
+ let end = list.length;
+ while (start < end) {
+ const pivot = (end + start) >>> 1;
+ if (list[pivot].startTime === entryStartTime)
+ return pivot;
+ if (list[pivot].startTime < entryStartTime)
+ start = pivot + 1;
+ else
+ end = pivot;
+ }
+ return start;
+}
+
+function sortedInsert(list, entry) {
+ const entryStartTime = entry.startTime;
+ if (list.length === 0 ||
+ (list[list.length - 1].startTime < entryStartTime)) {
+ list.push(entry);
+ return;
+ }
+ if (list[0] && (list[0].startTime > entryStartTime)) {
+ list.unshift(entry);
+ return;
+ }
+ const location = getInsertLocation(list, entryStartTime);
+ list.splice(location, 0, entry);
+}
+
+module.exports = {
+ performance,
+ PerformanceObserver
+};
+
+Object.defineProperty(module.exports, 'constants', {
+ configurable: false,
+ enumerable: true,
+ value: constants
+});
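Taken together, the new module exposes a performance object and PerformanceObserver, mirroring a subset of the W3C Performance Timeline. A minimal usage sketch based only on the API surface defined above (mark(), measure(), observe(), getEntriesByType()):

```js
const { performance, PerformanceObserver } = require('perf_hooks');

// Observe 'measure' entries as they are recorded.
const obs = new PerformanceObserver((list, observer) => {
  for (const entry of list.getEntriesByType('measure')) {
    console.log(`${entry.name}: ${entry.duration.toFixed(2)}ms`);
  }
  observer.disconnect();
});
obs.observe({ entryTypes: ['measure'] });

performance.mark('A');
setTimeout(() => {
  performance.mark('B');
  // Creates a 'measure' entry spanning mark A to mark B.
  performance.measure('A to B', 'A', 'B');
}, 50);
```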
diff --git a/lib/repl.js b/lib/repl.js
index a29e0af66e5a9d..2a5077f390b54d 100644
--- a/lib/repl.js
+++ b/lib/repl.js
@@ -1239,13 +1239,16 @@ function defineDefaultCommands(repl) {
try {
var stats = fs.statSync(file);
if (stats && stats.isFile()) {
+ this.editorMode = true;
+ REPLServer.super_.prototype.setPrompt.call(this, '');
var data = fs.readFileSync(file, 'utf8');
var lines = data.split('\n');
- this.displayPrompt();
for (var n = 0; n < lines.length; n++) {
if (lines[n])
this.write(`${lines[n]}\n`);
}
+ this.turnOffEditorMode();
+ this.write('\n');
} else {
this.outputStream.write('Failed to load:' + file +
' is not a valid file\n');
diff --git a/lib/timers.js b/lib/timers.js
index 38d83f8c17bd8f..917db9bdc5acea 100644
--- a/lib/timers.js
+++ b/lib/timers.js
@@ -739,7 +739,7 @@ function tryOnImmediate(immediate, oldTail) {
var threw = true;
emitBefore(immediate[async_id_symbol], immediate[trigger_id_symbol]);
try {
- // make the actual call outside the try/catch to allow it to be optimized
+ // make the actual call outside the try/finally to allow it to be optimized
runCallback(immediate);
threw = false;
} finally {
diff --git a/lib/url.js b/lib/url.js
index ab82cc2abbd8e8..d754fea3b25359 100644
--- a/lib/url.js
+++ b/lib/url.js
@@ -358,9 +358,7 @@ Url.prototype.parse = function parse(url, parseQueryString, slashesDenoteHost) {
// First, make 100% sure that any "autoEscape" chars get
// escaped, even if encodeURIComponent doesn't think they
// need to be.
- const result = autoEscapeStr(rest);
- if (result !== undefined)
- rest = result;
+ rest = autoEscapeStr(rest);
}
var questionIdx = -1;
@@ -441,8 +439,7 @@ function validateHostname(self, rest, hostname) {
// Automatically escape all delimiters and unwise characters from RFC 2396.
// Also escape single quotes in case of an XSS attack.
-// Return undefined if the string doesn't need escaping,
-// otherwise return the escaped string.
+// Return the escaped string.
function autoEscapeStr(rest) {
var escaped = '';
var lastEscapedPos = 0;
@@ -538,12 +535,13 @@ function autoEscapeStr(rest) {
}
}
if (lastEscapedPos === 0) // Nothing has been escaped.
- return;
+ return rest;
+
// There are ordinary characters at the end.
if (lastEscapedPos < rest.length)
- return escaped + rest.slice(lastEscapedPos);
- else // The last character is escaped.
- return escaped;
+ escaped += rest.slice(lastEscapedPos);
+
+ return escaped;
}
// format a parsed object into a url string
diff --git a/lib/util.js b/lib/util.js
index 979b37bcdb4f9a..a2c8a6436f837a 100644
--- a/lib/util.js
+++ b/lib/util.js
@@ -23,6 +23,7 @@
const errors = require('internal/errors');
const { TextDecoder, TextEncoder } = require('internal/encoding');
+const { isBuffer } = require('buffer').Buffer;
const { errname } = process.binding('uv');
@@ -110,51 +111,35 @@ function format(f) {
++i;
continue;
}
+ if (lastPos < i)
+ str += f.slice(lastPos, i);
switch (f.charCodeAt(i + 1)) {
case 100: // 'd'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += Number(arguments[a++]);
break;
case 105: // 'i'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += parseInt(arguments[a++]);
break;
case 102: // 'f'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += parseFloat(arguments[a++]);
break;
case 106: // 'j'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += tryStringify(arguments[a++]);
break;
case 115: // 's'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += String(arguments[a++]);
break;
case 79: // 'O'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += inspect(arguments[a++]);
break;
case 111: // 'o'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += inspect(arguments[a++],
{ showHidden: true, depth: 4, showProxy: true });
break;
case 37: // '%'
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += '%';
break;
default: // any other character is not a correct placeholder
- if (lastPos < i)
- str += f.slice(lastPos, i);
str += '%';
lastPos = i = i + 1;
continue;
@@ -612,10 +597,13 @@ function formatValue(ctx, value, recurseTimes) {
}
}
- ctx.seen.push(value);
-
- var output = formatter(ctx, value, recurseTimes, visibleKeys, keys);
+ // TODO(addaleax): Make `seen` a Set to avoid linear-time lookup.
+ if (ctx.seen.includes(value)) {
+ return ctx.stylize('[Circular]', 'special');
+ }
+ ctx.seen.push(value);
+ const output = formatter(ctx, value, recurseTimes, visibleKeys, keys);
ctx.seen.pop();
return reduceToSingleString(output, base, braces, ctx.breakLength);
@@ -679,11 +667,12 @@ function formatObject(ctx, value, recurseTimes, visibleKeys, keys) {
function formatArray(ctx, value, recurseTimes, visibleKeys, keys) {
+ const maxLength = Math.min(Math.max(0, ctx.maxArrayLength), value.length);
var output = [];
let visibleLength = 0;
let index = 0;
for (const elem of keys) {
- if (visibleLength === ctx.maxArrayLength)
+ if (visibleLength === maxLength)
break;
// Symbols might have been added to the keys
if (typeof elem !== 'string')
@@ -698,7 +687,7 @@ function formatArray(ctx, value, recurseTimes, visibleKeys, keys) {
const message = `<${emptyItems} empty item${ending}>`;
output.push(ctx.stylize(message, 'undefined'));
index = i;
- if (++visibleLength === ctx.maxArrayLength)
+ if (++visibleLength === maxLength)
break;
}
output.push(formatProperty(ctx, value, recurseTimes, visibleKeys,
@@ -706,7 +695,7 @@ function formatArray(ctx, value, recurseTimes, visibleKeys, keys) {
visibleLength++;
index++;
}
- if (index < value.length && visibleLength !== ctx.maxArrayLength) {
+ if (index < value.length && visibleLength !== maxLength) {
const len = value.length - index;
const ending = len > 1 ? 's' : '';
const message = `<${len} empty item${ending}>`;
@@ -835,21 +824,17 @@ function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) {
}
}
if (!str) {
- if (ctx.seen.indexOf(desc.value) < 0) {
- if (recurseTimes === null) {
- str = formatValue(ctx, desc.value, null);
+ if (recurseTimes === null) {
+ str = formatValue(ctx, desc.value, null);
+ } else {
+ str = formatValue(ctx, desc.value, recurseTimes - 1);
+ }
+ if (str.indexOf('\n') > -1) {
+ if (array) {
+ str = str.replace(/\n/g, '\n ');
} else {
- str = formatValue(ctx, desc.value, recurseTimes - 1);
+ str = str.replace(/^|\n/g, '\n ');
}
- if (str.indexOf('\n') > -1) {
- if (array) {
- str = str.replace(/\n/g, '\n ');
- } else {
- str = str.replace(/^|\n/g, '\n ');
- }
- }
- } else {
- str = ctx.stylize('[Circular]', 'special');
}
}
if (name === undefined) {
@@ -1134,6 +1119,7 @@ module.exports = exports = {
inspect,
isArray: Array.isArray,
isBoolean,
+ isBuffer,
isNull,
isNullOrUndefined,
isNumber,
@@ -1165,18 +1151,3 @@ module.exports = exports = {
'util.puts is deprecated. Use console.log instead.',
'DEP0027')
};
-
-// Avoid a circular dependency
-var isBuffer;
-Object.defineProperty(exports, 'isBuffer', {
- configurable: true,
- enumerable: true,
- get() {
- if (!isBuffer)
- isBuffer = require('buffer').Buffer.isBuffer;
- return isBuffer;
- },
- set(val) {
- isBuffer = val;
- }
-});
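
The util.js hunks above only consolidate internal bookkeeping (the shared `lastPos < i` slice, the circular-reference check moved into `formatValue()`); the public API keeps its documented behavior. A small illustrative sketch of that observable behavior, using only documented `util` APIs (the output strings in the comments are indicative):

```js
// Illustrative only: observable behavior behind the util.js changes above.
const util = require('util');

console.log(util.format('%d of %s', 3, 'three'));  // '3 of three'
console.log(util.format('100%% done'));            // '100% done'
console.log(util.format('%o', { a: 1 }));          // inspect with showHidden: true, depth: 4

const obj = {};
obj.self = obj;                                    // create a cycle
console.log(util.inspect(obj));                    // '{ self: [Circular] }'
```
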
diff --git a/node.gyp b/node.gyp
index 81f549f8b63f73..79d9e0a68dcedf 100644
--- a/node.gyp
+++ b/node.gyp
@@ -50,6 +50,7 @@
'lib/net.js',
'lib/os.js',
'lib/path.js',
+ 'lib/perf_hooks.js',
'lib/process.js',
'lib/punycode.js',
'lib/querystring.js',
@@ -88,9 +89,18 @@
'lib/internal/freelist.js',
'lib/internal/fs.js',
'lib/internal/http.js',
+ 'lib/internal/inspector_async_hook.js',
'lib/internal/linkedlist.js',
+ 'lib/internal/loader/Loader.js',
+ 'lib/internal/loader/ModuleMap.js',
+ 'lib/internal/loader/ModuleJob.js',
+ 'lib/internal/loader/ModuleWrap.js',
+ 'lib/internal/loader/resolveRequestUrl.js',
+ 'lib/internal/loader/search.js',
+ 'lib/internal/safe_globals.js',
'lib/internal/net.js',
'lib/internal/module.js',
+ 'lib/internal/os.js',
'lib/internal/process/next_tick.js',
'lib/internal/process/promises.js',
'lib/internal/process/stdio.js',
@@ -174,6 +184,7 @@
'src/fs_event_wrap.cc',
'src/handle_wrap.cc',
'src/js_stream.cc',
+ 'src/module_wrap.cc',
'src/node.cc',
'src/node_api.cc',
'src/node_api.h',
@@ -184,12 +195,12 @@
'src/node_contextify.cc',
'src/node_debug_options.cc',
'src/node_file.cc',
- 'src/node_http2_core.cc',
'src/node_http2.cc',
'src/node_http_parser.cc',
'src/node_main.cc',
'src/node_os.cc',
- 'src/node_revert.cc',
+ 'src/node_platform.cc',
+ 'src/node_perf.cc',
'src/node_serdes.cc',
'src/node_url.cc',
'src/node_util.cc',
@@ -227,6 +238,7 @@
'src/env-inl.h',
'src/handle_wrap.h',
'src/js_stream.h',
+ 'src/module_wrap.h',
'src/node.h',
'src/node_http2_core.h',
'src/node_http2_core-inl.h',
@@ -237,6 +249,9 @@
'src/node_internals.h',
'src/node_javascript.h',
'src/node_mutex.h',
+ 'src/node_platform.h',
+ 'src/node_perf.h',
+ 'src/node_perf_common.h',
'src/node_root_certs.h',
'src/node_version.h',
'src/node_watchdog.h',
@@ -257,7 +272,6 @@
'src/tracing/node_trace_buffer.h',
'src/tracing/node_trace_writer.h',
'src/tracing/trace_event.h'
- 'src/tree.h',
'src/util.h',
'src/util-inl.h',
'deps/http_parser/http_parser.h',
@@ -631,30 +645,11 @@
'<(SHARED_INTERMEDIATE_DIR)', # for node_natives.h
],
- 'libraries': [
- '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)async-wrap.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)',
- '<(OBJ_PATH)<(OBJ_SEPARATOR)node_revert.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)',
- '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)',
- ],
-
'defines': [ 'NODE_WANT_INTERNALS=1' ],
'sources': [
+ 'src/node_platform.cc',
+ 'src/node_platform.h',
'test/cctest/test_base64.cc',
'test/cctest/test_environment.cc',
'test/cctest/test_util.cc',
@@ -666,6 +661,28 @@
],
'conditions': [
+ ['node_target_type!="static_library"', {
+ 'libraries': [
+ '<(OBJ_GEN_PATH)<(OBJ_SEPARATOR)node_javascript.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_debug_options.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)async-wrap.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)env.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)string_search.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)stream_base.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_constants.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)agent.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_buffer.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)node_trace_writer.<(OBJ_SUFFIX)',
+ '<(OBJ_TRACING_PATH)<(OBJ_SEPARATOR)trace_event.<(OBJ_SUFFIX)',
+ ],
+ }],
['v8_enable_inspector==1', {
'sources': [
'test/cctest/test_inspector_socket.cc',
diff --git a/src/async-wrap.cc b/src/async-wrap.cc
index b6588a20ad4071..468ac975247829 100644
--- a/src/async-wrap.cc
+++ b/src/async-wrap.cc
@@ -54,6 +54,7 @@ using v8::String;
using v8::Symbol;
using v8::TryCatch;
using v8::Uint32Array;
+using v8::Undefined;
using v8::Value;
using AsyncHooks = node::Environment::AsyncHooks;
@@ -164,6 +165,7 @@ static void DestroyIdsCb(uv_timer_t* handle) {
if (ret.IsEmpty()) {
ClearFatalExceptionHandlers(env);
FatalException(env->isolate(), try_catch);
+ UNREACHABLE();
}
}
} while (!env->destroy_ids_list()->empty());
@@ -217,69 +219,43 @@ bool DomainExit(Environment* env, v8::Local<Object> object) {
}
-static bool PreCallbackExecution(AsyncWrap* wrap, bool run_domain_cbs) {
- if (wrap->env()->using_domains() && run_domain_cbs) {
- bool is_disposed = DomainEnter(wrap->env(), wrap->object());
- if (is_disposed)
- return false;
- }
-
- return AsyncWrap::EmitBefore(wrap->env(), wrap->get_id());
-}
-
-
-bool AsyncWrap::EmitBefore(Environment* env, double async_id) {
+void AsyncWrap::EmitBefore(Environment* env, double async_id) {
AsyncHooks* async_hooks = env->async_hooks();
- if (async_hooks->fields()[AsyncHooks::kBefore] > 0) {
- Local<Value> uid = Number::New(env->isolate(), async_id);
- Local<Function> fn = env->async_hooks_before_function();
- TryCatch try_catch(env->isolate());
- MaybeLocal<Value> ar = fn->Call(
- env->context(), Undefined(env->isolate()), 1, &uid);
- if (ar.IsEmpty()) {
- ClearFatalExceptionHandlers(env);
- FatalException(env->isolate(), try_catch);
- return false;
- }
- }
-
- return true;
-}
-
-
-static bool PostCallbackExecution(AsyncWrap* wrap, bool run_domain_cbs) {
- if (!AsyncWrap::EmitAfter(wrap->env(), wrap->get_id()))
- return false;
+ if (async_hooks->fields()[AsyncHooks::kBefore] == 0)
+ return;
- if (wrap->env()->using_domains() && run_domain_cbs) {
- bool is_disposed = DomainExit(wrap->env(), wrap->object());
- if (is_disposed)
- return false;
+ Local<Value> uid = Number::New(env->isolate(), async_id);
+ Local<Function> fn = env->async_hooks_before_function();
+ TryCatch try_catch(env->isolate());
+ MaybeLocal<Value> ar = fn->Call(
+ env->context(), Undefined(env->isolate()), 1, &uid);
+ if (ar.IsEmpty()) {
+ ClearFatalExceptionHandlers(env);
+ FatalException(env->isolate(), try_catch);
+ UNREACHABLE();
}
-
- return true;
}
-bool AsyncWrap::EmitAfter(Environment* env, double async_id) {
+
+void AsyncWrap::EmitAfter(Environment* env, double async_id) {
AsyncHooks* async_hooks = env->async_hooks();
- // If the callback failed then the after() hooks will be called at the end
- // of _fatalException().
- if (async_hooks->fields()[AsyncHooks::kAfter] > 0) {
- Local<Value> uid = Number::New(env->isolate(), async_id);
- Local<Function> fn = env->async_hooks_after_function();
- TryCatch try_catch(env->isolate());
- MaybeLocal<Value> ar = fn->Call(
- env->context(), Undefined(env->isolate()), 1, &uid);
- if (ar.IsEmpty()) {
- ClearFatalExceptionHandlers(env);
- FatalException(env->isolate(), try_catch);
- return false;
- }
- }
+ if (async_hooks->fields()[AsyncHooks::kAfter] == 0)
+ return;
- return true;
+ // If the user's callback failed then the after() hooks will be called at the
+ // end of _fatalException().
+ Local<Value> uid = Number::New(env->isolate(), async_id);
+ Local<Function> fn = env->async_hooks_after_function();
+ TryCatch try_catch(env->isolate());
+ MaybeLocal<Value> ar = fn->Call(
+ env->context(), Undefined(env->isolate()), 1, &uid);
+ if (ar.IsEmpty()) {
+ ClearFatalExceptionHandlers(env);
+ FatalException(env->isolate(), try_catch);
+ UNREACHABLE();
+ }
}
class PromiseWrap : public AsyncWrap {
@@ -372,9 +348,9 @@ static void PromiseHook(PromiseHookType type, Local<Promise> promise,
CHECK_NE(wrap, nullptr);
if (type == PromiseHookType::kBefore) {
env->async_hooks()->push_ids(wrap->get_id(), wrap->get_trigger_id());
- PreCallbackExecution(wrap, false);
+ AsyncWrap::EmitBefore(wrap->env(), wrap->get_id());
} else if (type == PromiseHookType::kAfter) {
- PostCallbackExecution(wrap, false);
+ AsyncWrap::EmitAfter(wrap->env(), wrap->get_id());
if (env->current_async_id() == wrap->get_id()) {
// This condition might not be true if async_hooks was enabled during
// the promise callback execution.
@@ -474,6 +450,13 @@ void AsyncWrap::PopAsyncIds(const FunctionCallbackInfo<Value>& args) {
}
+void AsyncWrap::AsyncIdStackSize(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ args.GetReturnValue().Set(
+ static_cast<double>(env->async_hooks()->stack_size()));
+}
+
+
void AsyncWrap::ClearIdStack(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);
env->async_hooks()->clear_id_stack();
@@ -492,6 +475,13 @@ void AsyncWrap::QueueDestroyId(const FunctionCallbackInfo<Value>& args) {
PushBackDestroyId(Environment::GetCurrent(args), args[0]->NumberValue());
}
+void AsyncWrap::AddWrapMethods(Environment* env,
+ Local<FunctionTemplate> constructor,
+ int flag) {
+ env->SetProtoMethod(constructor, "getAsyncId", AsyncWrap::GetAsyncId);
+ if (flag & kFlagHasReset)
+ env->SetProtoMethod(constructor, "asyncReset", AsyncWrap::AsyncReset);
+}
void AsyncWrap::Initialize(Local<Object> target,
Local<Value> unused,
@@ -503,6 +493,7 @@ void AsyncWrap::Initialize(Local<Object> target,
env->SetMethod(target, "setupHooks", SetupHooks);
env->SetMethod(target, "pushAsyncIds", PushAsyncIds);
env->SetMethod(target, "popAsyncIds", PopAsyncIds);
+ env->SetMethod(target, "asyncIdStackSize", AsyncIdStackSize);
env->SetMethod(target, "clearIdStack", ClearIdStack);
env->SetMethod(target, "addIdToDestroyList", QueueDestroyId);
env->SetMethod(target, "enablePromiseHook", EnablePromiseHook);
@@ -687,19 +678,28 @@ MaybeLocal<Value> AsyncWrap::MakeCallback(const Local<Function> cb,
get_id(),
get_trigger_id());
- if (!PreCallbackExecution(this, true)) {
- return MaybeLocal<Value>();
+ // Return v8::Undefined() because returning an empty handle will cause
+ // ToLocalChecked() to abort.
+ if (env()->using_domains() && DomainEnter(env(), object())) {
+ return Undefined(env()->isolate());
}
- // Finally... Get to running the user's callback.
+ // No need to check a return value because the application will exit if an
+ // exception occurs.
+ AsyncWrap::EmitBefore(env(), get_id());
+
MaybeLocal<Value> ret = cb->Call(env()->context(), object(), argc, argv);
if (ret.IsEmpty()) {
return ret;
}
- if (!PostCallbackExecution(this, true)) {
- return Local<Value>();
+ AsyncWrap::EmitAfter(env(), get_id());
+
+ // Return v8::Undefined() because returning an empty handle will cause
+ // ToLocalChecked() to abort.
+ if (env()->using_domains() && DomainExit(env(), object())) {
+ return Undefined(env()->isolate());
}
exec_scope.Dispose();
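
The rewritten `EmitBefore()`/`EmitAfter()` no longer report failure to the caller; a throwing hook now ends the process via `FatalException()` followed by `UNREACHABLE()`. On the JavaScript side these emitters drive the public `async_hooks` `before`/`after` callbacks; a minimal sketch of that surface (the new `asyncIdStackSize` binding itself is internal and not shown):

```js
// Sketch of the JS-visible side of EmitBefore()/EmitAfter() via the public API.
const async_hooks = require('async_hooks');
const fs = require('fs');

const hook = async_hooks.createHook({
  init(asyncId, type, triggerAsyncId) {
    fs.writeSync(1, `init ${type} ${asyncId} (trigger ${triggerAsyncId})\n`);
  },
  before(asyncId) { fs.writeSync(1, `before ${asyncId}\n`); },  // backed by EmitBefore()
  after(asyncId) { fs.writeSync(1, `after ${asyncId}\n`); },    // backed by EmitAfter()
  destroy(asyncId) { fs.writeSync(1, `destroy ${asyncId}\n`); }
});
hook.enable();

setTimeout(() => fs.writeSync(1, 'timer fired\n'), 10);
```
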
diff --git a/src/async-wrap.h b/src/async-wrap.h
index ffdf8358747f12..a4c42d01b73d36 100644
--- a/src/async-wrap.h
+++ b/src/async-wrap.h
@@ -88,6 +88,11 @@ class AsyncWrap : public BaseObject {
PROVIDERS_LENGTH,
};
+ enum Flags {
+ kFlagNone = 0x0,
+ kFlagHasReset = 0x1
+ };
+
AsyncWrap(Environment* env,
v8::Local<v8::Object> object,
ProviderType provider,
@@ -95,6 +100,10 @@ class AsyncWrap : public BaseObject {
virtual ~AsyncWrap();
+ static void AddWrapMethods(Environment* env,
+ v8::Local<v8::FunctionTemplate> constructor,
+ int flags = kFlagNone);
+
static void Initialize(v8::Local<v8::Object> target,
v8::Local<v8::Value> unused,
v8::Local<v8::Context> context);
@@ -102,6 +111,7 @@ class AsyncWrap : public BaseObject {
static void GetAsyncId(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PushAsyncIds(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PopAsyncIds(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void AsyncIdStackSize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ClearIdStack(const v8::FunctionCallbackInfo<v8::Value>& args);
static void AsyncReset(const v8::FunctionCallbackInfo<v8::Value>& args);
static void QueueDestroyId(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -112,8 +122,8 @@ class AsyncWrap : public BaseObject {
double id,
double trigger_id);
- static bool EmitBefore(Environment* env, double id);
- static bool EmitAfter(Environment* env, double id);
+ static void EmitBefore(Environment* env, double id);
+ static void EmitAfter(Environment* env, double id);
inline ProviderType provider_type() const;
@@ -147,6 +157,7 @@ class AsyncWrap : public BaseObject {
void LoadAsyncWrapperInfo(Environment* env);
+// The return value indicates whether the domain was disposed.
bool DomainEnter(Environment* env, v8::Local<v8::Object> object);
bool DomainExit(Environment* env, v8::Local<v8::Object> object);
diff --git a/src/cares_wrap.cc b/src/cares_wrap.cc
index 7869651a456c0a..e800e0f2fee260 100644
--- a/src/cares_wrap.cc
+++ b/src/cares_wrap.cc
@@ -28,7 +28,6 @@
#include "node.h"
#include "req-wrap.h"
#include "req-wrap-inl.h"
-#include "tree.h"
#include "util.h"
#include "util-inl.h"
#include "uv.h"
@@ -37,6 +36,7 @@
#include
#include
#include
+#include <unordered_set>
#if defined(__ANDROID__) || \
defined(__MINGW32__) || \
@@ -122,10 +122,22 @@ struct node_ares_task {
ChannelWrap* channel;
ares_socket_t sock;
uv_poll_t poll_watcher;
- RB_ENTRY(node_ares_task) node;
};
-RB_HEAD(node_ares_task_list, node_ares_task);
+struct TaskHash {
+ size_t operator()(node_ares_task* a) const {
+ return std::hash<ares_socket_t>()(a->sock);
+ }
+};
+
+struct TaskEqual {
+ inline bool operator()(node_ares_task* a, node_ares_task* b) const {
+ return a->sock == b->sock;
+ }
+};
+
+using node_ares_task_list =
+ std::unordered_set<node_ares_task*, TaskHash, TaskEqual>;
class ChannelWrap : public AsyncWrap {
public:
@@ -169,8 +181,6 @@ ChannelWrap::ChannelWrap(Environment* env,
query_last_ok_(true),
is_servers_default_(true),
library_inited_(false) {
- RB_INIT(&task_list_);
-
MakeWeak(this);
Setup();
@@ -186,15 +196,23 @@ void ChannelWrap::New(const FunctionCallbackInfo<Value>& args) {
class GetAddrInfoReqWrap : public ReqWrap<uv_getaddrinfo_t> {
public:
- GetAddrInfoReqWrap(Environment* env, Local<Object> req_wrap_obj);
+ GetAddrInfoReqWrap(Environment* env,
+ Local<Object> req_wrap_obj,
+ bool verbatim);
~GetAddrInfoReqWrap();
size_t self_size() const override { return sizeof(*this); }
+ bool verbatim() const { return verbatim_; }
+
+ private:
+ const bool verbatim_;
};
GetAddrInfoReqWrap::GetAddrInfoReqWrap(Environment* env,
- Local<Object> req_wrap_obj)
- : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_GETADDRINFOREQWRAP) {
+ Local<Object> req_wrap_obj,
+ bool verbatim)
+ : ReqWrap(env, req_wrap_obj, AsyncWrap::PROVIDER_GETADDRINFOREQWRAP)
+ , verbatim_(verbatim) {
Wrap(req_wrap_obj, this);
}
@@ -222,25 +240,12 @@ GetNameInfoReqWrap::~GetNameInfoReqWrap() {
}
-int cmp_ares_tasks(const node_ares_task* a, const node_ares_task* b) {
- if (a->sock < b->sock)
- return -1;
- if (a->sock > b->sock)
- return 1;
- return 0;
-}
-
-
-RB_GENERATE_STATIC(node_ares_task_list, node_ares_task, node, cmp_ares_tasks)
-
-
-
/* This is called once per second by loop->timer. It is used to constantly */
/* call back into c-ares for possibly processing timeouts. */
void ChannelWrap::AresTimeout(uv_timer_t* handle) {
ChannelWrap* channel = static_cast<ChannelWrap*>(handle->data);
CHECK_EQ(channel->timer_handle(), handle);
- CHECK_EQ(false, RB_EMPTY(channel->task_list()));
+ CHECK_EQ(false, channel->task_list()->empty());
ares_process_fd(channel->cares_channel(), ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}
@@ -306,7 +311,9 @@ void ares_sockstate_cb(void* data,
node_ares_task lookup_task;
lookup_task.sock = sock;
- task = RB_FIND(node_ares_task_list, channel->task_list(), &lookup_task);
+ auto it = channel->task_list()->find(&lookup_task);
+
+ task = (it == channel->task_list()->end()) ? nullptr : *it;
if (read || write) {
if (!task) {
@@ -315,7 +322,7 @@ void ares_sockstate_cb(void* data,
/* If this is the first socket then start the timer. */
uv_timer_t* timer_handle = channel->timer_handle();
if (!uv_is_active(reinterpret_cast<uv_handle_t*>(timer_handle))) {
- CHECK(RB_EMPTY(channel->task_list()));
+ CHECK(channel->task_list()->empty());
uv_timer_start(timer_handle, ChannelWrap::AresTimeout, 1000, 1000);
}
@@ -327,7 +334,7 @@ void ares_sockstate_cb(void* data,
return;
}
- RB_INSERT(node_ares_task_list, channel->task_list(), task);
+ channel->task_list()->insert(task);
}
/* This should never fail. If it fails anyway, the query will eventually */
@@ -343,11 +350,11 @@ void ares_sockstate_cb(void* data,
CHECK(task &&
"When an ares socket is closed we should have a handle for it");
- RB_REMOVE(node_ares_task_list, channel->task_list(), task);
+ channel->task_list()->erase(it);
uv_close(reinterpret_cast<uv_handle_t*>(&task->poll_watcher),
ares_poll_close_cb);
- if (RB_EMPTY(channel->task_list())) {
+ if (channel->task_list()->empty()) {
uv_timer_stop(channel->timer_handle());
}
}
@@ -1812,70 +1819,38 @@ void AfterGetAddrInfo(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
};
if (status == 0) {
- // Success
- struct addrinfo *address;
int n = 0;
-
- // Create the response array.
Local<Array> results = Array::New(env->isolate());
- char ip[INET6_ADDRSTRLEN];
- const char *addr;
-
- // Iterate over the IPv4 responses again this time creating javascript
- // strings for each IP and filling the results array.
- address = res;
- while (address) {
- CHECK_EQ(address->ai_socktype, SOCK_STREAM);
-
- // Ignore random ai_family types.
- if (address->ai_family == AF_INET) {
- // Juggle pointers
- addr = reinterpret_cast<char*>(&(reinterpret_cast<struct sockaddr_in*>(
- address->ai_addr)->sin_addr));
- int err = uv_inet_ntop(address->ai_family,
- addr,
- ip,
- INET6_ADDRSTRLEN);
- if (err)
+ auto add = [&] (bool want_ipv4, bool want_ipv6) {
+ for (auto p = res; p != nullptr; p = p->ai_next) {
+ CHECK_EQ(p->ai_socktype, SOCK_STREAM);
+
+ const char* addr;
+ if (want_ipv4 && p->ai_family == AF_INET) {
+ addr = reinterpret_cast<const char*>(
+ &(reinterpret_cast<struct sockaddr_in*>(p->ai_addr)->sin_addr));
+ } else if (want_ipv6 && p->ai_family == AF_INET6) {
+ addr = reinterpret_cast<const char*>(
+ &(reinterpret_cast<struct sockaddr_in6*>(p->ai_addr)->sin6_addr));
+ } else {
continue;
+ }
- // Create JavaScript string
- Local<String> s = OneByteString(env->isolate(), ip);
- results->Set(n, s);
- n++;
- }
-
- // Increment
- address = address->ai_next;
- }
-
- // Iterate over the IPv6 responses putting them in the array.
- address = res;
- while (address) {
- CHECK_EQ(address->ai_socktype, SOCK_STREAM);
-
- // Ignore random ai_family types.
- if (address->ai_family == AF_INET6) {
- // Juggle pointers
- addr = reinterpret_cast<char*>(&(reinterpret_cast<struct sockaddr_in6*>(
- address->ai_addr)->sin6_addr));
- int err = uv_inet_ntop(address->ai_family,
- addr,
- ip,
- INET6_ADDRSTRLEN);
- if (err)
+ char ip[INET6_ADDRSTRLEN];
+ if (uv_inet_ntop(p->ai_family, addr, ip, sizeof(ip)))
continue;
- // Create JavaScript string
Local<String> s = OneByteString(env->isolate(), ip);
results->Set(n, s);
n++;
}
+ };
- // Increment
- address = address->ai_next;
- }
+ const bool verbatim = req_wrap->verbatim();
+ add(true, verbatim);
+ if (verbatim == false)
+ add(false, true);
// No responses were found to return
if (n == 0) {
@@ -1966,6 +1941,7 @@ void GetAddrInfo(const FunctionCallbackInfo<Value>& args) {
CHECK(args[0]->IsObject());
CHECK(args[1]->IsString());
CHECK(args[2]->IsInt32());
+ CHECK(args[4]->IsBoolean());
Local<Object> req_wrap_obj = args[0].As<Object>();
node::Utf8Value hostname(env->isolate(), args[1]);
@@ -1986,7 +1962,7 @@ void GetAddrInfo(const FunctionCallbackInfo& args) {
CHECK(0 && "bad address family");
}
- GetAddrInfoReqWrap* req_wrap = new GetAddrInfoReqWrap(env, req_wrap_obj);
+ auto req_wrap = new GetAddrInfoReqWrap(env, req_wrap_obj, args[4]->IsTrue());
struct addrinfo hints;
memset(&hints, 0, sizeof(struct addrinfo));
@@ -2190,34 +2166,34 @@ void Initialize(Local<Object> target,
Local<FunctionTemplate> aiw =
FunctionTemplate::New(env->isolate(), is_construct_call_callback);
aiw->InstanceTemplate()->SetInternalFieldCount(1);
- env->SetProtoMethod(aiw, "getAsyncId", AsyncWrap::GetAsyncId);
- aiw->SetClassName(
- FIXED_ONE_BYTE_STRING(env->isolate(), "GetAddrInfoReqWrap"));
- target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "GetAddrInfoReqWrap"),
- aiw->GetFunction());
+ AsyncWrap::AddWrapMethods(env, aiw);
+ Local<String> addrInfoWrapString =
+ FIXED_ONE_BYTE_STRING(env->isolate(), "GetAddrInfoReqWrap");
+ aiw->SetClassName(addrInfoWrapString);
+ target->Set(addrInfoWrapString, aiw->GetFunction());
Local<FunctionTemplate> niw =
FunctionTemplate::New(env->isolate(), is_construct_call_callback);
niw->InstanceTemplate()->SetInternalFieldCount(1);
- env->SetProtoMethod(niw, "getAsyncId", AsyncWrap::GetAsyncId);
- niw->SetClassName(
- FIXED_ONE_BYTE_STRING(env->isolate(), "GetNameInfoReqWrap"));
- target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "GetNameInfoReqWrap"),
- niw->GetFunction());
+ AsyncWrap::AddWrapMethods(env, niw);
+ Local<String> nameInfoWrapString =
+ FIXED_ONE_BYTE_STRING(env->isolate(), "GetNameInfoReqWrap");
+ niw->SetClassName(nameInfoWrapString);
+ target->Set(nameInfoWrapString, niw->GetFunction());
Local<FunctionTemplate> qrw =
FunctionTemplate::New(env->isolate(), is_construct_call_callback);
qrw->InstanceTemplate()->SetInternalFieldCount(1);
- env->SetProtoMethod(qrw, "getAsyncId", AsyncWrap::GetAsyncId);
- qrw->SetClassName(
- FIXED_ONE_BYTE_STRING(env->isolate(), "QueryReqWrap"));
- target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "QueryReqWrap"),
- qrw->GetFunction());
+ AsyncWrap::AddWrapMethods(env, qrw);
+ Local<String> queryWrapString =
+ FIXED_ONE_BYTE_STRING(env->isolate(), "QueryReqWrap");
+ qrw->SetClassName(queryWrapString);
+ target->Set(queryWrapString, qrw->GetFunction());
Local<FunctionTemplate> channel_wrap =
env->NewFunctionTemplate(ChannelWrap::New);
channel_wrap->InstanceTemplate()->SetInternalFieldCount(1);
- env->SetProtoMethod(channel_wrap, "getAsyncId", AsyncWrap::GetAsyncId);
+ AsyncWrap::AddWrapMethods(env, channel_wrap);
env->SetProtoMethod(channel_wrap, "queryAny", Query);
env->SetProtoMethod(channel_wrap, "queryA", Query);
@@ -2236,10 +2212,10 @@ void Initialize(Local<Object> target,
env->SetProtoMethod(channel_wrap, "setServers", SetServers);
env->SetProtoMethod(channel_wrap, "cancel", Cancel);
- channel_wrap->SetClassName(
- FIXED_ONE_BYTE_STRING(env->isolate(), "ChannelWrap"));
- target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "ChannelWrap"),
- channel_wrap->GetFunction());
+ Local<String> channelWrapString =
+ FIXED_ONE_BYTE_STRING(env->isolate(), "ChannelWrap");
+ channel_wrap->SetClassName(channelWrapString);
+ target->Set(channelWrapString, channel_wrap->GetFunction());
}
} // anonymous namespace
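
Two separate changes land in cares_wrap.cc: the RB-tree task list becomes a `std::unordered_set`, and `GetAddrInfoReqWrap` learns a `verbatim` flag that controls whether IPv4 results are reordered ahead of IPv6 results. That flag corresponds to the `verbatim` option of `dns.lookup()`; a hedged sketch of the JS-side usage (the option name comes from the matching lib/dns.js change, which is outside this excerpt):

```js
// Sketch of the JS API feeding the new `verbatim` argument to GetAddrInfo().
const dns = require('dns');

// Default: IPv4 addresses are returned before IPv6 addresses.
dns.lookup('localhost', { all: true }, (err, addresses) => {
  if (err) throw err;
  console.log('reordered:', addresses);
});

// verbatim: true keeps the order in which the resolver returned the results.
dns.lookup('localhost', { all: true, verbatim: true }, (err, addresses) => {
  if (err) throw err;
  console.log('verbatim:', addresses);
});
```
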
diff --git a/src/env-inl.h b/src/env-inl.h
index 888dd807c11e15..d31b3602e97e79 100644
--- a/src/env-inl.h
+++ b/src/env-inl.h
@@ -127,8 +127,8 @@ inline v8::Local<v8::String> Environment::AsyncHooks::provider_string(int idx) {
inline void Environment::AsyncHooks::push_ids(double async_id,
double trigger_id) {
- CHECK_GE(async_id, 0);
- CHECK_GE(trigger_id, 0);
+ CHECK_GE(async_id, -1);
+ CHECK_GE(trigger_id, -1);
ids_stack_.push({ uid_fields_[kCurrentAsyncId],
uid_fields_[kCurrentTriggerId] });
@@ -166,6 +166,10 @@ inline bool Environment::AsyncHooks::pop_ids(double async_id) {
return !ids_stack_.empty();
}
+inline size_t Environment::AsyncHooks::stack_size() {
+ return ids_stack_.size();
+}
+
inline void Environment::AsyncHooks::clear_id_stack() {
while (!ids_stack_.empty())
ids_stack_.pop();
@@ -176,13 +180,14 @@ inline void Environment::AsyncHooks::clear_id_stack() {
inline Environment::AsyncHooks::InitScope::InitScope(
Environment* env, double init_trigger_id)
: env_(env),
- uid_fields_(env->async_hooks()->uid_fields()) {
- env->async_hooks()->push_ids(uid_fields_[AsyncHooks::kCurrentAsyncId],
+ uid_fields_ref_(env->async_hooks()->uid_fields()) {
+ CHECK_GE(init_trigger_id, -1);
+ env->async_hooks()->push_ids(uid_fields_ref_[AsyncHooks::kCurrentAsyncId],
init_trigger_id);
}
inline Environment::AsyncHooks::InitScope::~InitScope() {
- env_->async_hooks()->pop_ids(uid_fields_[AsyncHooks::kCurrentAsyncId]);
+ env_->async_hooks()->pop_ids(uid_fields_ref_[AsyncHooks::kCurrentAsyncId]);
}
inline Environment::AsyncHooks::ExecScope::ExecScope(
@@ -190,6 +195,8 @@ inline Environment::AsyncHooks::ExecScope::ExecScope(
: env_(env),
async_id_(async_id),
disposed_(false) {
+ CHECK_GE(async_id, -1);
+ CHECK_GE(trigger_id, -1);
env->async_hooks()->push_ids(async_id, trigger_id);
}
@@ -315,6 +322,16 @@ inline Environment::Environment(IsolateData* isolate_data,
AssignToContext(context);
destroy_ids_list_.reserve(512);
+ performance_state_ = Calloc<performance::performance_state>(1);
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT] =
+ PERFORMANCE_NOW();
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_NODE_START] =
+ performance::performance_node_start;
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_V8_START] =
+ performance::performance_v8_start;
}
inline Environment::~Environment() {
@@ -330,6 +347,7 @@ inline Environment::~Environment() {
delete[] heap_space_statistics_buffer_;
delete[] http_parser_buffer_;
free(http2_state_buffer_);
+ free(performance_state_);
}
inline v8::Isolate* Environment::isolate() const {
@@ -497,6 +515,41 @@ inline void Environment::set_fs_stats_field_array(double* fields) {
fs_stats_field_array_ = fields;
}
+inline performance::performance_state* Environment::performance_state() {
+ return performance_state_;
+}
+
+inline std::map<std::string, uint64_t>* Environment::performance_marks() {
+ return &performance_marks_;
+}
+
+inline Environment* Environment::from_performance_check_handle(
+ uv_check_t* handle) {
+ return ContainerOf(&Environment::performance_check_handle_, handle);
+}
+
+inline Environment* Environment::from_performance_idle_handle(
+ uv_idle_t* handle) {
+ return ContainerOf(&Environment::performance_idle_handle_, handle);
+}
+
+inline Environment* Environment::from_performance_prepare_handle(
+ uv_prepare_t* handle) {
+ return ContainerOf(&Environment::performance_prepare_handle_, handle);
+}
+
+inline uv_check_t* Environment::performance_check_handle() {
+ return &performance_check_handle_;
+}
+
+inline uv_idle_t* Environment::performance_idle_handle() {
+ return &performance_idle_handle_;
+}
+
+inline uv_prepare_t* Environment::performance_prepare_handle() {
+ return &performance_prepare_handle_;
+}
+
inline IsolateData* Environment::isolate_data() const {
return isolate_data_;
}
diff --git a/src/env.h b/src/env.h
index 55340947fdd565..9f9a3a23e314d2 100644
--- a/src/env.h
+++ b/src/env.h
@@ -30,16 +30,19 @@
#endif
#include "handle_wrap.h"
#include "req-wrap.h"
-#include "tree.h"
#include "util.h"
#include "uv.h"
#include "v8.h"
#include "node.h"
#include
+#include
#include
#include
#include
+#include
+
+struct nghttp2_rcbuf;
namespace node {
@@ -102,6 +105,7 @@ struct http2_state;
V(callback_string, "callback") \
V(change_string, "change") \
V(channel_string, "channel") \
+ V(constants_string, "constants") \
V(oncertcb_string, "oncertcb") \
V(onclose_string, "_onclose") \
V(code_string, "code") \
@@ -291,16 +295,16 @@ struct http2_state;
V(async_hooks_before_function, v8::Function) \
V(async_hooks_after_function, v8::Function) \
V(binding_cache_object, v8::Object) \
- V(buffer_constructor_function, v8::Function) \
V(buffer_prototype_object, v8::Object) \
V(context, v8::Context) \
V(domain_array, v8::Array) \
V(domains_stack_array, v8::Array) \
V(inspector_console_api_object, v8::Object) \
- V(jsstream_constructor_template, v8::FunctionTemplate) \
V(module_load_list_array, v8::Array) \
V(pbkdf2_constructor_template, v8::ObjectTemplate) \
V(pipe_constructor_template, v8::FunctionTemplate) \
+ V(performance_entry_callback, v8::Function) \
+ V(performance_entry_template, v8::Function) \
V(process_object, v8::Object) \
V(promise_reject_function, v8::Function) \
V(promise_wrap_template, v8::ObjectTemplate) \
@@ -312,7 +316,6 @@ struct http2_state;
V(tcp_constructor_template, v8::FunctionTemplate) \
V(tick_callback_function, v8::Function) \
V(tls_wrap_constructor_function, v8::Function) \
- V(tls_wrap_constructor_template, v8::FunctionTemplate) \
V(tty_constructor_template, v8::FunctionTemplate) \
V(udp_constructor_function, v8::Function) \
V(url_constructor_function, v8::Function) \
@@ -342,6 +345,8 @@ class IsolateData {
#undef VS
#undef VP
+ std::unordered_map<nghttp2_rcbuf*, v8::Eternal<v8::String>> http2_static_strs;
+
private:
#define VP(PropertyName, StringValue) V(v8::Private, PropertyName)
#define VS(PropertyName, StringValue) V(v8::String, PropertyName)
@@ -392,6 +397,7 @@ class Environment {
inline void push_ids(double async_id, double trigger_id);
inline bool pop_ids(double async_id);
+ inline size_t stack_size();
inline void clear_id_stack(); // Used in fatal exceptions.
// Used to propagate the trigger_id to the constructor of any newly created
@@ -405,7 +411,7 @@ class Environment {
private:
Environment* env_;
- double* uid_fields_;
+ double* uid_fields_ref_;
DISALLOW_COPY_AND_ASSIGN(InitScope);
};
@@ -438,12 +444,10 @@ class Environment {
v8::Isolate* isolate_;
// Stores the ids of the current execution context stack.
std::stack ids_stack_;
- // Used to communicate state between C++ and JS cheaply. Is placed in an
- // Uint32Array() and attached to the async_wrap object.
+ // Attached to a Uint32Array that tracks the number of active hooks for
+ // each type.
uint32_t fields_[kFieldsCount];
- // Used to communicate ids between C++ and JS cheaply. Placed in a
- // Float64Array and attached to the async_wrap object. Using a double only
- // gives us 2^53-1 unique ids, but that should be sufficient.
+ // Attached to a Float64Array that tracks the state of async resources.
double uid_fields_[kUidFieldsCount];
DISALLOW_COPY_AND_ASSIGN(AsyncHooks);
@@ -609,6 +613,17 @@ class Environment {
inline double* fs_stats_field_array() const;
inline void set_fs_stats_field_array(double* fields);
+ inline performance::performance_state* performance_state();
+ inline std::map<std::string, uint64_t>* performance_marks();
+
+ static inline Environment* from_performance_check_handle(uv_check_t* handle);
+ static inline Environment* from_performance_idle_handle(uv_idle_t* handle);
+ static inline Environment* from_performance_prepare_handle(
+ uv_prepare_t* handle);
+ inline uv_check_t* performance_check_handle();
+ inline uv_idle_t* performance_idle_handle();
+ inline uv_prepare_t* performance_prepare_handle();
+
inline void ThrowError(const char* errmsg);
inline void ThrowTypeError(const char* errmsg);
inline void ThrowRangeError(const char* errmsg);
@@ -688,6 +703,10 @@ class Environment {
uv_timer_t destroy_ids_timer_handle_;
uv_prepare_t idle_prepare_handle_;
uv_check_t idle_check_handle_;
+ uv_prepare_t performance_prepare_handle_;
+ uv_check_t performance_check_handle_;
+ uv_idle_t performance_idle_handle_;
+
AsyncHooks async_hooks_;
DomainFlag domain_flag_;
TickInfo tick_info_;
@@ -698,6 +717,10 @@ class Environment {
bool abort_on_uncaught_exception_;
size_t makecallback_cntr_;
std::vector<double> destroy_ids_list_;
+
+ performance::performance_state* performance_state_ = nullptr;
+ std::map<std::string, uint64_t> performance_marks_;
+
#if HAVE_INSPECTOR
inspector::Agent inspector_agent_;
#endif
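
The milestone fields added to `Environment` above feed the new perf_hooks module. A hedged JavaScript sketch of how they are expected to surface; the property names mirror the milestone constants (NODE_START, V8_START, ENVIRONMENT) and are assumptions that may differ in detail:

```js
// Hedged sketch: reading the Node.js startup milestones recorded in Environment.
const { performance } = require('perf_hooks');

const timing = performance.nodeTiming;       // PerformanceNodeTiming entry
console.log('nodeStart:', timing.nodeStart);       // NODE_PERFORMANCE_MILESTONE_NODE_START
console.log('v8Start:', timing.v8Start);           // NODE_PERFORMANCE_MILESTONE_V8_START
console.log('environment:', timing.environment);   // NODE_PERFORMANCE_MILESTONE_ENVIRONMENT
```
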
diff --git a/src/fs_event_wrap.cc b/src/fs_event_wrap.cc
index 228c3a344edf3c..8ec8dd6dcfbd76 100644
--- a/src/fs_event_wrap.cc
+++ b/src/fs_event_wrap.cc
@@ -94,7 +94,7 @@ void FSEventWrap::Initialize(Local<Object> target,
t->InstanceTemplate()->SetInternalFieldCount(1);
t->SetClassName(fsevent_string);
- env->SetProtoMethod(t, "getAsyncId", AsyncWrap::GetAsyncId);
+ AsyncWrap::AddWrapMethods(env, t);
env->SetProtoMethod(t, "start", Start);
env->SetProtoMethod(t, "close", Close);
diff --git a/src/inspector_agent.cc b/src/inspector_agent.cc
index 0f9caa32f2a22e..828006ecf2fbb4 100644
--- a/src/inspector_agent.cc
+++ b/src/inspector_agent.cc
@@ -23,20 +23,27 @@
namespace node {
namespace inspector {
namespace {
+
+using node::FatalError;
+
using v8::Array;
+using v8::Boolean;
using v8::Context;
using v8::External;
using v8::Function;
using v8::FunctionCallbackInfo;
using v8::HandleScope;
+using v8::Integer;
using v8::Isolate;
using v8::Local;
using v8::Maybe;
using v8::MaybeLocal;
+using v8::Name;
using v8::NewStringType;
using v8::Object;
using v8::Persistent;
using v8::String;
+using v8::Undefined;
using v8::Value;
using v8_inspector::StringBuffer;
@@ -495,11 +502,9 @@ class InspectorTimerHandle {
class NodeInspectorClient : public V8InspectorClient {
public:
- NodeInspectorClient(node::Environment* env,
- v8::Platform* platform) : env_(env),
- platform_(platform),
- terminated_(false),
- running_nested_loop_(false) {
+ NodeInspectorClient(node::Environment* env, node::NodePlatform* platform)
+ : env_(env), platform_(platform), terminated_(false),
+ running_nested_loop_(false) {
client_ = V8Inspector::create(env->isolate(), this);
contextCreated(env->context(), "Node.js Main Context");
}
@@ -511,8 +516,7 @@ class NodeInspectorClient : public V8InspectorClient {
terminated_ = false;
running_nested_loop_ = true;
while (!terminated_ && channel_->waitForFrontendMessage()) {
- while (v8::platform::PumpMessageLoop(platform_, env_->isolate()))
- {}
+ platform_->FlushForegroundTasksInternal();
}
terminated_ = false;
running_nested_loop_ = false;
@@ -616,9 +620,31 @@ class NodeInspectorClient : public V8InspectorClient {
timers_.erase(data);
}
+ // Async stack traces instrumentation.
+ void AsyncTaskScheduled(const StringView& task_name, void* task,
+ bool recurring) {
+ client_->asyncTaskScheduled(task_name, task, recurring);
+ }
+
+ void AsyncTaskCanceled(void* task) {
+ client_->asyncTaskCanceled(task);
+ }
+
+ void AsyncTaskStarted(void* task) {
+ client_->asyncTaskStarted(task);
+ }
+
+ void AsyncTaskFinished(void* task) {
+ client_->asyncTaskFinished(task);
+ }
+
+ void AllAsyncTasksCanceled() {
+ client_->allAsyncTasksCanceled();
+ }
+
private:
node::Environment* env_;
- v8::Platform* platform_;
+ node::NodePlatform* platform_;
bool terminated_;
bool running_nested_loop_;
std::unique_ptr<V8Inspector> client_;
@@ -637,7 +663,7 @@ Agent::Agent(Environment* env) : parent_env_(env),
Agent::~Agent() {
}
-bool Agent::Start(v8::Platform* platform, const char* path,
+bool Agent::Start(node::NodePlatform* platform, const char* path,
const DebugOptions& options) {
path_ = path == nullptr ? "" : path;
debug_options_ = options;
@@ -676,9 +702,21 @@ bool Agent::StartIoThread(bool wait_for_connect) {
}
v8::Isolate* isolate = parent_env_->isolate();
+ HandleScope handle_scope(isolate);
+
+ // Enable tracking of async stack traces
+ if (!enable_async_hook_function_.IsEmpty()) {
+ Local<Function> enable_fn = enable_async_hook_function_.Get(isolate);
+ auto context = parent_env_->context();
+ auto result = enable_fn->Call(context, Undefined(isolate), 0, nullptr);
+ if (result.IsEmpty()) {
+ FatalError(
+ "node::InspectorAgent::StartIoThread",
+ "Cannot enable Inspector's AsyncHook, please report this.");
+ }
+ }
// Send message to enable debug in workers
- HandleScope handle_scope(isolate);
Local<Object> process_object = parent_env_->process_object();
Local<Value> emit_fn =
process_object->Get(FIXED_ONE_BYTE_STRING(isolate, "emit"));
@@ -717,10 +755,40 @@ void Agent::Stop() {
if (io_ != nullptr) {
io_->Stop();
io_.reset();
+ enabled_ = false;
+ }
+
+ v8::Isolate* isolate = parent_env_->isolate();
+ HandleScope handle_scope(isolate);
+
+ // Disable tracking of async stack traces
+ if (!disable_async_hook_function_.IsEmpty()) {
+ Local<Function> disable_fn = disable_async_hook_function_.Get(isolate);
+ auto result = disable_fn->Call(parent_env_->context(),
+ Undefined(parent_env_->isolate()), 0, nullptr);
+ if (result.IsEmpty()) {
+ FatalError(
+ "node::InspectorAgent::Stop",
+ "Cannot disable Inspector's AsyncHook, please report this.");
+ }
}
}
void Agent::Connect(InspectorSessionDelegate* delegate) {
+ if (!enabled_) {
+ // Enable tracking of async stack traces
+ v8::Isolate* isolate = parent_env_->isolate();
+ HandleScope handle_scope(isolate);
+ auto context = parent_env_->context();
+ Local<Function> enable_fn = enable_async_hook_function_.Get(isolate);
+ auto result = enable_fn->Call(context, Undefined(isolate), 0, nullptr);
+ if (result.IsEmpty()) {
+ FatalError(
+ "node::InspectorAgent::Connect",
+ "Cannot enable Inspector's AsyncHook, please report this.");
+ }
+ }
+
enabled_ = true;
client_->connectFrontend(delegate);
}
@@ -773,6 +841,34 @@ void Agent::PauseOnNextJavascriptStatement(const std::string& reason) {
channel->schedulePauseOnNextStatement(reason);
}
+void Agent::RegisterAsyncHook(Isolate* isolate,
+ v8::Local<v8::Function> enable_function,
+ v8::Local<v8::Function> disable_function) {
+ enable_async_hook_function_.Reset(isolate, enable_function);
+ disable_async_hook_function_.Reset(isolate, disable_function);
+}
+
+void Agent::AsyncTaskScheduled(const StringView& task_name, void* task,
+ bool recurring) {
+ client_->AsyncTaskScheduled(task_name, task, recurring);
+}
+
+void Agent::AsyncTaskCanceled(void* task) {
+ client_->AsyncTaskCanceled(task);
+}
+
+void Agent::AsyncTaskStarted(void* task) {
+ client_->AsyncTaskStarted(task);
+}
+
+void Agent::AsyncTaskFinished(void* task) {
+ client_->AsyncTaskFinished(task);
+}
+
+void Agent::AllAsyncTasksCanceled() {
+ client_->AllAsyncTasksCanceled();
+}
+
void Open(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);
inspector::Agent* agent = env->inspector_agent();
@@ -810,6 +906,59 @@ void Url(const FunctionCallbackInfo<Value>& args) {
args.GetReturnValue().Set(OneByteString(env->isolate(), url.c_str()));
}
+static void* GetAsyncTask(int64_t asyncId) {
+ // The inspector assumes that when other clients use its asyncTask* API,
+ // they use real pointers, or at least something aligned like a real pointer.
+ // In general it means that our task_id should always be even.
+ //
+ // On 32bit platforms, the 64bit asyncId would get truncated when converted
+ // to a 32bit pointer. However, the JavaScript part will never enable
+ // the async_hook on 32bit platforms, therefore the truncation will never
+ // happen in practice.
+ return reinterpret_cast<void*>(asyncId << 1);
+}
+
+template <void (Agent::*asyncTaskFn)(void*)>
+static void InvokeAsyncTaskFnWithId(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ CHECK(args[0]->IsNumber());
+ int64_t task_id = args[0]->IntegerValue(env->context()).FromJust();
+ (env->inspector_agent()->*asyncTaskFn)(GetAsyncTask(task_id));
+}
+
+static void AsyncTaskScheduledWrapper(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+
+ CHECK(args[0]->IsString());
+ Local<String> task_name = args[0].As<String>();
+ String::Value task_name_value(task_name);
+ StringView task_name_view(*task_name_value, task_name_value.length());
+
+ CHECK(args[1]->IsNumber());
+ int64_t task_id = args[1]->IntegerValue(env->context()).FromJust();
+ void* task = GetAsyncTask(task_id);
+
+ CHECK(args[2]->IsBoolean());
+ bool recurring = args[2]->BooleanValue(env->context()).FromJust();
+
+ env->inspector_agent()->AsyncTaskScheduled(task_name_view, task, recurring);
+}
+
+static void RegisterAsyncHookWrapper(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+
+ CHECK(args[0]->IsFunction());
+ v8::Local<v8::Function> enable_function = args[0].As<v8::Function>();
+ CHECK(args[1]->IsFunction());
+ v8::Local<v8::Function> disable_function = args[1].As<v8::Function>();
+ env->inspector_agent()->RegisterAsyncHook(env->isolate(),
+ enable_function, disable_function);
+}
+
+static void IsEnabled(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ args.GetReturnValue().Set(env->inspector_agent()->enabled());
+}
// static
void Agent::InitInspector(Local<Object> target, Local<Value> unused,
@@ -830,6 +979,17 @@ void Agent::InitInspector(Local<Object> target, Local<Value> unused,
env->SetMethod(target, "connect", ConnectJSBindingsSession);
env->SetMethod(target, "open", Open);
env->SetMethod(target, "url", Url);
+
+ env->SetMethod(target, "asyncTaskScheduled", AsyncTaskScheduledWrapper);
+ env->SetMethod(target, "asyncTaskCanceled",
+ InvokeAsyncTaskFnWithId<&Agent::AsyncTaskCanceled>);
+ env->SetMethod(target, "asyncTaskStarted",
+ InvokeAsyncTaskFnWithId<&Agent::AsyncTaskStarted>);
+ env->SetMethod(target, "asyncTaskFinished",
+ InvokeAsyncTaskFnWithId<&Agent::AsyncTaskFinished>);
+
+ env->SetMethod(target, "registerAsyncHook", RegisterAsyncHookWrapper);
+ env->SetMethod(target, "isEnabled", IsEnabled);
}
void Agent::RequestIoThreadStart() {
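
A conceptual JavaScript sketch of how the bindings registered in `InitInspector()` are meant to be wired up. This is not the actual lib/internal/inspector_async_hook.js: only the binding method names are taken from the hunk above, the hook wiring is an assumption.

```js
// Conceptual sketch only; the real internal file may differ.
const async_hooks = require('async_hooks');
const inspector = process.binding('inspector');  // internal binding extended above

const hook = async_hooks.createHook({
  init(asyncId, type) {
    // recurring = false: the task is expected to run once and then finish.
    inspector.asyncTaskScheduled(type, asyncId, false);
  },
  before(asyncId) { inspector.asyncTaskStarted(asyncId); },
  after(asyncId) { inspector.asyncTaskFinished(asyncId); },
  destroy(asyncId) { inspector.asyncTaskCanceled(asyncId); }
});

// registerAsyncHook() hands enable/disable callbacks to the C++ Agent, which
// invokes them when a debugger frontend connects or disconnects.
inspector.registerAsyncHook(() => hook.enable(), () => hook.disable());
```
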
diff --git a/src/inspector_agent.h b/src/inspector_agent.h
index cf9a8bff8645ec..8195e001c2eb3c 100644
--- a/src/inspector_agent.h
+++ b/src/inspector_agent.h
@@ -14,19 +14,10 @@
// Forward declaration to break recursive dependency chain with src/env.h.
namespace node {
class Environment;
+class NodePlatform;
} // namespace node
-namespace v8 {
-class Context;
-template <typename V>
-class FunctionCallbackInfo;
-template <typename T>
-class Local;
-class Message;
-class Object;
-class Platform;
-class Value;
-} // namespace v8
+#include "v8.h"
namespace v8_inspector {
class StringView;
@@ -52,7 +43,7 @@ class Agent {
~Agent();
// Create client_, may create io_ if option enabled
- bool Start(v8::Platform* platform, const char* path,
+ bool Start(node::NodePlatform* platform, const char* path,
const DebugOptions& options);
// Stop and destroy io_
void Stop();
@@ -67,6 +58,18 @@ class Agent {
void FatalException(v8::Local<v8::Value> error,
v8::Local<v8::Message> message);
+ // Async stack traces instrumentation.
+ void AsyncTaskScheduled(const v8_inspector::StringView& taskName, void* task,
+ bool recurring);
+ void AsyncTaskCanceled(void* task);
+ void AsyncTaskStarted(void* task);
+ void AsyncTaskFinished(void* task);
+ void AllAsyncTasksCanceled();
+
+ void RegisterAsyncHook(v8::Isolate* isolate,
+ v8::Local<v8::Function> enable_function,
+ v8::Local<v8::Function> disable_function);
+
// These methods are called by the WS protocol and JS binding to create
// inspector sessions. The inspector responds by using the delegate to send
// messages back.
@@ -107,6 +110,9 @@ class Agent {
std::string path_;
DebugOptions debug_options_;
int next_context_number_;
+
+ v8::Persistent<v8::Function> enable_async_hook_function_;
+ v8::Persistent<v8::Function> disable_async_hook_function_;
};
} // namespace inspector
diff --git a/src/js_stream.cc b/src/js_stream.cc
index d88cc853c800cc..b62dcf3ef5b407 100644
--- a/src/js_stream.cc
+++ b/src/js_stream.cc
@@ -18,6 +18,7 @@ using v8::HandleScope;
using v8::Local;
using v8::MaybeLocal;
using v8::Object;
+using v8::String;
using v8::Value;
@@ -212,10 +213,12 @@ void JSStream::Initialize(Local<Object> target,
Environment* env = Environment::GetCurrent(context);
Local<FunctionTemplate> t = env->NewFunctionTemplate(New);
- t->SetClassName(FIXED_ONE_BYTE_STRING(env->isolate(), "JSStream"));
+ Local<String> jsStreamString =
+ FIXED_ONE_BYTE_STRING(env->isolate(), "JSStream");
+ t->SetClassName(jsStreamString);
t->InstanceTemplate()->SetInternalFieldCount(1);
- env->SetProtoMethod(t, "getAsyncId", AsyncWrap::GetAsyncId);
+ AsyncWrap::AddWrapMethods(env, t);
env->SetProtoMethod(t, "doAlloc", DoAlloc);
env->SetProtoMethod(t, "doRead", DoRead);
@@ -226,9 +229,7 @@ void JSStream::Initialize(Local<Object> target,
env->SetProtoMethod(t, "emitEOF", EmitEOF);
StreamBase::AddMethods<JSStream>(env, t, StreamBase::kFlagHasWritev);
- target->Set(FIXED_ONE_BYTE_STRING(env->isolate(), "JSStream"),
- t->GetFunction());
- env->set_jsstream_constructor_template(t);
+ target->Set(jsStreamString, t->GetFunction());
}
} // namespace node
diff --git a/src/module_wrap.cc b/src/module_wrap.cc
new file mode 100644
index 00000000000000..05bbe04ef2e605
--- /dev/null
+++ b/src/module_wrap.cc
@@ -0,0 +1,531 @@
+#include <algorithm>
+#include <limits.h> // PATH_MAX
+#include <sys/stat.h> // S_IFDIR
+#include "module_wrap.h"
+
+#include "env.h"
+#include "node_url.h"
+#include "util.h"
+#include "util-inl.h"
+
+namespace node {
+namespace loader {
+
+using node::url::URL;
+using node::url::URL_FLAGS_FAILED;
+using v8::Context;
+using v8::EscapableHandleScope;
+using v8::Function;
+using v8::FunctionCallbackInfo;
+using v8::FunctionTemplate;
+using v8::Integer;
+using v8::IntegrityLevel;
+using v8::Isolate;
+using v8::JSON;
+using v8::Local;
+using v8::MaybeLocal;
+using v8::Module;
+using v8::Object;
+using v8::Persistent;
+using v8::Promise;
+using v8::ScriptCompiler;
+using v8::ScriptOrigin;
+using v8::String;
+using v8::Value;
+
+static const char* EXTENSIONS[] = {".mjs", ".js", ".json", ".node"};
+std::map<int, std::vector<ModuleWrap*>*> ModuleWrap::module_map_;
+
+ModuleWrap::ModuleWrap(Environment* env,
+ Local<Object> object,
+ Local<Module> module,
+ Local<String> url) : BaseObject(env, object) {
+ Isolate* iso = Isolate::GetCurrent();
+ module_.Reset(iso, module);
+ url_.Reset(iso, url);
+}
+
+ModuleWrap::~ModuleWrap() {
+ Local<Module> module = module_.Get(Isolate::GetCurrent());
+ std::vector<ModuleWrap*>* same_hash = module_map_[module->GetIdentityHash()];
+ auto it = std::find(same_hash->begin(), same_hash->end(), this);
+
+ if (it != same_hash->end()) {
+ same_hash->erase(it);
+ }
+
+ module_.Reset();
+}
+
+void ModuleWrap::New(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+
+ Isolate* iso = args.GetIsolate();
+
+ if (!args.IsConstructCall()) {
+ env->ThrowError("constructor must be called using new");
+ return;
+ }
+
+ if (args.Length() != 2) {
+ env->ThrowError("constructor must have exactly 2 arguments "
+ "(string, string)");
+ return;
+ }
+
+ if (!args[0]->IsString()) {
+ env->ThrowError("first argument is not a string");
+ return;
+ }
+
+ auto source_text = args[0].As<String>();
+
+ if (!args[1]->IsString()) {
+ env->ThrowError("second argument is not a string");
+ return;
+ }
+
+ Local<String> url = args[1].As<String>();
+
+ Local<Module> mod;
+
+ // compile
+ {
+ ScriptOrigin origin(url,
+ Integer::New(iso, 0),
+ Integer::New(iso, 0),
+ False(iso),
+ Integer::New(iso, 0),
+ FIXED_ONE_BYTE_STRING(iso, ""),
+ False(iso),
+ False(iso),
+ True(iso));
+ ScriptCompiler::Source source(source_text, origin);
+ auto maybe_mod = ScriptCompiler::CompileModule(iso, &source);
+ if (maybe_mod.IsEmpty()) {
+ return;
+ }
+ mod = maybe_mod.ToLocalChecked();
+ }
+
+ auto that = args.This();
+ auto ctx = that->CreationContext();
+ auto url_str = FIXED_ONE_BYTE_STRING(iso, "url");
+
+ if (!that->Set(ctx, url_str, url).FromMaybe(false)) {
+ return;
+ }
+
+ ModuleWrap* obj =
+ new ModuleWrap(Environment::GetCurrent(ctx), that, mod, url);
+
+ if (ModuleWrap::module_map_.count(mod->GetIdentityHash()) == 0) {
+ ModuleWrap::module_map_[mod->GetIdentityHash()] =
+ new std::vector<ModuleWrap*>();
+ }
+
+ ModuleWrap::module_map_[mod->GetIdentityHash()]->push_back(obj);
+ Wrap(that, obj);
+
+ that->SetIntegrityLevel(ctx, IntegrityLevel::kFrozen);
+ args.GetReturnValue().Set(that);
+}
+
+void ModuleWrap::Link(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Isolate* iso = args.GetIsolate();
+ EscapableHandleScope handle_scope(iso);
+ if (!args[0]->IsFunction()) {
+ env->ThrowError("first argument is not a function");
+ return;
+ }
+
+ Local<Function> resolver_arg = args[0].As<Function>();
+
+ auto that = args.This();
+ ModuleWrap* obj = Unwrap<ModuleWrap>(that);
+ auto mod_context = that->CreationContext();
+ if (obj->linked_) return;
+ obj->linked_ = true;
+ Local<Module> mod(obj->module_.Get(iso));
+
+ // call the dependency resolve callbacks
+ for (int i = 0; i < mod->GetModuleRequestsLength(); i++) {
+ Local<String> specifier = mod->GetModuleRequest(i);
+ Utf8Value specifier_utf(env->isolate(), specifier);
+ std::string specifier_std(*specifier_utf, specifier_utf.length());
+
+ Local<Value> argv[] = {
+ specifier
+ };
+
+ MaybeLocal<Value> maybe_resolve_return_value =
+ resolver_arg->Call(mod_context, that, 1, argv);
+ if (maybe_resolve_return_value.IsEmpty()) {
+ return;
+ }
+ Local<Value> resolve_return_value =
+ maybe_resolve_return_value.ToLocalChecked();
+ if (!resolve_return_value->IsPromise()) {
+ env->ThrowError("linking error, expected resolver to return a promise");
+ }
+ Local<Promise> resolve_promise = resolve_return_value.As<Promise>();
+ obj->resolve_cache_[specifier_std] = new Persistent<Promise>();
+ obj->resolve_cache_[specifier_std]->Reset(iso, resolve_promise);
+ }
+
+ args.GetReturnValue().Set(handle_scope.Escape(that));
+}
+
+void ModuleWrap::Instantiate(const FunctionCallbackInfo<Value>& args) {
+ auto iso = args.GetIsolate();
+ auto that = args.This();
+ auto ctx = that->CreationContext();
+
+ ModuleWrap* obj = Unwrap<ModuleWrap>(that);
+ Local<Module> mod = obj->module_.Get(iso);
+ bool ok = mod->Instantiate(ctx, ModuleWrap::ResolveCallback);
+
+ // clear resolve cache on instantiate
+ obj->resolve_cache_.clear();
+
+ if (!ok) {
+ return;
+ }
+}
+
+void ModuleWrap::Evaluate(const FunctionCallbackInfo<Value>& args) {
+ auto iso = args.GetIsolate();
+ auto that = args.This();
+ auto ctx = that->CreationContext();
+ ModuleWrap* obj = Unwrap<ModuleWrap>(that);
+ auto result = obj->module_.Get(iso)->Evaluate(ctx);
+
+ if (result.IsEmpty()) {
+ return;
+ }
+
+ auto ret = result.ToLocalChecked();
+ args.GetReturnValue().Set(ret);
+}
+
+MaybeLocal<Module> ModuleWrap::ResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer) {
+ Environment* env = Environment::GetCurrent(context);
+ Isolate* iso = Isolate::GetCurrent();
+ if (ModuleWrap::module_map_.count(referrer->GetIdentityHash()) == 0) {
+ env->ThrowError("linking error, unknown module");
+ return MaybeLocal<Module>();
+ }
+
+ std::vector<ModuleWrap*>* possible_deps =
+ ModuleWrap::module_map_[referrer->GetIdentityHash()];
+ ModuleWrap* dependent = nullptr;
+
+ for (auto possible_dep : *possible_deps) {
+ if (possible_dep->module_ == referrer) {
+ dependent = possible_dep;
+ }
+ }
+
+ if (dependent == nullptr) {
+ env->ThrowError("linking error, null dep");
+ return MaybeLocal<Module>();
+ }
+
+ Utf8Value specifier_utf(env->isolate(), specifier);
+ std::string specifier_std(*specifier_utf, specifier_utf.length());
+
+ if (dependent->resolve_cache_.count(specifier_std) != 1) {
+ env->ThrowError("linking error, not in local cache");
+ return MaybeLocal<Module>();
+ }
+
+ Local<Promise> resolve_promise =
+ dependent->resolve_cache_[specifier_std]->Get(iso);
+
+ if (resolve_promise->State() != Promise::kFulfilled) {
+ env->ThrowError("linking error, dependency promises must be resolved on "
+ "instantiate");
+ return MaybeLocal<Module>();
+ }
+
+ auto module_object = resolve_promise->Result().As<Object>();
+ if (module_object.IsEmpty() || !module_object->IsObject()) {
+ env->ThrowError("linking error, expected a valid module object from "
+ "resolver");
+ return MaybeLocal<Module>();
+ }
+
+ ModuleWrap* mod;
+ ASSIGN_OR_RETURN_UNWRAP(&mod, module_object, MaybeLocal<Module>());
+ return mod->module_.Get(env->isolate());
+}
+
+namespace {
+
+URL __init_cwd() {
+ std::string specifier = "file://";
+#ifdef _WIN32
+ // MAX_PATH is in characters, not bytes. Make sure we have enough headroom.
+ char buf[MAX_PATH * 4];
+#else
+ char buf[PATH_MAX];
+#endif
+
+ size_t cwd_len = sizeof(buf);
+ int err = uv_cwd(buf, &cwd_len);
+ if (err) {
+ return URL("");
+ }
+ specifier += buf;
+ specifier += "/";
+ return URL(specifier);
+}
+static URL INITIAL_CWD(__init_cwd());
+inline bool is_relative_or_absolute_path(std::string specifier) {
+ auto len = specifier.length();
+ if (len <= 0) {
+ return false;
+ } else if (specifier[0] == '/') {
+ return true;
+ } else if (specifier[0] == '.') {
+ if (len == 1 || specifier[1] == '/') {
+ return true;
+ } else if (specifier[1] == '.') {
+ if (len == 2 || specifier[2] == '/') {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+struct read_result {
+ bool had_error = false;
+ std::string source;
+} read_result;
+inline const struct read_result read_file(uv_file file) {
+ struct read_result ret;
+ std::string src;
+ uv_fs_t req;
+ void* base = malloc(4096);
+ if (base == nullptr) {
+ ret.had_error = true;
+ return ret;
+ }
+ uv_buf_t buf = uv_buf_init(static_cast<char*>(base), 4096);
+ uv_fs_read(uv_default_loop(), &req, file, &buf, 1, 0, nullptr);
+ while (req.result > 0) {
+ src += std::string(static_cast<const char*>(buf.base), req.result);
+ uv_fs_read(uv_default_loop(), &req, file, &buf, 1, src.length(), nullptr);
+ }
+ ret.source = src;
+ return ret;
+}
+struct file_check {
+ bool failed = true;
+ uv_file file;
+} file_check;
+inline const struct file_check check_file(URL search,
+ bool close = false,
+ bool allow_dir = false) {
+ struct file_check ret;
+ uv_fs_t fs_req;
+ std::string path = search.ToFilePath();
+ if (path.empty()) {
+ return ret;
+ }
+ uv_fs_open(nullptr, &fs_req, path.c_str(), O_RDONLY, 0, nullptr);
+ auto fd = fs_req.result;
+ if (fd < 0) {
+ return ret;
+ }
+ if (!allow_dir) {
+ uv_fs_fstat(nullptr, &fs_req, fd, nullptr);
+ if (fs_req.statbuf.st_mode & S_IFDIR) {
+ uv_fs_close(nullptr, &fs_req, fd, nullptr);
+ return ret;
+ }
+ }
+ ret.failed = false;
+ ret.file = fd;
+ if (close) uv_fs_close(nullptr, &fs_req, fd, nullptr);
+ return ret;
+}
+URL resolve_extensions(URL search, bool check_exact = true) {
+ if (check_exact) {
+ auto check = check_file(search, true);
+ if (!check.failed) {
+ return search;
+ }
+ }
+ for (auto extension : EXTENSIONS) {
+ URL guess(search.path() + extension, &search);
+ auto check = check_file(guess, true);
+ if (!check.failed) {
+ return guess;
+ }
+ }
+ return URL("");
+}
+inline URL resolve_index(URL search) {
+ return resolve_extensions(URL("index", &search), false);
+}
+URL resolve_main(URL search) {
+ URL pkg("package.json", &search);
+ auto check = check_file(pkg);
+ if (!check.failed) {
+ auto iso = Isolate::GetCurrent();
+ auto ctx = iso->GetCurrentContext();
+ auto read = read_file(check.file);
+ uv_fs_t fs_req;
+ // Ignore the result; a failed close is not fatal here.
+ uv_fs_close(nullptr, &fs_req, check.file, nullptr);
+ if (read.had_error) return URL("");
+ std::string pkg_src = read.source;
+ Local<String> src =
+ String::NewFromUtf8(iso, pkg_src.c_str(),
+ String::kNormalString, pkg_src.length());
+ if (src.IsEmpty()) return URL("");
+ auto maybe_pkg_json = JSON::Parse(ctx, src);
+ if (maybe_pkg_json.IsEmpty()) return URL("");
+ auto pkg_json_obj = maybe_pkg_json.ToLocalChecked().As<Object>();
+ if (!pkg_json_obj->IsObject()) return URL("");
+ auto maybe_pkg_main = pkg_json_obj->Get(
+ ctx, FIXED_ONE_BYTE_STRING(iso, "main"));
+ if (maybe_pkg_main.IsEmpty()) return URL("");
+ auto pkg_main_str = maybe_pkg_main.ToLocalChecked().As<String>();
+ if (!pkg_main_str->IsString()) return URL("");
+ Utf8Value main_utf8(iso, pkg_main_str);
+ std::string main_std(*main_utf8, main_utf8.length());
+ if (!is_relative_or_absolute_path(main_std)) {
+ main_std.insert(0, "./");
+ }
+ return Resolve(main_std, &search);
+ }
+ return URL("");
+}
+URL resolve_module(std::string specifier, URL* base) {
+ URL parent(".", base);
+ URL dir("");
+ do {
+ dir = parent;
+ auto check = Resolve("./node_modules/" + specifier, &dir, true);
+ if (!(check.flags() & URL_FLAGS_FAILED)) {
+ const auto limit = specifier.find('/');
+ const auto spec_len = limit == std::string::npos ?
+ specifier.length() :
+ limit + 1;
+ std::string chroot =
+ dir.path() + "node_modules/" + specifier.substr(0, spec_len);
+ if (check.path().substr(0, chroot.length()) != chroot) {
+ return URL("");
+ }
+ return check;
+ } else {
+ // TODO(bmeck) PREVENT FALLTHROUGH
+ }
+ parent = URL("..", &dir);
+ } while (parent.path() != dir.path());
+ return URL("");
+}
+
+URL resolve_directory(URL search, bool read_pkg_json) {
+ if (read_pkg_json) {
+ auto main = resolve_main(search);
+ if (!(main.flags() & URL_FLAGS_FAILED)) return main;
+ }
+ return resolve_index(search);
+}
+
+} // anonymous namespace
+
+
+URL Resolve(std::string specifier, URL* base, bool read_pkg_json) {
+ URL pure_url(specifier);
+ if (!(pure_url.flags() & URL_FLAGS_FAILED)) {
+ return pure_url;
+ }
+ if (specifier.length() == 0) {
+ return URL("");
+ }
+ if (is_relative_or_absolute_path(specifier)) {
+ URL resolved(specifier, base);
+ auto file = resolve_extensions(resolved);
+ if (!(file.flags() & URL_FLAGS_FAILED)) return file;
+ if (specifier.back() != '/') {
+ resolved = URL(specifier + "/", base);
+ }
+ return resolve_directory(resolved, read_pkg_json);
+ } else {
+ return resolve_module(specifier, base);
+ }
+ return URL("");
+}
+
+void ModuleWrap::Resolve(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+
+ if (args.IsConstructCall()) {
+ env->ThrowError("resolve() must not be called as a constructor");
+ return;
+ }
+ if (args.Length() != 2) {
+ env->ThrowError("resolve must have exactly 2 arguments (string, string)");
+ return;
+ }
+
+ if (!args[0]->IsString()) {
+ env->ThrowError("first argument is not a string");
+ return;
+ }
+ Utf8Value specifier_utf(env->isolate(), args[0]);
+
+ if (!args[1]->IsString()) {
+ env->ThrowError("second argument is not a string");
+ return;
+ }
+ Utf8Value url_utf(env->isolate(), args[1]);
+ URL url(*url_utf, url_utf.length());
+
+ if (url.flags() & URL_FLAGS_FAILED) {
+ env->ThrowError("second argument is not a URL string");
+ return;
+ }
+
+ URL result = node::loader::Resolve(*specifier_utf, &url, true);
+ if (result.flags() & URL_FLAGS_FAILED) {
+ std::string msg = "module ";
+ msg += *specifier_utf;
+ msg += " not found";
+ env->ThrowError(msg.c_str());
+ return;
+ }
+
+ args.GetReturnValue().Set(result.ToObject(env));
+}
+
+void ModuleWrap::Initialize(Local<Object> target,
+                            Local<Value> unused,
+                            Local<Context> context) {
+ Environment* env = Environment::GetCurrent(context);
+ Isolate* isolate = env->isolate();
+
+  Local<FunctionTemplate> tpl = env->NewFunctionTemplate(New);
+ tpl->SetClassName(FIXED_ONE_BYTE_STRING(isolate, "ModuleWrap"));
+ tpl->InstanceTemplate()->SetInternalFieldCount(1);
+
+ env->SetProtoMethod(tpl, "link", Link);
+ env->SetProtoMethod(tpl, "instantiate", Instantiate);
+ env->SetProtoMethod(tpl, "evaluate", Evaluate);
+
+ target->Set(FIXED_ONE_BYTE_STRING(isolate, "ModuleWrap"), tpl->GetFunction());
+ env->SetMethod(target, "resolve", node::loader::ModuleWrap::Resolve);
+}
+
+} // namespace loader
+} // namespace node
+
+NODE_MODULE_CONTEXT_AWARE_BUILTIN(module_wrap,
+ node::loader::ModuleWrap::Initialize)
diff --git a/src/module_wrap.h b/src/module_wrap.h
new file mode 100644
index 00000000000000..c669834c6f3ce5
--- /dev/null
+++ b/src/module_wrap.h
@@ -0,0 +1,58 @@
+#ifndef SRC_MODULE_WRAP_H_
+#define SRC_MODULE_WRAP_H_
+
+#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#include <map>
+#include <string>
+#include <vector>
+#include "node_url.h"
+#include "base-object.h"
+#include "base-object-inl.h"
+
+namespace node {
+namespace loader {
+
+node::url::URL Resolve(std::string specifier, node::url::URL* base,
+ bool read_pkg_json = false);
+
+class ModuleWrap : public BaseObject {
+ public:
+ static const std::string EXTENSIONS[];
+  static void Initialize(v8::Local<v8::Object> target,
+                         v8::Local<v8::Value> unused,
+                         v8::Local<v8::Context> context);
+
+ private:
+ ModuleWrap(node::Environment* env,
+             v8::Local<v8::Object> object,
+             v8::Local<v8::Module> module,
+             v8::Local<v8::String> url);
+ ~ModuleWrap();
+
+  static void New(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void Link(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void Instantiate(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void Evaluate(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void GetUrl(v8::Local<v8::String> property,
+                     const v8::PropertyCallbackInfo<v8::Value>& info);
+  static void Resolve(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static v8::MaybeLocal<v8::Module> ResolveCallback(
+      v8::Local<v8::Context> context,
+      v8::Local<v8::String> specifier,
+      v8::Local<v8::Module> referrer);
+
+  v8::Persistent<v8::Module> module_;
+  v8::Persistent<v8::String> url_;
+  bool linked_ = false;
+  std::map<std::string, v8::Persistent<v8::Promise>*> resolve_cache_;
+
+  static std::map<int, std::vector<ModuleWrap*>*> module_map_;
+};
+
+} // namespace loader
+} // namespace node
+
+#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#endif // SRC_MODULE_WRAP_H_
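The `resolve_cache_` member maps specifiers to persistent promise handles so a module can reuse a single resolution per specifier on repeated imports. A standalone sketch of that memoization pattern, with a `std::map` of strings and a callback standing in for the per-module map of `v8::Persistent<v8::Promise>` handles:

```cpp
// Sketch of the memoization idea behind resolve_cache_: each specifier
// requested from a given module is resolved once and the result is reused.
#include <functional>
#include <iostream>
#include <map>
#include <string>

class ResolveCache {
 public:
  using Resolver = std::function<std::string(const std::string&)>;
  explicit ResolveCache(Resolver resolver) : resolver_(std::move(resolver)) {}

  const std::string& Get(const std::string& specifier) {
    auto it = cache_.find(specifier);
    if (it == cache_.end()) {
      // First request for this specifier: resolve it and remember the result.
      it = cache_.emplace(specifier, resolver_(specifier)).first;
    }
    return it->second;
  }

 private:
  Resolver resolver_;
  std::map<std::string, std::string> cache_;
};

int main() {
  int calls = 0;
  ResolveCache cache([&calls](const std::string& s) {
    ++calls;
    return "file:///node_modules/" + s + "/index.js";
  });
  cache.Get("lodash");
  cache.Get("lodash");  // served from the cache, no second resolution
  std::cout << "resolver invoked " << calls << " time(s)\n";  // prints 1
}
```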
diff --git a/src/node.cc b/src/node.cc
index 775accc0412606..5cd0ffc29db1e9 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -23,10 +23,12 @@
#include "node_buffer.h"
#include "node_constants.h"
#include "node_javascript.h"
+#include "node_platform.h"
#include "node_version.h"
#include "node_internals.h"
#include "node_revert.h"
#include "node_debug_options.h"
+#include "node_perf.h"
#if defined HAVE_PERFCTR
#include "node_counters.h"
@@ -183,6 +185,9 @@ static bool trace_enabled = false;
static std::string trace_enabled_categories; // NOLINT(runtime/string)
static bool abort_on_uncaught_exception = false;
+// Bit flag used to track security reverts (see node_revert.h)
+unsigned int reverted = 0;
+
#if defined(NODE_HAVE_I18N_SUPPORT)
// Path to ICU data (for i18n / Intl)
std::string icu_data_dir; // NOLINT(runtime/string)
@@ -220,6 +225,11 @@ bool trace_warnings = false;
// that is used by lib/module.js
bool config_preserve_symlinks = false;
+// Set in node.cc by ParseArgs when --experimental-modules is used.
+// Used in node_config.cc to set a constant on process.binding('config')
+// that is used by lib/module.js
+bool config_experimental_modules = false;
+
// Set by ParseArgs when --pending-deprecation or NODE_PENDING_DEPRECATION
// is used.
bool config_pending_deprecation = false;
@@ -250,22 +260,26 @@ node::DebugOptions debug_options;
static struct {
#if NODE_USE_V8_PLATFORM
- void Initialize(int thread_pool_size) {
- platform_ = v8::platform::CreateDefaultPlatform(
- thread_pool_size,
- v8::platform::IdleTaskSupport::kDisabled,
- v8::platform::InProcessStackDumping::kDisabled);
+ void Initialize(int thread_pool_size, uv_loop_t* loop) {
+ tracing_agent_ =
+ trace_enabled ? new tracing::Agent() : nullptr;
+ platform_ = new NodePlatform(thread_pool_size, loop,
+ trace_enabled ? tracing_agent_->GetTracingController() : nullptr);
V8::InitializePlatform(platform_);
- tracing::TraceEventHelper::SetCurrentPlatform(platform_);
- }
-
- void PumpMessageLoop(Isolate* isolate) {
- v8::platform::PumpMessageLoop(platform_, isolate);
+ tracing::TraceEventHelper::SetTracingController(
+ trace_enabled ? tracing_agent_->GetTracingController() : nullptr);
}
void Dispose() {
+ platform_->Shutdown();
delete platform_;
platform_ = nullptr;
+ delete tracing_agent_;
+ tracing_agent_ = nullptr;
+ }
+
+ void DrainVMTasks() {
+ platform_->DrainBackgroundTasks();
}
#if HAVE_INSPECTOR
@@ -283,21 +297,19 @@ static struct {
#endif // HAVE_INSPECTOR
void StartTracingAgent() {
- CHECK(tracing_agent_ == nullptr);
- tracing_agent_ = new tracing::Agent();
- tracing_agent_->Start(platform_, trace_enabled_categories);
+ tracing_agent_->Start(trace_enabled_categories);
}
void StopTracingAgent() {
tracing_agent_->Stop();
}
- v8::Platform* platform_;
tracing::Agent* tracing_agent_;
+ NodePlatform* platform_;
#else // !NODE_USE_V8_PLATFORM
- void Initialize(int thread_pool_size) {}
- void PumpMessageLoop(Isolate* isolate) {}
+ void Initialize(int thread_pool_size, uv_loop_t* loop) {}
void Dispose() {}
+ void DrainVMTasks() {}
bool StartInspector(Environment *env, const char* script_path,
const node::DebugOptions& options) {
env->ThrowError("Node compiled with NODE_USE_V8_PLATFORM=0");
@@ -1328,8 +1340,9 @@ MaybeLocal<Value> MakeCallback(Environment* env,
asyncContext.trigger_async_id);
if (asyncContext.async_id != 0) {
- if (!AsyncWrap::EmitBefore(env, asyncContext.async_id))
-      return Local<Value>();
+ // No need to check a return value because the application will exit if
+ // an exception occurs.
+ AsyncWrap::EmitBefore(env, asyncContext.async_id);
}
ret = callback->Call(env->context(), recv, argc, argv);
@@ -1342,8 +1355,7 @@ MaybeLocal<Value> MakeCallback(Environment* env,
}
if (asyncContext.async_id != 0) {
- if (!AsyncWrap::EmitAfter(env, asyncContext.async_id))
-      return Local<Value>();
+ AsyncWrap::EmitAfter(env, asyncContext.async_id);
}
}
@@ -3434,11 +3446,11 @@ void SetupProcessObject(Environment* env,
// --security-revert flags
#define V(code, _, __) \
do { \
- if (IsReverted(REVERT_ ## code)) { \
+ if (IsReverted(SECURITY_REVERT_ ## code)) { \
READONLY_PROPERTY(process, "REVERT_" #code, True(env->isolate())); \
} \
} while (0);
- REVERSIONS(V)
+ SECURITY_REVERSIONS(V)
#undef V
size_t exec_path_len = 2 * PATH_MAX;
@@ -3704,6 +3716,7 @@ static void PrintHelp() {
" note: linked-in ICU data is present\n"
#endif
" --preserve-symlinks preserve symbolic links when resolving\n"
+ " --experimental-modules experimental ES Module support\n"
" and caching modules\n"
#endif
"\n"
@@ -3940,6 +3953,8 @@ static void ParseArgs(int* argc,
Revert(cve);
} else if (strcmp(arg, "--preserve-symlinks") == 0) {
config_preserve_symlinks = true;
+ } else if (strcmp(arg, "--experimental-modules") == 0) {
+ config_experimental_modules = true;
} else if (strcmp(arg, "--prof-process") == 0) {
prof_process = true;
short_circuit = true;
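The new flag is an ordinary boolean toggled during the argv scan and later exposed through `process.binding('config')`. A trivial sketch of that kind of scan; only the flag handling is shown and the surrounding option machinery is omitted:

```cpp
// Sketch: how a boolean such as config_experimental_modules is flipped while
// scanning argv, in the spirit of ParseArgs(). Illustrative only.
#include <cstring>
#include <iostream>

static bool config_experimental_modules = false;
static bool config_preserve_symlinks = false;

void parse_args(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    const char* arg = argv[i];
    if (strcmp(arg, "--experimental-modules") == 0) {
      config_experimental_modules = true;
    } else if (strcmp(arg, "--preserve-symlinks") == 0) {
      config_preserve_symlinks = true;
    }
  }
}

int main(int argc, char** argv) {
  parse_args(argc, argv);
  std::cout << "experimental modules: "
            << (config_experimental_modules ? "on" : "off")
            << ", preserve symlinks: "
            << (config_preserve_symlinks ? "on" : "off") << '\n';
}
```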
@@ -4553,21 +4568,18 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,
{
SealHandleScope seal(isolate);
bool more;
+ PERFORMANCE_MARK(&env, LOOP_START);
do {
- v8_platform.PumpMessageLoop(isolate);
- more = uv_run(env.event_loop(), UV_RUN_ONCE);
-
- if (more == false) {
- v8_platform.PumpMessageLoop(isolate);
- EmitBeforeExit(&env);
-
- // Emit `beforeExit` if the loop became alive either after emitting
- // event, or after running some callbacks.
- more = uv_loop_alive(env.event_loop());
- if (uv_run(env.event_loop(), UV_RUN_NOWAIT) != 0)
- more = true;
- }
+ uv_run(env.event_loop(), UV_RUN_DEFAULT);
+
+ EmitBeforeExit(&env);
+
+ v8_platform.DrainVMTasks();
+ // Emit `beforeExit` if the loop became alive either after emitting
+ // event, or after running some callbacks.
+ more = uv_loop_alive(env.event_loop());
} while (more == true);
+ PERFORMANCE_MARK(&env, LOOP_EXIT);
}
env.set_trace_sync_io(false);
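The loop rework replaces the PumpMessageLoop/`UV_RUN_ONCE` pumping with one `uv_run(..., UV_RUN_DEFAULT)` per iteration, followed by `beforeExit` emission, a drain of background V8 tasks, and a `uv_loop_alive()` check to decide whether to go around again. A minimal libuv program with the same shape; the re-arming step is only a stand-in for `EmitBeforeExit()`/`DrainVMTasks()`:

```cpp
// Minimal libuv sketch of the reworked loop shape: run the loop to
// completion, do the "before exit" work, then iterate again only if that
// work made the loop alive. Build with -luv.
#include <uv.h>
#include <cstdio>

static int before_exit_rounds = 0;

static void timer_cb(uv_timer_t* handle) {
  std::printf("timer fired\n");
  uv_close(reinterpret_cast<uv_handle_t*>(handle), nullptr);
}

int main() {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;
  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, timer_cb, 10, 0);

  bool more;
  do {
    uv_run(loop, UV_RUN_DEFAULT);  // run until no referenced handles remain

    // "beforeExit" stand-in: the first round schedules new work, which keeps
    // the loop alive and forces one more iteration.
    if (before_exit_rounds++ == 0) {
      uv_timer_init(loop, &timer);
      uv_timer_start(&timer, timer_cb, 10, 0);
    }

    more = uv_loop_alive(loop) != 0;  // anything left to do?
  } while (more);

  uv_loop_close(loop);
  return 0;
}
```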
@@ -4576,6 +4588,7 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,
RunAtExit(&env);
uv_key_delete(&thread_local_env);
+ v8_platform.DrainVMTasks();
WaitForInspectorDisconnect(&env);
#if defined(LEAK_SANITIZER)
__lsan_do_leak_check();
@@ -4636,6 +4649,7 @@ inline int Start(uv_loop_t* event_loop,
int Start(int argc, char** argv) {
atexit([] () { uv_tty_reset_mode(); });
PlatformInit();
+ node::performance::performance_node_start = PERFORMANCE_NOW();
CHECK_GT(argc, 0);
@@ -4664,7 +4678,7 @@ int Start(int argc, char** argv) {
V8::SetEntropySource(crypto::EntropySource);
#endif // HAVE_OPENSSL
- v8_platform.Initialize(v8_thread_pool_size);
+ v8_platform.Initialize(v8_thread_pool_size, uv_default_loop());
// Enable tracing when argv has --trace-events-enabled.
if (trace_enabled) {
fprintf(stderr, "Warning: Trace event is an experimental feature "
@@ -4672,6 +4686,7 @@ int Start(int argc, char** argv) {
v8_platform.StartTracingAgent();
}
V8::Initialize();
+ node::performance::performance_v8_start = PERFORMANCE_NOW();
v8_initialized = true;
const int exit_code =
Start(uv_default_loop(), argc, argv, exec_argc, exec_argv);
@@ -4681,6 +4696,12 @@ int Start(int argc, char** argv) {
v8_initialized = false;
V8::Dispose();
+ // uv_run cannot be called from the time before the beforeExit callback
+ // runs until the program exits unless the event loop has any referenced
+ // handles after beforeExit terminates. This prevents unrefed timers
+ // that happen to terminate during shutdown from being run unsafely.
+ // Since uv_run cannot be called, uv_async handles held by the platform
+ // will never be fully cleaned up.
v8_platform.Dispose();
delete[] exec_argv;
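The node.cc changes also record startup milestones (`performance_node_start`, `performance_v8_start`, and the `LOOP_START`/`LOOP_EXIT` marks) for the performance timeline. A self-contained sketch of the same idea using `std::chrono`; the milestone enum and storage here are illustrative, not the `node_perf` implementation:

```cpp
// Sketch of milestone timestamping in the spirit of PERFORMANCE_NOW() and
// PERFORMANCE_MARK(): capture a monotonic timestamp at fixed points during
// startup and report the deltas.
#include <array>
#include <chrono>
#include <cstdio>

enum Milestone { NODE_START, V8_START, LOOP_START, LOOP_EXIT, N_MILESTONES };

using Clock = std::chrono::steady_clock;
static std::array<Clock::time_point, N_MILESTONES> marks;

static void Mark(Milestone m) { marks[m] = Clock::now(); }

static double MillisBetween(Milestone a, Milestone b) {
  return std::chrono::duration<double, std::milli>(marks[b] - marks[a]).count();
}

int main() {
  Mark(NODE_START);
  // ... platform and V8 initialization would happen here ...
  Mark(V8_START);
  Mark(LOOP_START);
  // ... the event loop would run here ...
  Mark(LOOP_EXIT);

  std::printf("init: %.3f ms, loop: %.3f ms\n",
              MillisBetween(NODE_START, V8_START),
              MillisBetween(LOOP_START, LOOP_EXIT));
}
```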
diff --git a/src/node_api.cc b/src/node_api.cc
index b84a33e510f264..132d90505a8718 100644
--- a/src/node_api.cc
+++ b/src/node_api.cc
@@ -218,6 +218,14 @@ V8EscapableHandleScopeFromJsEscapableHandleScope(
 static_assert(sizeof(v8::Local<v8::Value>) == sizeof(napi_value),
               "Cannot convert between v8::Local<v8::Value> and napi_value");
+napi_deferred JsDeferredFromV8Persistent(v8::Persistent<v8::Value>* local) {
+  return reinterpret_cast<napi_deferred>(local);
+}
+
+v8::Persistent<v8::Value>* V8PersistentFromJsDeferred(napi_deferred local) {
+  return reinterpret_cast<v8::Persistent<v8::Value>*>(local);
+}
+
 napi_value JsValueFromV8LocalValue(v8::Local<v8::Value> local) {
   return reinterpret_cast<napi_value>(*local);
}
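The new helpers treat `napi_deferred` as an opaque handle whose concrete type is a heap-allocated `v8::Persistent<v8::Value>`, converting with `reinterpret_cast` the same way `napi_value` already mirrors `v8::Local<v8::Value>`. A standalone sketch of that opaque-handle pattern, with invented stand-in types rather than the N-API definitions:

```cpp
// Sketch of the opaque-handle pattern used for napi_deferred: the public API
// only sees a forward-declared pointer type, while the implementation
// reinterpret_casts it back to the real object. Types are illustrative.
#include <iostream>
#include <string>

// Public, opaque side (what a header would expose).
typedef struct my_deferred__* my_deferred;

// Private, concrete side (what the implementation actually allocates).
struct DeferredState {
  std::string pending_value;
};

my_deferred DeferredFromState(DeferredState* state) {
  return reinterpret_cast<my_deferred>(state);
}

DeferredState* StateFromDeferred(my_deferred d) {
  return reinterpret_cast<DeferredState*>(d);
}

int main() {
  auto* state = new DeferredState{"resolved later"};
  my_deferred handle = DeferredFromState(state);  // hand out the opaque handle

  // ... later, the implementation recovers the concrete object ...
  std::cout << StateFromDeferred(handle)->pending_value << '\n';
  delete StateFromDeferred(handle);
}
```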
@@ -674,6 +682,8 @@ v8::Local<v8::Object> CreateAccessorCallbackData(napi_env env,
return cbdata;
}
+int kWrapperFields = 3;
+
// Pointer used to identify items wrapped by N-API. Used by FindWrapper and
// napi_wrap().
const char napi_wrap_name[] = "N-API Wrapper";
@@ -682,7 +692,8 @@ const char napi_wrap_name[] = "N-API Wrapper";
// wrapper would be the first in the chain, but it is OK for other objects to
// be inserted in the prototype chain.
 bool FindWrapper(v8::Local<v8::Object> obj,
-                 v8::Local<v8::Object>* result = nullptr) {
+                 v8::Local<v8::Object>* result = nullptr,
+                 v8::Local<v8::Object>* parent = nullptr) {
   v8::Local<v8::Object> wrapper = obj;
do {
@@ -690,8 +701,11 @@ bool FindWrapper(v8::Local<v8::Object> obj,
if (proto.IsEmpty() || !proto->IsObject()) {
return false;
}
+ if (parent != nullptr) {
+ *parent = wrapper;
+ }
     wrapper = proto.As<v8::Object>();
-    if (wrapper->InternalFieldCount() == 2) {
+    if (wrapper->InternalFieldCount() == kWrapperFields) {
       v8::Local<v8::Value> external = wrapper->GetInternalField(1);
       if (external->IsExternal() &&
           external.As<v8::External>()->Value() == v8impl::napi_wrap_name) {
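`FindWrapper` walks the prototype chain until it finds the object carrying the N-API wrapper, now identified by `kWrapperFields` internal fields plus the `napi_wrap_name` sentinel, and can additionally report the object just below the wrapper in the chain. A standalone analogue on a plain linked chain, with an invented node layout in place of V8 objects:

```cpp
// Sketch of the FindWrapper() idea on a plain linked "prototype chain": walk
// upward from the object until a link carrying the wrapper sentinel is found,
// optionally reporting the link just below it. Layout is illustrative.
#include <iostream>

static const char kWrapSentinel[] = "N-API Wrapper";

struct ChainLink {
  ChainLink* proto = nullptr;     // next object up the chain
  const char* tag = nullptr;      // stands in for internal field 1
  void* native_object = nullptr;  // stands in for internal field 0
};

bool FindWrapper(ChainLink* obj, ChainLink** result = nullptr,
                 ChainLink** parent = nullptr) {
  ChainLink* wrapper = obj;
  do {
    ChainLink* proto = wrapper->proto;
    if (proto == nullptr) return false;        // end of the chain, no wrapper
    if (parent != nullptr) *parent = wrapper;  // remember who points at it
    wrapper = proto;
  } while (wrapper->tag != kWrapSentinel);     // sentinel marks the wrapper
  if (result != nullptr) *result = wrapper;
  return true;
}

int main() {
  int native = 42;
  ChainLink wrap{nullptr, kWrapSentinel, &native};
  ChainLink middle{&wrap};
  ChainLink obj{&middle};

  ChainLink* found = nullptr;
  ChainLink* parent = nullptr;
  if (FindWrapper(&obj, &found, &parent))
    std::cout << "wrapped value: " << *static_cast<int*>(found->native_object)
              << ", parent is middle: " << (parent == &middle) << '\n';
}
```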
@@ -745,6 +759,56 @@ napi_env GetEnv(v8::Local<v8::Context> context) {
return result;
}
+napi_status Unwrap(napi_env env,
+ napi_value js_object,
+ void** result,
+                   v8::Local<v8::Object>* wrapper,
+                   v8::Local<v8::Object>* parent = nullptr) {
+ CHECK_ARG(env, js_object);
+ CHECK_ARG(env, result);
+
+  v8::Local<v8::Value> value = v8impl::V8LocalValueFromJsValue(js_object);
+  RETURN_STATUS_IF_FALSE(env, value->IsObject(), napi_invalid_arg);
+  v8::Local<v8::Object> obj = value.As<v8::Object>();
+
+ RETURN_STATUS_IF_FALSE(
+ env, v8impl::FindWrapper(obj, wrapper, parent), napi_invalid_arg);
+
+  v8::Local<v8::Value> unwrappedValue = (*wrapper)->GetInternalField(0);
+ RETURN_STATUS_IF_FALSE(env, unwrappedValue->IsExternal(), napi_invalid_arg);
+
+  *result = unwrappedValue.As<v8::External>()->Value();
+
+ return napi_ok;
+}
+
+napi_status ConcludeDeferred(napi_env env,
+ napi_deferred deferred,
+ napi_value result,
+ bool is_resolved) {
+ NAPI_PREAMBLE(env);
+ CHECK_ARG(env, result);
+
+  v8::Local<v8::Context> context = env->isolate->GetCurrentContext();
+  v8::Persistent<v8::Value>* deferred_ref =
+      V8PersistentFromJsDeferred(deferred);
+  v8::Local<v8::Value> v8_deferred =
+      v8::Local<v8::Value>::New(env->isolate, *deferred_ref);
+
+ auto v8_resolver = v8::Local