forked from mirrors/action-gh-release
node_modules
This commit is contained in:
parent 0e414c630a
commit 78c309ef59
555 changed files with 103819 additions and 1 deletion

node_modules/@octokit/plugin-throttling/.travis.yml (generated, vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
language: node_js
cache: npm

# Trigger a push build on master and greenkeeper branches + PRs build on every branch
# Avoid double build on PRs (See https://github.com/travis-ci/travis-ci/issues/1147)
branches:
  only:
    - master
    - /^greenkeeper.*$/

jobs:
  include:
    - stage: test
      node_js: 8
    - node_js: 10
      env: Node 10 & coverage upload
      after_script:
        - npm run coverage:upload
    - node_js: lts/*
      env: memory-test
      script: npm run test:memory
    - stage: release
      env: semantic-release
      node_js: lts/*
      script: npx semantic-release

    # when Greenkeeper updates @octokit/routes, run "generate-routes" script
    # and push new routes.json file to the pull request
    - stage: greenkeeper-routes-update
      node_js: lts/*
      script:
        - git checkout $TRAVIS_BRANCH
        - node scripts/generate-routes
        # commit changes and push back to branch on GitHub. If there are no changes then exit without error
        - 'git commit -a -m "build: routes" --author="Octokit Bot <octokitbot@martynus.net>" && git push "https://${GH_TOKEN}@github.com/$TRAVIS_REPO_SLUG" ${TRAVIS_BRANCH} || true'

stages:
  - test
  - name: release
    if: branch = master AND type IN (push)
  - name: greenkeeper-routes-update
    if: branch =~ ^greenkeeper/@octokit/routes

node_modules/@octokit/plugin-throttling/CODE_OF_CONDUCT.md (generated, vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at opensource+octokit@github.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

node_modules/@octokit/plugin-throttling/LICENSE (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2018 Octokit contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

node_modules/@octokit/plugin-throttling/README.md (generated, vendored, new file, 107 lines)
@@ -0,0 +1,107 @@
# plugin-throttling.js

> Octokit plugin for GitHub’s recommended request throttling

[](https://www.npmjs.com/package/@octokit/plugin-throttling)
[](https://travis-ci.com/octokit/plugin-throttling.js)
[](https://coveralls.io/github/octokit/plugin-throttling.js)
[](https://greenkeeper.io/)

Implements all [recommended best practices](https://developer.github.com/v3/guides/best-practices-for-integrators/) to prevent hitting abuse rate limits.

## Usage

The code below creates a "Hello, world!" issue on every repository in a given organization. Without the throttling plugin it would send many requests in parallel and would hit rate limits very quickly. But `@octokit/plugin-throttling` slows down your requests according to the official guidelines, so you don't get blocked before your quota is exhausted.

The `throttle.onAbuseLimit` and `throttle.onRateLimit` options are required. Return `true` from either handler to automatically retry the request after `retryAfter` seconds.

```js
const Octokit = require('@octokit/rest')
  .plugin(require('@octokit/plugin-throttling'))

const octokit = new Octokit({
  auth: `token ${process.env.TOKEN}`,
  throttle: {
    onRateLimit: (retryAfter, options) => {
      console.warn(`Request quota exhausted for request ${options.method} ${options.url}`)

      if (options.request.retryCount === 0) { // only retries once
        console.log(`Retrying after ${retryAfter} seconds!`)
        return true
      }
    },
    onAbuseLimit: (retryAfter, options) => {
      // does not retry, only logs a warning
      console.warn(`Abuse detected for request ${options.method} ${options.url}`)
    }
  }
})

async function createIssueOnAllRepos (org) {
  const repos = await octokit.paginate(octokit.repos.listForOrg.endpoint({ org }))
  return Promise.all(repos.map(({ name }) =>
    octokit.issues.create({
      owner: org,
      repo: name,
      title: 'Hello, world!'
    })
  ))
}
```

Pass `{ throttle: { enabled: false } }` to disable this plugin.
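
For example, a minimal sketch (reusing the constructor options from the usage example above; the `enabled` flag is the only addition) of an instance with throttling turned off:

```js
const octokit = new Octokit({
  auth: `token ${process.env.TOKEN}`,
  // opt out of all request throttling performed by this plugin
  throttle: { enabled: false }
})
```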

### Clustering

Enabling Clustering support ensures that your application will not go over rate limits **across Octokit instances and across Node.js processes**.

First install either `redis` or `ioredis`:
```
# NodeRedis (https://github.com/NodeRedis/node_redis)
npm install --save redis

# or ioredis (https://github.com/luin/ioredis)
npm install --save ioredis
```

Then in your application:
```js
const Bottleneck = require('bottleneck')
const Redis = require('redis')

const client = Redis.createClient({ /* options */ })
const connection = new Bottleneck.RedisConnection({ client })
connection.on('error', err => console.error(err))

const octokit = new Octokit({
  throttle: {
    onAbuseLimit: (retryAfter, options) => { /* ... */ },
    onRateLimit: (retryAfter, options) => { /* ... */ },

    // The Bottleneck connection object
    connection,

    // A "throttling ID". All octokit instances with the same ID
    // using the same Redis server will share the throttling.
    id: 'my-super-app',

    // Otherwise the plugin uses a lighter version of Bottleneck without Redis support
    Bottleneck
  }
})

// To close the connection and allow your application to exit cleanly:
await connection.disconnect()
```

To use the `ioredis` library instead:
```js
const Redis = require('ioredis')
const client = new Redis({ /* options */ })
const connection = new Bottleneck.IORedisConnection({ client })
connection.on('error', err => console.error(err))
```

## LICENSE

[MIT](LICENSE)

node_modules/@octokit/plugin-throttling/lib/index.js (generated, vendored, new file, 128 lines)
@@ -0,0 +1,128 @@
module.exports = throttlingPlugin

const BottleneckLight = require('bottleneck/light')
const wrapRequest = require('./wrap-request')
const triggersNotificationPaths = require('./triggers-notification-paths')
const routeMatcher = require('./route-matcher')(triggersNotificationPaths)

// Workaround to allow tests to directly access the triggersNotification function.
const triggersNotification = throttlingPlugin.triggersNotification =
  routeMatcher.test.bind(routeMatcher)

const groups = {}

const createGroups = function (Bottleneck, common) {
  groups.global = new Bottleneck.Group({
    id: 'octokit-global',
    maxConcurrent: 10,
    ...common
  })
  groups.search = new Bottleneck.Group({
    id: 'octokit-search',
    maxConcurrent: 1,
    minTime: 2000,
    ...common
  })
  groups.write = new Bottleneck.Group({
    id: 'octokit-write',
    maxConcurrent: 1,
    minTime: 1000,
    ...common
  })
  groups.notifications = new Bottleneck.Group({
    id: 'octokit-notifications',
    maxConcurrent: 1,
    minTime: 3000,
    ...common
  })
}

function throttlingPlugin (octokit, octokitOptions = {}) {
  const {
    enabled = true,
    Bottleneck = BottleneckLight,
    id = 'no-id',
    timeout = 1000 * 60 * 2, // Redis TTL: 2 minutes
    connection
  } = octokitOptions.throttle || {}
  if (!enabled) {
    return
  }
  const common = { connection, timeout }

  if (groups.global == null) {
    createGroups(Bottleneck, common)
  }

  const state = Object.assign({
    clustering: connection != null,
    triggersNotification,
    minimumAbuseRetryAfter: 5,
    retryAfterBaseValue: 1000,
    retryLimiter: new Bottleneck(),
    id,
    ...groups
  }, octokitOptions.throttle)

  if (typeof state.onAbuseLimit !== 'function' || typeof state.onRateLimit !== 'function') {
    throw new Error(`octokit/plugin-throttling error:
        You must pass the onAbuseLimit and onRateLimit error handlers.
        See https://github.com/octokit/rest.js#throttling

        const octokit = new Octokit({
          throttle: {
            onAbuseLimit: (error, options) => {/* ... */},
            onRateLimit: (error, options) => {/* ... */}
          }
        })
    `)
  }

  const events = {}
  const emitter = new Bottleneck.Events(events)
  events.on('abuse-limit', state.onAbuseLimit)
  events.on('rate-limit', state.onRateLimit)
  events.on('error', e => console.warn('Error in throttling-plugin limit handler', e))

  state.retryLimiter.on('failed', async function (error, info) {
    const options = info.args[info.args.length - 1]
    const isGraphQL = options.url.startsWith('/graphql')

    if (!(isGraphQL || error.status === 403)) {
      return
    }

    const retryCount = ~~options.request.retryCount
    options.request.retryCount = retryCount

    const { wantRetry, retryAfter } = await (async function () {
      if (/\babuse\b/i.test(error.message)) {
        // The user has hit the abuse rate limit. (REST only)
        // https://developer.github.com/v3/#abuse-rate-limits

        // The Retry-After header can sometimes be blank when hitting an abuse limit,
        // but is always present after 2-3s, so make sure to set `retryAfter` to at least 5s by default.
        const retryAfter = Math.max(~~error.headers['retry-after'], state.minimumAbuseRetryAfter)
        const wantRetry = await emitter.trigger('abuse-limit', retryAfter, options)
        return { wantRetry, retryAfter }
      }
      if (error.headers != null && error.headers['x-ratelimit-remaining'] === '0') {
        // The user has used all their allowed calls for the current time period (REST and GraphQL)
        // https://developer.github.com/v3/#rate-limiting

        const rateLimitReset = new Date(~~error.headers['x-ratelimit-reset'] * 1000).getTime()
        const retryAfter = Math.max(Math.ceil((rateLimitReset - Date.now()) / 1000), 0)
        const wantRetry = await emitter.trigger('rate-limit', retryAfter, options)
        return { wantRetry, retryAfter }
      }
      return {}
    })()

    if (wantRetry) {
      options.request.retryCount++
      return retryAfter * state.retryAfterBaseValue
    }
  })

  octokit.hook.wrap('request', wrapRequest.bind(null, state))
}

node_modules/@octokit/plugin-throttling/lib/route-matcher.js (generated, vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
module.exports = routeMatcher

function routeMatcher (paths) {
  // EXAMPLE. For the following paths:
  /* [
      "/orgs/:org/invitations",
      "/repos/:owner/:repo/collaborators/:username"
  ] */

  const regexes = paths.map(p =>
    p.split('/')
      .map(c => c.startsWith(':') ? '(?:.+?)' : c)
      .join('/')
  )
  // 'regexes' would contain:
  /* [
      '/orgs/(?:.+?)/invitations',
      '/repos/(?:.+?)/(?:.+?)/collaborators/(?:.+?)'
  ] */

  const regex = `^(?:${regexes.map(r => `(?:${r})`).join('|')})[^/]*$`
  // 'regex' would contain:
  /*
    ^(?:(?:\/orgs\/(?:.+?)\/invitations)|(?:\/repos\/(?:.+?)\/(?:.+?)\/collaborators\/(?:.+?)))[^\/]*$

    It may look scary, but paste it into https://www.debuggex.com/
    and it will make a lot more sense!
  */

  return new RegExp(regex, 'i')
}
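
For illustration only (not part of the vendored file), a small sketch of how this matcher could be exercised against the notification paths shipped with the plugin; the relative `require` paths assume the package root as the working directory:

```js
const routeMatcher = require('./lib/route-matcher')
const paths = require('./lib/triggers-notification-paths.json')

// routeMatcher(paths) returns a single case-insensitive RegExp covering every path
const matcher = routeMatcher(paths)

matcher.test('/repos/octocat/hello-world/issues') // true: matches "/repos/:owner/:repo/issues"
matcher.test('/repos/octocat/hello-world')        // false: a plain repository path triggers no notifications
```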

node_modules/@octokit/plugin-throttling/lib/triggers-notification-paths.json (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
[
  "/orgs/:org/invitations",
  "/repos/:owner/:repo/collaborators/:username",
  "/repos/:owner/:repo/commits/:sha/comments",
  "/repos/:owner/:repo/issues",
  "/repos/:owner/:repo/issues/:issue_number/comments",
  "/repos/:owner/:repo/pulls",
  "/repos/:owner/:repo/pulls/:pull_number/comments",
  "/repos/:owner/:repo/pulls/:pull_number/merge",
  "/repos/:owner/:repo/pulls/:pull_number/requested_reviewers",
  "/repos/:owner/:repo/pulls/:pull_number/reviews",
  "/repos/:owner/:repo/releases",
  "/teams/:team_id/discussions",
  "/teams/:team_id/discussions/:discussion_number/comments"
]

node_modules/@octokit/plugin-throttling/lib/wrap-request.js (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
module.exports = wrapRequest

const noop = () => Promise.resolve()

function wrapRequest (state, request, options) {
  return state.retryLimiter.schedule(doRequest, state, request, options)
}

async function doRequest (state, request, options) {
  const isWrite = options.method !== 'GET' && options.method !== 'HEAD'
  const isSearch = options.method === 'GET' && options.url.startsWith('/search/')
  const isGraphQL = options.url.startsWith('/graphql')

  const retryCount = ~~options.request.retryCount
  const jobOptions = retryCount > 0 ? { priority: 0, weight: 0 } : {}
  if (state.clustering) {
    // Remove a job from Redis if it has not completed or failed within 60s
    // Examples: Node process terminated, client disconnected, etc.
    jobOptions.expiration = 1000 * 60
  }

  // Guarantee at least 1000ms between writes
  // GraphQL can also trigger writes
  if (isWrite || isGraphQL) {
    await state.write.key(state.id).schedule(jobOptions, noop)
  }

  // Guarantee at least 3000ms between requests that trigger notifications
  if (isWrite && state.triggersNotification(options.url)) {
    await state.notifications.key(state.id).schedule(jobOptions, noop)
  }

  // Guarantee at least 2000ms between search requests
  if (isSearch) {
    await state.search.key(state.id).schedule(jobOptions, noop)
  }

  const req = state.global.key(state.id).schedule(jobOptions, request, options)
  if (isGraphQL) {
    const res = await req
    if (res.data.errors != null && res.data.errors.some((err) => err.type === 'RATE_LIMITED')) {
      const err = new Error('GraphQL Rate Limit Exceeded')
      err.headers = res.headers
      err.data = res.data
      throw err
    }
  }
  return req
}

node_modules/@octokit/plugin-throttling/package.json (generated, vendored, new file, 85 lines)
@@ -0,0 +1,85 @@
{
  "_args": [
    [
      "@octokit/plugin-throttling@2.6.0",
      "/Users/dougtangren/code/rust/action-gh-release"
    ]
  ],
  "_from": "@octokit/plugin-throttling@2.6.0",
  "_id": "@octokit/plugin-throttling@2.6.0",
  "_inBundle": false,
  "_integrity": "sha512-E0xQrcD36sVEeBhut6j9nWX38vm/1LKMRSUqjvJ/mqGLXfHr4jYMsrR3I/nT2QC0eJL1/SKMt7zxOt7pZiFhDA==",
  "_location": "/@octokit/plugin-throttling",
  "_phantomChildren": {},
  "_requested": {
    "type": "version",
    "registry": true,
    "raw": "@octokit/plugin-throttling@2.6.0",
    "name": "@octokit/plugin-throttling",
    "escapedName": "@octokit%2fplugin-throttling",
    "scope": "@octokit",
    "rawSpec": "2.6.0",
    "saveSpec": null,
    "fetchSpec": "2.6.0"
  },
  "_requiredBy": [
    "/"
  ],
  "_resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-2.6.0.tgz",
  "_spec": "2.6.0",
  "_where": "/Users/dougtangren/code/rust/action-gh-release",
  "author": {
    "name": "Simon Grondin",
    "url": "http://github.com/SGrondin"
  },
  "bugs": {
    "url": "https://github.com/octokit/plugin-throttling.js/issues"
  },
  "dependencies": {
    "bottleneck": "^2.15.3"
  },
  "description": "Automatic rate limiting plugin for octokit",
  "devDependencies": {
    "@octokit/request": "3.0.3",
    "@octokit/rest": "^16.3.0",
    "@octokit/routes": "20.2.4",
    "chai": "^4.2.0",
    "coveralls": "^3.0.2",
    "leakage": "^0.4.0",
    "mocha": "^6.0.2",
    "nyc": "^14.0.0",
    "semantic-release": "^15.13.8",
    "standard": "^12.0.1"
  },
  "homepage": "https://github.com/octokit/plugin-throttling.js#readme",
  "license": "MIT",
  "main": "lib/index.js",
  "name": "@octokit/plugin-throttling",
  "publishConfig": {
    "access": "public",
    "tag": "latest"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/octokit/plugin-throttling.js.git"
  },
  "scripts": {
    "coverage": "nyc report --reporter=html && open coverage/index.html",
    "coverage:upload": "nyc report --reporter=text-lcov | coveralls",
    "pretest": "standard",
    "test": "nyc mocha test/integration/",
    "test:memory": "node test/memory-leakage-test"
  },
  "standard": {
    "globals": [
      "describe",
      "before",
      "beforeEach",
      "afterEach",
      "after",
      "it",
      "expect"
    ]
  },
  "version": "2.6.0"
}

node_modules/@octokit/plugin-throttling/scripts/generate-routes.js (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
/**
 * We do not want to have `@octokit/routes` as a production dependency due to
 * its huge size. We are only interested in the REST API endpoint paths that
 * trigger notifications. So instead we automatically generate a file that
 * only contains these paths when @octokit/routes has a new release.
 */
const { writeFileSync } = require('fs')

const routes = require('@octokit/routes')
const paths = []

Object.keys(routes).forEach(scope => {
  const scopeEndpoints = routes[scope]
  scopeEndpoints.forEach(endpoint => {
    if (endpoint.triggersNotification) {
      paths.push(endpoint.path)
    }
  })
})

const uniquePaths = [...new Set(paths.sort())]
writeFileSync('./lib/triggers-notification-paths.json', JSON.stringify(uniquePaths, null, 2) + '\n')

node_modules/@octokit/plugin-throttling/test/integration/events.js (generated, vendored, new file, 170 lines)
@@ -0,0 +1,170 @@
const expect = require('chai').expect
const Octokit = require('./octokit')

describe('Events', function () {
  it('Should support non-limit 403s', async function () {
    const octokit = new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })
    let caught = false

    await octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    try {
      await octokit.request('GET /route2', {
        request: {
          responses: [{ status: 403, headers: {}, data: {} }]
        }
      })
    } catch (error) {
      expect(error.message).to.equal('Test failed request (403)')
      caught = true
    }

    expect(caught).to.equal(true)
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'END GET /route1',
      'START GET /route2'
    ])
  })

  describe('\'abuse-limit\'', function () {
    it('Should detect abuse limit and broadcast event', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(60)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'retry-after': '60' }, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })

    it('Should ensure retryAfter is a minimum of 5s', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(5)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'retry-after': '2' }, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })

    it('Should broadcast retryAfter of 5s even when the header is missing', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(5)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: {}, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })
  })

  describe('\'rate-limit\'', function () {
    it('Should detect rate limit exceeded and broadcast event', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onRateLimit: (retryAfter, options) => {
            expect(retryAfter).to.be.closeTo(30, 1)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onAbuseLimit: () => 1
        }
      })
      const t0 = Date.now()

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `${Math.round(t0 / 1000) + 30}` }, data: {} }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })
  })
})

node_modules/@octokit/plugin-throttling/test/integration/index.js (generated, vendored, new file, 291 lines)
@@ -0,0 +1,291 @@
const Bottleneck = require('bottleneck')
const expect = require('chai').expect
const Octokit = require('./octokit')

describe('General', function () {
  it('Should be possible to disable the plugin', async function () {
    const octokit = new Octokit({ throttle: { enabled: false } })

    const req1 = octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })

    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'START GET /route2',
      'START GET /route3',
      'END GET /route1',
      'END GET /route2',
      'END GET /route3'
    ])
  })

  it('Should require the user to pass both limit handlers', function () {
    const message = 'You must pass the onAbuseLimit and onRateLimit error handlers'

    expect(() => new Octokit()).to.throw(message)
    expect(() => new Octokit({ throttle: {} })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: 5, onRateLimit: 5 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: 5, onRateLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onRateLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })).to.not.throw()
  })
})

describe('Github API best practices', function () {
  it('Should linearize requests', async function () {
    const octokit = new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })
    const req1 = octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })

    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'END GET /route1',
      'START GET /route2',
      'END GET /route2',
      'START GET /route3',
      'END GET /route3'
    ])
  })

  it('Should maintain 1000ms between mutating or GraphQL requests', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('POST /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })
    const req4 = octokit.request('POST /graphql', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3, req4])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START POST /route1',
      'END POST /route1',
      'START POST /route3',
      'END POST /route3',
      'START POST /graphql',
      'END POST /graphql'
    ])
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    expect(octokit.__requestTimings[6] - octokit.__requestTimings[4]).to.be.closeTo(50, 20)
  })

  it('Should maintain 3000ms between requests that trigger notifications', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        notifications: new Bottleneck.Group({ minTime: 100 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /orgs/:org/invitations', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('POST /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('POST /repos/:owner/:repo/commits/:sha/comments', {
      request: {
        responses: [{ status: 302, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START POST /orgs/:org/invitations',
      'END POST /orgs/:org/invitations',
      'START POST /route2',
      'END POST /route2',
      'START POST /repos/:owner/:repo/commits/:sha/comments',
      'END POST /repos/:owner/:repo/commits/:sha/comments'
    ])
    expect(octokit.__requestTimings[5] - octokit.__requestTimings[0]).to.be.closeTo(100, 20)
  })

  it('Should match custom routes when checking notification triggers', function () {
    const plugin = require('../../lib')

    expect(plugin.triggersNotification('/abc/def')).to.equal(false)
    expect(plugin.triggersNotification('/orgs/abc/invitation')).to.equal(false)
    expect(plugin.triggersNotification('/repos/abc/releases')).to.equal(false)
    expect(plugin.triggersNotification('/repos/abc/def/pulls/5')).to.equal(false)

    expect(plugin.triggersNotification('/repos/abc/def/pulls')).to.equal(true)
    expect(plugin.triggersNotification('/repos/abc/def/pulls/5/comments')).to.equal(true)
    expect(plugin.triggersNotification('/repos/foo/bar/issues')).to.equal(true)

    expect(plugin.triggersNotification('/repos/:owner/:repo/pulls')).to.equal(true)
    expect(plugin.triggersNotification('/repos/:owner/:repo/pulls/5/comments')).to.equal(true)
    expect(plugin.triggersNotification('/repos/:foo/:bar/issues')).to.equal(true)
  })

  it('Should maintain 2000ms between search requests', async function () {
    const octokit = new Octokit({
      throttle: {
        search: new Bottleneck.Group({ minTime: 50 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('GET /search/route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('GET /search/route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START GET /search/route1',
      'END GET /search/route1',
      'START GET /search/route3',
      'END GET /search/route3'
    ])
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[2]).to.be.closeTo(50, 20)
  })

  it('Should optimize throughput rather than maintain ordering', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        notifications: new Bottleneck.Group({ minTime: 150 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /orgs/abc/invitations', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req4 = octokit.request('POST /route4', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req5 = octokit.request('POST /repos/abc/def/commits/12345/comments', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req6 = octokit.request('PATCH /orgs/abc/invitations', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3, req4, req5, req6])
    await octokit.request('GET /route6', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START GET /route3',
      'END GET /route3',
      'START POST /orgs/abc/invitations',
      'END POST /orgs/abc/invitations',
      'START POST /route4',
      'END POST /route4',
      'START POST /repos/abc/def/commits/12345/comments',
      'END POST /repos/abc/def/commits/12345/comments',
      'START PATCH /orgs/abc/invitations',
      'END PATCH /orgs/abc/invitations',
      'START GET /route6',
      'END GET /route6'
    ])

    expect(octokit.__requestTimings[2] - octokit.__requestTimings[0]).to.be.closeTo(0, 20)
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[2]).to.be.closeTo(0, 20)
    expect(octokit.__requestTimings[6] - octokit.__requestTimings[4]).to.be.closeTo(50, 20)
    expect(octokit.__requestTimings[8] - octokit.__requestTimings[6]).to.be.closeTo(100, 20)
    expect(octokit.__requestTimings[10] - octokit.__requestTimings[8]).to.be.closeTo(150, 20)
    expect(octokit.__requestTimings[12] - octokit.__requestTimings[10]).to.be.closeTo(0, 30)
  })
})

node_modules/@octokit/plugin-throttling/test/integration/octokit.js (generated, vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
const Octokit = require('@octokit/rest')
const HttpError = require('@octokit/request/lib/http-error')
const throttlingPlugin = require('../..')

module.exports = Octokit
  .plugin((octokit) => {
    octokit.__t0 = Date.now()
    octokit.__requestLog = []
    octokit.__requestTimings = []

    octokit.hook.wrap('request', async (request, options) => {
      octokit.__requestLog.push(`START ${options.method} ${options.url}`)
      octokit.__requestTimings.push(Date.now() - octokit.__t0)
      await new Promise(resolve => setTimeout(resolve, 0))

      const res = options.request.responses.shift()
      if (res.status >= 400) {
        const message = res.data.message != null ? res.data.message : `Test failed request (${res.status})`
        const error = new HttpError(message, res.status, res.headers, options)
        throw error
      } else {
        octokit.__requestLog.push(`END ${options.method} ${options.url}`)
        octokit.__requestTimings.push(Date.now() - octokit.__t0)
        return res
      }
    })
  })
  .plugin(throttlingPlugin)

node_modules/@octokit/plugin-throttling/test/integration/retry.js (generated, vendored, new file, 193 lines)
@@ -0,0 +1,193 @@
const Bottleneck = require('bottleneck')
const expect = require('chai').expect
const Octokit = require('./octokit')

describe('Retry', function () {
  describe('REST', function () {
    it('Should retry \'abuse-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          minimumAbuseRetryAfter: 0,
          retryAfterBaseValue: 50,
          onAbuseLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(eventCount + 1)
            eventCount++
            return true
          },
          onRateLimit: () => 1
        }
      })

      const res = await octokit.request('GET /route', {
        request: {
          responses: [
            { status: 403, headers: { 'retry-after': '1' }, data: { message: 'You have been rate limited to prevent abuse' } },
            { status: 200, headers: {}, data: { message: 'Success!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.include({ message: 'Success!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'END GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    })

    it('Should retry \'abuse-limit\' twice and fail', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          minimumAbuseRetryAfter: 0,
          retryAfterBaseValue: 50,
          onAbuseLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(eventCount + 1)
            eventCount++
            return true
          },
          onRateLimit: () => 1
        }
      })

      const message = 'You have been rate limited to prevent abuse'
      try {
        await octokit.request('GET /route', {
          request: {
            responses: [
              { status: 403, headers: { 'retry-after': '1' }, data: { message } },
              { status: 403, headers: { 'retry-after': '2' }, data: { message } },
              { status: 404, headers: { 'retry-after': '3' }, data: { message: 'Nope!' } }
            ]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(404)
        expect(error.message).to.equal('Nope!')
      }

      expect(eventCount).to.equal(2)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'START GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
      expect(octokit.__requestTimings[2] - octokit.__requestTimings[1]).to.be.closeTo(100, 20)
    })

    it('Should retry \'rate-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onRateLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(0)
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('GET /route', {
        request: {
          responses: [
            { status: 403, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: {} },
            { status: 202, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(202)
      expect(res.data).to.include({ message: 'Yay!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'END GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(0, 20)
    })
  })

  describe('GraphQL', function () {
    it('Should retry \'rate-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          write: new Bottleneck.Group({ minTime: 50 }),
          onRateLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'POST', url: '/graphql' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(0)
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('POST /graphql', {
        request: {
          responses: [
            { status: 200, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: { errors: [{ type: 'RATE_LIMITED' }] } },
            { status: 200, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.include({ message: 'Yay!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START POST /graphql',
        'END POST /graphql',
        'START POST /graphql',
        'END POST /graphql'
      ])
      expect(octokit.__requestTimings[2] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    })

    it('Should ignore other error types', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          write: new Bottleneck.Group({ minTime: 50 }),
          onRateLimit: (retryAfter, options) => {
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('POST /graphql', {
        request: {
          responses: [
            { status: 200, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: { errors: [{ type: 'HELLO_WORLD' }] } },
            { status: 200, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.deep.equal({ errors: [ { type: 'HELLO_WORLD' } ] })
      expect(eventCount).to.equal(0)
      expect(octokit.__requestLog).to.deep.equal([
        'START POST /graphql',
        'END POST /graphql'
      ])
    })
  })
})

node_modules/@octokit/plugin-throttling/test/memory-leakage-test.js (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
const { iterate } = require('leakage')
const Octokit = require('@octokit/rest')
  .plugin(require('..'))

const result = iterate(() => {
  Octokit({
    throttle: {
      onAbuseLimit: () => {},
      onRateLimit: () => {}
    }
  })
})

result.printSummary()