Column schema:

| Column | Type | Min | Max |
|---|---|---|---|
| title | string (length) | 2 | 169 |
| diff | string (length) | 235 | 19.5k |
| body | string (length) | 0 | 30.5k |
| url | string (length) | 48 | 84 |
| created_at | string (length) | 20 | 20 |
| closed_at | string (length) | 20 | 20 |
| merged_at | string (length) | 20 | 20 |
| updated_at | string (length) | 20 | 20 |
| diff_len | float64 (value) | 101 | 3.99k |
| repo_name | string (83 classes) | - | - |
| __index_level_0__ | int64 (value) | 15 | 52.7k |
DOC add example of DataFrame.index
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index c046d55d80b49..55618590071b5 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -532,7 +532,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then pandas.api.extensions.ExtensionArray.ndim \ pandas.api.extensions.ExtensionArray.shape \ pandas.api.extensions.ExtensionArray.tolist \ - pandas.DataFrame.index \ pandas.DataFrame.columns \ pandas.DataFrame.__iter__ \ pandas.DataFrame.keys \ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bd298b8d723b8..abe62b475a759 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -11768,7 +11768,50 @@ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame: _info_axis_name: Literal["columns"] = "columns" index = properties.AxisProperty( - axis=1, doc="The index (row labels) of the DataFrame." + axis=1, + doc=""" + The index (row labels) of the DataFrame. + + The index of a DataFrame is a series of labels that identify each row. + The labels can be integers, strings, or any other hashable type. The index + is used for label-based access and alignment, and can be accessed or + modified using this attribute. + + Returns + ------- + pandas.Index + The index labels of the DataFrame. + + See Also + -------- + DataFrame.columns : The column labels of the DataFrame. + DataFrame.to_numpy : Convert the DataFrame to a NumPy array. + + Examples + -------- + >>> df = pd.DataFrame({'Name': ['Alice', 'Bob', 'Aritra'], + ... 'Age': [25, 30, 35], + ... 'Location': ['Seattle', 'New York', 'Kona']}, + ... index=([10, 20, 30])) + >>> df.index + Index([10, 20, 30], dtype='int64') + + In this example, we create a DataFrame with 3 rows and 3 columns, + including Name, Age, and Location information. We set the index labels to + be the integers 10, 20, and 30. We then access the `index` attribute of the + DataFrame, which returns an `Index` object containing the index labels. + + >>> df.index = [100, 200, 300] + >>> df + Name Age Location + 100 Alice 25 Seattle + 200 Bob 30 New York + 300 Aritra 35 Kona + + In this example, we modify the index labels of the DataFrame by assigning + a new list of labels to the `index` attribute. The DataFrame is then + updated with the new labels, and the output shows the modified DataFrame. + """, ) columns = properties.AxisProperty(axis=0, doc="The column labels of the DataFrame.")
DOC add example of DataFrame.index - [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
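For reference, the docstring example added in this diff runs standalone with plain pandas; a minimal sketch:

```python
import pandas as pd

# The frame from the new docstring example.
df = pd.DataFrame(
    {"Name": ["Alice", "Bob", "Aritra"],
     "Age": [25, 30, 35],
     "Location": ["Seattle", "New York", "Kona"]},
    index=[10, 20, 30],
)

print(df.index)  # Index([10, 20, 30], dtype='int64')

# The attribute is writable: assigning a new list relabels the rows.
df.index = [100, 200, 300]
print(df.loc[100, "Name"])  # Alice
```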
https://api.github.com/repos/pandas-dev/pandas/pulls/52835
2023-04-21T18:41:54Z
2023-04-23T19:13:44Z
2023-04-23T19:13:44Z
2023-04-24T13:41:58Z
721
pandas-dev/pandas
45,545
Control filter list
diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index 022e549266..e1873105ac 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -14,10 +14,10 @@ const lg = require("../utils/litegraph"); * @param { InstanceType<Ez["EzGraph"]> } graph * @param { InstanceType<Ez["EzInput"]> } input * @param { string } widgetType - * @param { boolean } hasControlWidget + * @param { number } controlWidgetCount * @returns */ -async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasControlWidget) { +async function connectPrimitiveAndReload(ez, graph, input, widgetType, controlWidgetCount = 0) { // Connect to primitive and ensure its still connected after let primitive = ez.PrimitiveNode(); primitive.outputs[0].connectTo(input); @@ -33,13 +33,17 @@ async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasContro expect(valueWidget.widget.type).toBe(widgetType); // Check if control_after_generate should be added - if (hasControlWidget) { + if (controlWidgetCount) { const controlWidget = primitive.widgets.control_after_generate; expect(controlWidget.widget.type).toBe("combo"); + if(widgetType === "combo") { + const filterWidget = primitive.widgets.control_filter_list; + expect(filterWidget.widget.type).toBe("string"); + } } // Ensure we dont have other widgets - expect(primitive.node.widgets).toHaveLength(1 + +!!hasControlWidget); + expect(primitive.node.widgets).toHaveLength(1 + controlWidgetCount); }); return primitive; @@ -55,8 +59,8 @@ describe("widget inputs", () => { }); [ - { name: "int", type: "INT", widget: "number", control: true }, - { name: "float", type: "FLOAT", widget: "number", control: true }, + { name: "int", type: "INT", widget: "number", control: 1 }, + { name: "float", type: "FLOAT", widget: "number", control: 1 }, { name: "text", type: "STRING" }, { name: "customtext", @@ -64,7 +68,7 @@ describe("widget inputs", () => { opt: { multiline: true }, }, { name: "toggle", type: "BOOLEAN" }, - { name: "combo", type: ["a", "b", "c"], control: true }, + { name: "combo", type: ["a", "b", "c"], control: 2 }, ].forEach((c) => { test(`widget conversion + primitive works on ${c.name}`, async () => { const { ez, graph } = await start({ @@ -106,7 +110,7 @@ describe("widget inputs", () => { n.widgets.ckpt_name.convertToInput(); expect(n.inputs.length).toEqual(inputCount + 1); - const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", true); + const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", 2); // Disconnect & reconnect primitive.outputs[0].connections[0].disconnect(); @@ -226,7 +230,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (!assertNotNullOrUndefined(input)) return; - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); w = n.widgets.example; @@ -258,7 +262,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (assertNotNullOrUndefined(input)) { - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); expect(n.widgets.example.isConvertedToInput).toBeTruthy(); @@ -316,4 +320,76 @@ describe("widget inputs", () => { n1.outputs[0].connectTo(n2.inputs[0]); 
expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow(); }); + + test("combo primitive can filter list when control_after_generate called", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C", "D", "AA", "BB", "CC", "DD", "AAA", "BBB"], {}] }), + }, + }); + + const n1 = ez.TestNode1(); + n1.widgets.example.convertToInput(); + const p = ez.PrimitiveNode() + p.outputs[0].connectTo(n1.inputs[0]); + + const value = p.widgets.value; + const control = p.widgets.control_after_generate.widget; + const filter = p.widgets.control_filter_list; + + expect(p.widgets.length).toBe(3); + control.value = "increment"; + expect(value.value).toBe("A"); + + // Manually trigger after queue when set to increment + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Filter to items containing D + filter.value = "D"; + control["afterQueued"](); + expect(value.value).toBe("D"); + control["afterQueued"](); + expect(value.value).toBe("DD"); + + // Check decrement + value.value = "BBB"; + control.value = "decrement"; + filter.value = "B"; + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Check regex works + value.value = "BBB"; + filter.value = "/[AB]|^C$/"; + control["afterQueued"](); + expect(value.value).toBe("AAA"); + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("AA"); + control["afterQueued"](); + expect(value.value).toBe("C"); + control["afterQueued"](); + expect(value.value).toBe("B"); + control["afterQueued"](); + expect(value.value).toBe("A"); + + // Check random + control.value = "randomize"; + filter.value = "/D/"; + for(let i = 0; i < 100; i++) { + control["afterQueued"](); + expect(value.value === "D" || value.value === "DD").toBeTruthy(); + } + + // Ensure it doesnt apply when fixed + control.value = "fixed"; + value.value = "B"; + filter.value = "C"; + control["afterQueued"](); + expect(value.value).toBe("B"); + }); }); diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index bad3ac3a74..5c8fbc9b2d 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -1,4 +1,4 @@ -import { ComfyWidgets, addValueControlWidget } from "../../scripts/widgets.js"; +import { ComfyWidgets, addValueControlWidgets } from "../../scripts/widgets.js"; import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; @@ -467,7 +467,11 @@ app.registerExtension({ if (!control_value) { control_value = "fixed"; } - addValueControlWidget(this, widget, control_value); + addValueControlWidgets(this, widget, control_value); + let filter = this.widgets_values?.[2]; + if(filter && this.widgets.length === 3) { + this.widgets[2].value = filter; + } } // When our value changes, update other widgets to reflect our changes diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index ccddc0bc44..fbc1d0fc32 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -24,17 +24,58 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { } export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) { - const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { + const widgets = addValueControlWidgets(node, targetWidget, defaultValue, values, { + addFilterList: false, + }); + return widgets[0]; +} + 
+export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", values, options) { + if (!options) options = {}; + + const widgets = []; + const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { values: ["fixed", "increment", "decrement", "randomize"], serialize: false, // Don't include this in prompt. }); - valueControl.afterQueued = () => { + widgets.push(valueControl); + + const isCombo = targetWidget.type === "combo"; + let comboFilter; + if (isCombo && options.addFilterList !== false) { + comboFilter = node.addWidget("string", "control_filter_list", "", function (v) {}, { + serialize: false, // Don't include this in prompt. + }); + widgets.push(comboFilter); + } + valueControl.afterQueued = () => { var v = valueControl.value; - if (targetWidget.type == "combo" && v !== "fixed") { - let current_index = targetWidget.options.values.indexOf(targetWidget.value); - let current_length = targetWidget.options.values.length; + if (isCombo && v !== "fixed") { + let values = targetWidget.options.values; + const filter = comboFilter?.value; + if (filter) { + let check; + if (filter.startsWith("/") && filter.endsWith("/")) { + try { + const regex = new RegExp(filter.substring(1, filter.length - 1)); + check = (item) => regex.test(item); + } catch (error) { + console.error("Error constructing RegExp filter for node " + node.id, filter, error); + } + } + if (!check) { + const lower = filter.toLocaleLowerCase(); + check = (item) => item.toLocaleLowerCase().includes(lower); + } + values = values.filter(item => check(item)); + if (!values.length && targetWidget.options.values.length) { + console.warn("Filter for node " + node.id + " has filtered out all items", filter); + } + } + let current_index = values.indexOf(targetWidget.value); + let current_length = values.length; switch (v) { case "increment": @@ -51,7 +92,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random current_index = Math.max(0, current_index); current_index = Math.min(current_length - 1, current_index); if (current_index >= 0) { - let value = targetWidget.options.values[current_index]; + let value = values[current_index]; targetWidget.value = value; targetWidget.callback(value); } @@ -88,7 +129,8 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random targetWidget.callback(targetWidget.value); } } - return valueControl; + + return widgets; }; function seedWidget(node, inputName, inputData, app) {
Allows filtering of the items in a COMBO primitive (e.g. the LoadImage images list) when using control_after_generate. You can also filter the items with a regex by wrapping the expression in `/`s, e.g. `/(sdxl|sd15)/`. Maintains the existing behavior of `addValueControlWidget` for extensions currently using it, and adds `addValueControlWidgets`, which returns an array of the added widgets.
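For readers skimming the JavaScript diff, a rough Python paraphrase of the new filter logic in `widgets.js` (the function name here is hypothetical; only the matching rules mirror the diff):

```python
import re

def filter_combo_values(values, filt):
    """A filter wrapped in slashes is treated as a regex; anything else
    is a case-insensitive substring match, as in the diff."""
    if not filt:
        return values
    check = None
    if filt.startswith("/") and filt.endswith("/"):
        try:
            regex = re.compile(filt[1:-1])
            check = lambda item: bool(regex.search(item))
        except re.error as err:
            print("bad regex filter:", err)
    if check is None:
        lower = filt.lower()
        check = lambda item: lower in item.lower()
    return [v for v in values if check(v)]

print(filter_combo_values(["sd15.ckpt", "sdxl.ckpt", "other.ckpt"],
                          "/(sdxl|sd15)/"))
# ['sd15.ckpt', 'sdxl.ckpt']
```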
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2009
2023-11-20T21:48:41Z
2023-11-22T17:52:20Z
2023-11-22T17:52:20Z
2023-11-23T08:37:52Z
2,804
comfyanonymous/ComfyUI
17,901
C.40: Fixed a couple of typos.
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index 0a4a6c410..bf1bf47b7 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -3905,7 +3905,7 @@ That's what constructors are for. int d, m, y; }; -It is often a good idea to express the invariant as an `Ensure` on the constructor. +It is often a good idea to express the invariant as an `Ensures` on the constructor. ##### Note @@ -3941,7 +3941,7 @@ Also, the default for `int` would be better done as a [member initializer](#Rc-i ##### Enforcement -* Flag classes with user-define copy operations but no constructor (a user-defined copy is a good indicator that the class has an invariant) +* Flag classes with user-defined copy operations but no constructor (a user-defined copy is a good indicator that the class has an invariant) ### <a name="Rc-complete"></a> C.41: A constructor should create a fully initialized object
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/471
2015-12-18T16:36:52Z
2015-12-18T16:51:25Z
2015-12-18T16:51:25Z
2015-12-19T03:20:50Z
251
isocpp/CppCoreGuidelines
15,485
[3.8] bpo-41132: Use pymalloc allocator in the f-string parser (GH-21173)
diff --git a/Python/ast.c b/Python/ast.c index 0a999fcca43a8e..5efb690c299cac 100644 --- a/Python/ast.c +++ b/Python/ast.c @@ -4898,7 +4898,7 @@ fstring_compile_expr(const char *expr_start, const char *expr_end, len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. */ - str = PyMem_RawMalloc(len + 3); + str = PyMem_Malloc(len + 3); if (str == NULL) { PyErr_NoMemory(); return NULL; @@ -4914,7 +4914,7 @@ fstring_compile_expr(const char *expr_start, const char *expr_end, mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>", Py_eval_input, 0); if (!mod_n) { - PyMem_RawFree(str); + PyMem_Free(str); return NULL; } /* Reuse str to find the correct column offset. */ @@ -4922,7 +4922,7 @@ fstring_compile_expr(const char *expr_start, const char *expr_end, str[len+1] = '}'; fstring_fix_node_location(n, mod_n, str); mod = PyAST_FromNode(mod_n, &cf, "<fstring>", c->c_arena); - PyMem_RawFree(str); + PyMem_Free(str); PyNode_Free(mod_n); if (!mod) return NULL; @@ -5438,7 +5438,7 @@ ExprList_Append(ExprList *l, expr_ty exp) Py_ssize_t i; /* We're still using the cached data. Switch to alloc-ing. */ - l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size); + l->p = PyMem_Malloc(sizeof(expr_ty) * new_size); if (!l->p) return -1; /* Copy the cached data into the new buffer. */ @@ -5446,9 +5446,9 @@ ExprList_Append(ExprList *l, expr_ty exp) l->p[i] = l->data[i]; } else { /* Just realloc. */ - expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size); + expr_ty *tmp = PyMem_Realloc(l->p, sizeof(expr_ty) * new_size); if (!tmp) { - PyMem_RawFree(l->p); + PyMem_Free(l->p); l->p = NULL; return -1; } @@ -5476,7 +5476,7 @@ ExprList_Dealloc(ExprList *l) /* Do nothing. */ } else { /* We have dynamically allocated. Free the memory. */ - PyMem_RawFree(l->p); + PyMem_Free(l->p); } l->p = NULL; l->size = -1;
https://bugs.python.org/issue41132 Automerge-Triggered-By: @pablogsal
https://api.github.com/repos/python/cpython/pulls/21184
2020-06-27T18:26:32Z
2020-06-27T18:43:42Z
2020-06-27T18:43:42Z
2020-06-27T18:43:44Z
685
python/cpython
4,738
Update README.md
diff --git a/README.md b/README.md index c31017e..b1354cc 100644 --- a/README.md +++ b/README.md @@ -2336,7 +2336,7 @@ nan #### 💡 Explanation: -`'inf'` and `'nan'` are special strings (case-insensitive), which when explicitly typecasted to `float` type, are used to represent mathematical "infinity" and "not a number" respectively. +`'inf'` and `'nan'` are special strings (case-insensitive), which when explicitly typecast-ed to `float` type, are used to represent mathematical "infinity" and "not a number" respectively. --- @@ -2382,7 +2382,7 @@ nan >>> 44 ``` **💡 Explanation:** - This prank comes from [Raymond Hettinger's tweet](https://twitter.com/raymondh/status/1131103570856632321?lang=en). The space invader operator is actually just a malformatted `a -= (-1)`. Which is eqivalent to `a = a - (- 1)`. Similar for the `a += (+ 1)` case. + This prank comes from [Raymond Hettinger's tweet](https://twitter.com/raymondh/status/1131103570856632321?lang=en). The space invader operator is actually just a malformatted `a -= (-1)`. Which is equivalent to `a = a - (- 1)`. Similar for the `a += (+ 1)` case. * Python uses 2 bytes for local variable storage in functions. In theory, this means that only 65536 variables can be defined in a function. However, python has a handy solution built in that can be used to store more than 2^16 variable names. The following code demonstrates what happens in the stack when more than 65536 local variables are defined (Warning: This code prints around 2^18 lines of text, so be prepared!): ```py @@ -2390,7 +2390,7 @@ nan exec(""" def f(): """ + """ - """.join(["X"+str(x)+"=" + str(x) for x in range(65539)])) + """.join(["X" + str(x) + "=" + str(x) for x in range(65539)])) f()
Typo corrected.
https://api.github.com/repos/satwikkansal/wtfpython/pulls/146
2019-10-25T08:58:55Z
2019-10-25T14:22:53Z
2019-10-25T14:22:53Z
2019-10-25T14:22:58Z
524
satwikkansal/wtfpython
25,728
improve(rules): add mercurial (hg) support
diff --git a/tests/rules/test_mercurial.py b/tests/rules/test_mercurial.py new file mode 100644 index 000000000..08962f912 --- /dev/null +++ b/tests/rules/test_mercurial.py @@ -0,0 +1,134 @@ +import pytest + +from tests.utils import Command +from thefuck.rules.mercurial import ( + extract_possisiblities, match, get_new_command +) + + +@pytest.mark.parametrize('command', [ + Command('hg base', stderr=( + "hg: unknown command 'base'" + '\n(did you mean one of blame, phase, rebase?)' + )), + Command('hg branchch', stderr=( + "hg: unknown command 'branchch'" + '\n(did you mean one of branch, branches?)' + )), + Command('hg vert', stderr=( + "hg: unknown command 'vert'" + '\n(did you mean one of revert?)' + )), + Command('hg lgo -r tip', stderr=( + "hg: command 're' is ambiguous:" + '\n(did you mean one of log?)' + )), + Command('hg rerere', stderr=( + "hg: unknown command 'rerere'" + '\n(did you mean one of revert?)' + )), + Command('hg re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), + Command('hg re re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), +]) +def test_match(command): + assert match(command, None) + + +@pytest.mark.parametrize('command', [ + Command('hg', stderr=( + '\nMercurial Distributed SCM\n\nbasic commands:' + )), + Command('hg asdf', stderr=( + "hg: unknown command 'asdf'" + '\nMercurial Distributed SCM\n\nbasic commands:' + )), + Command('hg qwer', stderr=( + "hg: unknown command 'qwer'" + '\nMercurial Distributed SCM\n\nbasic commands:' + )), + Command('hg me', stderr=( + "\nabort: no repository found in './thefuck' (.hg not found)!" + )), + Command('hg reb', stderr=( + "\nabort: no repository found in './thefuck' (.hg not found)!" + )), + Command('hg co', stderr=( + "\nabort: no repository found in './thefuck' (.hg not found)!" 
+ )), +]) +def test_not_match(command): + assert not match(command, None) + + +@pytest.mark.parametrize('command, possibilities', [ + (Command('hg base', stderr=( + "hg: unknown command 'base'" + '\n(did you mean one of blame, phase, rebase?)' + )), ['blame', 'phase', 'rebase']), + (Command('hg branchch', stderr=( + "hg: unknown command 'branchch'" + '\n(did you mean one of branch, branches?)' + )), ['branch', 'branches']), + (Command('hg vert', stderr=( + "hg: unknown command 'vert'" + '\n(did you mean one of revert?)' + )), ['revert']), + (Command('hg lgo -r tip', stderr=( + "hg: command 're' is ambiguous:" + '\n(did you mean one of log?)' + )), ['log']), + (Command('hg rerere', stderr=( + "hg: unknown command 'rerere'" + '\n(did you mean one of revert?)' + )), ['revert']), + (Command('hg re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), ['rebase', 'recover', 'remove', 'rename', 'resolve', 'revert']), + (Command('hg re re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), ['rebase', 'recover', 'remove', 'rename', 'resolve', 'revert']), +]) +def test_extract_possisiblities(command, possibilities): + assert extract_possisiblities(command) == possibilities + + +@pytest.mark.parametrize('command, new_command', [ + (Command('hg base', stderr=( + "hg: unknown command 'base'" + '\n(did you mean one of blame, phase, rebase?)' + )), 'hg rebase'), + (Command('hg branchch', stderr=( + "hg: unknown command 'branchch'" + '\n(did you mean one of branch, branches?)' + )), 'hg branch'), + (Command('hg vert', stderr=( + "hg: unknown command 'vert'" + '\n(did you mean one of revert?)' + )), 'hg revert'), + (Command('hg lgo -r tip', stderr=( + "hg: command 're' is ambiguous:" + '\n(did you mean one of log?)' + )), 'hg log -r tip'), + (Command('hg rerere', stderr=( + "hg: unknown command 'rerere'" + '\n(did you mean one of revert?)' + )), 'hg revert'), + (Command('hg re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), 'hg rebase'), + (Command('hg re re', stderr=( + "hg: command 're' is ambiguous:" + '\n rebase recover remove rename resolve revert' + )), 'hg rebase re'), +]) +def test_get_new_command(command, new_command): + assert get_new_command(command, None) == new_command diff --git a/thefuck/rules/mercurial.py b/thefuck/rules/mercurial.py new file mode 100644 index 000000000..934e3f1e8 --- /dev/null +++ b/thefuck/rules/mercurial.py @@ -0,0 +1,34 @@ +import re + +from difflib import get_close_matches + + +def extract_possisiblities(command): + possib = re.findall(r'\n\(did you mean one of ([^\?]+)\?\)', command.stderr) + if possib: + return possib[0].split(', ') + possib = re.findall(r'\n ([^$]+)$', command.stderr) + if possib: + return possib[0].split(' ') + return possib + + +def match(command, settings): + return (command.script.startswith('hg ') + and ('hg: unknown command' in command.stderr + and '(did you mean one of ' in command.stderr + or "hg: command '" in command.stderr + and "' is ambiguous:" in command.stderr + ) + ) + + +def get_new_command(command, settings): + script = command.script.split(' ') + possisiblities = extract_possisiblities(command) + matches = get_close_matches(script[1], possisiblities) + if matches: + script[1] = matches[0] + else: + script[1] = possisiblities[0] + return ' '.join(script)
Please review and comment!
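The core of the new rule, condensed into a runnable sketch (the diff itself spells the helper `extract_possisiblities`; the suggestion parsing and `difflib` ranking below mirror `thefuck/rules/mercurial.py`):

```python
import re
from difflib import get_close_matches

def extract_possibilities(stderr):
    # "(did you mean one of blame, phase, rebase?)" style suggestions
    m = re.findall(r'\n\(did you mean one of ([^\?]+)\?\)', stderr)
    if m:
        return m[0].split(', ')
    # "hg: command 're' is ambiguous:\n    rebase recover ..." style
    m = re.findall(r'\n    ([^$]+)$', stderr)
    return m[0].split(' ') if m else m

stderr = ("hg: unknown command 'base'\n"
          "(did you mean one of blame, phase, rebase?)")
candidates = extract_possibilities(stderr)
print(get_close_matches('base', candidates))
# ['rebase', 'phase', 'blame'] -> the rule would pick 'rebase'
```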
https://api.github.com/repos/nvbn/thefuck/pulls/281
2015-07-07T00:40:07Z
2015-07-07T13:36:06Z
2015-07-07T13:36:06Z
2015-07-08T01:25:42Z
1,738
nvbn/thefuck
30,780
Fix mypyc compatibility issue
diff --git a/src/black/parsing.py b/src/black/parsing.py index 504e20be00..32cfa5239f 100644 --- a/src/black/parsing.py +++ b/src/black/parsing.py @@ -169,6 +169,7 @@ def stringify_ast( yield f"{' ' * depth}{node.__class__.__name__}(" + type_ignore_classes: Tuple[Type[Any], ...] for field in sorted(node._fields): # noqa: F402 # TypeIgnore will not be present using pypy < 3.8, so need for this if not (_IS_PYPY and sys.version_info < (3, 8)):
### Description I can't wait for when we drop Python 2 support FWIW :) ### Checklist - did you ... - [x] Add a CHANGELOG entry if necessary? -> n/a - [x] Add / update tests if necessary? -> n/a - [x] Add new / update outdated documentation? -> n/a
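The fix itself is a single annotation: mypyc wants one declared type for a variable that gets assigned on different branches. A rough illustration of the pattern, with branch bodies invented for the sketch:

```python
import sys
from typing import Any, Tuple, Type

# Annotating up front gives mypyc a single type for both branches;
# without it, compilation trips over the branch-dependent assignments.
type_ignore_classes: Tuple[Type[Any], ...]
if sys.version_info >= (3, 8):
    import ast
    type_ignore_classes = (ast.TypeIgnore,)
else:
    type_ignore_classes = ()
```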
https://api.github.com/repos/psf/black/pulls/2628
2021-11-19T03:07:24Z
2021-11-19T03:20:45Z
2021-11-19T03:20:45Z
2021-11-19T03:20:47Z
155
psf/black
24,458
Added a descriptive error if domain list includes a Unicode-encoded IDN
diff --git a/letsencrypt/configuration.py b/letsencrypt/configuration.py index a2a54d2d062..69778f5f049 100644 --- a/letsencrypt/configuration.py +++ b/letsencrypt/configuration.py @@ -144,6 +144,15 @@ def _check_config_domain_sanity(domains): if any("xn--" in d for d in domains): raise errors.ConfigurationError( "Punycode domains are not supported") + + # Unicode + try: + for domain in domains: + domain.encode('ascii') + except UnicodeDecodeError: + raise errors.ConfigurationError( + "Internationalized domain names are not supported") + # FQDN checks from # http://www.mkyong.com/regular-expressions/domain-name-regular-expression-example/ # Characters used, domain parts < 63 chars, tld > 1 < 64 chars
The current error for IDNs passed in Unicode form is incorrect and does not describe the actual problem: ``` $ letsencrypt --manual -d example.com -d ёжикв.сайт Requested domain is not a FQDN ``` This change checks for any domains that cannot be encoded as ASCII, and if one is present: ``` $ letsencrypt --manual -d example.com -d ёжикв.сайт Internationalized domain names are not supported ```
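A self-contained sketch of the new check (the diff catches `UnicodeDecodeError` because it targets Python 2 byte strings; on Python 3 the equivalent failure is `UnicodeEncodeError`, so the catch-all `UnicodeError` is used here):

```python
class ConfigurationError(Exception):
    """Stand-in for letsencrypt.errors.ConfigurationError."""

def check_no_idn(domains):
    # Any domain that cannot be encoded as ASCII is an IDN.
    try:
        for domain in domains:
            domain.encode('ascii')
    except UnicodeError:
        raise ConfigurationError(
            "Internationalized domain names are not supported")

check_no_idn(["example.com"])                 # passes silently
check_no_idn(["example.com", "ёжикв.сайт"])   # raises ConfigurationError
```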
https://api.github.com/repos/certbot/certbot/pulls/1759
2015-12-05T06:26:33Z
2015-12-05T07:17:21Z
2015-12-05T07:17:21Z
2016-05-06T19:22:00Z
205
certbot/certbot
496
`chat_loaders` refactoring
diff --git a/libs/langchain/langchain/chat_loaders/imessage.py b/libs/langchain/langchain/chat_loaders/imessage.py index d6c02f1e5307a2..eed0cfea3795ee 100644 --- a/libs/langchain/langchain/chat_loaders/imessage.py +++ b/libs/langchain/langchain/chat_loaders/imessage.py @@ -4,13 +4,13 @@ from typing import TYPE_CHECKING, Iterator, List, Optional, Union from langchain import schema -from langchain.chat_loaders import base as chat_loaders +from langchain.chat_loaders.base import BaseChatLoader, ChatSession if TYPE_CHECKING: import sqlite3 -class IMessageChatLoader(chat_loaders.BaseChatLoader): +class IMessageChatLoader(BaseChatLoader): """Load chat sessions from the `iMessage` chat.db SQLite file. It only works on macOS when you have iMessage enabled and have the chat.db file. @@ -18,8 +18,8 @@ class IMessageChatLoader(chat_loaders.BaseChatLoader): The chat.db file is likely located at ~/Library/Messages/chat.db. However, your terminal may not have permission to access this file. To resolve this, you can copy the file to a different location, change the permissions of the file, or - grant full disk access for your terminal emulator in System Settings > Security - and Privacy > Full Disk Access. + grant full disk access for your terminal emulator + in System Settings > Security and Privacy > Full Disk Access. """ def __init__(self, path: Optional[Union[str, Path]] = None): @@ -46,7 +46,7 @@ def __init__(self, path: Optional[Union[str, Path]] = None): def _load_single_chat_session( self, cursor: "sqlite3.Cursor", chat_id: int - ) -> chat_loaders.ChatSession: + ) -> ChatSession: """ Load a single chat session from the iMessage chat.db. @@ -83,9 +83,9 @@ def _load_single_chat_session( ) ) - return chat_loaders.ChatSession(messages=results) + return ChatSession(messages=results) - def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: + def lazy_load(self) -> Iterator[ChatSession]: """ Lazy load the chat sessions from the iMessage chat.db and yield them in the required format. 
diff --git a/libs/langchain/langchain/chat_loaders/slack.py b/libs/langchain/langchain/chat_loaders/slack.py index 0bbd503979c7c1..7c9f76c9650e83 100644 --- a/libs/langchain/langchain/chat_loaders/slack.py +++ b/libs/langchain/langchain/chat_loaders/slack.py @@ -6,12 +6,12 @@ from typing import Dict, Iterator, List, Union from langchain import schema -from langchain.chat_loaders import base as chat_loaders +from langchain.chat_loaders.base import BaseChatLoader, ChatSession logger = logging.getLogger(__name__) -class SlackChatLoader(chat_loaders.BaseChatLoader): +class SlackChatLoader(BaseChatLoader): """Load `Slack` conversations from a dump zip file.""" def __init__( @@ -27,9 +27,7 @@ def __init__( if not self.zip_path.exists(): raise FileNotFoundError(f"File {self.zip_path} not found") - def _load_single_chat_session( - self, messages: List[Dict] - ) -> chat_loaders.ChatSession: + def _load_single_chat_session(self, messages: List[Dict]) -> ChatSession: results: List[Union[schema.AIMessage, schema.HumanMessage]] = [] previous_sender = None for message in messages: @@ -62,7 +60,7 @@ def _load_single_chat_session( ) ) previous_sender = sender - return chat_loaders.ChatSession(messages=results) + return ChatSession(messages=results) def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" @@ -72,7 +70,7 @@ def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: raise ValueError(f"Expected list of dictionaries, got {type(data)}") return data - def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: + def lazy_load(self) -> Iterator[ChatSession]: """ Lazy load the chat sessions from the Slack dump file and yield them in the required format. diff --git a/libs/langchain/langchain/chat_loaders/telegram.py b/libs/langchain/langchain/chat_loaders/telegram.py index 5f0bbfa3246d86..12c30014ac1fa2 100644 --- a/libs/langchain/langchain/chat_loaders/telegram.py +++ b/libs/langchain/langchain/chat_loaders/telegram.py @@ -7,12 +7,12 @@ from typing import Iterator, List, Union from langchain import schema -from langchain.chat_loaders import base as chat_loaders +from langchain.chat_loaders.base import BaseChatLoader, ChatSession logger = logging.getLogger(__name__) -class TelegramChatLoader(chat_loaders.BaseChatLoader): +class TelegramChatLoader(BaseChatLoader): """Load `telegram` conversations to LangChain chat messages. To export, use the Telegram Desktop app from @@ -35,16 +35,14 @@ def __init__( """ self.path = path if isinstance(path, str) else str(path) - def _load_single_chat_session_html( - self, file_path: str - ) -> chat_loaders.ChatSession: + def _load_single_chat_session_html(self, file_path: str) -> ChatSession: """Load a single chat session from an HTML file. Args: file_path (str): Path to the HTML file. Returns: - chat_loaders.ChatSession: The loaded chat session. + ChatSession: The loaded chat session. """ try: from bs4 import BeautifulSoup @@ -81,18 +79,16 @@ def _load_single_chat_session_html( ) previous_sender = from_name - return chat_loaders.ChatSession(messages=results) + return ChatSession(messages=results) - def _load_single_chat_session_json( - self, file_path: str - ) -> chat_loaders.ChatSession: + def _load_single_chat_session_json(self, file_path: str) -> ChatSession: """Load a single chat session from a JSON file. Args: file_path (str): Path to the JSON file. Returns: - chat_loaders.ChatSession: The loaded chat session. + ChatSession: The loaded chat session. 
""" with open(file_path, "r", encoding="utf-8") as file: data = json.load(file) @@ -114,7 +110,7 @@ def _load_single_chat_session_json( ) ) - return chat_loaders.ChatSession(messages=results) + return ChatSession(messages=results) def _iterate_files(self, path: str) -> Iterator[str]: """Iterate over files in a directory or zip file. @@ -139,12 +135,12 @@ def _iterate_files(self, path: str) -> Iterator[str]: with tempfile.TemporaryDirectory() as temp_dir: yield zip_file.extract(file, path=temp_dir) - def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: + def lazy_load(self) -> Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them in as chat sessions. Yields: - chat_loaders.ChatSession: The loaded chat session. + ChatSession: The loaded chat session. """ for file_path in self._iterate_files(self.path): if file_path.endswith(".html"): diff --git a/libs/langchain/langchain/chat_loaders/whatsapp.py b/libs/langchain/langchain/chat_loaders/whatsapp.py index e2518ab44df660..39266485e23ea3 100644 --- a/libs/langchain/langchain/chat_loaders/whatsapp.py +++ b/libs/langchain/langchain/chat_loaders/whatsapp.py @@ -5,13 +5,13 @@ from typing import Iterator, List, Union from langchain import schema -from langchain.chat_loaders import base as chat_loaders +from langchain.chat_loaders.base import BaseChatLoader, ChatSession from langchain.schema import messages logger = logging.getLogger(__name__) -class WhatsAppChatLoader(chat_loaders.BaseChatLoader): +class WhatsAppChatLoader(BaseChatLoader): """Load `WhatsApp` conversations from a dump zip file or directory.""" def __init__(self, path: str): @@ -42,7 +42,7 @@ def __init__(self, path: str): flags=re.IGNORECASE, ) - def _load_single_chat_session(self, file_path: str) -> chat_loaders.ChatSession: + def _load_single_chat_session(self, file_path: str) -> ChatSession: """Load a single chat session from a file. Args: @@ -84,7 +84,7 @@ def _load_single_chat_session(self, file_path: str) -> chat_loaders.ChatSession: ) else: logger.debug(f"Could not parse line: {line}") - return chat_loaders.ChatSession(messages=results) + return ChatSession(messages=results) def _iterate_files(self, path: str) -> Iterator[str]: """Iterate over the files in a directory or zip file. @@ -108,7 +108,7 @@ def _iterate_files(self, path: str) -> Iterator[str]: if file.endswith(".txt"): yield zip_file.extract(file) - def lazy_load(self) -> Iterator[chat_loaders.ChatSession]: + def lazy_load(self) -> Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them as chat sessions.
Replaced the unnecessary namespace renaming `from langchain.chat_loaders import base as chat_loaders` with `from langchain.chat_loaders.base import BaseChatLoader, ChatSession` and simplified the corresponding type annotations. @eyurtsev
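What the refactor looks like at a use site, assuming the 2023-era `langchain` package (the loader class here is invented for the sketch):

```python
from typing import Iterator

# After: direct imports instead of the `base as chat_loaders` alias.
from langchain.chat_loaders.base import BaseChatLoader, ChatSession


class ExampleChatLoader(BaseChatLoader):
    def lazy_load(self) -> Iterator[ChatSession]:
        # Annotations now read `ChatSession`, not `chat_loaders.ChatSession`.
        yield ChatSession(messages=[])
```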
https://api.github.com/repos/langchain-ai/langchain/pulls/10381
2023-09-08T17:16:24Z
2023-09-09T22:22:56Z
2023-09-09T22:22:56Z
2023-09-10T18:23:43Z
2,233
langchain-ai/langchain
43,519
Added scibert-nli model card
diff --git a/model_cards/gsarti/scibert-nli/README.md b/model_cards/gsarti/scibert-nli/README.md new file mode 100644 index 0000000000000..1388fea8d557a --- /dev/null +++ b/model_cards/gsarti/scibert-nli/README.md @@ -0,0 +1,32 @@ +# SciBERT-NLI + +This is the model [SciBERT](https://github.com/allenai/scibert) [1] fine-tuned on the [SNLI](https://nlp.stanford.edu/projects/snli/) and the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) datasets using the [`sentence-transformers` library](https://github.com/UKPLab/sentence-transformers/) to produce universal sentence embeddings [2]. + +The model uses the original `scivocab` wordpiece vocabulary and was trained using the **average pooling strategy** and a **softmax loss**. + +**Base model**: `allenai/scibert-scivocab-cased` from HuggingFace AutoModel + +**Parameters**: + +| Parameter | Value | +|----------------|-------| +| Batch size | 64 | +| Training steps | 20000 | +| Warmup steps | 1450 | + +**Performances**: The performance was evaluated on the test portion of the [STS dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) using Spearman rank correlation and compared to the performances of a general BERT base model obtained with the same procedure to verify their similarity. + + +| Model | Score | +|-----------------------------|-------------| +| `scibert-nli` (ours) | 74.50 | +| `bert-base-nli-mean-tokens` | 77.12 | + + +An example usage for similarity-based scientific paper retrieval is provided in the [Covid Papers Browser](https://github.com/gsarti/covid-papers-browser) repository. + +**References:** + +[1] I. Beltagy et al, [SciBERT: A Pretrained Language Model for Scientific Text](https://www.aclweb.org/anthology/D19-1371/) + +[2] A. Conneau et al., [Supervised Learning of Universal Sentence Representations from Natural Language Inference Data](https://www.aclweb.org/anthology/D17-1070/)
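A possible usage sketch via `sentence-transformers`, assuming the model is published on the Hub as `gsarti/scibert-nli` and a recent library version that exposes `util.cos_sim`:

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

model = SentenceTransformer("gsarti/scibert-nli")  # assumed Hub id
embeddings = model.encode([
    "BERT is a pretrained language model.",
    "SciBERT adapts BERT to scientific text.",
])
print(cos_sim(embeddings[0], embeddings[1]))  # cosine similarity score
```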
https://api.github.com/repos/huggingface/transformers/pulls/3376
2020-03-22T13:26:17Z
2020-03-23T15:55:42Z
2020-03-23T15:55:42Z
2020-03-23T15:55:43Z
544
huggingface/transformers
12,407
Set CSR version in make_csr
diff --git a/letsencrypt/crypto_util.py b/letsencrypt/crypto_util.py index 76265a73914..5fdcba843aa 100644 --- a/letsencrypt/crypto_util.py +++ b/letsencrypt/crypto_util.py @@ -118,6 +118,7 @@ def make_csr(key_str, domains): value=", ".join("DNS:%s" % d for d in domains) ), ]) + req.set_version(2) req.set_pubkey(pkey) req.sign(pkey, "sha256") return tuple(OpenSSL.crypto.dump_certificate_request(method, req)
Fixes #2528.
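A minimal standalone reproduction of the patched code path (the SAN extension from `make_csr` is omitted for brevity; all calls are standard pyOpenSSL):

```python
import OpenSSL

pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)

req = OpenSSL.crypto.X509Req()
req.get_subject().CN = "example.com"
req.set_version(2)  # the one-line fix: set the CSR version explicitly
req.set_pubkey(pkey)
req.sign(pkey, "sha256")

print(OpenSSL.crypto.dump_certificate_request(
    OpenSSL.crypto.FILETYPE_PEM, req).decode())
```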
https://api.github.com/repos/certbot/certbot/pulls/2529
2016-02-23T05:45:45Z
2016-02-23T16:08:23Z
2016-02-23T16:08:23Z
2016-05-06T19:22:24Z
134
certbot/certbot
632
🌐 Update Chinese translation for `docs/zh/docs/tutorial/query-params.md`
diff --git a/docs/zh/docs/tutorial/query-params.md b/docs/zh/docs/tutorial/query-params.md index a0cc7fea39310..308dd68a486ab 100644 --- a/docs/zh/docs/tutorial/query-params.md +++ b/docs/zh/docs/tutorial/query-params.md @@ -1,67 +1,67 @@ # 查询参数 -声明不属于路径参数的其他函数参数时,它们将被自动解释为"查询字符串"参数 +声明的参数不是路径参数时,路径操作函数会把该参数自动解释为**查询**参数。 ```Python hl_lines="9" {!../../../docs_src/query_params/tutorial001.py!} ``` -查询字符串是键值对的集合,这些键值对位于 URL 的 `?` 之后,并以 `&` 符号分隔。 +查询字符串是键值对的集合,这些键值对位于 URL 的 `?` 之后,以 `&` 分隔。 -例如,在以下 url 中: +例如,以下 URL 中: ``` http://127.0.0.1:8000/items/?skip=0&limit=10 ``` -...查询参数为: +……查询参数为: -* `skip`:对应的值为 `0` -* `limit`:对应的值为 `10` +* `skip`:值为 `0` +* `limit`:值为 `10` -由于它们是 URL 的一部分,因此它们的"原始值"是字符串。 +这些值都是 URL 的组成部分,因此,它们的类型**本应**是字符串。 -但是,当你为它们声明了 Python 类型(在上面的示例中为 `int`)时,它们将转换为该类型并针对该类型进行校验。 +但声明 Python 类型(上例中为 `int`)之后,这些值就会转换为声明的类型,并进行类型校验。 -应用于路径参数的所有相同过程也适用于查询参数: +所有应用于路径参数的流程也适用于查询参数: -* (很明显的)编辑器支持 -* 数据<abbr title="将来自 HTTP 请求的字符串转换为 Python 数据类型">"解析"</abbr> +* (显而易见的)编辑器支持 +* 数据<abbr title="将来自 HTTP 请求的字符串转换为 Python 数据类型">**解析**</abbr> * 数据校验 -* 自动生成文档 +* API 文档 ## 默认值 -由于查询参数不是路径的固定部分,因此它们可以是可选的,并且可以有默认值。 +查询参数不是路径的固定内容,它是可选的,还支持默认值。 -在上面的示例中,它们具有 `skip=0` 和 `limit=10` 的默认值。 +上例用 `skip=0` 和 `limit=10` 设定默认值。 -因此,访问 URL: +访问 URL: ``` http://127.0.0.1:8000/items/ ``` -将与访问以下地址相同: +与访问以下地址相同: ``` http://127.0.0.1:8000/items/?skip=0&limit=10 ``` -但是,如果你访问的是: +但如果访问: ``` http://127.0.0.1:8000/items/?skip=20 ``` -函数中的参数值将会是: +查询参数的值就是: * `skip=20`:在 URL 中设定的值 * `limit=10`:使用默认值 ## 可选参数 -通过同样的方式,你可以将它们的默认值设置为 `None` 来声明可选查询参数: +同理,把默认值设为 `None` 即可声明**可选的**查询参数: === "Python 3.10+" @@ -76,20 +76,27 @@ http://127.0.0.1:8000/items/?skip=20 ``` -在这个例子中,函数参数 `q` 将是可选的,并且默认值为 `None`。 +本例中,查询参数 `q` 是可选的,默认值为 `None`。 -!!! check - 还要注意的是,**FastAPI** 足够聪明,能够分辨出参数 `item_id` 是路径参数而 `q` 不是,因此 `q` 是一个查询参数。 +!!! check "检查" + + 注意,**FastAPI** 可以识别出 `item_id` 是路径参数,`q` 不是路径参数,而是查询参数。 + +!!! 
note "笔记" + + 因为默认值为 `= None`,FastAPI 把 `q` 识别为可选参数。 + + FastAPI 不使用 `Optional[str]` 中的 `Optional`(只使用 `str`),但 `Optional[str]` 可以帮助编辑器发现代码中的错误。 ## 查询参数类型转换 -你还可以声明 `bool` 类型,它们将被自动转换: +参数还可以声明为 `bool` 类型,FastAPI 会自动转换参数类型: -```Python hl_lines="7" +```Python hl_lines="9" {!../../../docs_src/query_params/tutorial003.py!} ``` -这个例子中,如果你访问: +本例中,访问: ``` http://127.0.0.1:8000/items/foo?short=1 @@ -119,42 +126,42 @@ http://127.0.0.1:8000/items/foo?short=on http://127.0.0.1:8000/items/foo?short=yes ``` -或任何其他的变体形式(大写,首字母大写等等),你的函数接收的 `short` 参数都会是布尔值 `True`。对于值为 `False` 的情况也是一样的。 +或其它任意大小写形式(大写、首字母大写等),函数接收的 `short` 参数都是布尔值 `True`。值为 `False` 时也一样。 ## 多个路径和查询参数 -你可以同时声明多个路径参数和查询参数,**FastAPI** 能够识别它们。 +**FastAPI** 可以识别同时声明的多个路径参数和查询参数。 -而且你不需要以任何特定的顺序来声明。 +而且声明查询参数的顺序并不重要。 -它们将通过名称被检测到: +FastAPI 通过参数名进行检测: -```Python hl_lines="6 8" +```Python hl_lines="8 10" {!../../../docs_src/query_params/tutorial004.py!} ``` -## 必需查询参数 +## 必选查询参数 -当你为非路径参数声明了默认值时(目前而言,我们所知道的仅有查询参数),则该参数不是必需的。 +为不是路径参数的参数声明默认值(至此,仅有查询参数),该参数就**不是必选**的了。 -如果你不想添加一个特定的值,而只是想使该参数成为可选的,则将默认值设置为 `None`。 +如果只想把参数设为**可选**,但又不想指定参数的值,则要把默认值设为 `None`。 -但当你想让一个查询参数成为必需的,不声明任何默认值就可以: +如果要把查询参数设置为**必选**,就不要声明默认值: ```Python hl_lines="6-7" {!../../../docs_src/query_params/tutorial005.py!} ``` -这里的查询参数 `needy` 是类型为 `str` 的必需查询参数。 +这里的查询参数 `needy` 是类型为 `str` 的必选查询参数。 -如果你在浏览器中打开一个像下面的 URL: +在浏览器中打开如下 URL: ``` http://127.0.0.1:8000/items/foo-item ``` -...因为没有添加必需的参数 `needy`,你将看到类似以下的错误: +……因为路径中没有必选参数 `needy`,返回的响应中会显示如下错误信息: ```JSON { @@ -171,13 +178,13 @@ http://127.0.0.1:8000/items/foo-item } ``` -由于 `needy` 是必需参数,因此你需要在 URL 中设置它的值: +`needy` 是必选参数,因此要在 URL 中设置值: ``` http://127.0.0.1:8000/items/foo-item?needy=sooooneedy ``` -...这样就正常了: +……这样就正常了: ```JSON { @@ -186,17 +193,18 @@ http://127.0.0.1:8000/items/foo-item?needy=sooooneedy } ``` -当然,你也可以定义一些参数为必需的,一些具有默认值,而某些则完全是可选的: +当然,把一些参数定义为必选,为另一些参数设置默认值,再把其它参数定义为可选,这些操作都是可以的: -```Python hl_lines="7" +```Python hl_lines="10" {!../../../docs_src/query_params/tutorial006.py!} ``` -在这个例子中,有3个查询参数: +本例中有 3 个查询参数: + +* `needy`,必选的 `str` 类型参数 +* `skip`,默认值为 `0` 的 `int` 类型参数 +* `limit`,可选的 `int` 类型参数 -* `needy`,一个必需的 `str` 类型参数。 -* `skip`,一个默认值为 `0` 的 `int` 类型参数。 -* `limit`,一个可选的 `int` 类型参数。 +!!! tip "提示" -!!! tip - 你还可以像在 [路径参数](path-params.md#predefined-values){.internal-link target=_blank} 中那样使用 `Enum`。 + 还可以像在[路径参数](path-params.md#predefined-values){.internal-link target=_blank} 中那样使用 `Enum`。
Also fixes the code highlighting for tutorial002.py and tutorial006.py.
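The translated page revolves around examples like `tutorial002.py`; a minimal sketch of the optional-query-parameter pattern it documents:

```python
from typing import Optional
from fastapi import FastAPI

app = FastAPI()

# `q` is optional because it defaults to None; `short` is converted
# from strings like "1", "true", "on", "yes" to a bool automatically.
@app.get("/items/{item_id}")
async def read_item(item_id: str, q: Optional[str] = None,
                    short: bool = False):
    item = {"item_id": item_id}
    if q:
        item["q"] = q
    if not short:
        item["description"] = "This is an amazing item"
    return item
```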
https://api.github.com/repos/tiangolo/fastapi/pulls/3480
2021-07-07T08:05:07Z
2024-04-01T05:36:48Z
2024-04-01T05:36:48Z
2024-04-01T05:36:48Z
2,290
tiangolo/fastapi
23,195
C.146 Fix variable name in example
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index ec3200ed6..94876f845 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -8207,8 +8207,8 @@ Consider: cout << pb2->id(); // "D" - if (pb1->id() == "D") { // looks innocent - D* pd = static_cast<D*>(pb1); + if (pb2->id() == "D") { // looks innocent + D* pd = static_cast<D*>(pb2); // ... } // ...
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/2116
2023-08-06T04:02:14Z
2023-08-06T05:25:02Z
2023-08-06T05:25:02Z
2023-08-06T05:25:02Z
151
isocpp/CppCoreGuidelines
15,273
Reset progress bar in between report runs
diff --git a/frontend/src/components/core/Block/Block.tsx b/frontend/src/components/core/Block/Block.tsx index a403c3d1674f..c6df749bb5fe 100644 --- a/frontend/src/components/core/Block/Block.tsx +++ b/frontend/src/components/core/Block/Block.tsx @@ -16,7 +16,6 @@ */ import React, { PureComponent, ReactNode, Suspense } from "react" -import { Progress } from "reactstrap" import { AutoSizer } from "react-virtualized" import { List, Map as ImmutableMap } from "immutable" import { dispatchOneOf } from "lib/immutableProto" @@ -59,6 +58,7 @@ const Button = React.lazy(() => import("components/widgets/Button/")) const Checkbox = React.lazy(() => import("components/widgets/Checkbox/")) const DateInput = React.lazy(() => import("components/widgets/DateInput/")) const Multiselect = React.lazy(() => import("components/widgets/Multiselect/")) +const Progress = React.lazy(() => import("components/elements/Progress/")) const Radio = React.lazy(() => import("components/widgets/Radio/")) const Selectbox = React.lazy(() => import("components/widgets/Selectbox/")) const Slider = React.lazy(() => import("components/widgets/Slider/")) @@ -249,13 +249,7 @@ class Block extends PureComponent<Props> { plotlyChart: (el: SimpleElement) => ( <PlotlyChart element={el} width={width} /> ), - progress: (el: SimpleElement) => ( - <Progress - value={el.get("value")} - className="stProgress" - style={{ width }} - /> - ), + progress: (el: SimpleElement) => <Progress element={el} width={width} />, table: (el: SimpleElement) => <Table element={el} width={width} />, text: (el: SimpleElement) => <Text element={el} width={width} />, vegaLiteChart: (el: SimpleElement) => ( diff --git a/frontend/src/components/elements/Progress/Progress.scss b/frontend/src/components/elements/Progress/Progress.scss new file mode 100644 index 000000000000..584db1e590f8 --- /dev/null +++ b/frontend/src/components/elements/Progress/Progress.scss @@ -0,0 +1,32 @@ +/** + * Copyright 2018-2019 Streamlit Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +@import "src/assets/css/variables"; + +// Reset the progress bar to 0 without animating on report re-run. +.reportview-container { + .without-transition .progress-bar { + transition: none; + } + + .with-transition .progress-bar { + transition: width 0.1s linear; + } + + .stale-element .progress-bar { + background-color: transparent; + } +} diff --git a/frontend/src/components/elements/Progress/Progress.tsx b/frontend/src/components/elements/Progress/Progress.tsx new file mode 100644 index 000000000000..3142ac25a34b --- /dev/null +++ b/frontend/src/components/elements/Progress/Progress.tsx @@ -0,0 +1,63 @@ +/** + * @license + * Copyright 2018-2019 Streamlit Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from "react" +import { Map as ImmutableMap } from "immutable" +import { Progress as UIProgress } from "reactstrap" + +import "./Progress.scss" + +interface Props { + width: number + element: ImmutableMap<string, any> +} + +const FAST_UPDATE_MS = 50 + +class Progress extends React.PureComponent<Props> { + lastValue = -1 + lastAnimatedTime = -1 + + public render(): React.ReactNode { + const { element, width } = this.props + const value = element.get("value") + const time = new Date().getTime() + + // Make progress bar stop acting weird when moving backwards or quickly. + const isMovingBackwards = value < this.lastValue + const isMovingSuperFast = time - this.lastAnimatedTime < FAST_UPDATE_MS + const className = + isMovingBackwards || isMovingSuperFast + ? "without-transition" + : "with-transition" + + if (className === "with-transition") { + this.lastAnimatedTime = time + } + this.lastValue = value + + return ( + <UIProgress + value={value} + className={"stProgress " + className} + style={{ width }} + /> + ) + } +} + +export default Progress diff --git a/frontend/src/components/elements/Progress/index.tsx b/frontend/src/components/elements/Progress/index.tsx new file mode 100644 index 000000000000..9bf69066a927 --- /dev/null +++ b/frontend/src/components/elements/Progress/index.tsx @@ -0,0 +1,18 @@ +/** + * @license + * Copyright 2018-2019 Streamlit Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export { default } from "./Progress" diff --git a/lib/streamlit/credentials.py b/lib/streamlit/credentials.py index 7202ba6cf8b2..b32909eca171 100644 --- a/lib/streamlit/credentials.py +++ b/lib/streamlit/credentials.py @@ -202,8 +202,10 @@ def activate(self, show_instructions=True): email = "" else: email = click.prompt( - text=EMAIL_PROMPT, prompt_suffix="", default="", - show_default=False + text=EMAIL_PROMPT, + prompt_suffix="", + default="", + show_default=False, ) self.activation = _verify_email(email) diff --git a/lib/streamlit/server/Server.py b/lib/streamlit/server/Server.py index 5280bb3d386d..723de9c72ea8 100644 --- a/lib/streamlit/server/Server.py +++ b/lib/streamlit/server/Server.py @@ -247,10 +247,11 @@ def _create_app(self): routes.extend( [ - (r"/(.*)", StaticFileHandler, { - "path": "%s/" % static_path, - "default_filename": "index.html", - }), + ( + r"/(.*)", + StaticFileHandler, + {"path": "%s/" % static_path, "default_filename": "index.html"}, + ) ] ) diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py index bb553c899e60..d9eafdb92cb8 100644 --- a/lib/streamlit/util.py +++ b/lib/streamlit/util.py @@ -252,6 +252,7 @@ def open_browser(url): # browser even though 'start url' works from the command prompt. # Fun! import webbrowser + webbrowser.open(url) return
**Issue:** https://github.com/streamlit/streamlit-old-private/issues/760 **Description:** - Unmount progress bar when it's stale so it resets **Notes:** - I didn't see any issues with the `update-progress animation` as mentioned in the issue. Perhaps @tconkling can provide reproduction steps for that.
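A minimal script to exercise the behavior, using only the public `st.progress` API; re-running it is what previously left the bar animating from its old position:

```python
import time
import streamlit as st

bar = st.progress(0)      # resets to 0 on every (re)run
for pct in range(101):
    bar.progress(pct)     # fast updates no longer animate oddly
    time.sleep(0.01)
```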
https://api.github.com/repos/streamlit/streamlit/pulls/321
2019-10-08T15:01:11Z
2019-10-14T20:48:35Z
2019-10-14T20:48:35Z
2019-10-14T20:48:38Z
1,968
streamlit/streamlit
22,550
Remove redundant SQL index in Pastebin exercise
diff --git a/solutions/system_design/pastebin/README.md b/solutions/system_design/pastebin/README.md index 756c78c274..2d87ddcc7e 100644 --- a/solutions/system_design/pastebin/README.md +++ b/solutions/system_design/pastebin/README.md @@ -116,7 +116,7 @@ paste_path varchar(255) NOT NULL PRIMARY KEY(shortlink) ``` -We'll create an [index](https://github.com/donnemartin/system-design-primer#use-good-indices) on `shortlink ` and `created_at` to speed up lookups (log-time instead of scanning the entire table) and to keep the data in memory. Reading 1 MB sequentially from memory takes about 250 microseconds, while reading from SSD takes 4x and from disk takes 80x longer.<sup><a href=https://github.com/donnemartin/system-design-primer#latency-numbers-every-programmer-should-know>1</a></sup> +Setting the primary key to be based on the `shortlink` column creates an [index](https://github.com/donnemartin/system-design-primer#use-good-indices) that the database uses to enforce uniqueness. We'll create an additional index on `created_at` to speed up lookups (log-time instead of scanning the entire table) and to keep the data in memory. Reading 1 MB sequentially from memory takes about 250 microseconds, while reading from SSD takes 4x and from disk takes 80x longer.<sup><a href=https://github.com/donnemartin/system-design-primer#latency-numbers-every-programmer-should-know>1</a></sup> To generate the unique url, we could:
Making a column a primary key enforces uniqueness, and the database usually does so by creating a unique clustered index, so you don't need a second index on `shortlink`.
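This is easy to verify with SQLite, whose behavior matches the claim (other engines differ in detail but also back primary keys with an index):

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("""
    CREATE TABLE pastes (
        shortlink  TEXT NOT NULL PRIMARY KEY,
        created_at TEXT NOT NULL
    )""")

# The PRIMARY KEY already gives us an index on shortlink ...
for row in con.execute(
        "EXPLAIN QUERY PLAN SELECT * FROM pastes WHERE shortlink = ?",
        ("abc",)):
    print(row)  # ... SEARCH pastes USING INDEX sqlite_autoindex_pastes_1

# ... so only created_at needs an explicit index.
con.execute("CREATE INDEX idx_created_at ON pastes (created_at)")
```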
https://api.github.com/repos/donnemartin/system-design-primer/pulls/405
2020-04-20T19:16:06Z
2020-07-07T01:05:51Z
2020-07-07T01:05:51Z
2020-07-07T01:06:04Z
380
donnemartin/system-design-primer
36,749
Prep for 1.32.1
diff --git a/.azure-pipelines/release.yml b/.azure-pipelines/release.yml index 1c983a3b647..2374289e3c9 100644 --- a/.azure-pipelines/release.yml +++ b/.azure-pipelines/release.yml @@ -8,7 +8,7 @@ pr: none variables: dockerTag: ${{variables['Build.SourceBranchName']}} - snapBuildTimeout: 5400 + snapBuildTimeout: 19800 stages: - template: templates/stages/test-and-package-stage.yml diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md index 2fcb78c99c2..f3bd9f56507 100644 --- a/certbot/CHANGELOG.md +++ b/certbot/CHANGELOG.md @@ -2,6 +2,16 @@ Certbot adheres to [Semantic Versioning](https://semver.org/). +## 1.32.1 - master + +### Fixed + +* Our snaps and docker images were rebuilt to include updated versions of our dependencies. + +This release was not pushed to PyPI since those packages were unaffected. + +More details about these changes can be found on our GitHub repo. + ## 1.32.0 - 2022-11-08 ### Added
I wanted to do this because we were notified that https://ubuntu.com/security/notices/USN-5638-3/ affects our snaps. This probably doesn't affect us, but rebuilding to be safe seems worth it to me personally. I started to just trigger a new v1.32.0 release build, but I don't want to overwrite our 2.0 Docker images under the `latest` tag. Changelog changes here are similar to what has been done for past point releases like https://github.com/certbot/certbot/pull/8501. I also cherry picked #9474 to this branch to help the release process pass.
https://api.github.com/repos/certbot/certbot/pulls/9492
2022-12-02T15:00:30Z
2022-12-05T15:00:44Z
2022-12-05T15:00:44Z
2022-12-05T15:00:45Z
299
certbot/certbot
1,077
Add share.flows command, fix #2779
diff --git a/mitmproxy/addons/__init__.py b/mitmproxy/addons/__init__.py index 8f84c20d9f..619211130b 100644 --- a/mitmproxy/addons/__init__.py +++ b/mitmproxy/addons/__init__.py @@ -20,6 +20,7 @@ from mitmproxy.addons import streambodies from mitmproxy.addons import save from mitmproxy.addons import upstream_auth +from mitmproxy.addons import share def default_addons(): @@ -46,4 +47,5 @@ def default_addons(): streambodies.StreamBodies(), save.Save(), upstream_auth.UpstreamAuth(), + share.Share() ] diff --git a/mitmproxy/addons/save.py b/mitmproxy/addons/save.py index 44afef686e..47da29b256 100644 --- a/mitmproxy/addons/save.py +++ b/mitmproxy/addons/save.py @@ -61,8 +61,8 @@ def save(self, flows: typing.Sequence[flow.Flow], path: mitmproxy.types.Path) -> except IOError as v: raise exceptions.CommandError(v) from v stream = io.FlowWriter(f) - for i in flows: - stream.add(i) + for x in flows: + stream.add(x) f.close() ctx.log.alert("Saved %s flows." % len(flows)) diff --git a/mitmproxy/addons/share.py b/mitmproxy/addons/share.py new file mode 100644 index 0000000000..1a234cc944 --- /dev/null +++ b/mitmproxy/addons/share.py @@ -0,0 +1,79 @@ +import typing +import random +import string +import io +import http.client + +from mitmproxy import command +import mitmproxy.io +from mitmproxy import ctx +from mitmproxy import flow +from mitmproxy.net.http import status_codes + + +class Share: + def encode_multipart_formdata(self, filename: str, content: bytes) -> typing.Tuple[str, bytes]: + params = {"key": filename, "acl": "bucket-owner-full-control", "Content-Type": "application/octet-stream"} + LIMIT = b'---------------------------198495659117975628761412556003' + CRLF = b'\r\n' + l = [] + for (key, value) in params.items(): + l.append(b'--' + LIMIT) + l.append(b'Content-Disposition: form-data; name="%b"' % key.encode("utf-8")) + l.append(b'') + l.append(value.encode("utf-8")) + l.append(b'--' + LIMIT) + l.append(b'Content-Disposition: form-data; name="file"; filename="%b"' % filename.encode("utf-8")) + l.append(b'Content-Type: application/octet-stream') + l.append(b'') + l.append(content) + l.append(b'--' + LIMIT + b'--') + l.append(b'') + body = CRLF.join(l) + content_type = 'multipart/form-data; boundary=%s' % LIMIT.decode("utf-8") + return content_type, body + + def post_multipart(self, host: str, filename: str, content: bytes) -> str: + """ + Upload flows to the specified S3 server. + + Returns: + - The share URL, if upload is successful. + Raises: + - IOError, otherwise. 
+ """ + content_type, body = self.encode_multipart_formdata(filename, content) + conn = http.client.HTTPConnection(host) # FIXME: This ultimately needs to be HTTPSConnection + headers = {'content-type': content_type} + try: + conn.request("POST", "", body, headers) + resp = conn.getresponse() + except Exception as v: + raise IOError(v) + finally: + conn.close() + if resp.status != 204: + if resp.reason: + reason = resp.reason + else: + reason = status_codes.RESPONSES.get(resp.status, str(resp.status)) + raise IOError(reason) + return "https://share.mitmproxy.org/%s" % filename + + @command.command("share.flows") + def share(self, flows: typing.Sequence[flow.Flow]) -> None: + u_id = "".join(random.choice(string.ascii_lowercase + string.digits)for _ in range(7)) + f = io.BytesIO() + stream = mitmproxy.io.FlowWriter(f) + for x in flows: + stream.add(x) + f.seek(0) + content = f.read() + try: + res = self.post_multipart('upload.share.mitmproxy.org.s3.amazonaws.com', u_id, content) + except IOError as v: + ctx.log.warn("%s" % v) + else: + ctx.log.alert("%s" % res) + finally: + f.close() \ No newline at end of file diff --git a/test/mitmproxy/addons/test_share.py b/test/mitmproxy/addons/test_share.py new file mode 100644 index 0000000000..6c3d6e28fc --- /dev/null +++ b/test/mitmproxy/addons/test_share.py @@ -0,0 +1,34 @@ +from unittest import mock +import http.client + +from mitmproxy.test import taddons +from mitmproxy.test import tflow + +from mitmproxy.addons import share +from mitmproxy.addons import view + + +def test_share_command(): + with mock.patch('mitmproxy.addons.share.http.client.HTTPConnection') as mock_http: + sh = share.Share() + with taddons.context() as tctx: + mock_http.return_value.getresponse.return_value = mock.MagicMock(status=204, reason="No Content") + sh.share([tflow.tflow(resp=True)]) + assert tctx.master.has_log("https://share.mitmproxy.org/") + + mock_http.return_value.getresponse.return_value = mock.MagicMock(status=403, reason="Forbidden") + sh.share([tflow.tflow(resp=True)]) + assert tctx.master.has_log("Forbidden") + + mock_http.return_value.getresponse.return_value = mock.MagicMock(status=404, reason="") + sh.share([tflow.tflow(resp=True)]) + assert tctx.master.has_log("Not Found") + + mock_http.return_value.request.side_effect = http.client.CannotSendRequest("Error in sending req") + sh.share([tflow.tflow(resp=True)]) + assert tctx.master.has_log("Error in sending req") + + v = view.View() + tctx.master.addons.add(v) + tctx.master.addons.add(sh) + tctx.master.commands.call_args("share.flows", ["@shown"])
Fixes #2779. Added the `share.flows` command.
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/2802
2018-01-18T20:46:01Z
2018-02-13T19:01:01Z
2018-02-13T19:01:00Z
2018-06-15T16:54:24Z
1,559
mitmproxy/mitmproxy
28,324
Update README.md
diff --git a/exercises/ansible/README.md b/exercises/ansible/README.md index 12982c3fd..206c4d84e 100644 --- a/exercises/ansible/README.md +++ b/exercises/ansible/README.md @@ -15,6 +15,7 @@ <summary>Describe each of the following components in Ansible, including the relationship between them: * Task + * Inventory * Module * Play * Playbook @@ -23,6 +24,8 @@ Task – a call to a specific Ansible module Module – the actual unit of code executed by Ansible on your own host or a remote host. Modules are indexed by category (database, file, network, …) and also referred to as task plugins. + +Inventory – An inventory file defines hosts and/or groups of hosts on which Ansible tasks are executed. The inventory file can be in one of many formats, depending on the inventory plugins you have. The most common formats are INI and YAML. Play – One or more tasks executed on a given host(s)
Hey repo owner! I have added a new item to the Ansible topic (the inventory component), which I think will improve everyone's understanding of this part of the Ansible tool. If I am wrong, just ignore this. Let me know if you have any questions around this.
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/194
2021-11-29T00:17:04Z
2021-11-29T06:11:48Z
2021-11-29T06:11:48Z
2021-11-29T06:11:48Z
242
bregman-arie/devops-exercises
17,400
Better handling of CookieJar Runtime Exception
diff --git a/lib/request/connect.py b/lib/request/connect.py index 8508dee51ff..84ec25e4d38 100644 --- a/lib/request/connect.py +++ b/lib/request/connect.py @@ -587,14 +587,9 @@ class _(dict): if not getRequestHeader(req, HTTP_HEADER.COOKIE) and conf.cj: conf.cj._policy._now = conf.cj._now = int(time.time()) - while True: - try: - cookies = conf.cj._cookies_for_request(req) - except RuntimeError: # NOTE: https://github.com/sqlmapproject/sqlmap/issues/5187 - time.sleep(1) - else: - requestHeaders += "\r\n%s" % ("Cookie: %s" % ";".join("%s=%s" % (getUnicode(cookie.name), getUnicode(cookie.value)) for cookie in cookies)) - break + with conf.cj._cookies_lock: + cookies = conf.cj._cookies_for_request(req) + requestHeaders += "\r\n%s" % ("Cookie: %s" % ";".join("%s=%s" % (getUnicode(cookie.name), getUnicode(cookie.value)) for cookie in cookies)) if post is not None: if not getRequestHeader(req, HTTP_HEADER.CONTENT_LENGTH) and not chunked:
Instead of waiting for the exception and sleeping in between, we can simply use the same lock used by the original cookiejar code (since we are already accessing a private member) to prevent the exception from happening in the first place. Fixes #5187
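For illustration, here is a minimal standalone sketch of the same pattern outside sqlmap, using a throwaway jar. Both `_cookies_lock` (an `RLock` created in `CookieJar.__init__`) and `_cookies_for_request()` are private members of CPython's `http.cookiejar`, which is exactly why the patch can rely on them:

```python
import http.cookiejar
import time
import urllib.request

cj = http.cookiejar.CookieJar()
req = urllib.request.Request("http://example.com/")

# sqlmap stamps the jar's clock before reading it; mirror that here
cj._policy._now = cj._now = int(time.time())

# Hold the jar's own RLock while calling its private request-time helper,
# so concurrent writers cannot mutate the cookie dict mid-iteration.
with cj._cookies_lock:
    cookies = cj._cookies_for_request(req)

print("Cookie: %s" % ";".join("%s=%s" % (c.name, c.value) for c in cookies))
```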
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/5206
2022-10-21T08:56:40Z
2022-10-21T17:10:43Z
2022-10-21T17:10:43Z
2022-10-21T17:10:52Z
294
sqlmapproject/sqlmap
15,062
W&B log epoch
diff --git a/test.py b/test.py index 891f6bef41c..db344e72204 100644 --- a/test.py +++ b/test.py @@ -239,8 +239,8 @@ def test(data, if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb and wandb.run: - wandb.log({"Images": wandb_images}) - wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]}) + val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False) # Save JSON if save_json and len(jdict): diff --git a/train.py b/train.py index 9e6bd867372..5eff4bbac17 100644 --- a/train.py +++ b/train.py @@ -321,7 +321,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # tb_writer.add_graph(model, imgs) # add model to tensorboard elif plots and ni == 10 and wandb: wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') - if x.exists()]}) + if x.exists()]}, commit=False) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- diff --git a/utils/plots.py b/utils/plots.py index 4765069e037..67f11bfd201 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -295,7 +295,7 @@ def plot_labels(labels, save_dir=Path(''), loggers=None): # loggers for k, v in loggers.items() or {}: if k == 'wandb' and v: - v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}) + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
May allow W&B results logging with epoch as the x-axis. ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub> ### 🌟 Summary Improvement in Weights & Biases logging within YOLOv5's testing and training routines. ### 📊 Key Changes - Modified `wandb.log` calls in `test.py` and `train.py` to use the `commit=False` option. - Updated `wandb.log` in `utils/plots.py` with `commit=False`. ### 🎯 Purpose & Impact - 👍 **Purpose**: These changes allow for more efficient logging to Weights & Biases by grouping multiple logs into a single network call. - 🚀 **Impact**: Reduces the number of calls to the Weights & Biases API during training and testing, potentially speeding up these processes and improving resource usage. Users of the YOLOv5 framework with integrated Weights & Biases logging will benefit from these optimizations.
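As a rough sketch of what `commit=False` buys (the project name and metric values here are made up for illustration, not taken from the PR):

```python
import wandb

run = wandb.init(project="demo")  # hypothetical project name

for epoch in range(3):
    # commit=False buffers these values without advancing the step counter...
    wandb.log({"mosaics_logged": 1}, commit=False)
    # ...so this default commit=True call lands everything at one step per epoch.
    wandb.log({"epoch": epoch, "loss": 1.0 / (epoch + 1)})

run.finish()
```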
https://api.github.com/repos/ultralytics/yolov5/pulls/1946
2021-01-15T04:44:40Z
2021-01-27T05:16:02Z
2021-01-27T05:16:02Z
2024-01-19T19:49:02Z
538
ultralytics/yolov5
24,979
Fix super tiny type error
diff --git a/timm/scheduler/cosine_lr.py b/timm/scheduler/cosine_lr.py index e2c975fb79..4eaaa86a81 100644 --- a/timm/scheduler/cosine_lr.py +++ b/timm/scheduler/cosine_lr.py @@ -8,6 +8,7 @@ import math import numpy as np import torch +from typing import List from .scheduler import Scheduler @@ -77,7 +78,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/multistep_lr.py b/timm/scheduler/multistep_lr.py index 10f2fb5044..e5db556d43 100644 --- a/timm/scheduler/multistep_lr.py +++ b/timm/scheduler/multistep_lr.py @@ -53,7 +53,7 @@ def get_curr_decay_steps(self, t): # assumes self.decay_t is sorted return bisect.bisect_right(self.decay_t, t + 1) - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/plateau_lr.py b/timm/scheduler/plateau_lr.py index 9f8271579b..e868bd5e58 100644 --- a/timm/scheduler/plateau_lr.py +++ b/timm/scheduler/plateau_lr.py @@ -5,6 +5,7 @@ Hacked together by / Copyright 2020 Ross Wightman """ import torch +from typing import List from .scheduler import Scheduler @@ -106,5 +107,5 @@ def _apply_noise(self, epoch): param_group['lr'] = new_lr self.restore_lr = restore_lr - def _get_lr(self, t: int) -> float: + def _get_lr(self, t: int) -> List[float]: assert False, 'should not be called as step is overridden' diff --git a/timm/scheduler/poly_lr.py b/timm/scheduler/poly_lr.py index 906f6acf82..8875e15bfe 100644 --- a/timm/scheduler/poly_lr.py +++ b/timm/scheduler/poly_lr.py @@ -6,6 +6,7 @@ """ import math import logging +from typing import List import torch @@ -73,7 +74,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/scheduler.py b/timm/scheduler/scheduler.py index 4ae2e2aeb6..583357f7c5 100644 --- a/timm/scheduler/scheduler.py +++ b/timm/scheduler/scheduler.py @@ -1,6 +1,6 @@ import abc from abc import ABC -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional import torch @@ -65,10 +65,10 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod - def _get_lr(self, t: int) -> float: + def _get_lr(self, t: int) -> List[float]: pass - def _get_values(self, t: int, on_epoch: bool = True) -> Optional[float]: + def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]: proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) if not proceed: return None diff --git a/timm/scheduler/step_lr.py b/timm/scheduler/step_lr.py index 70a45a70d4..c205d43715 100644 --- a/timm/scheduler/step_lr.py +++ b/timm/scheduler/step_lr.py @@ -6,6 +6,8 @@ """ import math import torch +from typing import List + from .scheduler import Scheduler @@ -51,7 +53,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: diff --git a/timm/scheduler/tanh_lr.py b/timm/scheduler/tanh_lr.py index 48acc61b03..94455302c6 100644 --- a/timm/scheduler/tanh_lr.py 
+++ b/timm/scheduler/tanh_lr.py @@ -8,6 +8,7 @@ import math import numpy as np import torch +from typing import List from .scheduler import Scheduler @@ -75,7 +76,7 @@ def __init__( else: self.warmup_steps = [1 for _ in self.base_values] - def _get_lr(self, t): + def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else:
IMHO, for example, CosineLRScheduler returns a list of floats instead of a single float. Therefore, the type signature may need to be updated. Please correct me if I am wrong!
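A quick way to see this (a minimal sketch; `_get_lr()` is a private method, and the constructor arguments are just plausible values, not anything prescribed by the PR):

```python
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CosineLRScheduler(optimizer, t_initial=10)

# One learning rate per optimizer parameter group, hence List[float] rather than float.
lrs = scheduler._get_lr(0)
print(type(lrs), lrs)  # <class 'list'> [0.1]
```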
https://api.github.com/repos/huggingface/pytorch-image-models/pulls/2124
2024-03-23T03:27:44Z
2024-04-02T21:31:38Z
2024-04-02T21:31:38Z
2024-04-03T00:39:19Z
1,394
huggingface/pytorch-image-models
16,230
gh-72073: Add Windows case in pathlib.rename
diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst index d45e7aa84b28ce..cda2cc83225e07 100644 --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -1021,8 +1021,9 @@ call fails (for example because the path doesn't exist). Rename this file or directory to the given *target*, and return a new Path instance pointing to *target*. On Unix, if *target* exists and is a file, - it will be replaced silently if the user has permission. *target* can be - either a string or another path object:: + it will be replaced silently if the user has permission. + On Windows, if *target* exists, :data:`FileExistsError` will be raised. + *target* can be either a string or another path object:: >>> p = Path('foo') >>> p.open('w').write('some text')
#72073 https://docs.python.org/3.12/library/pathlib.html#pathlib.Path.rename Automerge-Triggered-By: GH:brettcannon
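A small sketch of the behavioral difference being documented (the file names are made up):

```python
from pathlib import Path

src = Path("foo.txt")
dst = Path("bar.txt")
src.write_text("some text")
dst.write_text("already here")

try:
    src.rename(dst)       # POSIX: silently replaces dst (given permission)
except FileExistsError:
    print("dst exists")   # Windows: raised instead of overwriting
```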
https://api.github.com/repos/python/cpython/pulls/93002
2022-05-20T08:05:26Z
2022-05-20T22:25:40Z
2022-05-20T22:25:39Z
2022-05-21T04:31:52Z
229
python/cpython
4,622
Use long class names for enabled middlewares in startup logs
diff --git a/scrapy/middleware.py b/scrapy/middleware.py index 6120488e22f..be36f977e41 100644 --- a/scrapy/middleware.py +++ b/scrapy/middleware.py @@ -28,6 +28,7 @@ def _get_mwlist_from_settings(cls, settings): def from_settings(cls, settings, crawler=None): mwlist = cls._get_mwlist_from_settings(settings) middlewares = [] + enabled = [] for clspath in mwlist: try: mwcls = load_object(clspath) @@ -38,6 +39,7 @@ def from_settings(cls, settings, crawler=None): else: mw = mwcls() middlewares.append(mw) + enabled.append(clspath) except NotConfigured as e: if e.args: clsname = clspath.split('.')[-1] @@ -45,7 +47,6 @@ def from_settings(cls, settings, crawler=None): {'clsname': clsname, 'eargs': e.args[0]}, extra={'crawler': crawler}) - enabled = [x.__class__.__name__ for x in middlewares] logger.info("Enabled %(componentname)ss:\n%(enabledlist)s", {'componentname': cls.component_name, 'enabledlist': pprint.pformat(enabled)},
Continuation of https://github.com/scrapy/scrapy/pull/1722
https://api.github.com/repos/scrapy/scrapy/pulls/1726
2016-01-26T15:44:27Z
2016-01-26T16:29:44Z
2016-01-26T16:29:44Z
2016-02-01T14:53:45Z
294
scrapy/scrapy
35,115
dns-cloudflare: update URL for obtaining API keys
diff --git a/certbot-dns-cloudflare/certbot_dns_cloudflare/dns_cloudflare.py b/certbot-dns-cloudflare/certbot_dns_cloudflare/dns_cloudflare.py index e3d0d42e047..0bbdf703ae8 100644 --- a/certbot-dns-cloudflare/certbot_dns_cloudflare/dns_cloudflare.py +++ b/certbot-dns-cloudflare/certbot_dns_cloudflare/dns_cloudflare.py @@ -10,7 +10,7 @@ logger = logging.getLogger(__name__) -ACCOUNT_URL = 'https://www.cloudflare.com/a/account/my-account' +ACCOUNT_URL = 'https://dash.cloudflare.com/profile/api-tokens' @zope.interface.implementer(interfaces.IAuthenticator)
Updated the ACCOUNT_URL in the Cloudflare-DNS plugin. This uses the new "dash.cloudflare.com" scheme and future-proofs this URL for an upcoming change to Cloudflare API keys (that change is not public yet, so there are no other changes related to it here).
https://api.github.com/repos/certbot/certbot/pulls/7052
2019-05-11T22:00:21Z
2019-06-26T00:53:32Z
2019-06-26T00:53:32Z
2019-06-26T14:11:20Z
168
certbot/certbot
2,376
cookies.txt extension doesn't exist anymore on the Chrome Web Store
diff --git a/README.md b/README.md index 34c6c677d8f..35ae364213e 100644 --- a/README.md +++ b/README.md @@ -879,7 +879,7 @@ Either prepend `https://www.youtube.com/watch?v=` or separate the ID from the op Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. -In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [cookies.txt](https://chrome.google.com/webstore/detail/cookiestxt/njabckikapfpffapmjgojcnbfjonfjfg) (for Chrome) or [cookies.txt](https://addons.mozilla.org/en-US/firefox/addon/cookies-txt/) (for Firefox). +In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [Get cookies.txt](https://chrome.google.com/webstore/detail/get-cookiestxt/bgaddhkoddajcdgocldbbfleckgcbcid/) (for Chrome) or [cookies.txt](https://addons.mozilla.org/en-US/firefox/addon/cookies-txt/) (for Firefox). Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows and `LF` (`\n`) for Unix and Unix-like systems (Linux, macOS, etc.). `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
### Before submitting a *pull request* make sure you have: - [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Read [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) - [x] Read [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) and adjusted the code to meet them - [x] Covered the code with tests (note that PRs without tests will be REJECTED) - [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [ ] Bug fix - [x] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information The [cookies.txt extension](https://chrome.google.com/webstore/detail/cookiestxt/njabckikapfpffapmjgojcnbfjonfjfg) doesn't exist anymore on the Chrome Web Store, so I propose to change the link in the _README.md_ to another similar extension called [Get cookies.txt](https://chrome.google.com/webstore/detail/get-cookiestxt/bgaddhkoddajcdgocldbbfleckgcbcid/) with the same functions and utility as the old one. I tested the extension personally on my machine and it exports cookies in the Netscape cookie file format, which is compatible with youtube-dl, just like the cookies.txt extension did before. This commit does **NOT change any code**; it is just a fix for a link in the _README.md_. This PR closes #26885
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/27433
2020-12-14T14:15:10Z
2020-12-26T13:50:40Z
2020-12-26T13:50:40Z
2020-12-26T13:52:26Z
393
ytdl-org/youtube-dl
50,343
Add --new-key
diff --git a/certbot-ci/certbot_integration_tests/certbot_tests/assertions.py b/certbot-ci/certbot_integration_tests/certbot_tests/assertions.py index 2720842171e..3650f64f043 100644 --- a/certbot-ci/certbot_integration_tests/certbot_tests/assertions.py +++ b/certbot-ci/certbot_integration_tests/certbot_tests/assertions.py @@ -37,16 +37,19 @@ def assert_elliptic_key(key: str, curve: Type[EllipticCurve]) -> None: assert isinstance(key.curve, curve) -def assert_rsa_key(key: str) -> None: +def assert_rsa_key(key: str, key_size: Optional[int] = None) -> None: """ Asserts that the key at the given path is an RSA key. :param str key: path to key + :param int key_size: if provided, assert that the RSA key is of this size """ with open(key, 'rb') as file: privkey1 = file.read() key = load_pem_private_key(data=privkey1, password=None, backend=default_backend()) assert isinstance(key, RSAPrivateKey) + if key_size: + assert key_size == key.key_size def assert_hook_execution(probe_path: str, probe_content: str) -> None: diff --git a/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py b/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py index 4a33952174f..2827ae939e2 100644 --- a/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py +++ b/certbot-ci/certbot_integration_tests/certbot_tests/test_main.py @@ -8,6 +8,7 @@ import time from typing import Iterable from typing import Generator +from typing import Tuple from typing import Type from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurve @@ -463,6 +464,42 @@ def test_reuse_key(context: IntegrationTestsContext) -> None: assert len({cert1, cert2, cert3}) == 3 +def test_new_key(context: IntegrationTestsContext) -> None: + """Tests --new-key and its interactions with --reuse-key""" + def private_key(generation: int) -> Tuple[str, str]: + pk_path = join(context.config_dir, f'archive/{certname}/privkey{generation}.pem') + with open(pk_path, 'r') as file: + return file.read(), pk_path + + certname = context.get_domain('newkey') + + context.certbot(['--domains', certname, '--reuse-key', + '--key-type', 'rsa', '--rsa-key-size', '4096']) + privkey1, _ = private_key(1) + + # renew: --new-key should replace the key, but keep reuse_key and the key type + params + context.certbot(['renew', '--cert-name', certname, '--new-key']) + privkey2, privkey2_path = private_key(2) + assert privkey1 != privkey2 + assert_saved_lineage_option(context.config_dir, certname, 'reuse_key', 'True') + assert_rsa_key(privkey2_path, 4096) + + # certonly: it should replace the key but the key size will change + context.certbot(['certonly', '-d', certname, '--reuse-key', '--new-key']) + privkey3, privkey3_path = private_key(3) + assert privkey2 != privkey3 + assert_saved_lineage_option(context.config_dir, certname, 'reuse_key', 'True') + assert_rsa_key(privkey3_path, 2048) + + # certonly: it should be possible to change the key type and keep reuse_key + context.certbot(['certonly', '-d', certname, '--reuse-key', '--new-key', '--key-type', 'ecdsa', + '--cert-name', certname]) + privkey4, privkey4_path = private_key(4) + assert privkey3 != privkey4 + assert_saved_lineage_option(context.config_dir, certname, 'reuse_key', 'True') + assert_elliptic_key(privkey4_path, SECP256R1) + + def test_incorrect_key_type(context: IntegrationTestsContext) -> None: with pytest.raises(subprocess.CalledProcessError): context.certbot(['--key-type="failwhale"']) diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md index 3dd1a9e2651..9814e67eafe 100644 --- 
a/certbot/CHANGELOG.md +++ b/certbot/CHANGELOG.md @@ -6,7 +6,10 @@ Certbot adheres to [Semantic Versioning](https://semver.org/). ### Added -* +* Added `--new-key`. When renewing or replacing a certificate that has `--reuse-key` + set, it will force a new private key to be generated. + Combining `--reuse-key` and `--new-key` will replace the certificate's private key + and then reuse it for future renewals. ### Changed diff --git a/certbot/certbot/_internal/cli/__init__.py b/certbot/certbot/_internal/cli/__init__.py index d11a454b146..30b3fab1d35 100644 --- a/certbot/certbot/_internal/cli/__init__.py +++ b/certbot/certbot/_internal/cli/__init__.py @@ -223,6 +223,13 @@ def prepare_and_parse_args(plugins: plugins_disco.PluginsRegistry, args: List[st "certificate. Not reusing private keys is the default behavior of " "Certbot. This option may be used to unset --reuse-key on an " "existing certificate.") + helpful.add( + "automation", "--new-key", + dest="new_key", action="store_true", default=flag_default("new_key"), + help="When renewing or replacing a certificate, generate a new private key, " + "even if --reuse-key is set on the existing certificate. Combining " + "--new-key and --reuse-key will result in the private key being replaced and " + "then reused in future renewals.") helpful.add( ["automation", "renew", "certonly"], diff --git a/certbot/certbot/_internal/constants.py b/certbot/certbot/_internal/constants.py index 3867d777c1e..5a9d97d835d 100644 --- a/certbot/certbot/_internal/constants.py +++ b/certbot/certbot/_internal/constants.py @@ -74,6 +74,7 @@ validate_hooks=True, directory_hooks=True, reuse_key=False, + new_key=False, disable_renew_updates=False, random_sleep_on_renew=True, eab_hmac_key=None, diff --git a/certbot/certbot/_internal/renewal.py b/certbot/certbot/_internal/renewal.py index 4fb2ca00aa5..0ba2e810802 100644 --- a/certbot/certbot/_internal/renewal.py +++ b/certbot/certbot/_internal/renewal.py @@ -336,7 +336,7 @@ def renew_cert(config: configuration.NamespaceConfig, domains: Optional[List[str domains = lineage.names() # The private key is the existing lineage private key if reuse_key is set. # Otherwise, generate a fresh private key by passing None. - if config.reuse_key: + if config.reuse_key and not config.new_key: new_key = os.path.normpath(lineage.privkey) _update_renewal_params_from_key(new_key, config) else: diff --git a/certbot/certbot/configuration.py b/certbot/certbot/configuration.py index ebeb8e98c65..d5ad8759957 100644 --- a/certbot/certbot/configuration.py +++ b/certbot/certbot/configuration.py @@ -300,6 +300,13 @@ def issuance_timeout(self) -> int: """ return self.namespace.issuance_timeout + @property + def new_key(self) -> bool: + """This option specifies whether Certbot should generate a new private + key when replacing a certificate, even if reuse_key is set. 
+ """ + return self.namespace.new_key + # Magic methods def __deepcopy__(self, _memo: Any) -> 'NamespaceConfig': diff --git a/certbot/tests/main_test.py b/certbot/tests/main_test.py index c29f4d758e1..09a069c6121 100644 --- a/certbot/tests/main_test.py +++ b/certbot/tests/main_test.py @@ -1115,7 +1115,7 @@ def test_certonly_new_request_failure(self, mock_subscription): def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None, args=None, should_renew=True, error_expected=False, quiet_mode=False, expiry_date=datetime.datetime.now(), - reuse_key=False): + reuse_key=False, new_key=False): cert_path = test_util.vector_path('cert_512.pem') chain_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/foo.bar/fullchain.pem')) @@ -1165,7 +1165,7 @@ def write_msg(message, *args, **kwargs): # pylint: disable=unused-argument traceback.format_exc()) if should_renew: - if reuse_key: + if reuse_key and not new_key: # The location of the previous live privkey.pem is passed # to obtain_certificate mock_client.obtain_certificate.assert_called_once_with(['isnot.org'], @@ -1236,6 +1236,13 @@ def test_reuse_key_no_dry_run(self, unused_save_successor): args = ["renew", "--reuse-key"] self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True) + @mock.patch('certbot._internal.storage.RenewableCert.save_successor') + def test_new_key(self, unused_save_successor): + test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf') + args = ["renew", "--reuse-key", "--new-key"] + self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True, + new_key=True) + @mock.patch('sys.stdin') def test_noninteractive_renewal_delay(self, stdin): stdin.isatty.return_value = False diff --git a/certbot/tests/renewal_test.py b/certbot/tests/renewal_test.py index 110c0d7bd91..d6e2866dc56 100644 --- a/certbot/tests/renewal_test.py +++ b/certbot/tests/renewal_test.py @@ -99,6 +99,32 @@ def test_reuse_ec_key_renewal_params(self): assert self.config.elliptic_curve == 'secp256r1' + def test_new_key(self): + # When renewing with both reuse_key and new_key, the key should be regenerated, + # the key type, key parameters and reuse_key should be kept. + self.config.reuse_key = True + self.config.new_key = True + self.config.dry_run = True + config = configuration.NamespaceConfig(self.config) + + rc_path = test_util.make_lineage( + self.config.config_dir, 'sample-renewal.conf') + lineage = storage.RenewableCert(rc_path, config) + + le_client = mock.MagicMock() + le_client.obtain_certificate.return_value = (None, None, None, None) + + from certbot._internal import renewal + + with mock.patch('certbot._internal.renewal.hooks.renew_hook'): + renewal.renew_cert(self.config, None, le_client, lineage) + + self.assertEqual(self.config.rsa_key_size, 2048) + self.assertEqual(self.config.key_type, 'rsa') + self.assertTrue(self.config.reuse_key) + # None is passed as the existing key, i.e. the key is not actually being reused. + le_client.obtain_certificate.assert_called_with(mock.ANY, None) + @test_util.patch_display_util() @mock.patch('certbot._internal.renewal.cli.set_by_cli') def test_remove_deprecated_config_elements(self, mock_set_by_cli, unused_mock_get_utility):
https://api.github.com/repos/certbot/certbot/pulls/9252
2022-03-29T21:50:13Z
2022-03-31T18:40:22Z
2022-03-31T18:40:22Z
2022-03-31T18:40:22Z
2,862
certbot/certbot
116
added possibility to freeze layers during training
diff --git a/examples/mnist_transfer_cnn.py b/examples/mnist_transfer_cnn.py new file mode 100644 index 00000000000..579df76d08f --- /dev/null +++ b/examples/mnist_transfer_cnn.py @@ -0,0 +1,114 @@ +from __future__ import absolute_import +from __future__ import print_function +import numpy as np +import datetime + +np.random.seed(1337) # for reproducibility + +from keras.datasets import mnist +from keras.models import Sequential +from keras.layers.core import Dense, Dropout, Activation, Flatten +from keras.layers.convolutional import Convolution2D, MaxPooling2D +from keras.utils import np_utils + +''' + Transfer learning toy example: + 1- Train a simple convnet on the MNIST dataset the first 5 digits [0..4]. + 2- Freeze convolutional layers and fine-tune dense layers for the classification of digits [5..9]. + + Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python mnist_cnn.py + + Get to 99.8% test accuracy after 5 epochs for the first five digits classifier + and 99.2% for the last five digits after transfer + fine-tuning. +''' + +now = datetime.datetime.now + +batch_size = 128 +nb_classes = 5 +nb_epoch = 5 + +# input image dimensions +img_rows, img_cols = 28, 28 +# number of convolutional filters to use +nb_filters = 32 +# size of pooling area for max pooling +nb_pool = 2 +# convolution kernel size +nb_conv = 3 + + +def train_model(model, train, test, nb_classes): + X_train = train[0].reshape(train[0].shape[0], 1, img_rows, img_cols) + X_test = test[0].reshape(test[0].shape[0], 1, img_rows, img_cols) + X_train = X_train.astype("float32") + X_test = X_test.astype("float32") + X_train /= 255 + X_test /= 255 + print('X_train shape:', X_train.shape) + print(X_train.shape[0], 'train samples') + print(X_test.shape[0], 'test samples') + + # convert class vectors to binary class matrices + Y_train = np_utils.to_categorical(train[1], nb_classes) + Y_test = np_utils.to_categorical(test[1], nb_classes) + + model.compile(loss='categorical_crossentropy', optimizer='adadelta') + + t = now() + model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, + validation_data=(X_test, Y_test)) + print('Training time: %s' % (now() - t)) + score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0) + print('Test score:', score[0]) + print('Test accuracy:', score[1]) + + +# the data, shuffled and split between train and test sets +(X_train, y_train), (X_test, y_test) = mnist.load_data() + +# create two datasets one with digits below 5 and one with 5 and above +X_train_lt5 = X_train[y_train < 5] +y_train_lt5 = y_train[y_train < 5] +X_test_lt5 = X_test[y_test < 5] +y_test_lt5 = y_test[y_test < 5] + +X_train_gte5 = X_train[y_train >= 5] +y_train_gte5 = y_train[y_train >= 5] - 5 # make classes start at 0 for +X_test_gte5 = X_test[y_test >= 5] # np_utils.to_categorical +y_test_gte5 = y_test[y_test >= 5] - 5 + +# define two groups of layers: feature (convolutions) and classification (dense) +feature_layers = [ + Convolution2D(nb_filters, nb_conv, nb_conv, + border_mode='full', + input_shape=(1, img_rows, img_cols)), + Activation('relu'), + Convolution2D(nb_filters, nb_conv, nb_conv), + Activation('relu'), + MaxPooling2D(pool_size=(nb_pool, nb_pool)), + Dropout(0.25), + Flatten(), +] +classification_layers = [ + Dense(128), + Activation('relu'), + Dropout(0.5), + Dense(nb_classes), + Activation('softmax') +] + +# create complete model +model = Sequential() +for l in feature_layers + classification_layers: + model.add(l) + +# train model for 5-digit 
classification [0..4] +train_model(model, (X_train_lt5, y_train_lt5), (X_test_lt5, y_test_lt5), nb_classes) + +# freeze feature layers and rebuild model +for l in feature_layers: + l.trainable = False + +# transfer: train dense layers for new classification task [5..9] +train_model(model, (X_train_gte5, y_train_gte5), (X_test_gte5, y_test_gte5), nb_classes) diff --git a/keras/layers/containers.py b/keras/layers/containers.py index 8c4ea8604c3..422359a9037 100644 --- a/keras/layers/containers.py +++ b/keras/layers/containers.py @@ -2,6 +2,7 @@ from __future__ import absolute_import from __future__ import print_function +from collections import OrderedDict import theano.tensor as T from ..layers.core import Layer, Merge from ..utils.theano_utils import ndim_tensor @@ -20,11 +21,6 @@ class Sequential(Layer): def __init__(self, layers=[]): self.layers = [] - self.params = [] - self.regularizers = [] - self.constraints = [] - self.updates = [] - for layer in layers: self.add(layer) @@ -38,11 +34,37 @@ def add(self, layer): if not hasattr(self.layers[0], 'input'): self.set_input() - params, regularizers, constraints, updates = layer.get_params() - self.params += params - self.regularizers += regularizers - self.constraints += constraints - self.updates += updates + @property + def params(self): + params = [] + for l in self.layers: + if l.trainable: + params += l.get_params()[0] + return params + + @property + def regularizers(self): + regularizers = [] + for l in self.layers: + if l.trainable: + regularizers += l.get_params()[1] + return regularizers + + @property + def constraints(self): + constraints = [] + for l in self.layers: + if l.trainable: + constraints += l.get_params()[2] + return constraints + + @property + def updates(self): + updates = [] + for l in self.layers: + if l.trainable: + updates += l.get_params()[3] + return updates @property def output_shape(self): @@ -97,7 +119,6 @@ class Graph(Layer): when it has exactly one input and one output. 
inherited from Layer: - - get_params - get_output_mask - supports_masked_input - get_weights @@ -105,7 +126,7 @@ class Graph(Layer): ''' def __init__(self): self.namespace = set() # strings - self.nodes = {} # layer-like + self.nodes = OrderedDict() # layer-like self.inputs = {} # layer-like self.input_order = [] # strings self.outputs = {} # layer-like @@ -114,11 +135,6 @@ def __init__(self): self.output_config = [] # dicts self.node_config = [] # dicts - self.params = [] - self.regularizers = [] - self.constraints = [] - self.updates = [] - @property def nb_input(self): return len(self.inputs) @@ -127,6 +143,38 @@ def nb_input(self): def nb_output(self): return len(self.outputs) + @property + def params(self): + params = [] + for l in self.nodes.values(): + if l.trainable: + params += l.get_params()[0] + return params + + @property + def regularizers(self): + regularizers = [] + for l in self.nodes.values(): + if l.trainable: + regularizers += l.get_params()[1] + return regularizers + + @property + def constraints(self): + constraints = [] + for l in self.nodes.values(): + if l.trainable: + constraints += l.get_params()[2] + return constraints + + @property + def updates(self): + updates = [] + for l in self.nodes.values(): + if l.trainable: + updates += l.get_params()[3] + return updates + def set_previous(self, layer, connection_map={}): if self.nb_input != layer.nb_output: raise Exception('Cannot connect layers: input count does not match output count.') @@ -220,11 +268,6 @@ def add_node(self, layer, name, input=None, inputs=[], 'merge_mode': merge_mode, 'concat_axis': concat_axis, 'create_output': create_output}) - params, regularizers, constraints, updates = layer.get_params() - self.params += params - self.regularizers += regularizers - self.constraints += constraints - self.updates += updates if create_output: self.add_output(name, input=name) diff --git a/keras/layers/core.py b/keras/layers/core.py index 5c216b0d91c..f21a3cbefae 100644 --- a/keras/layers/core.py +++ b/keras/layers/core.py @@ -20,9 +20,11 @@ class Layer(object): def __init__(self, **kwargs): for kwarg in kwargs: - assert kwarg in {'input_shape'}, "Keyword argument not understood: " + kwarg + assert kwarg in {'input_shape', 'trainable'}, "Keyword argument not understood: " + kwarg if 'input_shape' in kwargs: self.set_input_shape(kwargs['input_shape']) + if 'trainable' in kwargs: + self._trainable = kwargs['trainable'] if not hasattr(self, 'params'): self.params = [] @@ -45,6 +47,17 @@ def build(self): ''' pass + @property + def trainable(self): + if hasattr(self, '_trainable'): + return self._trainable + else: + return True + + @trainable.setter + def trainable(self, value): + self._trainable = value + @property def nb_input(self): return 1 @@ -133,6 +146,8 @@ def get_config(self): config = {"name": self.__class__.__name__} if hasattr(self, '_input_shape'): config['input_shape'] = self._input_shape[1:] + if hasattr(self, '_trainable'): + config['trainable'] = self._trainable return config def get_params(self):
Following the discussion in #622, here is a simple strategy to "freeze" specific layers during training. It works for `Sequential` and `Graph` models. It adds a `trainable` attribute to the core `Layer` class and checks its value when creating the model. If a layer's `trainable` flag is modified after addition to a model, the model needs to be rebuilt and recompiled. For this, a `rebuild` function has been added to the `Sequential` and `Graph` classes.
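For reference, the same freezing pattern in today's Keras API looks roughly like this (a sketch under modern Keras, not the 2015 code from this PR; the layer sizes are arbitrary):

```python
import keras
from keras import layers

feature_layers = [layers.Dense(128, activation="relu")]
classification_layers = [layers.Dense(5, activation="softmax")]

model = keras.Sequential(feature_layers + classification_layers)
model.compile(optimizer="sgd", loss="categorical_crossentropy")

# Freeze the feature block, then recompile so the change takes effect,
# mirroring the rebuild/recompile requirement described above.
for layer in feature_layers:
    layer.trainable = False
model.compile(optimizer="sgd", loss="categorical_crossentropy")
```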
https://api.github.com/repos/keras-team/keras/pulls/839
2015-10-15T11:41:58Z
2015-10-23T23:40:38Z
2015-10-23T23:40:38Z
2015-12-31T15:30:46Z
2,642
keras-team/keras
47,887
exmo(fix): ArgumentsRequired import missing
diff --git a/ts/src/exmo.ts b/ts/src/exmo.ts index 180b9a2a1232..6411a68950ef 100644 --- a/ts/src/exmo.ts +++ b/ts/src/exmo.ts @@ -2,7 +2,7 @@ // --------------------------------------------------------------------------- import Exchange from './abstract/exmo.js'; -import { ExchangeError, OrderNotFound, AuthenticationError, InsufficientFunds, InvalidOrder, InvalidNonce, OnMaintenance, RateLimitExceeded, BadRequest, PermissionDenied } from './base/errors.js'; +import { ArgumentsRequired, ExchangeError, OrderNotFound, AuthenticationError, InsufficientFunds, InvalidOrder, InvalidNonce, OnMaintenance, RateLimitExceeded, BadRequest, PermissionDenied } from './base/errors.js'; import { Precise } from './base/Precise.js'; import { TICK_SIZE } from './base/functions/number.js'; import { sha512 } from './static_dependencies/noble-hashes/sha512.js';
https://api.github.com/repos/ccxt/ccxt/pulls/19307
2023-09-17T00:45:16Z
2023-09-17T07:16:27Z
2023-09-17T07:16:27Z
2023-09-17T07:16:28Z
207
ccxt/ccxt
13,407
Safety pipeline prototype
diff --git a/safety/README.md b/safety/README.md new file mode 100644 index 0000000000..2b6f23ff0f --- /dev/null +++ b/safety/README.md @@ -0,0 +1,47 @@ +# Train & Evaluate Safety models + +This is the Open Assistant Safety Folder and contains the following: + +- Model training scripts +- Model inference scripts +- Data processing scripts + +## Mission Statement + +Our mission at LAION-AI OpenAssistant safety team is to create a safety pipeline +that is not only compatible with the OpenAssistant model and project but can +also integrate with other systems outside of it. We are dedicated to making this +pipeline modifiable and robust to accommodate the diverse preferences of our +users. + +We understand that our users come from different backgrounds and use various +types of hardware. Therefore, we strive to make our safety pipeline accessible +and able to run on consumer hardware, so everyone can benefit from its +protective features. + +Through our commitment to innovation and collaboration, we will continue to +provide safety solutions that ensure the well-being of our users and the wider +community. + +## Why create a safety pipeline? + +Open source and extendable safety pipelines unfortunately do not exist on the +same scale as those in ChatGPT and other commercial systems. To +further research in implementable, accurate, and extendable safety pipelines, +Open Assistant Safety Team will continue to push models and code to the public. +Much research has been done in things like toxicity detection and bias +mitigation in LLMs; however, the implementation of such research in systems that +use language models as conversational agents in production settings has largely +gone undocumented. Furthermore, safety systems that interact with diverse +communities of users must be able to accommodate user preferences. This is paramount +in introducing LLM based systems all over the world. We hope that our work will +generate more research in this field, and allow others to create safe LLM based +systems. + +## Training + +- Set training configuration using `config.yaml` + +```python +python model_training/t5_trainer.py +``` diff --git a/safety/model_training/__init__.py b/safety/model_training/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/safety/model_training/config/config.yaml b/safety/model_training/config/config.yaml new file mode 100644 index 0000000000..35cace8444 --- /dev/null +++ b/safety/model_training/config/config.yaml @@ -0,0 +1,18 @@ +defaults: + - trainer: default +padding_side: "right" +truncation_side: "right" +model: "t5-base" +epochs: 1 +batch_size: 8 +save_folder: "safetyfiles" +max_length: 256 +special_tokens: + context_token: "<ctx>" + sep_token: "<sep>" + label_token: "<cls>" + rot_token: "<rot>" +dataset: + name: "allenai/prosocial-dialog" + train: ["train", "validation"] + test: "test" diff --git a/safety/model_training/config/trainer/default.yaml b/safety/model_training/config/trainer/default.yaml new file mode 100644 index 0000000000..b13dc0b7e7 --- /dev/null +++ b/safety/model_training/config/trainer/default.yaml @@ -0,0 +1,4 @@ +_target_: transformers.TrainingArguments +output_dir: "."
+per_device_train_batch_size: 5 +fp16: False diff --git a/safety/model_training/custom_datasets/__init__.py b/safety/model_training/custom_datasets/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/safety/model_training/custom_datasets/rot_dataset.py b/safety/model_training/custom_datasets/rot_dataset.py new file mode 100644 index 0000000000..171451a595 --- /dev/null +++ b/safety/model_training/custom_datasets/rot_dataset.py @@ -0,0 +1,103 @@ +from dataclasses import dataclass +from typing import Dict, List + +import torch +from datasets import concatenate_datasets +from torch.utils.data import Dataset + +LABEL2ID = { + "__casual__": "__casual__", + "__needs_caution__": "__needs_caution__", + "__needs_intervention__": "__needs_intervention__", + "__probably_needs_caution__": "__probably_needs_caution__", + "__possibly_needs_caution__": "__possibly_needs_caution__", +} + + +class SafetyDataset(Dataset): + + """ + Dataset to train safety model with context and ROT from prosocial-dialog + input format : input<ctx>context</s> + output format : <cls>safety_label<rot>ROTs</s> + + """ + + def __init__(self, dataset, split, tokenizer, max_len=512): + super().__init__() + + if isinstance(split, List): + self.split = "-".join(split) + self.dataset = concatenate_datasets([dataset[sp] for sp in split]) + else: + self.split = split + self.dataset = dataset[split] + + self.max_len = max_len + self.tokenizer = tokenizer + self.label2id = LABEL2ID + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + idx_start = idx + end = self.dataset[max(0, idx_start - 1)]["episode_done"] + while (not end) and (idx_start > 0): + end = self.dataset[max(0, idx_start - 2)]["episode_done"] + idx_start -= 1 + idx_start = max(0, idx_start) + context = [ + f'\nUser: {self.dataset[i]["context"]}\n bot:{self.dataset[i]["response"]}' for i in range(idx_start, idx) + ] + context = self.tokenizer.sep_token.join(context) + rots = self.dataset[idx]["rots"] + label = self.label2id[self.dataset[idx]["safety_label"]] + input_tokens = self.tokenizer.encode(self.dataset[idx]["context"], add_special_tokens=False) + max_len = self.max_len - (len(input_tokens) + 2) + context = self.tokenizer.encode( + context, + add_special_tokens=False, + max_length=max_len, + ) + rots = self.tokenizer.sep_token.join(rots) + input_ids = input_tokens + [self.tokenizer.context_token_id] + context + [self.tokenizer.eos_token_id] + input_ids = input_ids + [self.tokenizer.pad_token_id] * max(0, (self.max_len - len(input_ids))) + mask = [1] * len(input_ids) + [self.tokenizer.pad_token_id] * (self.max_len - len(input_ids)) + target_text = self.tokenizer.label_token + label + self.tokenizer.context_token + rots + decoder_ids = self.tokenizer( + target_text, + add_special_tokens=True, + max_length=self.max_len, + padding="max_length", + ) + + return { + "input_ids": torch.LongTensor(input_ids), + "attention_mask": torch.LongTensor(mask), + "decoder_input_ids": torch.LongTensor(decoder_ids["input_ids"]), + "decoder_attention_mask": torch.LongTensor(decoder_ids["attention_mask"]), + } + + +@dataclass +class SafetyDataCollator: + def __call__(self, batch: List) -> Dict[str, torch.Tensor]: + """ + Take a list of samples from a Dataset and collate them into a batch. 
+ Returns: + A dictionary of tensors + """ + + input_ids = torch.stack([example["input_ids"] for example in batch]) + lm_labels = torch.stack([example["decoder_input_ids"] for example in batch]) + lm_labels[lm_labels[:, :] == 0] = -100 + attention_mask = torch.stack([example["attention_mask"] for example in batch]) + decoder_attention_mask = torch.stack([example["decoder_attention_mask"] for example in batch]) + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "labels": lm_labels, + "decoder_attention_mask": decoder_attention_mask, + } diff --git a/safety/model_training/t5_trainer.py b/safety/model_training/t5_trainer.py new file mode 100644 index 0000000000..2776803b61 --- /dev/null +++ b/safety/model_training/t5_trainer.py @@ -0,0 +1,50 @@ +import os + +import hydra +from custom_datasets.rot_dataset import SafetyDataCollator, SafetyDataset +from datasets import load_dataset +from hydra.utils import instantiate +from omegaconf import DictConfig, OmegaConf +from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer +from utils import add_special_tokens + + +@hydra.main(version_base=None, config_path="config", config_name="config") +def train(cfg: DictConfig) -> None: + if not os.path.exists(cfg.save_folder): + os.mkdir(cfg.save_folder) + + model = T5ForConditionalGeneration.from_pretrained(cfg.model) + tokenizer = T5Tokenizer.from_pretrained( + cfg.model, + padding_side=cfg.padding_side, + truncation_side=cfg.truncation_side, + model_max_length=model.config.n_positions, + ) + add_special_tokens(cfg.special_tokens, tokenizer, model) + training_args = instantiate(cfg.trainer) + + dataset = load_dataset(cfg.dataset.name) + train_dataset = SafetyDataset( + dataset, split=OmegaConf.to_object(cfg.dataset.train), tokenizer=tokenizer, max_len=cfg.max_length + ) + valid_dataset = SafetyDataset(dataset, split=cfg.dataset.test, tokenizer=tokenizer, max_len=cfg.max_length) + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset, + eval_dataset=valid_dataset, + data_collator=SafetyDataCollator(), + ) + + # Training + trainer.train() + + trainer.save_model(os.path.join(cfg.save_folder, f"{cfg.model_name}-model")) + tokenizer.save_vocabulary(os.path.join(cfg.save_folder, f"{cfg.model_name}-tokenizer")) + + +if __name__ == "__main__": + train() diff --git a/safety/model_training/utils.py b/safety/model_training/utils.py new file mode 100644 index 0000000000..8102ea1056 --- /dev/null +++ b/safety/model_training/utils.py @@ -0,0 +1,7 @@ +def add_special_tokens(special_tokens, tokenizer, model): + for key, value in special_tokens.items(): + setattr(tokenizer, key, value) + tokenizer.add_tokens([value]) + setattr(tokenizer, key + "_id", tokenizer.encode(value)[0]) + + model.resize_token_embeddings(len(tokenizer)) diff --git a/safety/requirements.txt b/safety/requirements.txt new file mode 100644 index 0000000000..dffd40c36a --- /dev/null +++ b/safety/requirements.txt @@ -0,0 +1,2 @@ +hydra-core==1.3.2 +omegaconf==2.3.0
The safety team has decided to maintain a `safety` folder. This folder will contain:

1. Documentation related to safety models.
2. Reproducible training and evaluation code for safety models.
3. Inference pipelines for safety models.

@ontocord
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/1972
2023-03-05T11:08:36Z
2023-03-08T16:33:32Z
2023-03-08T16:33:32Z
2023-03-08T16:33:32Z
2,637
LAION-AI/Open-Assistant
37,546
Fix issue with scrolling when package not found
diff --git a/thefuck/specific/archlinux.py b/thefuck/specific/archlinux.py index 5816c50f0..5c95aa5bb 100644 --- a/thefuck/specific/archlinux.py +++ b/thefuck/specific/archlinux.py @@ -24,8 +24,11 @@ def get_pkgfile(command): ).splitlines() return [package.split()[0] for package in packages] - except subprocess.CalledProcessError: - return None + except subprocess.CalledProcessError as err: + if err.returncode == 1 and err.output == "": + return [] + else: + raise err def archlinux_env():
Fix a crash when attempting to scroll through possible corrections for a not-found package that has no packages with matching names. This behavior originates, to my knowledge, with commit 6624ecb3b85e82e9f1a08823f6e41ee805d35a9e, where the pacman rules were created. In both uses of `get_pkgfile` (in pacman.py and in pacman_not_found.py), it would be appropriate for `get_pkgfile` to return an empty list when `pkgfile` does not find any packages with a matching name. However, the `pkgfile` command returns 1 in these circumstances, and as such `subprocess` raises an exception. Now, when this exception is caused by `pkgfile` returning 1 with no output (i.e. when it finds no packages), `get_pkgfile` no longer causes a crash. To reproduce the bug: ``` yaourt -S e fuck ``` then press ↑ or ↓
https://api.github.com/repos/nvbn/thefuck/pulls/573
2016-11-03T07:50:06Z
2017-03-13T12:47:17Z
2017-03-13T12:47:17Z
2017-03-13T12:47:24Z
157
nvbn/thefuck
30,457
Fix typo
diff --git a/README.md b/README.md index 0b576befe1..c11c8f3032 100644 --- a/README.md +++ b/README.md @@ -196,7 +196,7 @@ API | Description | Auth | HTTPS | CORS | | [Open Library](https://openlibrary.org/developers/api) | Books, book covers and related data | No | Yes | No | | [Penguin Publishing](http://www.penguinrandomhouse.biz/webservices/rest/) | Books, book covers and related data | No | Yes | Yes | | [Quran](https://quran.api-docs.io/) | RESTful Quran API with multiple languages | No | Yes | Yes | -| [Quran Cloud](https://alquran.cloud/api) | A RESTful Quran API to retrieve an Ayah, Surah, Juz or the enitre Holy Quran | No | Yes | Yes | +| [Quran Cloud](https://alquran.cloud/api) | A RESTful Quran API to retrieve an Ayah, Surah, Juz or the entire Holy Quran | No | Yes | Yes | | [Quran-api](https://github.com/fawazahmed0/quran-api#readme) | Free Quran API Service with 90+ different languages and 400+ translations | No | Yes | Yes | | [Rig Veda](https://aninditabasu.github.io/indica/html/rv.html) | Gods and poets, their categories, and the verse meters, with the mandal and sukta number | No | Yes | Unknown | | [The Bible](https://docs.api.bible) | Everything you need from the Bible in one discoverable place | `apiKey` | Yes | Unknown |
<!-- Thank you for taking the time to work on a Pull Request for this project! --> <!-- To ensure your PR is dealt with swiftly please check the following: --> - [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md) - [x] My addition is ordered alphabetically - [x] My submission has a useful description - [x] The description does not end with punctuation - [x] Each table column is padded with one space on either side - [x] I have searched the repository for any relevant issues or pull requests - [x] Any category I am creating has the minimum requirement of 3 items - [x] All changes have been [squashed][squash-link] into a single commit [squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
https://api.github.com/repos/public-apis/public-apis/pulls/2062
2021-09-21T00:44:38Z
2021-09-22T11:31:59Z
2021-09-22T11:31:59Z
2021-09-22T18:06:23Z
376
public-apis/public-apis
35,235
Inline `_make_grid()` meshgrid
diff --git a/models/yolo.py b/models/yolo.py index 7a7308312a1..fa05fcf9a8d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -81,10 +81,7 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - if torch_1_10: # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid(y, x, indexing='ij') - else: - yv, xv = torch.meshgrid(y, x) + yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com> <!-- Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributing to YOLOv5 as easy and transparent as possible. A few tips to get you started: - Search existing YOLOv5 [PRs](https://github.com/ultralytics/yolov5/pull) to see if a similar PR already exists. - Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented. - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. --> ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub> ### 🌟 Summary Refinement of grid generation in YOLOv5 for compatibility adjustment. ### 📊 Key Changes - Consolidated conditional code for grid generation into a single line. ### 🎯 Purpose & Impact - 🧹 Cleans up the code by removing unnecessary conditionals. - 🚀 Simplifies maintenance by reducing code complexity. - ✨ Ensures compatibility with various versions of PyTorch (>=0.7) without altering functionality.
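The compatibility shim the one-liner preserves, as a standalone sketch (the crude version parse below is a simplified stand-in for YOLOv5's `check_version` helper):

```python
import torch

# stand-in for check_version(torch.__version__, '1.10.0')
major, minor = (int(v) for v in torch.__version__.split("+")[0].split(".")[:2])
torch_1_10 = (major, minor) >= (1, 10)

y, x = torch.arange(3), torch.arange(4)
# torch>=1.10 wants indexing='ij' to reproduce the pre-kwarg default;
# older releases only accept the two-argument form.
yv, xv = torch.meshgrid(y, x, indexing="ij") if torch_1_10 else torch.meshgrid(y, x)
print(yv.shape, xv.shape)  # torch.Size([3, 4]) torch.Size([3, 4])
```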
https://api.github.com/repos/ultralytics/yolov5/pulls/9170
2022-08-26T13:07:52Z
2022-08-26T13:29:31Z
2022-08-26T13:29:31Z
2024-01-19T06:39:33Z
342
ultralytics/yolov5
24,732
gh-108765: Python.h no longer includes <unistd.h>
diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index e7b60ddbdbbfda..c4fb328db9cfa0 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -921,6 +921,11 @@ Porting to Python 3.13 also the ``HAVE_IEEEFP_H`` macro. (Contributed by Victor Stinner in :gh:`108765`.) +* ``Python.h`` no longer includes the ``<unistd.h>`` standard header file. If + needed, it should now be included explicitly. For example, it provides the + functions: ``close()``, ``getpagesize()``, ``getpid()`` and ``sysconf()``. + (Contributed by Victor Stinner in :gh:`108765`.) + Deprecated ---------- diff --git a/Include/Python.h b/Include/Python.h index 002a79dbdc9362..4cc72bb23ce7a3 100644 --- a/Include/Python.h +++ b/Include/Python.h @@ -26,14 +26,13 @@ #ifdef HAVE_STDDEF_H # include <stddef.h> // size_t #endif -#ifndef MS_WINDOWS -# include <unistd.h> // sysconf() +#ifdef HAVE_SYS_TYPES_H +# include <sys/types.h> // ssize_t #endif -// errno.h, stdio.h, stdlib.h and string.h headers are no longer used by Python -// headers, but kept for backward compatibility (no introduce new compiler -// warnings). They are not included by the limited C API version 3.11 and -// above. +// errno.h, stdio.h, stdlib.h and string.h headers are no longer used by +// Python, but kept for backward compatibility (avoid compiler warnings). +// They are no longer included by limited C API version 3.11 and newer. #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000 # include <errno.h> // errno # include <stdio.h> // FILE* diff --git a/Misc/NEWS.d/next/C API/2023-09-01-21-10-29.gh-issue-108765.eeXtYF.rst b/Misc/NEWS.d/next/C API/2023-09-01-21-10-29.gh-issue-108765.eeXtYF.rst new file mode 100644 index 00000000000000..ff8f79998fa968 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-09-01-21-10-29.gh-issue-108765.eeXtYF.rst @@ -0,0 +1,4 @@ +``Python.h`` no longer includes the ``<unistd.h>`` standard header file. If +needed, it should now be included explicitly. For example, it provides the +functions: ``close()``, ``getpagesize()``, ``getpid()`` and ``sysconf()``. +Patch by Victor Stinner. 
diff --git a/Modules/_ctypes/malloc_closure.c b/Modules/_ctypes/malloc_closure.c index 3a859322772ba7..bb4f8f21bd3f77 100644 --- a/Modules/_ctypes/malloc_closure.c +++ b/Modules/_ctypes/malloc_closure.c @@ -1,16 +1,17 @@ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif + #include <Python.h> #include <ffi.h> #ifdef MS_WIN32 -#include <windows.h> +# include <windows.h> #else -#include <sys/mman.h> -#include <unistd.h> -# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -# define MAP_ANONYMOUS MAP_ANON -# endif +# include <sys/mman.h> +# include <unistd.h> // sysconf() +# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif #endif #include "ctypes.h" diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c index ac2b0d4f55468c..ef76d26282e1b3 100644 --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -8,28 +8,28 @@ #include "pycore_pystate.h" #include "pycore_signal.h" // _Py_RestoreSignals() #if defined(HAVE_PIPE2) && !defined(_GNU_SOURCE) -# define _GNU_SOURCE +# define _GNU_SOURCE #endif -#include <unistd.h> -#include <fcntl.h> +#include <unistd.h> // close() +#include <fcntl.h> // fcntl() #ifdef HAVE_SYS_TYPES_H -#include <sys/types.h> +# include <sys/types.h> #endif #if defined(HAVE_SYS_STAT_H) -#include <sys/stat.h> +# include <sys/stat.h> // stat() #endif #ifdef HAVE_SYS_SYSCALL_H -#include <sys/syscall.h> +# include <sys/syscall.h> #endif #if defined(HAVE_SYS_RESOURCE_H) -#include <sys/resource.h> +# include <sys/resource.h> #endif #ifdef HAVE_DIRENT_H -#include <dirent.h> +# include <dirent.h> // opendir() +#endif +#if defined(HAVE_SETGROUPS) +# include <grp.h> // setgroups() #endif -#ifdef HAVE_GRP_H -#include <grp.h> -#endif /* HAVE_GRP_H */ #include "posixmodule.h" diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index 4fc354ae79bfed..ab33702cdfd872 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -24,9 +24,6 @@ #include <float.h> // FLT_MAX #include <signal.h> #include <stddef.h> // offsetof() -#ifndef MS_WINDOWS -# include <unistd.h> -#endif #ifdef HAVE_SYS_WAIT_H # include <sys/wait.h> // W_STOPCODE diff --git a/Modules/grpmodule.c b/Modules/grpmodule.c index f5709296334a8f..20e83de84e8340 100644 --- a/Modules/grpmodule.c +++ b/Modules/grpmodule.c @@ -4,7 +4,8 @@ #include "Python.h" #include "posixmodule.h" -#include <grp.h> +#include <grp.h> // getgrgid_r() +#include <unistd.h> // sysconf() #include "clinic/grpmodule.c.h" /*[clinic input] diff --git a/Modules/mmapmodule.c b/Modules/mmapmodule.c index c8cd7e59dbab50..d11200a4042551 100644 --- a/Modules/mmapmodule.c +++ b/Modules/mmapmodule.c @@ -28,6 +28,9 @@ #include "pycore_fileutils.h" // _Py_stat_struct #include <stddef.h> // offsetof() +#ifndef MS_WINDOWS +# include <unistd.h> // close() +#endif // to support MS_WINDOWS_SYSTEM OpenFileMappingA / CreateFileMappingA // need to be replaced with OpenFileMappingW / CreateFileMappingW diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 761542866d8f96..6e829b200fa46d 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -286,7 +286,7 @@ corresponding Unix manual entries for more information on calls."); #endif #ifdef HAVE_COPY_FILE_RANGE -# include <unistd.h> +# include <unistd.h> // copy_file_range() #endif #if !defined(CPU_ALLOC) && defined(HAVE_SCHED_SETAFFINITY) diff --git a/Modules/pwdmodule.c b/Modules/pwdmodule.c index cc2e2a43893971..b7034369c4731e 100644 --- a/Modules/pwdmodule.c +++ b/Modules/pwdmodule.c @@ -4,7 +4,8 @@ 
#include "Python.h" #include "posixmodule.h" -#include <pwd.h> +#include <pwd.h> // getpwuid() +#include <unistd.h> // sysconf() #include "clinic/pwdmodule.c.h" /*[clinic input] diff --git a/Modules/resource.c b/Modules/resource.c index 4614f5e98cc888..f5d9972d9a8ff7 100644 --- a/Modules/resource.c +++ b/Modules/resource.c @@ -1,13 +1,12 @@ - #include "Python.h" -#include <sys/resource.h> +#include <errno.h> // errno +#include <string.h> +#include <sys/resource.h> // getrusage() #ifdef HAVE_SYS_TIME_H -#include <sys/time.h> +# include <sys/time.h> #endif #include <time.h> -#include <string.h> -#include <errno.h> -#include <unistd.h> +#include <unistd.h> // getpagesize() /* On some systems, these aren't in any header file. On others they are, with inconsistent prototypes. diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c index 4987cf0f2065c2..c56e682b21e2a1 100644 --- a/Modules/selectmodule.c +++ b/Modules/selectmodule.c @@ -17,6 +17,9 @@ #include "pycore_time.h" // _PyTime_t #include <stddef.h> // offsetof() +#ifndef MS_WINDOWS +# include <unistd.h> // close() +#endif #ifdef HAVE_SYS_DEVPOLL_H #include <sys/resource.h> diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index 2f12c9cedbd8a6..74b1c1c661604f 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -269,7 +269,7 @@ shutdown(how) -- shut down traffic in one or both directions\n\ #ifdef HAVE_NETDB_H # include <netdb.h> #endif -# include <unistd.h> +#include <unistd.h> // close() /* Headers needed for inet_ntoa() and inet_addr() */ # include <arpa/inet.h> diff --git a/Programs/_freeze_module.c b/Programs/_freeze_module.c index e55f1d56745c4d..f6c46fa629efba 100644 --- a/Programs/_freeze_module.c +++ b/Programs/_freeze_module.c @@ -19,7 +19,7 @@ #include <sys/types.h> #include <sys/stat.h> #ifndef MS_WINDOWS -#include <unistd.h> +# include <unistd.h> #endif uint32_t _Py_next_func_version = 1; diff --git a/Python/dup2.c b/Python/dup2.c index a1df0492099163..936211f27ec737 100644 --- a/Python/dup2.c +++ b/Python/dup2.c @@ -11,9 +11,9 @@ * Return fd2 if all went well; return BADEXIT otherwise. */ -#include <errno.h> -#include <fcntl.h> -#include <unistd.h> +#include <errno.h> // errno +#include <fcntl.h> // fcntl() +#include <unistd.h> // close() #define BADEXIT -1 diff --git a/Python/perf_trampoline.c b/Python/perf_trampoline.c index b8885a303977d0..10675bf9f8292a 100644 --- a/Python/perf_trampoline.c +++ b/Python/perf_trampoline.c @@ -140,9 +140,9 @@ any DWARF information available for them). #include <fcntl.h> #include <stdio.h> #include <stdlib.h> -#include <sys/mman.h> +#include <sys/mman.h> // mmap() #include <sys/types.h> -#include <unistd.h> +#include <unistd.h> // sysconf() #if defined(__arm__) || defined(__arm64__) || defined(__aarch64__) #define PY_HAVE_INVALIDATE_ICACHE
<!-- Thanks for your contribution! Please read this comment in its entirety. It's quite important. # Pull Request title It should be in the following format: ``` gh-NNNNN: Summary of the changes made ``` Where: gh-NNNNN refers to the GitHub issue number. Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue. # Backport Pull Request title If this is a backport PR (PR made against branches other than `main`), please ensure that the PR title is in the following format: ``` [X.Y] <title from the original PR> (GH-NNNN) ``` Where: [X.Y] is the branch name, e.g. [3.6]. GH-NNNN refers to the PR number from `main`. --> <!-- gh-issue-number: gh-108765 --> * Issue: gh-108765 <!-- /gh-issue-number --> <!-- readthedocs-preview cpython-previews start --> ---- :books: Documentation preview :books:: https://cpython-previews--108783.org.readthedocs.build/ <!-- readthedocs-preview cpython-previews end -->
https://api.github.com/repos/python/cpython/pulls/108783
2023-09-01T19:11:49Z
2023-09-02T14:50:18Z
2023-09-02T14:50:18Z
2023-10-27T20:28:40Z
2,936
python/cpython
4,344
Simplify PreparedRequest.prepare API
diff --git a/requests/models.py b/requests/models.py index 752c58c153..45b3ea9680 100644 --- a/requests/models.py +++ b/requests/models.py @@ -523,6 +523,10 @@ def prepare_cookies(self, cookies): def prepare_hooks(self, hooks): """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) diff --git a/test_requests.py b/test_requests.py index 15406a22fc..cad8c055c8 100755 --- a/test_requests.py +++ b/test_requests.py @@ -1613,7 +1613,6 @@ def test_prepare_unicode_url(): p.prepare( method='GET', url=u('http://www.example.com/üniçø∂é'), - hooks=[] ) assert_copy(p, p.copy())
Do not require that hooks be passed as an empty list to PreparedRequest.prepare. In the event hooks is None in prepare or prepare_hooks, use an empty list as a default. Related to #2552
https://api.github.com/repos/psf/requests/pulls/2553
2015-04-21T01:14:53Z
2015-04-21T05:59:55Z
2015-04-21T05:59:55Z
2021-09-08T08:00:49Z
250
psf/requests
33,005
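With the guard above in place, callers can omit the hooks argument entirely when preparing a request by hand. A minimal usage sketch (the URL is a placeholder):

```python
from requests.models import PreparedRequest

p = PreparedRequest()
# hooks defaults to None; prepare_hooks() now treats None as "no hooks"
p.prepare(method="GET", url="https://www.example.com/")
print(p.method, p.url)  # GET https://www.example.com/
```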
Remove elements that don't add value in ES.84
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index 7db626d82..1d437b249 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -9884,7 +9884,7 @@ Statement rules: * [ES.77: Minimize the use of `break` and `continue` in loops](#Res-continue) * [ES.78: Always end a non-empty `case` with a `break`](#Res-break) * [ES.79: Use `default` to handle common cases (only)](#Res-default) -* [ES.84: Don't (try to) declare a local variable with no name](#Res-noname) +* [ES.84: Don't try to declare a local variable with no name](#Res-noname) * [ES.85: Make empty statements visible](#Res-empty) * [ES.86: Avoid modifying loop control variables inside the body of raw for-loops](#Res-loop-counter) * [ES.87: Don't add redundant `==` or `!=` to conditions](#Res-if) @@ -12789,13 +12789,12 @@ Flag `switch`-statements over an enumeration that don't handle all enumerators a This may yield too many false positives in some code bases; if so, flag only `switch`es that handle most but not all cases (that was the strategy of the very first C++ compiler). -### <a name="Res-noname"></a>ES.84: Don't (try to) declare a local variable with no name +### <a name="Res-noname"></a>ES.84: Don't try to declare a local variable with no name ##### Reason There is no such thing. What looks to a human like a variable without a name is to the compiler a statement consisting of a temporary that immediately goes out of scope. -To avoid unpleasant surprises. ##### Example, bad @@ -12808,7 +12807,6 @@ To avoid unpleasant surprises. This declares an unnamed `lock` object that immediately goes out of scope at the point of the semicolon. This is not an uncommon mistake. In particular, this particular example can lead to hard-to find race conditions. -There are exceedingly clever uses of this "idiom", but they are far rarer than the mistakes. ##### Note @@ -12816,7 +12814,7 @@ Unnamed function arguments are fine. ##### Enforcement -Flag statements that are just a temporary +Flag statements that are just a temporary. ### <a name="Res-empty"></a>ES.85: Make empty statements visible
- Parentheses around "(try to)" in rule title add no meaning. - The sentence fragment "To avoid unpleasant surprises." in Reason adds no info that hasn't already been stated. - "There are exceedingly clever uses of this 'idiom', but..." seems like a distraction instead of a tight conclusion to the example section. We're not seeking "exceedingly clever" in this guide. Per Bjarne's own words, we're seeking the "smaller, simpler, safer language struggling to get out." - Added a period to the end of the Enforcement sentence.
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/1390
2019-03-17T13:15:39Z
2019-03-21T18:04:50Z
2019-03-21T18:04:50Z
2019-04-02T00:44:27Z
579
isocpp/CppCoreGuidelines
15,926
fix --certs argument
diff --git a/docs/src/content/concepts-certificates.md b/docs/src/content/concepts-certificates.md index ebc5ede990..0fc32c696c 100644 --- a/docs/src/content/concepts-certificates.md +++ b/docs/src/content/concepts-certificates.md @@ -93,7 +93,7 @@ The files created by mitmproxy in the .mitmproxy directory are as follows: ## Using a custom server certificate -You can use your own (leaf) certificate by passing the `--cert +You can use your own (leaf) certificate by passing the `--certs [domain=]path_to_certificate` option to mitmproxy. Mitmproxy then uses the provided certificate for interception of the specified domain instead of generating a certificate signed by its own CA. @@ -127,13 +127,13 @@ Now, you can run mitmproxy with the generated certificate: **For all domain names** ```bash -mitmproxy --cert *=cert.pem +mitmproxy --certs *=cert.pem ``` **For specific domain names** ```bash -mitmproxy --cert *.example.com=cert.pem +mitmproxy --certs *.example.com=cert.pem ``` **Note:** `*.example.com` is for all the subdomains. You can also use
The help output states that `--certs` is the correct option name, so the docs are updated to match it.
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/4412
2021-01-24T21:05:38Z
2021-01-24T21:12:17Z
2021-01-24T21:12:17Z
2021-01-31T09:05:58Z
287
mitmproxy/mitmproxy
27,421
readme.md dataset Table Formatting
diff --git a/model/model_training/README.md b/model/model_training/README.md index 914c82eff7..1f5dbcf9ed 100644 --- a/model/model_training/README.md +++ b/model/model_training/README.md @@ -212,10 +212,9 @@ deepspeed trainer_sft.py --configs defaults your-model-name --deepspeed Here is an uncomplete overview of datasets for sft: <!-- prettier-ignore --> +<!-- prettier-ignore-start --> dataset_name | train_counts | eval_counts | total_counts ----------------------------------------------------------------- - -<!-- prettier-ignore --> +--|--|--|-- joke | 301 | 76 | 377 webgpt | 14251 | 3563 | 17814 gpt4all | 313552 | 78388 | 391940 @@ -233,6 +232,7 @@ prosocial_dialogue | 157160 | 26983 | 184143 explain_prosocial | 360708 | 61248 | 421956 soda | 924102 | 231026 | 1155128 oa_leet10k | 18728 | 4683 | 23411 +<!-- prettier-ignore-end --> This list can be generated with the following command, but beware that this downloads all available datasets (>100GB):
Change the Markdown table formatting; the old format was broken and did not render correctly.
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3219
2023-05-23T23:43:15Z
2023-05-26T21:18:13Z
2023-05-26T21:18:13Z
2023-05-26T21:18:14Z
329
LAION-AI/Open-Assistant
37,250
Enhancement: add non-existent Lambda name to response headers
diff --git a/localstack/services/awslambda/lambda_api.py b/localstack/services/awslambda/lambda_api.py index 36e4345824805..c4c74d2d2a3e7 100644 --- a/localstack/services/awslambda/lambda_api.py +++ b/localstack/services/awslambda/lambda_api.py @@ -793,19 +793,23 @@ def forward_to_fallback_url(func_arn, data): Lambda to the configured URL. """ if not config.LAMBDA_FALLBACK_URL: return None + + lambda_name = aws_stack.lambda_function_name(func_arn) if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'): table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc dynamodb = aws_stack.connect_to_service('dynamodb') item = { 'id': {'S': short_uid()}, 'timestamp': {'N': str(now_utc())}, - 'payload': {'S': str(data)} + 'payload': {'S': str(data)}, + 'function_name': {'S': lambda_name} } aws_stack.create_dynamodb_table(table_name, partition_key='id') dynamodb.put_item(TableName=table_name, Item=item) return '' if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL): - response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data) + headers = {'lambda-function-name': lambda_name} + response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data, headers=headers) return response.content raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL) @@ -1144,14 +1148,14 @@ def invoke_function(function): # Default invocation type is RequestResponse invocation_type = request.environ.get('HTTP_X_AMZ_INVOCATION_TYPE', 'RequestResponse') - def _create_response(result, status_code=200): + def _create_response(result, status_code=200, headers={}): """ Create the final response for the given invocation result """ if isinstance(result, Response): return result details = { 'StatusCode': status_code, 'Payload': result, - 'Headers': {} + 'Headers': headers } if isinstance(result, dict): for key in ('StatusCode', 'Payload', 'FunctionError'): @@ -1187,7 +1191,7 @@ def _create_response(result, status_code=200): not_found = not_found_error('{0}:{1}'.format(arn, qualifier)) if not_found: - forward_result = forward_to_fallback_url(func_arn, data) + forward_result = forward_to_fallback_url(arn, data) if forward_result is not None: return _create_response(forward_result) return not_found diff --git a/tests/integration/test_lambda.py b/tests/integration/test_lambda.py index 9089e6ef6fd70..f61952e453769 100644 --- a/tests/integration/test_lambda.py +++ b/tests/integration/test_lambda.py @@ -120,7 +120,7 @@ def num_items(): def test_forward_to_fallback_url_http(self): class MyUpdateListener(ProxyListener): def forward_request(self, method, path, data, headers): - records.append(data) + records.append({'data': data, 'headers': headers}) return 200 records = [] @@ -130,9 +130,26 @@ def forward_request(self, method, path, data, headers): items_before = len(records) _run_forward_to_fallback_url('%s://localhost:%s' % (get_service_protocol(), local_port)) items_after = len(records) + for record in records: + self.assertIn('non-existing-lambda', record['headers']['lambda-function-name']) + self.assertEqual(items_after, items_before + 3) proxy.stop() + def test_adding_fallback_function_name_in_headers(self): + + lambda_client = aws_stack.connect_to_service('lambda') + ddb_client = aws_stack.connect_to_service('dynamodb') + + db_table = 'lambda-records' + config.LAMBDA_FALLBACK_URL = 'dynamodb://%s' % db_table + + lambda_client.invoke(FunctionName='non-existing-lambda', + Payload=b'{}', InvocationType='RequestResponse') + + result = run_safe(ddb_client.scan, 
TableName=db_table) + self.assertEqual(result['Items'][0]['function_name']['S'], 'non-existing-lambda') + def test_dead_letter_queue(self): sqs_client = aws_stack.connect_to_service('sqs') lambda_client = aws_stack.connect_to_service('lambda')
Enhancement: add the name of the non-existent Lambda to the response headers. Fixes #1971
https://api.github.com/repos/localstack/localstack/pulls/2397
2020-05-05T21:09:13Z
2020-05-07T21:44:17Z
2020-05-07T21:44:17Z
2020-05-07T21:44:17Z
1,054
localstack/localstack
28,759
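Condensing the HTTP branch of the patched forwarding logic into a standalone sketch — with plain `requests` standing in for localstack's `safe_requests` wrapper — the idea is simply that the missing Lambda's name rides along in a custom header:

```python
import requests

def forward_to_fallback_url(fallback_url, lambda_name, data):
    # Tag the payload with the function it was originally aimed at, so the
    # receiving service can tell which non-existent Lambda was invoked.
    headers = {"lambda-function-name": lambda_name}
    return requests.post(fallback_url, data=data, headers=headers)
```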
Remove extra spider parameter in item pipeline docs
diff --git a/docs/topics/item-pipeline.rst b/docs/topics/item-pipeline.rst index bc26bbebe55..a5f6e07b89d 100644 --- a/docs/topics/item-pipeline.rst +++ b/docs/topics/item-pipeline.rst @@ -215,7 +215,7 @@ item. screenshot_url = self.SPLASH_URL.format(encoded_item_url) request = scrapy.Request(screenshot_url, callback=NO_CALLBACK) response = await maybe_deferred_to_future( - spider.crawler.engine.download(request, spider) + spider.crawler.engine.download(request) ) if response.status != 200:
Fixes #6008 I looked in other files and this is the only place where we still have an example of passing an extra spider parameter.
https://api.github.com/repos/scrapy/scrapy/pulls/6009
2023-08-10T11:46:17Z
2023-08-10T11:48:44Z
2023-08-10T11:48:44Z
2023-08-10T11:48:44Z
143
scrapy/scrapy
34,279
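For context, the corrected documentation example boils down to an item pipeline that downloads through the engine with the one-argument `download()` call. A sketch under recent-Scrapy assumptions (the pipeline class and URL are illustrative; `NO_CALLBACK` requires Scrapy ≥ 2.8):

```python
import scrapy
from scrapy.http.request import NO_CALLBACK
from scrapy.utils.defer import maybe_deferred_to_future

class ScreenshotPipeline:
    """Fetch an extra URL while an item is being processed."""

    async def process_item(self, item, spider):
        request = scrapy.Request("https://example.com/", callback=NO_CALLBACK)
        response = await maybe_deferred_to_future(
            spider.crawler.engine.download(request)  # no spider argument
        )
        # ... inspect response.status / response.body here ...
        return item
```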
[events] add tests for ErrorEvent event types
diff --git a/tests/sentry/eventtypes/__init__.py b/tests/sentry/eventtypes/__init__.py new file mode 100644 index 0000000000000..c3961685ab8de --- /dev/null +++ b/tests/sentry/eventtypes/__init__.py @@ -0,0 +1 @@ +from __future__ import absolute_import diff --git a/tests/sentry/eventtypes/test_error.py b/tests/sentry/eventtypes/test_error.py new file mode 100644 index 0000000000000..b4d8108df37f6 --- /dev/null +++ b/tests/sentry/eventtypes/test_error.py @@ -0,0 +1,21 @@ +from __future__ import absolute_import + +from sentry.eventtypes import ErrorEvent +from sentry.testutils import TestCase + + +class ErrorEventTest(TestCase): + def test_to_string_none_value(self): + inst = ErrorEvent({}) + result = inst.to_string({'type': 'Error', 'value': None}) + assert result == 'Error' + + def test_to_string_eliminates_values_with_newline(self): + inst = ErrorEvent({}) + result = inst.to_string({'type': 'Error', 'value': 'foo\nbar'}) + assert result == 'Error: foo' + + def test_to_string_handles_empty_value(self): + inst = ErrorEvent({}) + result = inst.to_string({'type': 'Error', 'value': ''}) + assert result == 'Error'
https://api.github.com/repos/getsentry/sentry/pulls/4261
2016-10-03T23:26:17Z
2016-10-03T23:36:52Z
2016-10-03T23:36:52Z
2020-12-23T10:01:48Z
335
getsentry/sentry
44,182
R.3_609: changed owner<T> to owner<T*> in R.3 per issue #609
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index 082bbf524..26c3de932 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -7232,7 +7232,7 @@ We can fix that problem by making ownership explicit: class X2 { // ... public: - owner<T> p; // OK: p is owning + owner<T*> p; // OK: p is owning T* q; // OK: q is not owning }; @@ -7256,9 +7256,9 @@ Some interfaces cannot be simply annotated with `owner` because they need to rem ##### Note -`owner<T>` has no default semantics beyond `T*`. It can be used without changing any code using it and without affecting ABIs. +`owner<T*>` has no default semantics beyond `T*`. It can be used without changing any code using it and without affecting ABIs. It is simply a indicator to programmers and analysis tools. -For example, if an `owner<T>` is a member of a class, that class better have a destructor that `delete`s it. +For example, if an `owner<T*>` is a member of a class, that class better have a destructor that `delete`s it. ##### Example, bad
In guideline R.3: changed owner<T> references to owner<T*>, as described in issue #609.
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/612
2016-05-19T01:46:53Z
2016-05-19T15:18:30Z
2016-05-19T15:18:30Z
2016-05-19T17:07:01Z
301
isocpp/CppCoreGuidelines
15,869
Fortinet's FortiOS user adgrp
diff --git a/lib/ansible/modules/network/fortios/fortios_user_adgrp.py b/lib/ansible/modules/network/fortios/fortios_user_adgrp.py new file mode 100644 index 00000000000000..7cc8a1c8378594 --- /dev/null +++ b/lib/ansible/modules/network/fortios/fortios_user_adgrp.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +from __future__ import (absolute_import, division, print_function) +# Copyright 2019 Fortinet, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +__metaclass__ = type + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'metadata_version': '1.1'} + +DOCUMENTATION = ''' +--- +module: fortios_user_adgrp +short_description: Configure FSSO groups in Fortinet's FortiOS and FortiGate. +description: + - This module is able to configure a FortiGate or FortiOS by allowing the + user to set and modify user feature and adgrp category. + Examples include all parameters and values need to be adjusted to datasources before usage. + Tested with FOS v6.0.2 +version_added: "2.8" +author: + - Miguel Angel Munoz (@mamunozgonzalez) + - Nicolas Thomas (@thomnico) +notes: + - Requires fortiosapi library developed by Fortinet + - Run as a local_action in your playbook +requirements: + - fortiosapi>=0.9.8 +options: + host: + description: + - FortiOS or FortiGate ip address. + required: true + username: + description: + - FortiOS or FortiGate username. + required: true + password: + description: + - FortiOS or FortiGate password. + default: "" + vdom: + description: + - Virtual domain, among those defined previously. A vdom is a + virtual instance of the FortiGate that can be configured and + used as a different unit. + default: root + https: + description: + - Indicates if the requests towards FortiGate must use HTTPS + protocol + type: bool + default: true + user_adgrp: + description: + - Configure FSSO groups. + default: null + suboptions: + state: + description: + - Indicates whether to create or remove the object + choices: + - present + - absent + name: + description: + - Name. + required: true + server-name: + description: + - FSSO agent name. Source user.fsso.name. +''' + +EXAMPLES = ''' +- hosts: localhost + vars: + host: "192.168.122.40" + username: "admin" + password: "" + vdom: "root" + tasks: + - name: Configure FSSO groups. 
+ fortios_user_adgrp: + host: "{{ host }}" + username: "{{ username }}" + password: "{{ password }}" + vdom: "{{ vdom }}" + https: "False" + user_adgrp: + state: "present" + name: "default_name_3" + server-name: "<your_own_value> (source user.fsso.name)" +''' + +RETURN = ''' +build: + description: Build number of the fortigate image + returned: always + type: str + sample: '1547' +http_method: + description: Last method used to provision the content into FortiGate + returned: always + type: str + sample: 'PUT' +http_status: + description: Last result given by FortiGate on last operation applied + returned: always + type: str + sample: "200" +mkey: + description: Master key (id) used in the last call to FortiGate + returned: success + type: str + sample: "id" +name: + description: Name of the table used to fulfill the request + returned: always + type: str + sample: "urlfilter" +path: + description: Path of the table used to fulfill the request + returned: always + type: str + sample: "webfilter" +revision: + description: Internal revision number + returned: always + type: str + sample: "17.0.2.10658" +serial: + description: Serial number of the unit + returned: always + type: str + sample: "FGVMEVYYQT3AB5352" +status: + description: Indication of the operation's result + returned: always + type: str + sample: "success" +vdom: + description: Virtual domain used + returned: always + type: str + sample: "root" +version: + description: Version of the FortiGate + returned: always + type: str + sample: "v5.6.3" + +''' + +from ansible.module_utils.basic import AnsibleModule + + +def login(data, fos): + host = data['host'] + username = data['username'] + password = data['password'] + + fos.debug('on') + if 'https' in data and not data['https']: + fos.https('off') + else: + fos.https('on') + + fos.login(host, username, password) + + +def filter_user_adgrp_data(json): + option_list = ['name', 'server-name'] + dictionary = {} + + for attribute in option_list: + if attribute in json and json[attribute] is not None: + dictionary[attribute] = json[attribute] + + return dictionary + + +def user_adgrp(data, fos): + vdom = data['vdom'] + user_adgrp_data = data['user_adgrp'] + filtered_data = filter_user_adgrp_data(user_adgrp_data) + + if user_adgrp_data['state'] == "present": + return fos.set('user', + 'adgrp', + data=filtered_data, + vdom=vdom) + + elif user_adgrp_data['state'] == "absent": + return fos.delete('user', + 'adgrp', + mkey=filtered_data['name'], + vdom=vdom) + + +def fortios_user(data, fos): + login(data, fos) + + if data['user_adgrp']: + resp = user_adgrp(data, fos) + + fos.logout() + return not resp['status'] == "success", resp['status'] == "success", resp + + +def main(): + fields = { + "host": {"required": True, "type": "str"}, + "username": {"required": True, "type": "str"}, + "password": {"required": False, "type": "str", "no_log": True}, + "vdom": {"required": False, "type": "str", "default": "root"}, + "https": {"required": False, "type": "bool", "default": True}, + "user_adgrp": { + "required": False, "type": "dict", + "options": { + "state": {"required": True, "type": "str", + "choices": ["present", "absent"]}, + "name": {"required": True, "type": "str"}, + "server-name": {"required": False, "type": "str"} + + } + } + } + + module = AnsibleModule(argument_spec=fields, + supports_check_mode=False) + try: + from fortiosapi import FortiOSAPI + except ImportError: + module.fail_json(msg="fortiosapi module is required") + + fos = FortiOSAPI() + + is_error, has_changed, result = 
fortios_user(module.params, fos) + + if not is_error: + module.exit_json(changed=has_changed, meta=result) + else: + module.fail_json(msg="Error in repo", meta=result) + + +if __name__ == '__main__': + main()
##### SUMMARY Fortinet is adding Ansible support for FortiOS and FortiGate products. This module follows the same structure, guidelines and ideas given in the previously approved module for a parallel feature of FortiGate (webfiltering): https://github.com/ansible/ansible/pull/37196 In this case we are providing a different functionality: "User Adgrp". Please note that this will be part of other modules to come for FortiGate, covering different functionalities: system, wireless-controller, firewall, webfilter, ips, web-proxy, wanopt, application, dlp, spamfilter, log, vpn, certificate, user, dnsfilter, antivirus, report, waf, authentication, switch controller, endpoint-control and router. We plan to follow the same style, structure and usage as in the previous module in order to make it easier to comply with Ansible guidelines. ##### ISSUE TYPE - New Module Pull Request ##### COMPONENT NAME fortios_user_adgrp ##### ANSIBLE VERSION ``` ansible 2.8.0.dev0 (new_module ddbbe5dfa5) last updated 2018/09/24 14:54:57 (GMT +200) config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/magonzalez/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /home/magonzalez/ansible/lib/ansible executable location = /home/magonzalez/ansible/bin/ansible python version = 2.7.15rc1 (default, Apr 15 2018, 21:51:34) [GCC 7.3.0] ```
https://api.github.com/repos/ansible/ansible/pulls/52831
2019-02-22T16:48:48Z
2019-03-05T11:18:48Z
2019-03-05T11:18:48Z
2019-07-25T16:57:44Z
2,065
ansible/ansible
48,937
Touch up venv docs
diff --git a/Doc/library/venv.rst b/Doc/library/venv.rst index d3d5ae2b007d5f..62732d22438672 100644 --- a/Doc/library/venv.rst +++ b/Doc/library/venv.rst @@ -47,7 +47,7 @@ Creating virtual environments A virtual environment is a directory tree which contains Python executable files and other files which indicate that it is a virtual environment. - Common installation tools such as ``Setuptools`` and ``pip`` work as + Common installation tools such as setuptools_ and pip_ work as expected with virtual environments. In other words, when a virtual environment is active, they install Python packages into the virtual environment without needing to be told to do so explicitly. @@ -64,24 +64,25 @@ Creating virtual environments Python installation). When a virtual environment is active, any options that change the - installation path will be ignored from all distutils configuration files to - prevent projects being inadvertently installed outside of the virtual - environment. + installation path will be ignored from all :mod:`distutils` configuration + files to prevent projects being inadvertently installed outside of the + virtual environment. When working in a command shell, users can make a virtual environment active by running an ``activate`` script in the virtual environment's executables - directory (the precise filename is shell-dependent), which prepends the - virtual environment's directory for executables to the ``PATH`` environment - variable for the running shell. There should be no need in other - circumstances to activate a virtual environment—scripts installed into - virtual environments have a "shebang" line which points to the virtual - environment's Python interpreter. This means that the script will run with - that interpreter regardless of the value of ``PATH``. On Windows, "shebang" - line processing is supported if you have the Python Launcher for Windows - installed (this was added to Python in 3.3 - see :pep:`397` for more - details). Thus, double-clicking an installed script in a Windows Explorer - window should run the script with the correct interpreter without there - needing to be any reference to its virtual environment in ``PATH``. + directory (the precise filename and command to use the file is + shell-dependent), which prepends the virtual environment's directory for + executables to the ``PATH`` environment variable for the running shell. There + should be no need in other circumstances to activate a virtual + environment; scripts installed into virtual environments have a "shebang" + line which points to the virtual environment's Python interpreter. This means + that the script will run with that interpreter regardless of the value of + ``PATH``. On Windows, "shebang" line processing is supported if you have the + Python Launcher for Windows installed (this was added to Python in 3.3 - see + :pep:`397` for more details). Thus, double-clicking an installed script in a + Windows Explorer window should run the script with the correct interpreter + without there needing to be any reference to its virtual environment in + ``PATH``. .. _venv-api: @@ -135,20 +136,20 @@ creation according to their needs, the :class:`EnvBuilder` class. Added the ``upgrade_deps`` parameter Creators of third-party virtual environment tools will be free to use the - provided ``EnvBuilder`` class as a base class. + provided :class:`EnvBuilder` class as a base class. The returned env-builder is an object which has a method, ``create``: .. 
method:: create(env_dir) - This method takes as required argument the path (absolute or relative to - the current directory) of the target directory which is to contain the + Create a virtual environment by specifying the target directory + (absolute or relative to the current directory) which is to contain the virtual environment. The ``create`` method will either create the environment in the specified directory, or raise an appropriate exception. - The ``create`` method of the ``EnvBuilder`` class illustrates the hooks - available for subclass customization:: + The ``create`` method of the :class:`EnvBuilder` class illustrates the + hooks available for subclass customization:: def create(self, env_dir): """ @@ -476,3 +477,7 @@ subclass which installs setuptools and pip into a created virtual environment:: This script is also available for download `online <https://gist.github.com/vsajip/4673395>`_. + + +.. _setuptools: https://pypi.org/project/setuptools/ +.. _pip: https://pypi.org/project/pip/
https://api.github.com/repos/python/cpython/pulls/14922
2019-07-23T20:51:49Z
2019-07-23T21:34:33Z
2019-07-23T21:34:33Z
2019-07-23T21:34:53Z
1,074
python/cpython
4,321
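The `EnvBuilder` API those docs describe can be exercised in a couple of lines; a minimal sketch, with an arbitrary target directory name:

```python
import venv

builder = venv.EnvBuilder(with_pip=True)  # install pip into the new env
builder.create("demo-env")  # path may be absolute or relative to the cwd
```

Subclasses customize creation by overriding the hooks that `create()` calls, as the docstring quoted in the diff above outlines.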
add: test for proxy.py
diff --git a/test_proxy.py b/test_proxy.py new file mode 100644 index 00000000..fdf9188a --- /dev/null +++ b/test_proxy.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from proxy import Proxy, NoTalkProxy +from io import StringIO +import sys +from time import time + +if sys.version_info < (2, 7): + import unittest2 as unittest +else: + import unittest + + +class ProxyTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + """ Class scope setup. """ + cls.p = Proxy() + + def setUp(cls): + """ Function/test case scope setup. """ + cls.output = StringIO() + cls.saved_stdout = sys.stdout + sys.stdout = cls.output + + def tearDown(cls): + """ Function/test case scope teardown. """ + cls.output.close() + sys.stdout = cls.saved_stdout + + def test_sales_manager_shall_talk_through_proxy_with_delay(cls): + cls.p.busy = 'No' + start_time = time() + cls.p.talk() + end_time = time() + execution_time = end_time - start_time + print_output = cls.output.getvalue() + expected_print_output = 'Proxy checking for Sales Manager availability\n\ +Sales Manager ready to talk\n' + cls.assertEqual(print_output, expected_print_output) + expected_execution_time = 2 + cls.assertEqual(int(execution_time), expected_execution_time) + + def test_sales_manager_shall_respond_through_proxy_with_delay(cls): + cls.p.busy = 'Yes' + start_time = time() + cls.p.talk() + end_time = time() + execution_time = end_time - start_time + print_output = cls.output.getvalue() + expected_print_output = 'Proxy checking for Sales Manager availability\n\ +Sales Manager is busy\n' + cls.assertEqual(print_output, expected_print_output) + expected_execution_time = 2 + cls.assertEqual(int(execution_time), expected_execution_time) + + +class NoTalkProxyTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + """ Class scope setup. """ + cls.ntp = NoTalkProxy() + + def setUp(cls): + """ Function/test case scope setup. """ + cls.output = StringIO() + cls.saved_stdout = sys.stdout + sys.stdout = cls.output + + def tearDown(cls): + """ Function/test case scope teardown. """ + cls.output.close() + sys.stdout = cls.saved_stdout + + def test_sales_manager_shall_not_talk_through_proxy_with_delay(cls): + cls.ntp.busy = 'No' + start_time = time() + cls.ntp.talk() + end_time = time() + execution_time = end_time - start_time + print_output = cls.output.getvalue() + expected_print_output = 'Proxy checking for Sales Manager availability\n\ +This Sales Manager will not talk to you whether he/she is busy or not\n' + cls.assertEqual(print_output, expected_print_output) + expected_execution_time = 2 + cls.assertEqual(int(execution_time), expected_execution_time) + + def test_sales_manager_shall_not_respond_through_proxy_with_delay(cls): + cls.ntp.busy = 'Yes' + start_time = time() + cls.ntp.talk() + end_time = time() + execution_time = end_time - start_time + print_output = cls.output.getvalue() + expected_print_output = 'Proxy checking for Sales Manager availability\n\ +This Sales Manager will not talk to you whether he/she is busy or not\n' + cls.assertEqual(print_output, expected_print_output) + expected_execution_time = 2 + cls.assertEqual(int(execution_time), expected_execution_time) + +if __name__ == "__main__": + unittest.main()
#138 reminded me of some tests for the current proxy.py version that were sitting on my local machine. They run with Python 3.4.1 on Linux.
https://api.github.com/repos/faif/python-patterns/pulls/139
2016-04-25T18:33:21Z
2016-05-21T18:37:31Z
2016-05-21T18:37:31Z
2016-05-21T18:37:31Z
884
faif/python-patterns
33,696
fix json tool
diff --git a/libs/langchain/langchain/tools/json/tool.py b/libs/langchain/langchain/tools/json/tool.py index 6f6473d51e6b47..6c75de20ce5cb2 100644 --- a/libs/langchain/langchain/tools/json/tool.py +++ b/libs/langchain/langchain/tools/json/tool.py @@ -20,7 +20,7 @@ def _parse_input(text: str) -> List[Union[str, int]]: """Parse input of the form data["key1"][0]["key2"] into a list of keys.""" _res = re.findall(r"\[.*?]", text) # strip the brackets and quotes, convert to int if possible - res = [i[1:-1].replace('"', "") for i in _res] + res = [i[1:-1].replace('"', "").replace("'", "") for i in _res] res = [int(i) if i.isdigit() else i for i in res] return res diff --git a/libs/langchain/tests/unit_tests/tools/test_json.py b/libs/langchain/tests/unit_tests/tools/test_json.py index 36a96595e03d36..b677b1577d3933 100644 --- a/libs/langchain/tests/unit_tests/tools/test_json.py +++ b/libs/langchain/tests/unit_tests/tools/test_json.py @@ -30,6 +30,10 @@ def test_json_spec_value() -> None: assert spec.value('data["baz"]') == "{'test': {'foo': [1, 2, 3]}}" assert spec.value('data["baz"]["test"]') == "{'foo': [1, 2, 3]}" assert spec.value('data["baz"]["test"]["foo"]') == "[1, 2, 3]" + assert spec.value("data['foo']") == "bar" + assert spec.value("data['baz']") == "{'test': {'foo': [1, 2, 3]}}" + assert spec.value("data['baz']['test']") == "{'foo': [1, 2, 3]}" + assert spec.value("data['baz']['test']['foo']") == "[1, 2, 3]" def test_json_spec_value_max_length() -> None:
https://api.github.com/repos/langchain-ai/langchain/pulls/9096
2023-08-11T05:50:06Z
2023-08-11T06:39:26Z
2023-08-11T06:39:26Z
2023-08-11T06:39:26Z
499
langchain-ai/langchain
42,962
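The one-character fix above lets the key parser accept single-quoted as well as double-quoted paths. Re-implemented standalone for illustration (mirroring the patched `_parse_input`):

```python
import re
from typing import List, Union

def parse_input(text: str) -> List[Union[str, int]]:
    """Parse data["key1"][0] or data['key1'][0] into a list of keys."""
    parts = re.findall(r"\[.*?]", text)
    # Strip brackets and either quote style; digit strings become int indices.
    keys = [p[1:-1].replace('"', "").replace("'", "") for p in parts]
    return [int(k) if k.isdigit() else k for k in keys]

assert parse_input("data['baz'][0]") == ["baz", 0]
assert parse_input('data["baz"][0]') == ["baz", 0]
```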
[Bilibili] fix bilibili 4k
diff --git a/src/you_get/extractors/bilibili.py b/src/you_get/extractors/bilibili.py index 94e5479f65..7ea626f89d 100644 --- a/src/you_get/extractors/bilibili.py +++ b/src/you_get/extractors/bilibili.py @@ -62,7 +62,7 @@ def bilibili_headers(referer=None, cookie=None): @staticmethod def bilibili_api(avid, cid, qn=0): - return 'https://api.bilibili.com/x/player/playurl?avid=%s&cid=%s&qn=%s&type=&otype=json&fnver=0&fnval=16' % (avid, cid, qn) + return 'https://api.bilibili.com/x/player/playurl?avid=%s&cid=%s&qn=%s&type=&otype=json&fnver=0&fnval=16&fourk=1' % (avid, cid, qn) @staticmethod def bilibili_audio_api(sid):
Bilibili's playurl API now only includes the 4K playback URL in its output when the extra parameter fourk=1 is set; otherwise the 4K stream is omitted. In short: bilibili has changed their playurl API, so we should add fourk=1 to the URL params or it will not output the 4K URL.
https://api.github.com/repos/soimort/you-get/pulls/2827
2020-09-07T06:34:16Z
2020-10-06T13:22:24Z
2020-10-06T13:22:24Z
2020-10-06T13:22:29Z
238
soimort/you-get
21,402
Fix `test_load_img_url_timeout`
diff --git a/tests/utils/test_image_utils.py b/tests/utils/test_image_utils.py index 1813c2a21f254..5d899c2f1ddf7 100644 --- a/tests/utils/test_image_utils.py +++ b/tests/utils/test_image_utils.py @@ -21,7 +21,7 @@ import numpy as np import pytest from huggingface_hub.file_download import http_get -from requests import ReadTimeout +from requests import ConnectTimeout, ReadTimeout from tests.pipelines.test_pipelines_document_question_answering import INVOICE_URL from transformers import is_torch_available, is_vision_available @@ -491,7 +491,7 @@ def test_load_img_url(self): @is_flaky() def test_load_img_url_timeout(self): - with self.assertRaises(ReadTimeout): + with self.assertRaises((ReadTimeout, ConnectTimeout)): load_image(INVOICE_URL, timeout=0.001) def test_load_img_local(self):
# What does this PR do? #25184 added a timeout parameter to some functions and also a test. But the exception raised in that test is `ConnectTimeout` on the daily CI, while it is `ReadTimeout` on CircleCI. I haven't looked into why there is such a difference. This PR updates the expected value to `(ReadTimeout, ConnectTimeout)` so the test added in #25184 won't fail. (Let me know if you think we should dive into this.)
https://api.github.com/repos/huggingface/transformers/pulls/25976
2023-09-05T07:08:01Z
2023-09-05T09:34:28Z
2023-09-05T09:34:28Z
2023-09-05T09:35:12Z
212
huggingface/transformers
12,793
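The fix leans on `assertRaises` accepting a tuple of exception classes, so whichever timeout flavour the environment produces will satisfy the test. A self-contained illustration of the pattern (the explicit `raise` stands in for the flaky network call):

```python
import unittest
from requests import ConnectTimeout, ReadTimeout

class TimeoutPatternTest(unittest.TestCase):
    def test_either_timeout_is_accepted(self):
        with self.assertRaises((ReadTimeout, ConnectTimeout)):
            raise ConnectTimeout()  # a ReadTimeout would pass equally

if __name__ == "__main__":
    unittest.main()
```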
Fix Amazon Linux bootstrapping error.
diff --git a/letsencrypt-auto b/letsencrypt-auto index b3e380f9d6c..a3009fe52a2 100755 --- a/letsencrypt-auto +++ b/letsencrypt-auto @@ -85,6 +85,8 @@ ExperimentalBootstrap() { DeterminePythonVersion() { if command -v python2.7 > /dev/null ; then export LE_PYTHON=${LE_PYTHON:-python2.7} + elif command -v python27 > /dev/null ; then + export LE_PYTHON=${LE_PYTHON:-python27} elif command -v python2 > /dev/null ; then export LE_PYTHON=${LE_PYTHON:-python2} elif command -v python > /dev/null ; then @@ -135,7 +137,7 @@ then elif uname | grep -iq Darwin ; then ExperimentalBootstrap "Mac OS X" mac.sh elif grep -iq "Amazon Linux" /etc/issue ; then - ExperimentalBootstrap "Amazon Linux" amazon_linux.sh + ExperimentalBootstrap "Amazon Linux" _rpm_common.sh else echo "Sorry, I don't know how to bootstrap Let's Encrypt on your operating system!" echo
https://api.github.com/repos/certbot/certbot/pulls/1516
2015-11-16T08:26:44Z
2015-11-16T20:19:55Z
2015-11-16T20:19:55Z
2016-05-06T19:22:06Z
271
certbot/certbot
565
[workflow] changed doc build to run on schedule and on release
diff --git a/.github/workflows/doc_build_after_merge.yml b/.github/workflows/doc_build_on_schedule_after_release.yml similarity index 69% rename from .github/workflows/doc_build_after_merge.yml rename to .github/workflows/doc_build_on_schedule_after_release.yml index b6fd57b8d2b4..62dfdc67257c 100644 --- a/.github/workflows/doc_build_after_merge.yml +++ b/.github/workflows/doc_build_on_schedule_after_release.yml @@ -1,18 +1,16 @@ -name: Build Documentation After Merge +name: Build Documentation On Schedule & After Release on: workflow_dispatch: - push: - paths: - - "version.txt" - - "docs/**" - branches: - - "main" + schedule: + - cron: "0 12 * * *" # build doc every day at 8pm Singapore time (12pm UTC time) + release: + types: [published] jobs: build-doc: name: Trigger Documentation Build Workflow - if: ( github.event_name == 'workflow_dispatch' || github.event.pull_request.merged == true ) && github.repository == 'hpcaitech/ColossalAI' + if: github.repository == 'hpcaitech/ColossalAI' runs-on: ubuntu-latest steps: - name: trigger workflow in ColossalAI-Documentation
## 📌 Checklist before creating the PR - [x] I have created an issue for this PR for traceability - [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description` - [x] I have added relevant tags if possible for us to better distinguish different PRs ## 🚨 Issue number > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge > > e.g. `fixed #1234`, `closed #1234`, `resolved #1234` Fixed #3814 ## 📝 What does this PR do? > Summarize your work here. > if you have any plots/diagrams/screenshots/tables, please attach them here. This PR is a continuation of #3815; it changes the workflow to be triggered on a schedule (8pm Beijing time every day) and upon release. This is because secrets are not passed to pull-request workflows from a forked repository, and there is currently no way to work around that. ## 💥 Checklist before requesting a review - [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)) - [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible - [x] I have performed a self-review of my code - [ ] I have added thorough tests. - [ ] I have added docstrings for all the functions/methods I implemented ## ⭐️ Do you enjoy contributing to Colossal-AI? - [x] 🌝 Yes, I do. - [ ] 🌚 No, I don't. Tell us more if you don't enjoy contributing to Colossal-AI.
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/3825
2023-05-24T02:11:52Z
2023-05-24T02:50:19Z
2023-05-24T02:50:19Z
2023-05-24T02:50:19Z
308
hpcaitech/ColossalAI
11,136
