patch  stringlengths  17 to 31.2k
y  int64  1 to 1
oldf  stringlengths  0 to 2.21M
idx  int64  1 to 1
id  int64  4.29k to 68.4k
msg  stringlengths  8 to 843
proj  stringclasses  212 values
lang  stringclasses  9 values
@@ -116,10 +116,6 @@ function upgradeToVNodes(arr, offset) { if (Array.isArray(obj)) { upgradeToVNodes(obj); } - else if (obj && typeof obj==='object' && !isValidElement(obj) && ((obj.props && obj.type) || obj.text!=null)) { - if (obj.text) continue; - arr[i] = createElement(obj.type, obj.props, obj.props.children); - } } }
1
import { render as preactRender, cloneElement as preactCloneElement, createRef, h, Component, options, toChildArray, createContext, Fragment } from 'preact'; import * as hooks from 'preact/hooks'; export * from 'preact/hooks'; const version = '16.8.0'; // trick libraries to think we are react /* istanbul ignore next */ const REACT_ELEMENT_TYPE = (typeof Symbol!=='undefined' && Symbol.for && Symbol.for('react.element')) || 0xeac7; const CAMEL_PROPS = /^(?:accent|alignment|arabic|baseline|cap|clip|color|fill|flood|font|glyph|horiz|marker|overline|paint|stop|strikethrough|stroke|text|underline|unicode|units|v|vector|vert|word|writing|x)[A-Z]/; let oldEventHook = options.event; options.event = e => { /* istanbul ignore next */ if (oldEventHook) e = oldEventHook(e); e.persist = Object; e.nativeEvent = e; return e; }; /** * Legacy version of createElement. * @param {import('./internal').VNode["type"]} type The node name or Component constructor */ function createFactory(type) { return createElement.bind(null, type); } /** * Normalize DOM vnode properties. * @param {import('./internal').VNode} vnode The vnode to normalize props of * @param {object | null | undefined} props props to normalize */ function handleElementVNode(vnode, props) { let shouldSanitize, attrs, i; for (i in props) if ((shouldSanitize = CAMEL_PROPS.test(i))) break; if (shouldSanitize) { attrs = vnode.props = {}; for (i in props) { attrs[CAMEL_PROPS.test(i) ? i.replace(/([A-Z0-9])/, '-$1').toLowerCase() : i] = props[i]; } } } /** * Proxy render() since React returns a Component reference. * @param {import('./internal').VNode} vnode VNode tree to render * @param {import('./internal').PreactElement} parent DOM node to render vnode tree into * @param {() => void} [callback] Optional callback that will be called after rendering * @returns {import('./internal').Component | null} The root component reference or null */ function render(vnode, parent, callback) { preactRender(vnode, parent); if (typeof callback==='function') callback(); return vnode!=null ? vnode._component : null; } class ContextProvider { getChildContext() { return this.props.context; } render(props) { return props.children; } } /** * Portal component * @param {object | null | undefined} props */ function Portal(props) { let wrap = h(ContextProvider, { context: this.context }, props.vnode); render(wrap, props.container); return null; } /** * Create a `Portal` to continue rendering the vnode tree at a different DOM node * @param {import('./internal').VNode} vnode The vnode to render * @param {import('./internal').PreactElement} container The DOM node to continue rendering in to. */ function createPortal(vnode, container) { return h(Portal, { vnode, container }); } const mapFn = (children, fn) => { if (children == null) return null; children = toChildArray(children); return children.map(fn); }; // This API is completely unnecessary for Preact, so it's basically passthrough. let Children = { map: mapFn, forEach: mapFn, count(children) { return children ? 
toChildArray(children).length : 0; }, only(children) { children = toChildArray(children); if (children.length!==1) throw new Error('Children.only() expects only one child.'); return children[0]; }, toArray: toChildArray }; /** * Upgrade all found vnodes recursively * @param {Array} arr * @param {number} offset */ function upgradeToVNodes(arr, offset) { for (let i=offset || 0; i<arr.length; i++) { let obj = arr[i]; if (Array.isArray(obj)) { upgradeToVNodes(obj); } else if (obj && typeof obj==='object' && !isValidElement(obj) && ((obj.props && obj.type) || obj.text!=null)) { if (obj.text) continue; arr[i] = createElement(obj.type, obj.props, obj.props.children); } } } /** * Wrap `createElement` to apply various vnode normalizations. * @param {import('./internal').VNode["type"]} type The node name or Component constructor * @param {object | null | undefined} [props] The vnode's properties * @param {Array<import('./internal').ComponentChildren>} [children] The vnode's children * @returns {import('./internal').VNode} */ function createElement(...args) { upgradeToVNodes(args, 2); let vnode = h(...args); vnode.$$typeof = REACT_ELEMENT_TYPE; let type = vnode.type, props = vnode.props; if (typeof type!='function') { if (props.defaultValue) { if (!props.value && props.value!==0) { props.value = props.defaultValue; } delete props.defaultValue; } handleElementVNode(vnode, props); } vnode.preactCompatNormalized = false; return normalizeVNode(vnode); } /** * Normalize a vnode * @param {import('./internal').VNode} vnode */ function normalizeVNode(vnode) { vnode.preactCompatNormalized = true; applyClassName(vnode); applyEventNormalization(vnode); return vnode; } /** * Wrap `cloneElement` to abort if the passed element is not a valid element and apply * all vnode normalizations. * @param {import('./internal').VNode} element The vnode to clone * @param {object} props Props to add when cloning * @param {Array<import('./internal').ComponentChildren} rest Optional component children */ function cloneElement(element) { if (!isValidElement(element)) return element; let vnode = normalizeVNode(preactCloneElement.apply(null, arguments)); vnode.$$typeof = REACT_ELEMENT_TYPE; return vnode; } /** * Check if the passed element is a valid (p)react node. * @param {*} element The element to check * @returns {boolean} */ function isValidElement(element) { return element && element.$$typeof===REACT_ELEMENT_TYPE; } /** * Normalize event handlers like react does. Most famously it uses `onChange` for any input element. * @param {import('./internal').VNode} vnode The vnode to normalize events on */ function applyEventNormalization({ type, props }) { if (!props || typeof type!=='string') return; let newProps = {}; for (let i in props) { newProps[i.toLowerCase()] = i; } if (newProps.ondoubleclick) { props.ondblclick = props[newProps.ondoubleclick]; delete props[newProps.ondoubleclick]; } if (newProps.onbeforeinput) { props.onbeforeinput = props[newProps.onbeforeinput]; delete props[newProps.onbeforeinput]; } // for *textual inputs* (incl textarea), normalize `onChange` -> `onInput`: if (newProps.onchange && (type==='textarea' || (type.toLowerCase()==='input' && !/^fil|che|rad/i.test(props.type)))) { let normalized = newProps.oninput || 'oninput'; if (!props[normalized]) { props[normalized] = props[newProps.onchange]; delete props[newProps.onchange]; } } } /** * Remove a component tree from the DOM, including state and event handlers. 
* @param {Element | Document | ShadowRoot | DocumentFragment} container * @returns {boolean} */ function unmountComponentAtNode(container) { if (container._prevVNode!=null) { preactRender(null, container); return true; } return false; } /** * Alias `class` prop to `className` if available * @param {import('./internal').VNode} vnode */ function applyClassName(vnode) { let a = vnode.props; if (a.class || a.className) { classNameDescriptor.enumerable = 'className' in a; if (a.className) a.class = a.className; Object.defineProperty(a, 'className', classNameDescriptor); } } let classNameDescriptor = { configurable: true, get() { return this.class; } }; /** * Check if two objects have a different shape * @param {object} a * @param {object} b * @returns {boolean} */ function shallowDiffers(a, b) { for (let i in a) if (!(i in b)) return true; for (let i in b) if (a[i]!==b[i]) return true; return false; } /** * Get the matching DOM node for a component * @param {import('./internal').Component} component * @returns {import('./internal').PreactElement | null} */ function findDOMNode(component) { return component && (component.base || component.nodeType === 1 && component) || null; } /** * Component class with a predefined `shouldComponentUpdate` implementation */ class PureComponent extends Component { constructor(props) { super(props); // Some third-party libraries check if this property is present this.isPureReactComponent = true; } shouldComponentUpdate(props, state) { return shallowDiffers(this.props, props) || shallowDiffers(this.state, state); } } // Some libraries like `react-virtualized` explicitely check for this. Component.prototype.isReactComponent = {}; /** * Memoize a component, so that it only updates when the props actually have * changed. This was previously known as `React.pure`. * @param {import('./internal').ComponentFactory<any>} c The component constructor * @param {(prev: object, next: object) => boolean} [comparer] Custom equality function * @returns {import('./internal').ComponentFactory<any>} */ function memo(c, comparer) { function shouldUpdate(nextProps) { return !comparer(this.props, nextProps); } function Memoed(props, context) { this.shouldComponentUpdate = this.shouldComponentUpdate || (comparer ? shouldUpdate : PureComponent.prototype.shouldComponentUpdate); return c.call(this, props, context); } Memoed.displayName = 'Memo(' + (c.displayName || c.name) + ')'; return Memoed; } // Patch in `UNSAFE_*` lifecycle hooks function setUnsafeDescriptor(obj, key) { Object.defineProperty(obj.prototype, 'UNSAFE_' + key, { configurable: true, get() { return this[key]; }, set(v) { this[key] = v; } }); } setUnsafeDescriptor(Component, 'componentWillMount'); setUnsafeDescriptor(Component, 'componentWillReceiveProps'); setUnsafeDescriptor(Component, 'componentWillUpdate'); /** * Pass ref down to a child. This is mainly used in libraries with HOCs that * wrap components. Using `forwardRef` there is an easy way to get a reference * of the wrapped component instead of one of the wrapper itself. 
* @param {import('./internal').ForwardFn} fn * @returns {import('./internal').FunctionalComponent} */ function forwardRef(fn) { function Forwarded(props) { let ref = props.ref; delete props.ref; return fn(props, ref); } Forwarded._forwarded = true; Forwarded.displayName = 'ForwardRef(' + (fn.displayName || fn.name) + ')'; return Forwarded; } let oldVNodeHook = options.vnode; options.vnode = vnode => { let type = vnode.type; if (type!=null && type._forwarded) { vnode.props.ref = vnode.ref; vnode.ref = null; } /* istanbul ignore next */ if (oldVNodeHook) oldVNodeHook(vnode); }; export { version, Children, render, render as hydrate, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef }; // React copies the named exports to the default one. export default { ...hooks, version, Children, render, hydrate: render, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef };
1
12,582
This branch is not needed anymore because every `vnode` that passes through `h` in compat or core will have the `$$typeof` property now.
preactjs-preact
js
@@ -0,0 +1,16 @@ +const { getComposedParent } = axe.commons.dom; + +function shouldMatchElement(el) { + if (!el) { + return true; + } + const ariaHiddenValue = el.getAttribute('aria-hidden') + ? el.getAttribute('aria-hidden') + : null; + if (ariaHiddenValue === null) { + return shouldMatchElement(getComposedParent(el)); + } + return false; +} + +return shouldMatchElement(getComposedParent(node));
1
1
13,441
Did you mean to use `hasAttribute`? You shouldn't. This does not improve performance, and it messes with readability.
dequelabs-axe-core
js
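One reading of the review: `getAttribute` already returns `null` when the attribute is absent, so the ternary in the patch adds nothing (aside from also mapping an empty `aria-hidden=""` value to `null`). A hedged sketch of the plainer form; `getComposedParent` is passed in here only to keep the snippet self-contained.

function shouldMatchElement(el, getComposedParent) {
  if (!el) {
    return true;
  }
  // getAttribute returns null for a missing attribute, so no ternary is needed.
  if (el.getAttribute('aria-hidden') === null) {
    return shouldMatchElement(getComposedParent(el), getComposedParent);
  }
  return false;
}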
@@ -37,13 +37,14 @@ import pytest import py.path # pylint: disable=no-name-in-module import helpers.stubs as stubsmod +from helpers.utils import CallbackChecker from qutebrowser.config import config, configdata, configtypes, configexc from qutebrowser.utils import objreg, standarddir from qutebrowser.browser.webkit import cookies from qutebrowser.misc import savemanager, sql from qutebrowser.keyinput import modeman -from PyQt5.QtCore import pyqtSignal, QEvent, QSize, Qt, QObject +from PyQt5.QtCore import QEvent, QSize, Qt from PyQt5.QtGui import QKeyEvent from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout from PyQt5.QtNetwork import QNetworkCookieJar
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. # pylint: disable=invalid-name """pytest fixtures used by the whole testsuite. See https://pytest.org/latest/fixture.html """ import sys import tempfile import itertools import textwrap import unittest.mock import types import attr import pytest import py.path # pylint: disable=no-name-in-module import helpers.stubs as stubsmod from qutebrowser.config import config, configdata, configtypes, configexc from qutebrowser.utils import objreg, standarddir from qutebrowser.browser.webkit import cookies from qutebrowser.misc import savemanager, sql from qutebrowser.keyinput import modeman from PyQt5.QtCore import pyqtSignal, QEvent, QSize, Qt, QObject from PyQt5.QtGui import QKeyEvent from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout from PyQt5.QtNetwork import QNetworkCookieJar class WinRegistryHelper: """Helper class for win_registry.""" @attr.s class FakeWindow: """A fake window object for the registry.""" registry = attr.ib() def windowTitle(self): return 'window title - qutebrowser' def __init__(self): self._ids = [] def add_window(self, win_id): assert win_id not in objreg.window_registry registry = objreg.ObjectRegistry() window = self.FakeWindow(registry) objreg.window_registry[win_id] = window self._ids.append(win_id) def cleanup(self): for win_id in self._ids: del objreg.window_registry[win_id] class CallbackChecker(QObject): """Check if a value provided by a callback is the expected one.""" got_result = pyqtSignal(object) UNSET = object() def __init__(self, qtbot, parent=None): super().__init__(parent) self._qtbot = qtbot self._result = self.UNSET def callback(self, result): """Callback which can be passed to runJavaScript.""" self._result = result self.got_result.emit(result) def check(self, expected): """Wait until the JS result arrived and compare it.""" if self._result is self.UNSET: with self._qtbot.waitSignal(self.got_result, timeout=2000): pass assert self._result == expected @pytest.fixture def callback_checker(qtbot): return CallbackChecker(qtbot) class FakeStatusBar(QWidget): """Fake statusbar to test progressbar sizing.""" def __init__(self, parent=None): super().__init__(parent) self.hbox = QHBoxLayout(self) self.hbox.addStretch() self.hbox.setContentsMargins(0, 0, 0, 0) self.setAttribute(Qt.WA_StyledBackground, True) self.setStyleSheet('background-color: red;') def minimumSizeHint(self): return QSize(1, self.fontMetrics().height()) @pytest.fixture def fake_statusbar(qtbot): """Fixture providing a statusbar in a container window.""" container = QWidget() qtbot.add_widget(container) vbox = QVBoxLayout(container) vbox.addStretch() statusbar = FakeStatusBar(container) # to make sure container isn't GCed # pylint: disable=attribute-defined-outside-init statusbar.container = container 
vbox.addWidget(statusbar) # pylint: enable=attribute-defined-outside-init with qtbot.waitExposed(container): container.show() return statusbar @pytest.fixture def win_registry(): """Fixture providing a window registry for win_id 0 and 1.""" helper = WinRegistryHelper() helper.add_window(0) yield helper helper.cleanup() @pytest.fixture def tab_registry(win_registry): """Fixture providing a tab registry for win_id 0.""" registry = objreg.ObjectRegistry() objreg.register('tab-registry', registry, scope='window', window=0) yield registry objreg.delete('tab-registry', scope='window', window=0) @pytest.fixture def fake_web_tab(stubs, tab_registry, mode_manager, qapp): """Fixture providing the FakeWebTab *class*.""" return stubs.FakeWebTab def _generate_cmdline_tests(): """Generate testcases for test_split_binding.""" @attr.s class TestCase: cmd = attr.ib() valid = attr.ib() separators = [';;', ' ;; ', ';; ', ' ;;'] invalid = ['foo', ''] valid = ['leave-mode', 'hint all'] # Valid command only -> valid for item in valid: yield TestCase(''.join(item), True) # Invalid command only -> invalid for item in invalid: yield TestCase(''.join(item), False) # Invalid command combined with invalid command -> invalid for item in itertools.product(invalid, separators, invalid): yield TestCase(''.join(item), False) # Valid command combined with valid command -> valid for item in itertools.product(valid, separators, valid): yield TestCase(''.join(item), True) # Valid command combined with invalid command -> invalid for item in itertools.product(valid, separators, invalid): yield TestCase(''.join(item), False) # Invalid command combined with valid command -> invalid for item in itertools.product(invalid, separators, valid): yield TestCase(''.join(item), False) # Command with no_cmd_split combined with an "invalid" command -> valid for item in itertools.product(['bind x open'], separators, invalid): yield TestCase(''.join(item), True) # Partial command yield TestCase('message-i', False) @pytest.fixture(params=_generate_cmdline_tests(), ids=lambda e: e.cmd) def cmdline_test(request): """Fixture which generates tests for things validating commandlines.""" return request.param @pytest.fixture(scope='session') def configdata_init(): """Initialize configdata if needed.""" if configdata.DATA is None: configdata.init() @pytest.fixture def config_stub(stubs, monkeypatch, configdata_init): """Fixture which provides a fake config object.""" yaml_config = stubs.FakeYamlConfig() conf = config.Config(yaml_config=yaml_config) monkeypatch.setattr(config, 'instance', conf) container = config.ConfigContainer(conf) monkeypatch.setattr(config, 'val', container) try: configtypes.Font.monospace_fonts = container.fonts.monospace except configexc.NoOptionError: # Completion tests patch configdata so fonts.monospace is unavailable. 
pass conf.val = container # For easier use in tests return conf @pytest.fixture def key_config_stub(config_stub, monkeypatch): """Fixture which provides a fake key config object.""" keyconf = config.KeyConfig(config_stub) monkeypatch.setattr(config, 'key_instance', keyconf) return keyconf @pytest.fixture def host_blocker_stub(stubs): """Fixture which provides a fake host blocker object.""" stub = stubs.HostBlockerStub() objreg.register('host-blocker', stub) yield stub objreg.delete('host-blocker') @pytest.fixture def quickmark_manager_stub(stubs): """Fixture which provides a fake quickmark manager object.""" stub = stubs.QuickmarkManagerStub() objreg.register('quickmark-manager', stub) yield stub objreg.delete('quickmark-manager') @pytest.fixture def bookmark_manager_stub(stubs): """Fixture which provides a fake bookmark manager object.""" stub = stubs.BookmarkManagerStub() objreg.register('bookmark-manager', stub) yield stub objreg.delete('bookmark-manager') @pytest.fixture def session_manager_stub(stubs): """Fixture which provides a fake session-manager object.""" stub = stubs.SessionManagerStub() objreg.register('session-manager', stub) yield stub objreg.delete('session-manager') @pytest.fixture def tabbed_browser_stubs(qapp, stubs, win_registry): """Fixture providing a fake tabbed-browser object on win_id 0 and 1.""" win_registry.add_window(1) stubs = [stubs.TabbedBrowserStub(), stubs.TabbedBrowserStub()] objreg.register('tabbed-browser', stubs[0], scope='window', window=0) objreg.register('tabbed-browser', stubs[1], scope='window', window=1) yield stubs objreg.delete('tabbed-browser', scope='window', window=0) objreg.delete('tabbed-browser', scope='window', window=1) @pytest.fixture def app_stub(stubs): """Fixture which provides a fake app object.""" stub = stubs.ApplicationStub() objreg.register('app', stub) yield stub objreg.delete('app') @pytest.fixture def status_command_stub(stubs, qtbot, win_registry): """Fixture which provides a fake status-command object.""" cmd = stubs.StatusBarCommandStub() objreg.register('status-command', cmd, scope='window', window=0) qtbot.addWidget(cmd) yield cmd objreg.delete('status-command', scope='window', window=0) @pytest.fixture(scope='session') def stubs(): """Provide access to stub objects useful for testing.""" return stubsmod @pytest.fixture(scope='session') def unicode_encode_err(): """Provide a fake UnicodeEncodeError exception.""" return UnicodeEncodeError('ascii', # codec '', # object 0, # start 2, # end 'fake exception') # reason @pytest.fixture(scope='session') def qnam(qapp): """Session-wide QNetworkAccessManager.""" from PyQt5.QtNetwork import QNetworkAccessManager nam = QNetworkAccessManager() nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible) return nam @pytest.fixture def webengineview(): """Get a QWebEngineView if QtWebEngine is available.""" QtWebEngineWidgets = pytest.importorskip('PyQt5.QtWebEngineWidgets') return QtWebEngineWidgets.QWebEngineView() @pytest.fixture def webpage(qnam): """Get a new QWebPage object.""" QtWebKitWidgets = pytest.importorskip('PyQt5.QtWebKitWidgets') page = QtWebKitWidgets.QWebPage() page.networkAccessManager().deleteLater() page.setNetworkAccessManager(qnam) return page @pytest.fixture def webview(qtbot, webpage): """Get a new QWebView object.""" QtWebKitWidgets = pytest.importorskip('PyQt5.QtWebKitWidgets') view = QtWebKitWidgets.QWebView() qtbot.add_widget(view) view.page().deleteLater() view.setPage(webpage) view.resize(640, 480) return view @pytest.fixture def webframe(webpage): 
"""Convenience fixture to get a mainFrame of a QWebPage.""" return webpage.mainFrame() @pytest.fixture def fake_keyevent_factory(): """Fixture that when called will return a mock instance of a QKeyEvent.""" def fake_keyevent(key, modifiers=0, text='', typ=QEvent.KeyPress): """Generate a new fake QKeyPressEvent.""" evtmock = unittest.mock.create_autospec(QKeyEvent, instance=True) evtmock.key.return_value = key evtmock.modifiers.return_value = modifiers evtmock.text.return_value = text evtmock.type.return_value = typ return evtmock return fake_keyevent @pytest.fixture def cookiejar_and_cache(stubs): """Fixture providing a fake cookie jar and cache.""" jar = QNetworkCookieJar() ram_jar = cookies.RAMCookieJar() cache = stubs.FakeNetworkCache() objreg.register('cookie-jar', jar) objreg.register('ram-cookie-jar', ram_jar) objreg.register('cache', cache) yield objreg.delete('cookie-jar') objreg.delete('ram-cookie-jar') objreg.delete('cache') @pytest.fixture def py_proc(): """Get a python executable and args list which executes the given code.""" if getattr(sys, 'frozen', False): pytest.skip("Can't be run when frozen") def func(code): return (sys.executable, ['-c', textwrap.dedent(code.strip('\n'))]) return func @pytest.fixture def fake_save_manager(): """Create a mock of save-manager and register it into objreg.""" fake_save_manager = unittest.mock.Mock(spec=savemanager.SaveManager) objreg.register('save-manager', fake_save_manager) yield fake_save_manager objreg.delete('save-manager') @pytest.fixture def fake_args(request): ns = types.SimpleNamespace() ns.backend = 'webengine' if request.config.webengine else 'webkit' objreg.register('args', ns) yield ns objreg.delete('args') @pytest.fixture def mode_manager(win_registry, config_stub, qapp): mm = modeman.ModeManager(0) objreg.register('mode-manager', mm, scope='window', window=0) yield mm objreg.delete('mode-manager', scope='window', window=0) @pytest.fixture def config_tmpdir(monkeypatch, tmpdir): """Set tmpdir/config as the configdir. Use this to avoid creating a 'real' config dir (~/.config/qute_test). """ confdir = tmpdir / 'config' confdir.ensure(dir=True) monkeypatch.setattr(standarddir, 'config', lambda auto=False: str(confdir)) return confdir @pytest.fixture def data_tmpdir(monkeypatch, tmpdir): """Set tmpdir/data as the datadir. Use this to avoid creating a 'real' data dir (~/.local/share/qute_test). """ datadir = tmpdir / 'data' datadir.ensure(dir=True) monkeypatch.setattr(standarddir, 'data', lambda system=False: str(datadir)) return datadir @pytest.fixture def runtime_tmpdir(monkeypatch, tmpdir): """Set tmpdir/runtime as the runtime dir. Use this to avoid creating a 'real' runtime dir. """ runtimedir = tmpdir / 'runtime' runtimedir.ensure(dir=True) monkeypatch.setattr(standarddir, 'runtime', lambda: str(runtimedir)) return runtimedir @pytest.fixture def redirect_webengine_data(data_tmpdir, monkeypatch): """Set XDG_DATA_HOME and HOME to a temp location. While data_tmpdir covers most cases by redirecting standarddir.data(), this is not enough for places QtWebEngine references the data dir internally. For these, we need to set the environment variable to redirect data access. We also set HOME as in some places, the home directory is used directly... 
""" monkeypatch.setenv('XDG_DATA_HOME', str(data_tmpdir)) monkeypatch.setenv('HOME', str(data_tmpdir)) @pytest.fixture() def short_tmpdir(): """A short temporary directory for a XDG_RUNTIME_DIR.""" with tempfile.TemporaryDirectory() as tdir: yield py.path.local(tdir) # pylint: disable=no-member @pytest.fixture def init_sql(data_tmpdir): """Initialize the SQL module, and shut it down after the test.""" path = str(data_tmpdir / 'test.db') sql.init(path) yield sql.close() class ModelValidator: """Validates completion models.""" def __init__(self, modeltester): modeltester.data_display_may_return_none = True self._model = None self._modeltester = modeltester def set_model(self, model): self._model = model self._modeltester.check(model) def validate(self, expected): assert self._model.rowCount() == len(expected) for row, items in enumerate(expected): for col, item in enumerate(items): assert self._model.data(self._model.index(row, col)) == item @pytest.fixture def model_validator(qtmodeltester): return ModelValidator(qtmodeltester)
1
19,638
Please import modules and not classes (except for Qt stuff).
qutebrowser-qutebrowser
py
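A short illustration of the convention the review asks for, using stdlib names rather than the qutebrowser-specific ones from the patch: bind the module, not the class, so the origin of a name stays visible at the call site and monkeypatching the module stays easy.

# Discouraged: the class name is bound directly into this namespace.
from collections import OrderedDict
d1 = OrderedDict(a=1)

# Preferred: import the module and qualify the name where it is used.
import collections
d2 = collections.OrderedDict(a=1)

assert d1 == d2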
@@ -20,6 +20,11 @@ public class DummyJavaNode extends AbstractJavaNode { super(id); } + @Override + public void setImage(String image) { + super.setImage(image); + } + @Override public Object jjtAccept(JavaParserVisitor visitor, Object data) { return data;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.ast; import net.sourceforge.pmd.annotation.InternalApi; /** * This is a basic JavaNode implementation, useful when needing to create a * dummy node. */ @Deprecated @InternalApi public class DummyJavaNode extends AbstractJavaNode { @InternalApi @Deprecated public DummyJavaNode(int id) { super(id); } @Override public Object jjtAccept(JavaParserVisitor visitor, Object data) { return data; } @Override public <T> void jjtAccept(SideEffectingVisitor<T> visitor, T data) { // do nothing } }
1
17,275
Hm... why do we have a "DummyJavaNode" in src/main? Looks like this should have gone into src/test...
pmd-pmd
java
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package v1alpha1 package v1alpha1 // CasKey is used for all cas specific annotation keys
1
/* Copyright 2017 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 // CasKey is used for all cas specific annotation keys type CasKey string const ( // SPCreateCASTemplateCK is the cas template annotation whose value is the name of // cas template that will be used to provision a storagepool SPCreateCASTemplateCK CasKey = "cas.openebs.io/create-pool-template" // SPDeleteCASTemplateCK is the cas template annotation whose value is the name of // cas template that will be used to delete a storagepool SPDeleteCASTemplateCK CasKey = "cas.openebs.io/delete-pool-template" ) // TopLevelProperty represents the top level property that // is a starting point to represent a hierarchical chain of // properties. // // e.g. // Config.prop1.subprop1 = val1 // Config.prop1.subprop2 = val2 // In above example Config is a top level object // // NOTE: // The value of any hierarchical chain of properties // can be parsed via dot notation type TopLevelProperty string const ( // ConfigTLP is a top level property supported by CAS template engine // // The policy specific properties are placed with ConfigTLP as the // top level property ConfigTLP TopLevelProperty = "Config" // VolumeTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with VolumeTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. VolumeTLP TopLevelProperty = "Volume" // SnapshotTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with SnapshotTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. SnapshotTLP TopLevelProperty = "Snapshot" // StoragePoolTLP is a top level property supported by CAS template engine // // The properties provided by the caller are placed with StoragePoolTLP // as the top level property // // NOTE: // CAS template engine cannot modify these properties. These are the // runtime properties that are provided as inputs to CAS template // engine. StoragePoolTLP TopLevelProperty = "Storagepool" // TaskResultTLP is a top level property supported by CAS template engine // // The specific results after the execution of a task are placed with // TaskResultTLP as the top level property // // NOTE: // This is typically used to feed inputs of a task's execution // result to **next task** before the later's execution TaskResultTLP TopLevelProperty = "TaskResult" // CurrentJSONResultTLP is a top level property supported by CAS template engine // The result of the current task's execution is stored in this top // level property. CurrentJSONResultTLP TopLevelProperty = "JsonResult" // ListItemsTLP is a top level property supported by CAS template engine // // Results of one or more tasks' execution can be saved in this property. 
// // Example: // Below shows how specific properties of a list of items can be retrieved in // a go template. Below dot notation is for illustration purposes and only // reflects the way the specific property value was set. // // {{- .ListItems.volumes.default.mypv2.ip -}} // {{- .ListItems.volumes.default.mypv2.status -}} // {{- .ListItems.volumes.openebs.mypv.ip -}} // {{- .ListItems.volumes.openebs.mypv.status -}} ListItemsTLP TopLevelProperty = "ListItems" ) // StoragePoolTLPProperty is used to define properties that comes // after StoragePoolTLP type StoragePoolTLPProperty string const ( // OwnerCTP indicates the owner of this pool; the one who // is executing this policy // // NOTE: // The corresponding value will be accessed as // {{ .Storagepool.owner }} OwnerCTP StoragePoolTLPProperty = "owner" // DiskListCTP indicates the list of disks DiskListCTP StoragePoolTLPProperty = "diskList" ) // VolumeTLPProperty is used to define properties that comes // after VolumeTLP type VolumeTLPProperty string const ( // OwnerVTP indicates the owner of this volume; the one who // is executing this policy // // NOTE: // The corresponding value will be accessed as // {{ .Volume.owner }} OwnerVTP VolumeTLPProperty = "owner" // RunNamespaceVTP is the namespace where this policy is // supposed to run // NOTE: // The corresponding value will be accessed as // {{ .Volume.runNamespace }} RunNamespaceVTP VolumeTLPProperty = "runNamespace" // CapacityVTP is the capacity of the volume // NOTE: // The corresponding value will be accessed as // {{ .Volume.capacity }} CapacityVTP VolumeTLPProperty = "capacity" // PersistentVolumeClaimVTP is the PVC of the volume // NOTE: // The corresponding value will be accessed as // {{ .Volume.pvc }} PersistentVolumeClaimVTP VolumeTLPProperty = "pvc" // StorageClassVTP is the StorageClass of the volume // // NOTE: // The corresponding value will be accessed as // {{ .Volume.storageclass }} StorageClassVTP VolumeTLPProperty = "storageclass" ) // CloneTLPProperty is used to define properties for clone operations type CloneTLPProperty string const ( // SnapshotNameVTP is the snapshot name SnapshotNameVTP CloneTLPProperty = "snapshotName" // SourceVolumeTargetIPVTP is source volume target IP SourceVolumeTargetIPVTP CloneTLPProperty = "sourceVolumeTargetIP" // IsCloneEnableVTP is a bool value for clone operations // for a volume IsCloneEnableVTP CloneTLPProperty = "isCloneEnable" // SourceVolumeVTP is the name of the source volume SourceVolumeVTP CloneTLPProperty = "sourceVolume" ) // SnapshotTLPProperty is used to define properties for clone operations type SnapshotTLPProperty string const ( // VolumeNameSTP is the snapshot name VolumeSTP SnapshotTLPProperty = "volumeName" ) // PolicyTLPProperty is the name of the property that is found // under PolicyTLP type PolicyTLPProperty string const ( // EnabledPTP is the enabled property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.enabled }} EnabledPTP PolicyTLPProperty = "enabled" // ValuePTP is the value property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.value }} ValuePTP PolicyTLPProperty = "value" // DataPTP is the data property of the policy // NOTE: // The corresponding value will be accessed as // {{ .Policy.<PolicyName>.data }} DataPTP PolicyTLPProperty = "data" ) const ( // TaskIdentityPrefix is the prefix used for all TaskIdentity TaskIdentityPrefix string = "key" ) // TaskTLPProperty is the name of the property that 
is found // under TaskTLP type TaskTLPProperty string const ( // APIVersionTTP is the apiVersion property of the task // NOTE: // The corresponding value will be accessed as // {{ .Task.<TaskIdentity>.apiVersion }} APIVersionTTP TaskTLPProperty = "apiVersion" // KindTTP is the kind property of the task // NOTE: // The corresponding value will be accessed as // {{ .Task.<TaskIdentity>.kind }} KindTTP TaskTLPProperty = "kind" ) // TaskResultTLPProperty is the name of the property that is found // under TaskResultTLP type TaskResultTLPProperty string const ( // ObjectNameTRTP is the objectName property of the // TaskResultTLP // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.objectName }} ObjectNameTRTP TaskResultTLPProperty = "objectName" // AnnotationsTRTP is the annotations property of the // TaskResultTLP // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.annotations }} AnnotationsTRTP TaskResultTLPProperty = "annotations" // TaskResultVerifyErrTRTP is a property of TaskResultTLP // // First error found after **verification** checks done against the result of // the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.verifyErr }} TaskResultVerifyErrTRTP TaskResultTLPProperty = "verifyErr" // TaskResultNotFoundErrTRTP is a property of TaskResultTLP // // First error found after **not found** checks done against the result of // the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.notFoundErr }} TaskResultNotFoundErrTRTP TaskResultTLPProperty = "notFoundErr" // TaskResultVersionMismatchErrTRTP is a property of TaskResultTLP // // First error found after **version mismatch** checks done against the // result of the task's execution is stored in this property. // // NOTE: // The corresponding value will be accessed as // {{ .TaskResult.<TaskIdentity>.versionMismatchErr }} TaskResultVersionMismatchErrTRTP TaskResultTLPProperty = "versionMismatchErr" ) // ListItemsTLPProperty is the name of the property that is found // under ListItemsTLP type ListItemsTLPProperty string const ( // CurrentRepeatResourceLITP is a property of ListItemsTLP // // It is the current repeat resource due to which a task is getting // executed is set here // // Example: // {{- .ListItems.currentRepeatResource -}} // // Above templating will give the current repeat resource name CurrentRepeatResourceLITP ListItemsTLPProperty = "currentRepeatResource" )
1
10,198
This seems meaningless. If we cannot add any info, let us remove this.
openebs-maya
go
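For context on that review: Go's convention is that a package comment starts with "Package <name>" and then actually describes the package, so a bare "// Package v1alpha1" adds no information. A sketch of the shape such a comment could take; the description text is illustrative, not taken from the repository.

// Package v1alpha1 defines CAS template annotation keys and the top-level
// properties understood by the CAS template engine.
package v1alpha1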
@@ -493,6 +493,14 @@ func (c *Container) GetLabels() map[string]string { return c.labels } +// GetPorts gets the ports for a container +func (c *Container) GetPorts() []PortBinding { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.Ports +} + // HealthStatusShouldBeReported returns true if the health check is defined in // the task definition func (c *Container) HealthStatusShouldBeReported() bool {
1
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package api import ( "fmt" "strconv" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/aws-sdk-go/aws" ) const ( // defaultContainerSteadyStateStatus defines the container status at // which the container is assumed to be in steady state. It is set // to 'ContainerRunning' unless overridden defaultContainerSteadyStateStatus = ContainerRunning // awslogsAuthExecutionRole is the string value passed in the task payload // that specifies that the log driver should be authenticated using the // execution role awslogsAuthExecutionRole = "ExecutionRole" // dockerHealthCheckType is the type of container health check provided by docker dockerHealthCheckType = "docker" ) // DockerConfig represents additional metadata about a container to run. It's // remodeled from the `ecsacs` api model file. Eventually it should not exist // once this remodeling is refactored out. type DockerConfig struct { // Config is the configuration used to create container Config *string `json:"config"` // HostConfig is the configuration of container related to host resource HostConfig *string `json:"hostConfig"` // Version specifies the docker client API version to use Version *string `json:"version"` } // HealthStatus contains the health check result returned by docker type HealthStatus struct { // Status is the container health status Status ContainerHealthStatus `json:"status,omitempty"` // Since is the timestamp when container health status changed Since *time.Time `json:"statusSince,omitempty"` // ExitCode is the exitcode of health check if failed ExitCode int `json:"exitCode,omitempty"` // Output is the output of health check Output string `json:"output,omitempty"` } // Container is the internal representation of a container in the ECS agent type Container struct { // Name is the name of the container specified in the task definition Name string // Image is the image name specified in the task definition Image string // ImageID is the local ID of the image used in the container ImageID string // Command is the command to run in the container which is specified in the task definition Command []string // CPU is the cpu limitation of the container which is specified in the task definition CPU uint `json:"Cpu"` // Memory is the memory limitation of the container which is specified in the task definition Memory uint // Links contains a list of containers to link, corresponding to docker option: --link Links []string // VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from VolumesFrom []VolumeFrom `json:"volumesFrom"` // MountPoints contains a list of volume mount paths MountPoints []MountPoint `json:"mountPoints"` // Ports contains a list of ports binding configuration Ports []PortBinding `json:"portMappings"` // Essential denotes whether the container is essential or not Essential bool // EntryPoint is entrypoint of the container, corresponding to docker option: 
--entrypoint EntryPoint *[]string // Environment is the environment variable set in the container Environment map[string]string `json:"environment"` // Overrides contains the configuration to override of a container Overrides ContainerOverrides `json:"overrides"` // DockerConfig is the configuration used to create the container DockerConfig DockerConfig `json:"dockerConfig"` // RegistryAuthentication is the auth data used to pull image RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"` // HealthCheckType is the mechnism to use for the container health check // currently it only supports 'DOCKER' HealthCheckType string `json:"healthCheckType,omitempty"` // Health contains the health check information of container health check Health HealthStatus `json:"-"` // LogsAuthStrategy specifies how the logs driver for the container will be // authenticated LogsAuthStrategy string // lock is used for fields that are accessed and updated concurrently lock sync.RWMutex // DesiredStatusUnsafe represents the state where the container should go. Generally, // the desired status is informed by the ECS backend as a result of either // API calls made to ECS or decisions made by the ECS service scheduler, // though the agent may also set the DesiredStatusUnsafe if a different "essential" // container in the task exits. The DesiredStatus is almost always either // ContainerRunning or ContainerStopped. // NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus` // and `SetDesiredStatus`. // TODO DesiredStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. DesiredStatusUnsafe ContainerStatus `json:"desiredStatus"` // KnownStatusUnsafe represents the state where the container is. // NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus` // and `SetKnownStatus`. // TODO KnownStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. KnownStatusUnsafe ContainerStatus `json:"KnownStatus"` // TransitionDependencySet is a set of dependencies that must be satisfied // in order for this container to transition. Each transition dependency // specifies a resource upon which the transition is dependent, a status // that depends on the resource, and the state of the dependency that // satisfies. TransitionDependencySet TransitionDependencySet `json:"TransitionDependencySet"` // SteadyStateDependencies is a list of containers that must be in "steady state" before // this one is created // Note: Current logic requires that the containers specified here are run // before this container can even be pulled. // // Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old // state files. SteadyStateDependencies []string `json:"RunDependencies"` // Type specifies the container type. Except the 'Normal' type, all other types // are not directly specified by task definitions, but created by the agent. The // JSON tag is retained as this field's previous name 'IsInternal' for maintaining // backwards compatibility. 
Please see JSON parsing hooks for this type for more // details Type ContainerType `json:"IsInternal"` // AppliedStatus is the status that has been "applied" (e.g., we've called Pull, // Create, Start, or Stop) but we don't yet know that the application was successful. AppliedStatus ContainerStatus // ApplyingError is an error that occurred trying to transition the container // to its desired state. It is propagated to the backend in the form // 'Name: ErrorString' as the 'reason' field. ApplyingError *DefaultNamedError // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS // SubmitContainerStateChange API. // TODO SentStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON is // handled properly so that the state storage continues to work. SentStatusUnsafe ContainerStatus `json:"SentStatus"` // MetadataFileUpdated is set to true when we have completed updating the // metadata file MetadataFileUpdated bool `json:"metadataFileUpdated"` // KnownExitCodeUnsafe specifies the exit code for the container. // It is exposed outside of the package so that it's marshalled/unmarshalled in // the JSON body while saving the state. // NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode` // and `SetKnownExitCode`. KnownExitCodeUnsafe *int `json:"KnownExitCode"` // KnownPortBindings is an array of port bindings for the container. KnownPortBindings []PortBinding // SteadyStateStatusUnsafe specifies the steady state status for the container // If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though // it's not only supposed to be set when the container is being created, it's // exposed outside of the package so that it's marshalled/unmarshalled in the // the JSON body while saving the state SteadyStateStatusUnsafe *ContainerStatus `json:"SteadyStateStatus,omitempty"` createdAt time.Time startedAt time.Time finishedAt time.Time labels map[string]string } // DockerContainer is a mapping between containers-as-docker-knows-them and // containers-as-we-know-them. // This is primarily used in DockerState, but lives here such that tasks and // containers know how to convert themselves into Docker's desired config format type DockerContainer struct { DockerID string `json:"DockerId"` DockerName string // needed for linking Container *Container } // String returns a human readable string representation of DockerContainer func (dc *DockerContainer) String() string { if dc == nil { return "nil" } return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String()) } // NewContainerWithSteadyState creates a new Container object with the specified // steady state. 
Containers that need the non default steady state set will // use this method instead of setting it directly func NewContainerWithSteadyState(steadyState ContainerStatus) *Container { steadyStateStatus := steadyState return &Container{ SteadyStateStatusUnsafe: &steadyStateStatus, } } // KnownTerminal returns true if the container's known status is STOPPED func (c *Container) KnownTerminal() bool { return c.GetKnownStatus().Terminal() } // DesiredTerminal returns true if the container's desired status is STOPPED func (c *Container) DesiredTerminal() bool { return c.GetDesiredStatus().Terminal() } // GetKnownStatus returns the known status of the container func (c *Container) GetKnownStatus() ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.KnownStatusUnsafe } // SetKnownStatus sets the known status of the container func (c *Container) SetKnownStatus(status ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.KnownStatusUnsafe = status } // GetDesiredStatus gets the desired status of the container func (c *Container) GetDesiredStatus() ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.DesiredStatusUnsafe } // SetDesiredStatus sets the desired status of the container func (c *Container) SetDesiredStatus(status ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.DesiredStatusUnsafe = status } // GetSentStatus safely returns the SentStatusUnsafe of the container func (c *Container) GetSentStatus() ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.SentStatusUnsafe } // SetSentStatus safely sets the SentStatusUnsafe of the container func (c *Container) SetSentStatus(status ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.SentStatusUnsafe = status } // SetKnownExitCode sets exit code field in container struct func (c *Container) SetKnownExitCode(i *int) { c.lock.Lock() defer c.lock.Unlock() c.KnownExitCodeUnsafe = i } // GetKnownExitCode returns the container exit code func (c *Container) GetKnownExitCode() *int { c.lock.RLock() defer c.lock.RUnlock() return c.KnownExitCodeUnsafe } // SetRegistryAuthCredentials sets the credentials for pulling image from ECR func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) { c.lock.Lock() defer c.lock.Unlock() c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential) } // ShouldPullWithExecutionRole returns whether this container has its own ECR credentials func (c *Container) ShouldPullWithExecutionRole() bool { c.lock.RLock() defer c.lock.RUnlock() return c.RegistryAuthentication != nil && c.RegistryAuthentication.Type == "ecr" && c.RegistryAuthentication.ECRAuthData != nil && c.RegistryAuthentication.ECRAuthData.UseExecutionRole } // String returns a human readable string representation of this object func (c *Container) String() string { ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image, c.GetKnownStatus().String(), c.GetDesiredStatus().String()) if c.GetKnownExitCode() != nil { ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode()) } return ret } // GetSteadyStateStatus returns the steady state status for the container. If // Container.steadyState is not initialized, the default steady state status // defined by `defaultContainerSteadyStateStatus` is returned. 
The 'pause' // container's steady state differs from that of other containers, as the // 'pause' container can reach its teady state once networking resources // have been provisioned for it, which is done in the `ContainerResourcesProvisioned` // state func (c *Container) GetSteadyStateStatus() ContainerStatus { if c.SteadyStateStatusUnsafe == nil { return defaultContainerSteadyStateStatus } return *c.SteadyStateStatusUnsafe } // IsKnownSteadyState returns true if the `KnownState` of the container equals // the `steadyState` defined for the container func (c *Container) IsKnownSteadyState() bool { knownStatus := c.GetKnownStatus() return knownStatus == c.GetSteadyStateStatus() } // GetNextKnownStateProgression returns the state that the container should // progress to based on its `KnownState`. The progression is // incremental until the container reaches its steady state. From then on, // it transitions to `ContainerStopped`. // // For example: // a. if the steady state of the container is defined as `ContainerRunning`, // the progression is: // Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie // // b. if the steady state of the container is defined as `ContainerResourcesProvisioned`, // the progression is: // Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie // // c. if the steady state of the container is defined as `ContainerCreated`, // the progression is: // Container: None -> Pulled -> Created* -> Stopped -> Zombie func (c *Container) GetNextKnownStateProgression() ContainerStatus { if c.IsKnownSteadyState() { return ContainerStopped } return c.GetKnownStatus() + 1 } // IsInternal returns true if the container type is either `ContainerEmptyHostVolume` // or `ContainerCNIPause`. It returns false otherwise func (c *Container) IsInternal() bool { if c.Type == ContainerNormal { return false } return true } // IsRunning returns true if the container's known status is either RUNNING // or RESOURCES_PROVISIONED. 
It returns false otherwise func (c *Container) IsRunning() bool { return c.GetKnownStatus().IsRunning() } // IsMetadataFileUpdated returns true if the metadata file has been once the // metadata file is ready and will no longer change func (c *Container) IsMetadataFileUpdated() bool { c.lock.RLock() defer c.lock.RUnlock() return c.MetadataFileUpdated } // SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true func (c *Container) SetMetadataFileUpdated() { c.lock.Lock() defer c.lock.Unlock() c.MetadataFileUpdated = true } // IsEssential returns whether the container is an essential container or not func (c *Container) IsEssential() bool { c.lock.RLock() defer c.lock.RUnlock() return c.Essential } // AWSLogAuthExecutionRole returns true if the auth is by execution role func (c *Container) AWSLogAuthExecutionRole() bool { return c.LogsAuthStrategy == awslogsAuthExecutionRole } // SetCreatedAt sets the timestamp for container's creation time func (c *Container) SetCreatedAt(createdAt time.Time) { if createdAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.createdAt = createdAt } // SetStartedAt sets the timestamp for container's start time func (c *Container) SetStartedAt(startedAt time.Time) { if startedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.startedAt = startedAt } // SetFinishedAt sets the timestamp for container's stopped time func (c *Container) SetFinishedAt(finishedAt time.Time) { if finishedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.finishedAt = finishedAt } // GetCreatedAt sets the timestamp for container's creation time func (c *Container) GetCreatedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.createdAt } // GetStartedAt sets the timestamp for container's start time func (c *Container) GetStartedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.startedAt } // GetFinishedAt sets the timestamp for container's stopped time func (c *Container) GetFinishedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.finishedAt } // SetLabels sets the labels for a container func (c *Container) SetLabels(labels map[string]string) { c.lock.Lock() defer c.lock.Unlock() c.labels = labels } // GetLabels gets the labels for a container func (c *Container) GetLabels() map[string]string { c.lock.RLock() defer c.lock.RUnlock() return c.labels } // HealthStatusShouldBeReported returns true if the health check is defined in // the task definition func (c *Container) HealthStatusShouldBeReported() bool { return c.HealthCheckType == dockerHealthCheckType } // SetHealthStatus sets the container health status func (c *Container) SetHealthStatus(health HealthStatus) { c.lock.Lock() defer c.lock.Unlock() if c.Health.Status == health.Status { return } c.Health.Status = health.Status c.Health.Since = aws.Time(time.Now()) c.Health.Output = health.Output // Set the health exit code if the health check failed if c.Health.Status == ContainerUnhealthy { c.Health.ExitCode = health.ExitCode } } // GetHealthStatus returns the container health information func (c *Container) GetHealthStatus() HealthStatus { c.lock.RLock() defer c.lock.RUnlock() // Copy the pointer to avoid race condition copyHealth := c.Health if c.Health.Since != nil { copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since)) } return copyHealth }
1
19,118
Can you rename `Ports` to `PortsUnsafe`?
aws-amazon-ecs-agent
go
@@ -0,0 +1,10 @@ +module ReturnToHelper + def make_return_to(name, path) + sig = OpenSSL::HMAC.digest( + OpenSSL::Digest::SHA256.new, + Rails.application.secrets.secret_key_base, + name + "$" + path + ) + {name: name, path: path, sig: Base64.urlsafe_encode64(sig)} + end +end
1
1
13,014
This feels like it could be moved out to your first gem!
18F-C2
rb
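The review above suggests extracting this helper into a gem. A hypothetical sketch of what the extracted API might look like (the module name, `sign`/`valid?` methods, and the explicit secret parameter are illustrative, not from the project):

```ruby
require 'openssl'
require 'base64'

module ReturnTo
  module_function

  # Sign a (name, path) pair with the given secret.
  def sign(name, path, secret)
    sig = OpenSSL::HMAC.digest(OpenSSL::Digest::SHA256.new, secret, "#{name}$#{path}")
    { name: name, path: path, sig: Base64.urlsafe_encode64(sig) }
  end

  # Recompute the signature and compare.
  # A real gem should use a constant-time comparison here.
  def valid?(token, secret)
    sign(token[:name], token[:path], secret)[:sig] == token[:sig]
  end
end

token = ReturnTo.sign('dashboard', '/dashboard', 'test-secret')
puts ReturnTo.valid?(token, 'test-secret') # => true
```

Taking the secret as an argument removes the dependency on `Rails.application.secrets`, which is what would make the module easy to package on its own.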
@@ -44,7 +44,7 @@ public abstract class NewSessionQueue implements HasReadyState {

   public abstract boolean offerFirst(HttpRequest request, RequestId requestId);

-  public abstract Optional<HttpRequest> poll();
+  public abstract Optional<HttpRequest> poll(RequestId requestId);

   public abstract int clear();
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.sessionqueue; import org.openqa.selenium.grid.data.RequestId; import org.openqa.selenium.internal.Require; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.tracing.Tracer; import org.openqa.selenium.status.HasReadyState; import java.time.Duration; import java.time.Instant; import java.util.Optional; public abstract class NewSessionQueue implements HasReadyState { protected final Tracer tracer; protected final Duration retryInterval; protected final Duration requestTimeout; public static final String SESSIONREQUEST_TIMESTAMP_HEADER = "new-session-request-timestamp"; public static final String SESSIONREQUEST_ID_HEADER = "request-id"; public abstract boolean offerLast(HttpRequest request, RequestId requestId); public abstract boolean offerFirst(HttpRequest request, RequestId requestId); public abstract Optional<HttpRequest> poll(); public abstract int clear(); public void addRequestHeaders(HttpRequest request, RequestId reqId) { long timestamp = Instant.now().getEpochSecond(); request.addHeader(SESSIONREQUEST_TIMESTAMP_HEADER, Long.toString(timestamp)); request.addHeader(SESSIONREQUEST_ID_HEADER, reqId.toString()); } public boolean hasRequestTimedOut(HttpRequest request) { String enqueTimestampStr = request.getHeader(SESSIONREQUEST_TIMESTAMP_HEADER); Instant enque = Instant.ofEpochSecond(Long.parseLong(enqueTimestampStr)); Instant deque = Instant.now(); Duration duration = Duration.between(enque, deque); return duration.compareTo(requestTimeout) > 0; } public NewSessionQueue(Tracer tracer, Duration retryInterval, Duration requestTimeout) { this.tracer = Require.nonNull("Tracer", tracer); this.retryInterval = Require.nonNull("Session request retry interval", retryInterval); this.requestTimeout = Require.nonNull("Session request timeout", requestTimeout); } }
1
18,213
The expected behaviour for `poll` would be to match what `Deque` does, which is: "Retrieves and removes the head of the queue represented by this deque". As such, you shouldn't need the `RequestId` here.
SeleniumHQ-selenium
java
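The review above says `poll` should mirror `java.util.Deque#poll`: remove and return the head of the queue, with no `RequestId` needed to identify which element comes out. A minimal sketch of those semantics (a simplified local queue, not Selenium's implementation):

```java
// Sketch only: String stands in for HttpRequest to keep the example self-contained.
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Optional;

class LocalSessionQueue {
  private final Deque<String> requests = new ArrayDeque<>();

  public synchronized boolean offerLast(String request) {
    return requests.offerLast(request);
  }

  public synchronized boolean offerFirst(String request) {
    return requests.offerFirst(request);
  }

  // Retrieves and removes the head of the queue, or empty if the queue is empty.
  public synchronized Optional<String> poll() {
    return Optional.ofNullable(requests.poll());
  }

  public static void main(String[] args) {
    LocalSessionQueue queue = new LocalSessionQueue();
    queue.offerLast("request-1");
    queue.offerLast("request-2");
    System.out.println(queue.poll()); // Optional[request-1]
  }
}
```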
@@ -7,4 +7,17 @@ axe.utils.toArray = function (thing) {
 	'use strict';
 	return Array.prototype.slice.call(thing);
-};
+};
+
+
+/**
+ * Creates an array without duplicate values from 2 array inputs
+ * @param {Array} arr1 First array
+ * @param {Array} arr2 Second array
+ * @return {Array}
+ */
+axe.utils.uniqueArray = (arr1, arr2) => {
+	return arr1.concat(arr2).filter((elem, pos, arr) => {
+		return arr.indexOf(elem) === pos;
+	});
+};
1
/** * Converts array-like (numerical indicies and `length` property) structures to actual, real arrays * @param {Mixed} thing Array-like thing to convert * @return {Array} */ axe.utils.toArray = function (thing) { 'use strict'; return Array.prototype.slice.call(thing); };
1
11,993
This is super minor, but it'd be nice if this iterated over an arbitrary number of array inputs.
dequelabs-axe-core
js
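The review above asks for the helper to accept an arbitrary number of arrays rather than exactly two. A sketch of a variadic variant (not the code that was merged):

```js
// Accept any number of arrays via rest parameters, concatenate, and deduplicate.
function uniqueArray(...arrays) {
  return arrays
    .reduce((acc, arr) => acc.concat(arr), [])
    .filter((elem, pos, all) => all.indexOf(elem) === pos);
}

// Example usage:
console.log(uniqueArray([1, 2], [2, 3], [3, 4])); // [1, 2, 3, 4]
```

Rest parameters keep the existing two-array call sites working unchanged while also allowing `uniqueArray(a, b, c)`.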
@@ -1676,9 +1676,10 @@ _tmain(int argc, TCHAR *targv[])
     }
 # ifdef UNIX
     /* i#1676: detect whether under gdb */
-    _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "/proc/%d/exe", getppid());
-    NULL_TERMINATE_BUFFER(buf);
-    i = readlink(buf, buf, BUFFER_SIZE_ELEMENTS(buf));
+    char path_buf[MAXIMUM_PATH];
+    _snprintf(path_buf, BUFFER_SIZE_ELEMENTS(path_buf), "/proc/%d/exe", getppid());
+    NULL_TERMINATE_BUFFER(path_buf);
+    i = readlink(path_buf, buf, BUFFER_SIZE_ELEMENTS(buf));
     if (i > 0) {
         if (i < BUFFER_SIZE_ELEMENTS(buf))
             buf[i] = '\0';
1
/* ********************************************************** * Copyright (c) 2011-2018 Google, Inc. All rights reserved. * Copyright (c) 2008-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* compile with make VMAP=1 for a vmap version (makefile defaults to VMSAFE version) */ #include "configure.h" #ifdef WINDOWS # define WIN32_LEAN_AND_MEAN # define UNICODE # define _UNICODE # include <windows.h> # include <io.h> # include "config.h" # include "share.h" #endif #ifdef UNIX # include <errno.h> # include <fcntl.h> # include <unistd.h> # include <sys/stat.h> # include <sys/mman.h> # include <sys/wait.h> #endif #include <string.h> #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <assert.h> #include <ctype.h> #include "globals_shared.h" #include "dr_config.h" /* MUST be before share.h (it sets HOT_PATCHING_INTERFACE) */ #include "dr_inject.h" #include "dr_frontend.h" typedef enum _action_t { action_none, action_nudge, action_register, action_unregister, action_list, } action_t; static bool verbose; static bool quiet; static bool DR_dll_not_needed = false; static bool nocheck; #define die() exit(1) #define fatal(msg, ...) do { \ fprintf(stderr, "ERROR: " msg "\n", ##__VA_ARGS__); \ fflush(stderr); \ exit(1); \ } while (0) /* up to caller to call die() if necessary */ #define error(msg, ...) do { \ fprintf(stderr, "ERROR: " msg "\n", ##__VA_ARGS__); \ fflush(stderr); \ } while (0) #define warn(msg, ...) do { \ if (!quiet) { \ fprintf(stderr, "WARNING: " msg "\n", ##__VA_ARGS__); \ fflush(stderr); \ } \ } while (0) #define info(msg, ...) 
do { \ if (verbose) { \ fprintf(stderr, "INFO: " msg "\n", ##__VA_ARGS__); \ fflush(stderr); \ } \ } while (0) #ifdef DRCONFIG # define TOOLNAME "drconfig" #elif defined(DRRUN) # define TOOLNAME "drrun" #elif defined(DRINJECT) # define TOOLNAME "drinject" #endif const char *usage_str = #ifdef DRCONFIG "USAGE: "TOOLNAME" [options]\n" " or: "TOOLNAME" [options] [-ops \"<DR options>\"] -c <client> [client options]\n" " or: "TOOLNAME" [options] [-ops \"<DR options>\"] -t <tool> [tool options]\n"; #elif defined(DRRUN) || defined (DRINJECT) "USAGE: "TOOLNAME" [options] <app and args to run>\n" " or: "TOOLNAME" [options] -- <app and args to run>\n" # if defined(DRRUN) " or: "TOOLNAME" [options] [DR options] -- <app and args to run>\n" " or: "TOOLNAME" [options] [DR options] -c <client> [client options]" " -- <app and args to run>\n" " or: "TOOLNAME" [options] [DR options] -t <tool> [tool options]" " -- <app and args to run>\n" # endif ; #endif const char *options_list_str = "\n"TOOLNAME" options (these are distinct from DR runtime options):\n" " -version Display version information\n" " -verbose Display additional information\n" " -quiet Do not display warnings\n" " -nocheck Do not fail due to invalid DynamoRIO installation or app\n" #ifdef DRCONFIG " -reg <process> Register <process> to run under DR\n" " -unreg <process> Unregister <process> from running under DR\n" " -isreg <process> Display whether <process> is registered and if so its\n" " configuration\n" # ifdef WINDOWS " -list_registered Display all registered processes and their configuration\n" # endif /* WINDOWS */ #endif /* DRCONFIG */ " -root <root> DR root directory\n" #if defined(DRCONFIG) || defined(DRRUN) # if defined(MF_API) && defined(PROBE_API) " -mode <mode> DR mode (code, probe, or security)\n" # elif defined(PROBE_API) " -mode <mode> DR mode (code or probe)\n" # elif defined(MF_API) " -mode <mode> DR mode (code or security)\n" # else /* No mode argument, is always code. */ # endif #endif #ifdef DRCONFIG /* FIXME i#840: Syswide NYI on Linux. */ # ifdef WINDOWS " -syswide_on Set up systemwide injection so that registered\n" " applications will run under DR however they are\n" " launched. Otherwise, drinject must be used to\n" " launch a target configured application under DR.\n" " This option requires administrative privileges.\n" " -syswide_off Disable systemwide injection.\n" " This option requires administrative privileges.\n" # endif " -global Use global configuration files instead of local\n" " user-private configuration files. The global\n" " config dir must be set up ahead of time.\n" " This option may require administrative privileges.\n" " If a local file already exists it will take precedence.\n" " -norun Create a configuration that excludes the application\n" " from running under DR control. Useful for following\n" " all child processes except a handful (blacklist).\n" #endif " -debug Use the DR debug library\n" " -32 Target 32-bit or WOW64 applications\n" " -64 Target 64-bit (non-WOW64) applications\n" #if defined(DRCONFIG) || defined(DRRUN) "\n" " -ops \"<options>\" Specify DR runtime options. 
When specifying\n" " multiple options, enclose the entire list of\n" " options in quotes, or repeat the -ops.\n" " Alternatively, if the application is separated\n" " by \"--\" or if -c or -t is specified, the -ops may be\n" " omitted and DR options listed prior to \"--\", -c,\n" " and -t, without quotes.\n" "\n" " -t <toolname> Registers a pre-configured tool to run alongside DR.\n" " A tool is a client with a configuration file\n" " that sets the client options and path, providing a\n" " convenient launching command via this -t parameter.\n" # ifdef DRRUN " Available tools include: %s.\n" # endif "\n" " -c <path> <options>*\n" " Registers one client to run alongside DR. Assigns\n" " the client an id of 0. All remaining arguments\n" " until the -- arg before the app are interpreted as\n" " client options. Must come after all drrun and DR\n" " ops. Incompatible with -client. Requires using --\n" " to separate the app executable. Neither the path nor\n" " the options may contain semicolon characters or\n" " all 3 quote characters (\", \', `).\n" "\n" " -client <path> <ID> \"<options>\"\n" " Use -c instead, unless you need to set the client ID.\n" " Registers one or more clients to run alongside DR.\n" " This option is only valid when registering a\n" " process. The -client option takes three arguments:\n" " the full path to a client library, a unique 8-digit\n" " hex ID, and an optional list of client options\n" " (use \"\" to specify no options). Multiple clients\n" " can be installed via multiple -client options. In\n" " this case, clients specified first on the command\n" " line have higher priority. Neither the path nor\n" " the options may contain semicolon characters or\n" " all 3 quote characters (\", \', `).\n" " This option must precede any options to DynamoRIO.\n" #endif #ifdef DRCONFIG "\n" # ifdef WINDOWS " Note that nudging 64-bit processes is not yet supported.\n" " -nudge <process> <client ID> <argument>\n" " Nudge the client with ID <client ID> in all running\n" " processes with name <process>, and pass <argument>\n" " to the nudge callback. <client ID> must be the\n" " 8-digit hex ID of the target client. <argument>\n" " should be a hex literal (0, 1, 3f etc.).\n" " -nudge_pid <process_id> <client ID> <argument>\n" " Nudge the client with ID <client ID> in the process with\n" " id <process_id>, and pass <argument> to the nudge\n" " callback. <client ID> must be the 8-digit hex ID\n" " of the target client. <argument> should be a hex\n" " literal (0, 1, 3f etc.).\n" " -nudge_all <client ID> <argument>\n" " Nudge the client with ID <client ID> in all running\n" " processes and pass <argument> to the nudge callback.\n" " <client ID> must be the 8-digit hex ID of the target\n" " client. <argument> should be a hex literal\n" " (0, 1, 3f etc.)\n" " -nudge_timeout <ms> Max time (in milliseconds) to wait for a nudge to\n" " finish before continuing. The default is an infinite\n" " wait. A value of 0 means don't wait for nudges to\n" " complete." 
# else /* WINDOWS */ /* FIXME i#840: integrate nudgeunix into drconfig on Unix */ "Note: please use the nudgeunix tool to nudge processes on Unix.\n"; # endif /* !WINDOWS */ #else /* DRCONFIG */ " -no_wait Return immediately: do not wait for application exit.\n" " -s <seconds> Kill the application if it runs longer than the\n" " specified number of seconds.\n" " -m <minutes> Kill the application if it runs longer than the\n" " specified number of minutes.\n" " -h <hours> Kill the application if it runs longer than the\n" " specified number of hours.\n" # ifdef UNIX " -killpg Create a new process group for the app. If the app\n" " times out, kill the entire process group. This forces\n" " the child to be a new process with a new pid, rather\n" " than reusing the parent's pid.\n" # endif " -stats Print /usr/bin/time-style elapsed time and memory used.\n" " -mem Print memory usage statistics.\n" " -pidfile <file> Print the pid of the child process to the given file.\n" " -no_inject Run the application natively.\n" " -static Do not inject under the assumption that the application\n" " is statically linked with DynamoRIO. Instead, trigger\n" " automated takeover.\n" # ifdef UNIX /* FIXME i#725: Windows attach NYI */ # ifndef MACOS /* XXX i#1285: private loader NYI on MacOS */ " -early Requests early injection (the default).\n" " -late Requests late injection.\n" # endif " -attach <pid> Attach to the process with the given pid. Pass 0\n" " for pid to launch and inject into a new process.\n" " -logdir <dir> Logfiles will be stored in this directory.\n" # endif " -use_dll <dll> Inject given dll instead of configured DR dll.\n" " -force Inject regardless of configuration.\n" " -exit0 Return a 0 exit code instead of the app's exit code.\n" "\n" " <app and args> Application command line to execute under DR.\n" #endif /* !DRCONFIG */ ; static bool does_file_exist(const char *path) { bool ret = false; return (drfront_access(path, DRFRONT_EXIST, &ret) == DRFRONT_SUCCESS && ret); } #if defined(DRRUN) || defined(DRINJECT) static bool search_env(const char *fname, const char *env_var, char *full_path, const size_t full_path_size) { bool ret = false; if (drfront_searchenv(fname, env_var, full_path, full_path_size, &ret) != DRFRONT_SUCCESS || !ret) { full_path[0] = '\0'; return false; } return true; } #endif #ifdef UNIX # ifndef DRCONFIG static int GetLastError(void) { return errno; } # endif /* DRCONFIG */ #endif /* UNIX */ static void get_absolute_path(const char *src, char *buf, size_t buflen/*# elements*/) { drfront_status_t sc = drfront_get_absolute_path(src, buf, buflen); if (sc != DRFRONT_SUCCESS) fatal("failed (status=%d) to convert %s to an absolute path", sc, src); } /* Opens a filename and mode that are in utf8 */ static FILE * fopen_utf8(const char *path, const char *mode) { #ifdef WINDOWS TCHAR wpath[MAXIMUM_PATH]; TCHAR wmode[MAXIMUM_PATH]; if (drfront_char_to_tchar(path, wpath, BUFFER_SIZE_ELEMENTS(wpath)) != DRFRONT_SUCCESS || drfront_char_to_tchar(mode, wmode, BUFFER_SIZE_ELEMENTS(wmode)) != DRFRONT_SUCCESS) return NULL; return _tfopen(wpath, wmode); #else return fopen(path, mode); #endif } static char tool_list[MAXIMUM_PATH]; static void print_tool_list(FILE *stream) { #ifdef DRRUN if (tool_list[0] != '\0') fprintf(stream, " available tools include: %s\n", tool_list); #endif } /* i#1509: we want to list the available tools for the -t option. * Since we don't have a dir iterator we use a list of tools * in a text file tools/list{32,64} which we create at * install time. 
Thus we only expect to have it for a package build. */ static void read_tool_list(const char *dr_root, dr_platform_t dr_platform) { FILE *f; char list_file[MAXIMUM_PATH]; size_t sofar = 0; const char *arch = IF_X64_ELSE("64", "32"); if (dr_platform == DR_PLATFORM_32BIT) arch = "32"; else if (dr_platform == DR_PLATFORM_64BIT) arch = "64"; _snprintf(list_file, BUFFER_SIZE_ELEMENTS(list_file), "%s/tools/list%s", dr_root, arch); NULL_TERMINATE_BUFFER(list_file); f = fopen_utf8(list_file, "r"); if (f == NULL) { /* no visible error: we only expect to have a list for a package build */ return; } while (fgets(tool_list + sofar, (int)(BUFFER_SIZE_ELEMENTS(tool_list) - sofar - 1/*space*/), f) != NULL) { NULL_TERMINATE_BUFFER(tool_list); sofar += strlen(tool_list + sofar); tool_list[sofar - 1] = ','; /* replace newline with comma */ /* add space */ if (sofar < BUFFER_SIZE_ELEMENTS(tool_list)) tool_list[sofar++] = ' '; } fclose(f); tool_list[sofar-2] = '\0'; NULL_TERMINATE_BUFFER(tool_list); } #define usage(list_ops, msg, ...) do { \ FILE *stream = (list_ops == true) ? stdout : stderr; \ if ((msg)[0] != '\0') \ fprintf(stderr, "ERROR: " msg "\n\n", ##__VA_ARGS__); \ fprintf(stream, "%s", usage_str); \ print_tool_list(stream); \ if (list_ops) { \ fprintf(stream, options_list_str, tool_list); \ exit(0); \ } else { \ fprintf(stream, "Run with -help to see "TOOLNAME" option list\n"); \ } \ die(); \ } while (0) /* Unregister a process */ bool unregister_proc(const char *process, process_id_t pid, bool global, dr_platform_t dr_platform) { dr_config_status_t status = dr_unregister_process(process, pid, global, dr_platform); if (status == DR_PROC_REG_INVALID) { error("no existing registration for %s", process == NULL ? "<null>" : process); return false; } else if (status == DR_FAILURE) { error("unregistration failed for %s", process == NULL ? "<null>" : process); return false; } return true; } /* Check if the provided root directory actually has the files we * expect. Returns whether a fatal problem. */ static bool check_dr_root(const char *dr_root, bool debug, dr_platform_t dr_platform, bool preinject, bool report) { int i; char buf[MAXIMUM_PATH]; bool ok = true; /* FIXME i#1569: port DynamoRIO to AArch64 so we can enable the check warning */ bool nowarn = IF_X86_ELSE(false, true); const char *checked_files[] = { #ifdef WINDOWS "lib32\\drpreinject.dll", "lib32\\release\\dynamorio.dll", "lib32\\debug\\dynamorio.dll", "lib64\\drpreinject.dll", "lib64\\release\\dynamorio.dll", "lib64\\debug\\dynamorio.dll" #elif defined(MACOS) "lib32/debug/libdrpreload.dylib", "lib32/debug/libdynamorio.dylib", "lib32/release/libdrpreload.dylib", "lib32/release/libdynamorio.dylib", "lib64/debug/libdrpreload.dylib", "lib64/debug/libdynamorio.dylib", "lib64/release/libdrpreload.dylib", "lib64/release/libdynamorio.dylib" #else /* LINUX */ /* With early injection the default, we don't require preload to exist. 
*/ "lib32/debug/libdynamorio.so", "lib32/release/libdynamorio.so", "lib64/debug/libdynamorio.so", "lib64/release/libdynamorio.so" #endif }; const char *arch = IF_X64_ELSE("lib64", "lib32"); if (dr_platform == DR_PLATFORM_32BIT) arch = "lib32"; else if (dr_platform == DR_PLATFORM_64BIT) arch = "lib64"; if (DR_dll_not_needed) { /* assume user knows what he's doing */ return true; } /* don't warn if running from a build dir (i#458) which we attempt to detect * by looking for CMakeCache.txt in the root dir * (warnings can also be suppressed via -quiet) */ _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s/%s", dr_root, "CMakeCache.txt"); if (does_file_exist(buf)) nowarn = true; for (i=0; i<BUFFER_SIZE_ELEMENTS(checked_files); i++) { _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%s/%s", dr_root, checked_files[i]); if (!does_file_exist(buf)) { ok = false; if (!nocheck && ((preinject && strstr(checked_files[i], "drpreinject")) || (!preinject && debug && strstr(checked_files[i], "debug") != NULL) || (!preinject && !debug && strstr(checked_files[i], "release") != NULL)) && strstr(checked_files[i], arch) != NULL) { /* We don't want to create a .1config file that won't be freed * b/c the core is never injected */ if (report) { error("cannot find required file %s\n" "Use -root to specify a proper DynamoRIO root directory.", buf); } return false; } else { if (strstr(checked_files[i], arch) == NULL) { /* Support a single-bitwidth package. */ ok = true; } else if (!nowarn) warn("cannot find %s: is this an incomplete installation?", buf); } } } if (!ok && !nowarn) warn("%s does not appear to be a valid DynamoRIO root", dr_root); return true; } /* Register a process to run under DR */ bool register_proc(const char *process, process_id_t pid, bool global, const char *dr_root, const dr_operation_mode_t dr_mode, bool debug, dr_platform_t dr_platform, const char *extra_ops) { dr_config_status_t status; assert(dr_root != NULL); if (!does_file_exist(dr_root)) { error("cannot access DynamoRIO root directory %s", dr_root); return false; } #ifdef CLIENT_INTERFACE if (dr_mode == DR_MODE_NONE) { error("you must provide a DynamoRIO mode"); return false; } #endif /* warn if the DR root directory doesn't look right, unless -norun, * in which case don't bother */ if (dr_mode != DR_MODE_DO_NOT_RUN && !check_dr_root(dr_root, debug, dr_platform, false/*!pre*/, true/*report*/)) return false; if (dr_process_is_registered(process, pid, global, dr_platform, NULL, NULL, NULL, NULL)) { warn("overriding existing registration"); if (!unregister_proc(process, pid, global, dr_platform)) return false; } status = dr_register_process(process, pid, global, dr_root, dr_mode, debug, dr_platform, extra_ops); if (status != DR_SUCCESS) { /* USERPROFILE is not set by default over cygwin ssh */ char buf[MAXIMUM_PATH]; #ifdef WINDOWS if (drfront_get_env_var("USERPROFILE", buf, BUFFER_SIZE_ELEMENTS(buf)) == DRFRONT_ERROR && drfront_get_env_var("DYNAMORIO_CONFIGDIR", buf, BUFFER_SIZE_ELEMENTS(buf)) == DRFRONT_ERROR) { error("process %s registration failed: " "neither USERPROFILE nor DYNAMORIO_CONFIGDIR env var set!", process == NULL ? "<null>" : process); } else { #endif if (status == DR_CONFIG_DIR_NOT_FOUND) { dr_get_config_dir(global, true/*tmp*/, buf, BUFFER_SIZE_ELEMENTS(buf)); error("process %s registration failed: check config dir %s permissions", process == NULL ? "<null>" : process, buf); #ifdef ANDROID error("for Android apps, set TMPDIR to /data/data/com.your.app"); #endif } else { error("process %s registration failed", process == NULL ? 
"<null>" : process); } #ifdef WINDOWS } #endif return false; } return true; } /* Check if the specified client library actually exists. */ void check_client_lib(const char *client_lib) { if (!does_file_exist(client_lib)) { warn("%s does not exist", client_lib); } } bool register_client(const char *process_name, process_id_t pid, bool global, dr_platform_t dr_platform, client_id_t client_id, const char *path, const char *options) { size_t priority; dr_config_status_t status; if (!dr_process_is_registered(process_name, pid, global, dr_platform, NULL, NULL, NULL, NULL)) { error("can't register client: process %s is not registered", process_name == NULL ? "<null>" : process_name); return false; } check_client_lib(path); /* just append to the existing client list */ priority = dr_num_registered_clients(process_name, pid, global, dr_platform); info("registering client with id=%d path=|%s| ops=|%s|", client_id, path, options); status = dr_register_client(process_name, pid, global, dr_platform, client_id, priority, path, options); if (status != DR_SUCCESS) { if (status == DR_CONFIG_STRING_TOO_LONG) { error("client %s registration failed: option string too long: \"%s\"", path == NULL ? "<null>" : path, options); } else if (status == DR_CONFIG_OPTIONS_INVALID) { error("client %s registration failed: options cannot contain ';' or all " "3 quote types: %s", path == NULL ? "<null>" : path, options); } else { error("client %s registration failed with error code %d", path == NULL ? "<null>" : path, status); } return false; } return true; } #if defined(WINDOWS) || defined(DRRUN) || defined(DRCONFIG) static const char * platform_name(dr_platform_t platform) { return (platform == DR_PLATFORM_64BIT IF_X64(|| platform == DR_PLATFORM_DEFAULT)) ? "64-bit" : "32-bit/WOW64"; } #endif /* FIXME i#840: Port registered process iterator. */ #ifdef WINDOWS static void list_process(char *name, bool global, dr_platform_t platform, dr_registered_process_iterator_t *iter) { char name_buf[MAXIMUM_PATH] = {0}; char root_dir_buf[MAXIMUM_PATH] = {0}; dr_operation_mode_t dr_mode; bool debug; char dr_options[DR_MAX_OPTIONS_LENGTH] = {0}; dr_client_iterator_t *c_iter; if (name == NULL) { dr_registered_process_iterator_next(iter, name_buf, root_dir_buf, &dr_mode, &debug, dr_options); name = name_buf; } else if (!dr_process_is_registered(name, 0, global, platform, root_dir_buf, &dr_mode, &debug, dr_options)) { printf("Process %s not registered for %s\n", name, platform_name(platform)); return; } if (dr_mode == DR_MODE_DO_NOT_RUN) { printf("Process %s registered to NOT RUN on %s\n", name, platform_name(platform)); } else { printf("Process %s registered for %s\n", name, platform_name(platform)); } printf("\tRoot=\"%s\" Debug=%s\n\tOptions=\"%s\"\n", root_dir_buf, debug ? 
"yes" : "no", dr_options); c_iter = dr_client_iterator_start(name, 0, global, platform); while (dr_client_iterator_hasnext(c_iter)) { client_id_t id; size_t client_pri; char client_path[MAXIMUM_PATH] = {0}; char client_opts[DR_MAX_OPTIONS_LENGTH] = {0}; dr_client_iterator_next(c_iter, &id, &client_pri, client_path, client_opts); printf("\tClient=0x%08x Priority=%d\n\t\tPath=\"%s\"\n\t\tOptions=\"%s\"\n", id, (uint)client_pri, client_path, client_opts); } dr_client_iterator_stop(c_iter); } #endif /* WINDOWS */ #ifndef DRCONFIG /* i#200/PR 459481: communicate child pid via file */ static void write_pid_to_file(const char *pidfile, process_id_t pid) { FILE *f = fopen_utf8(pidfile, "w"); if (f == NULL) { warn("cannot open %s: %d\n", pidfile, GetLastError()); } else { char pidbuf[16]; ssize_t written; _snprintf(pidbuf, BUFFER_SIZE_ELEMENTS(pidbuf), "%d\n", pid); NULL_TERMINATE_BUFFER(pidbuf); written = fwrite(pidbuf, 1, strlen(pidbuf), f); assert(written == strlen(pidbuf)); fclose(f); } } #endif /* DRCONFIG */ #if defined(DRCONFIG) || defined(DRRUN) static void append_client(const char *client, int id, const char *client_ops, char client_paths[MAX_CLIENT_LIBS][MAXIMUM_PATH], client_id_t client_ids[MAX_CLIENT_LIBS], const char *client_options[MAX_CLIENT_LIBS], size_t *num_clients) { /* We support an empty client for native -t usage */ if (client[0] != '\0') { get_absolute_path(client, client_paths[*num_clients], BUFFER_SIZE_ELEMENTS(client_paths[*num_clients])); NULL_TERMINATE_BUFFER(client_paths[*num_clients]); info("client %d path: %s", (int)*num_clients, client_paths[*num_clients]); } client_ids[*num_clients] = id; client_options[*num_clients] = client_ops; (*num_clients)++; } #endif /* Appends a space-separated option string to buf. A space is appended only if * the buffer is non-empty. Aborts on buffer overflow. Always null terminates * the string. * XXX: Use print_to_buffer. */ static void add_extra_option(char *buf, size_t bufsz, size_t *sofar, const char *fmt, ...) { ssize_t len; va_list ap; if (*sofar > 0 && *sofar < bufsz) buf[(*sofar)++] = ' '; /* Add a space. */ va_start(ap, fmt); len = vsnprintf(buf + *sofar, bufsz - *sofar, fmt, ap); va_end(ap); if (len < 0 || (size_t)len >= bufsz) { error("option string too long, buffer overflow"); die(); } *sofar += len; /* be paranoid: though usually many calls in a row and could delay until end */ buf[bufsz-1] = '\0'; } #if defined(DRCONFIG) || defined(DRRUN) /* Returns the path to the client library. Appends to extra_ops. * A tool config file must contain one of these line types: * CLIENT_ABS=<absolute path to client> * CLIENT_REL=<path to client relative to DR root> * It can contain as many DR_OP= lines as desired. Each must contain * one DynamoRIO option token: * DR_OP=<DR option token> * It can also contain TOOL_OP= lines for tool options, though normally * tool default options should just be set in the tool: * TOOL_OP=<tool option token> * We take one token per line rather than a string of options to avoid * having to do any string parsing. * DR ops go last (thus, user can't override); tool ops go first. * * We also support tools with their own frontend launcher via the following * tool config file lines: * FRONTEND_ABS=<absolute path to frontend> * FRONTEND_REL=<path to frontend relative to DR root> * If either is present, drrun will launch the frontend and pass it the * tool options followed by the app and its options. 
* The path to DR can be included in the frontend options via this line: * TOOL_OP_DR_PATH * The options to DR can be included in a single token, preceded by a prefix, * via this line: * TOOL_OP_DR_BUNDLE=<prefix> * * A notification message can be presented to the user with: * USER_NOTICE=This tool is currently experimental. Please report issues to <url>. */ static bool read_tool_file(const char *toolname, const char *dr_root, dr_platform_t dr_platform, char *client, size_t client_size, char *ops, size_t ops_size, size_t *ops_sofar, char *tool_ops, size_t tool_ops_size, size_t *tool_ops_sofar, char *native_path OUT, size_t native_path_size) { FILE *f; char config_file[MAXIMUM_PATH]; char line[MAXIMUM_PATH]; bool found_client = false; const char *arch = IF_X64_ELSE("64", "32"); if (dr_platform == DR_PLATFORM_32BIT) arch = "32"; else if (dr_platform == DR_PLATFORM_64BIT) arch = "64"; _snprintf(config_file, BUFFER_SIZE_ELEMENTS(config_file), "%s/tools/%s.drrun%s", dr_root, toolname, arch); NULL_TERMINATE_BUFFER(config_file); info("reading tool config file %s", config_file); f = fopen_utf8(config_file, "r"); if (f == NULL) { error("cannot find tool config file %s", config_file); return false; } while (fgets(line, BUFFER_SIZE_ELEMENTS(line), f) != NULL) { ssize_t len; NULL_TERMINATE_BUFFER(line); len = strlen(line) - 1; while (len >= 0 && (line[len] == '\n' || line[len] == '\r')) { line[len] = '\0'; len--; } if (line[0] == '#') { continue; } else if (strstr(line, "CLIENT_REL=") == line) { _snprintf(client, client_size, "%s/%s", dr_root, line + strlen("CLIENT_REL=")); client[client_size-1] = '\0'; found_client = true; if (native_path[0] != '\0') { add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"", client); } } else if (strstr(line, "CLIENT_ABS=") == line) { strncpy(client, line + strlen("CLIENT_ABS="), client_size); found_client = true; if (native_path[0] != '\0') { add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"", client); } } else if (strstr(line, "DR_OP=") == line) { if (strcmp(line, "DR_OP=") != 0) { add_extra_option(ops, ops_size, ops_sofar, "\"%s\"", line + strlen("DR_OP=")); } } else if (strstr(line, "TOOL_OP=") == line) { if (strcmp(line, "TOOL_OP=") != 0) { add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"", line + strlen("TOOL_OP=")); } # ifdef DRRUN /* native only supported for drrun */ } else if (strstr(line, "FRONTEND_ABS=") == line) { _snprintf(native_path, native_path_size, "%s", line + strlen("FRONTEND_ABS=")); native_path[native_path_size-1] = '\0'; found_client = true; } else if (strstr(line, "FRONTEND_REL=") == line) { _snprintf(native_path, native_path_size, "%s/%s", dr_root, line + strlen("FRONTEND_REL=")); native_path[native_path_size-1] = '\0'; found_client = true; } else if (strstr(line, "TOOL_OP_DR_PATH") == line) { add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "\"%s\"", dr_root); } else if (strstr(line, "TOOL_OP_DR_BUNDLE=") == line) { if (strcmp(line, "TOOL_OP_DR_BUNDLE=") != 0) { add_extra_option(tool_ops, tool_ops_size, tool_ops_sofar, "%s `%s`", line + strlen("TOOL_OP_DR_BUNDLE="), ops); } # else } else if (strstr(line, "FRONTEND_ABS=") == line || strstr(line, "FRONTEND_REL=") == line || strstr(line, "TOOL_OP_DR_PATH") == line || strstr(line, "TOOL_OP_DR_BUNDLE=") == line) { usage(false, "this tool's config only works with drrun, not drconfig"); return false; # endif } else if (strstr(line, "USER_NOTICE=") == line) { warn("%s", line + strlen("USER_NOTICE=")); } else if (line[0] != '\0') { error("tool 
config file is malformed: unknown line %s", line); return false; } } fclose(f); return found_client; } #endif /* DRCONFIG || DRRUN */ #ifdef DRRUN /* This parser modifies the string, adding nulls to split it up in place. * Caller should continue iterating until *token == NULL. */ static char * split_option_token(char *s, char **token OUT, bool split) { bool quoted = false; char endquote = '\0'; if (s == NULL) { *token = NULL; return NULL; } /* first skip leading whitespace */ while (*s != '\0' && isspace(*s)) s++; if (*s == '\"' || *s == '\'' || *s == '`') { quoted = true; endquote = *s; s++; } *token = (*s == '\0' ? NULL : s); while (*s != '\0' && ((!quoted && !isspace(*s)) || (quoted && *s != endquote))) s++; if (*s == '\0') return NULL; else { if (quoted && !split) s++; if (split && *s != '\0') *s++ = '\0'; return s; } } /* Caller must free() the returned argv array. * This routine writes to tool_ops. */ static const char ** switch_to_native_tool(const char **app_argv, const char *native_tool, char *tool_ops) { const char **new_argv, **arg; char *s, *token; uint count, i; for (arg = app_argv, count = 0; *arg != NULL; arg++, count++) ; /* empty */ for (s = split_option_token(tool_ops, &token, false/*do not mutate*/); token != NULL; s = split_option_token(s, &token, false/*do not mutate*/)) { count++; } count++; /* for native_tool path */ count++; /* for "--" */ count++; /* for NULL */ new_argv = (const char **) malloc(count*sizeof(char*)); i = 0; new_argv[i++] = native_tool; for (s = split_option_token(tool_ops, &token, true); token != NULL; s = split_option_token(s, &token, true)) { new_argv[i++] = token; } new_argv[i++] = "--"; for (arg = app_argv; *arg != NULL; arg++) new_argv[i++] = *arg; new_argv[i++] = NULL; assert(i == count); if (verbose) { char buf[MAXIMUM_PATH*2]; char *c = buf; for (i = 0; i < count - 1; i++) { ssize_t len = _snprintf(c, BUFFER_SIZE_ELEMENTS(buf) - (c - buf), " \"%s\"", new_argv[i]); if (len < 0 || (size_t)len >= BUFFER_SIZE_ELEMENTS(buf) - (c - buf)) break; c += len; } NULL_TERMINATE_BUFFER(buf); info("native tool cmdline: %s", buf); } return new_argv; } #endif /* DRRUN */ int _tmain(int argc, TCHAR *targv[]) { char *dr_root = NULL; char client_paths[MAX_CLIENT_LIBS][MAXIMUM_PATH]; #if defined(DRCONFIG) || defined(DRRUN) char *process = NULL; const char *client_options[MAX_CLIENT_LIBS] = {NULL,}; client_id_t client_ids[MAX_CLIENT_LIBS] = {0,}; size_t num_clients = 0; char single_client_ops[DR_MAX_OPTIONS_LENGTH]; #endif #ifndef DRINJECT # if defined(MF_API) || defined(PROBE_API) /* must set -mode */ dr_operation_mode_t dr_mode = DR_MODE_NONE; # else /* only one choice so no -mode */ # ifdef CLIENT_INTERFACE dr_operation_mode_t dr_mode = DR_MODE_CODE_MANIPULATION; # else dr_operation_mode_t dr_mode = DR_MODE_NONE; # endif # endif #endif /* !DRINJECT */ char extra_ops[MAX_OPTIONS_STRING]; size_t extra_ops_sofar = 0; #ifdef DRCONFIG action_t action = action_none; #endif bool use_debug = false; dr_platform_t dr_platform = DR_PLATFORM_DEFAULT; #ifdef WINDOWS /* FIXME i#840: Implement nudges on Linux. 
*/ bool nudge_all = false; process_id_t nudge_pid = 0; client_id_t nudge_id = 0; uint64 nudge_arg = 0; bool list_registered = false; uint nudge_timeout = INFINITE; bool syswide_on = false; bool syswide_off = false; #endif /* WINDOWS */ bool global = false; int exitcode; #if defined(DRRUN) || defined(DRINJECT) char *pidfile = NULL; bool showstats = false; bool showmem = false; bool force_injection = false; bool inject = true; int limit = 0; /* in seconds */ char *drlib_path = NULL; # ifdef WINDOWS time_t start_time, end_time; # else bool use_ptrace = false; bool kill_group = false; # endif char *app_name = NULL; char full_app_name[MAXIMUM_PATH]; const char **app_argv; char custom_dll[MAXIMUM_PATH]; int errcode; void *inject_data; bool success; bool exit0 = false; #endif int i; #ifndef DRINJECT size_t j; #endif char buf[MAXIMUM_PATH]; char default_root[MAXIMUM_PATH]; char *c; #if defined(DRCONFIG) || defined(DRRUN) char native_tool[MAXIMUM_PATH]; #endif #ifdef DRRUN void *tofree = NULL; bool configure = true; #endif char **argv; drfront_status_t sc; #if defined(WINDOWS) && !defined(_UNICODE) # error _UNICODE must be defined #else /* Convert to UTF-8 if necessary */ sc = drfront_convert_args((const TCHAR **)targv, &argv, argc); if (sc != DRFRONT_SUCCESS) fatal("failed to process args: %d", sc); #endif memset(client_paths, 0, sizeof(client_paths)); extra_ops[0] = '\0'; #if defined(DRCONFIG) || defined(DRRUN) native_tool[0] = '\0'; #endif /* default root: we assume this tool is in <root>/bin{32,64}/dr*.exe */ get_absolute_path(argv[0], buf, BUFFER_SIZE_ELEMENTS(buf)); NULL_TERMINATE_BUFFER(buf); c = buf + strlen(buf) - 1; while (*c != '\\' && *c != '/' && c > buf) c--; _snprintf(c+1, BUFFER_SIZE_ELEMENTS(buf) - (c+1-buf), ".."); NULL_TERMINATE_BUFFER(buf); get_absolute_path(buf, default_root, BUFFER_SIZE_ELEMENTS(default_root)); NULL_TERMINATE_BUFFER(default_root); dr_root = default_root; info("default root: %s", default_root); /* we re-read the tool list if the root or platform change */ read_tool_list(dr_root, dr_platform); /* parse command line */ for (i=1; i<argc; i++) { /* params with no arg */ if (strcmp(argv[i], "-verbose") == 0 || strcmp(argv[i], "-v") == 0) { verbose = true; continue; } else if (strcmp(argv[i], "-quiet") == 0) { quiet = true; continue; } else if (strcmp(argv[i], "-nocheck") == 0) { nocheck = true; continue; } else if (strcmp(argv[i], "-debug") == 0) { use_debug = true; continue; } else if (!strcmp(argv[i], "-version")) { #if defined(BUILD_NUMBER) && defined(VERSION_NUMBER) printf(TOOLNAME" version %s -- build %d\n", STRINGIFY(VERSION_NUMBER), BUILD_NUMBER); #elif defined(BUILD_NUMBER) printf(TOOLNAME" custom build %d -- %s\n", BUILD_NUMBER, __DATE__); #else printf(TOOLNAME" custom build -- %s, %s\n", __DATE__, __TIME__); #endif exit(0); } #ifdef DRCONFIG # ifdef WINDOWS /* FIXME i#840: These are NYI for Linux. 
*/ else if (!strcmp(argv[i], "-list_registered")) { action = action_list; list_registered = true; continue; } else if (strcmp(argv[i], "-syswide_on") == 0) { syswide_on = true; continue; } else if (strcmp(argv[i], "-syswide_off") == 0) { syswide_off = true; continue; } # endif else if (strcmp(argv[i], "-global") == 0) { global = true; continue; } else if (strcmp(argv[i], "-norun") == 0) { dr_mode = DR_MODE_DO_NOT_RUN; continue; } #endif else if (strcmp(argv[i], "-32") == 0) { dr_platform = DR_PLATFORM_32BIT; read_tool_list(dr_root, dr_platform); continue; } else if (strcmp(argv[i], "-64") == 0) { dr_platform = DR_PLATFORM_64BIT; read_tool_list(dr_root, dr_platform); continue; } #if defined(DRRUN) || defined(DRINJECT) else if (strcmp(argv[i], "-stats") == 0) { showstats = true; continue; } else if (strcmp(argv[i], "-mem") == 0) { showmem = true; continue; } else if (strcmp(argv[i], "-no_inject") == 0 || /* support old drinjectx param name */ strcmp(argv[i], "-noinject") == 0 || strcmp(argv[i], "-static") == 0) { DR_dll_not_needed = true; inject = false; continue; } else if (strcmp(argv[i], "-force") == 0) { force_injection = true; continue; } else if (strcmp(argv[i], "-no_wait") == 0) { limit = -1; continue; } # ifdef UNIX else if (strcmp(argv[i], "-use_ptrace") == 0) { /* Undocumented option for using ptrace on a fresh process. */ use_ptrace = true; continue; } else if (strcmp(argv[i], "-attach") == 0) { const char *pid_str = argv[++i]; process_id_t pid = strtoul(pid_str, NULL, 10); if (pid == ULONG_MAX) usage(false, "-attach expects an integer pid"); if (pid != 0) usage(false, "attaching to running processes is not yet implemented"); use_ptrace = true; /* FIXME: use pid below to attach. */ continue; } # ifndef MACOS /* XXX i#1285: private loader NYI on MacOS */ else if (strcmp(argv[i], "-early") == 0) { /* Now the default: left here just for back-compat */ continue; } else if (strcmp(argv[i], "-late") == 0) { /* Appending -no_early_inject to extra_ops communicates our intentions * to drinjectlib. */ add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar, "-no_early_inject"); continue; } # endif # endif /* UNIX */ else if (strcmp(argv[i], "-exit0") == 0) { exit0 = true; continue; } #endif else if (strcmp(argv[i], "-help") == 0 || strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "-h") == 0) { usage(true, ""/* no error msg */); continue; } /* all other flags have an argument -- make sure it exists */ else if (argv[i][0] == '-' && i == argc - 1) { usage(false, "invalid arguments"); } /* params with an arg */ if (strcmp(argv[i], "-root") == 0 || /* support -dr_home alias used by script */ strcmp(argv[i], "-dr_home") == 0) { dr_root = argv[++i]; read_tool_list(dr_root, dr_platform); } else if (strcmp(argv[i], "-logdir") == 0) { /* Accept this for compatibility with the old drrun shell script. 
*/ const char *dir = argv[++i]; if (!does_file_exist(dir)) usage(false, "-logdir %s does not exist", dir); add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar, "-logdir `%s`", dir); continue; } #ifdef DRCONFIG else if (strcmp(argv[i], "-reg") == 0) { if (action != action_none) { usage(false, "more than one action specified"); } action = action_register; process = argv[++i]; } else if (strcmp(argv[i], "-unreg") == 0) { if (action != action_none) { usage(false, "more than one action specified"); } action = action_unregister; process = argv[++i]; } else if (strcmp(argv[i], "-isreg") == 0) { if (action != action_none) { usage(false, "more than one action specified"); } action = action_list; process = argv[++i]; } # ifdef WINDOWS /* FIXME i#840: Nudge is NYI for Linux. */ else if (strcmp(argv[i], "-nudge_timeout") == 0) { nudge_timeout = strtoul(argv[++i], NULL, 10); } else if (strcmp(argv[i], "-nudge") == 0 || strcmp(argv[i], "-nudge_pid") == 0 || strcmp(argv[i], "-nudge_all") == 0){ if (action != action_none) { usage(false, "more than one action specified"); } if (i + 2 >= argc || (strcmp(argv[i], "-nudge_all") != 0 && i + 3 >= argc)) { usage(false, "too few arguments to -nudge"); } action = action_nudge; if (strcmp(argv[i], "-nudge") == 0) process = argv[++i]; else if (strcmp(argv[i], "-nudge_pid") == 0) nudge_pid = strtoul(argv[++i], NULL, 10); else nudge_all = true; nudge_id = strtoul(argv[++i], NULL, 16); nudge_arg = _strtoui64(argv[++i], NULL, 16); } # endif #endif #if defined(DRCONFIG) || defined(DRRUN) # if defined(MF_API) || defined(PROBE_API) else if (strcmp(argv[i], "-mode") == 0) { char *mode_str = argv[++i]; if (dr_mode == DR_MODE_DO_NOT_RUN) usage(false, "cannot combine -norun with -mode"); if (strcmp(mode_str, "code") == 0) { dr_mode = DR_MODE_CODE_MANIPULATION; } # ifdef MF_API else if (strcmp(mode_str, "security") == 0) { dr_mode = DR_MODE_MEMORY_FIREWALL; } # endif # ifdef PROBE_API else if (strcmp(mode_str, "probe") == 0) { dr_mode = DR_MODE_PROBE; } # endif else { usage(false, "unknown mode: %s", mode_str); } } # endif else if (strcmp(argv[i], "-client") == 0) { if (num_clients == MAX_CLIENT_LIBS) { error("Maximum number of clients is %d", MAX_CLIENT_LIBS); die(); } else { const char *client; int id; const char *ops; if (i + 3 >= argc) { usage(false, "too few arguments to -client"); } /* Support relative client paths: very useful! */ client = argv[++i]; id = strtoul(argv[++i], NULL, 16); ops = argv[++i]; append_client(client, id, ops, client_paths, client_ids, client_options, &num_clients); } } else if (strcmp(argv[i], "-ops") == 0) { /* support repeating the option (i#477) */ add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar, "%s", argv[++i]); } #endif #if defined(DRRUN) || defined(DRINJECT) else if (strcmp(argv[i], "-pidfile") == 0) { pidfile = argv[++i]; } else if (strcmp(argv[i], "-use_dll") == 0) { DR_dll_not_needed = true; /* Support relative path: very useful! 
*/ get_absolute_path(argv[++i], custom_dll, BUFFER_SIZE_ELEMENTS(custom_dll)); NULL_TERMINATE_BUFFER(custom_dll); drlib_path = custom_dll; } else if (strcmp(argv[i], "-s") == 0) { limit = atoi(argv[++i]); if (limit <= 0) usage(false, "invalid time"); } else if (strcmp(argv[i], "-m") == 0) { limit = atoi(argv[++i])*60; if (limit <= 0) usage(false, "invalid time"); } else if (strcmp(argv[i], "-h") == 0) { limit = atoi(argv[++i])*3600; if (limit <= 0) usage(false, "invalid time"); } # ifdef UNIX else if (strcmp(argv[i], "-killpg") == 0) { kill_group = true; } # endif #endif #if defined(DRCONFIG) || defined(DRRUN) /* if there are still options, assume user is using -- to separate and pass * through options to DR. we do not handle mixing DR options with tool * options: DR must come last. we would need to generate code here from * optionsx.h to do otherwise, or to sanity check the DR options here. */ else if (argv[i][0] == '-') { while (i<argc) { if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--") == 0) { break; } add_extra_option(extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar, "\"%s\"", argv[i]); i++; } if (i < argc && (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "-c") == 0)) { const char *client; char client_buf[MAXIMUM_PATH]; size_t client_sofar = 0; if (i + 1 >= argc) usage(false, "too few arguments to %s", argv[i]); if (num_clients != 0) usage(false, "Cannot use -client with %s.", argv[i]); client = argv[++i]; single_client_ops[0] = '\0'; if (strcmp(argv[i-1], "-t") == 0) { /* Client-requested DR default options come last, so they * cannot be overridden by DR options passed here. * The user must use -c or -client to do that. */ if (!read_tool_file(client, dr_root, dr_platform, client_buf, BUFFER_SIZE_ELEMENTS(client_buf), extra_ops, BUFFER_SIZE_ELEMENTS(extra_ops), &extra_ops_sofar, single_client_ops, BUFFER_SIZE_ELEMENTS(single_client_ops), &client_sofar, native_tool, BUFFER_SIZE_ELEMENTS(native_tool))) usage(false, "unknown %s tool \"%s\" requested", platform_name(dr_platform), client); client = client_buf; } /* Treat everything up to -- or end of argv as client args. */ i++; while (i < argc && strcmp(argv[i], "--") != 0) { # ifdef DRCONFIG if (action == action_none && strcmp(argv[i], "-reg") == 0) { warn("-reg is taken as a client option!"); } # endif /* DRCONFIG */ add_extra_option(single_client_ops, BUFFER_SIZE_ELEMENTS(single_client_ops), &client_sofar, "\"%s\"", argv[i]); i++; } append_client(client, 0, single_client_ops, client_paths, client_ids, client_options, &num_clients); } if (i < argc && strcmp(argv[i], "--") == 0) { i++; goto done_with_options; } } #else /* DRINJECT */ else if (strcmp(argv[i], "--") == 0) { i++; goto done_with_options; } #endif else { #ifdef DRCONFIG usage(false, "unknown option: %s", argv[i]); #else /* start of app and its args */ break; #endif } } #if defined(DRCONFIG) || defined(DRRUN) || defined(DRINJECT) done_with_options: #endif #if defined(DRRUN) || defined(DRINJECT) # ifdef DRRUN /* Support no app if the tool has its own frontend, under the assumption * it may have post-processing or other features. 
*/ if (i < argc || native_tool[0] == '\0') { # endif if (i >= argc) usage(false, "%s", "no app specified"); app_name = argv[i++]; search_env(app_name, "PATH", full_app_name, BUFFER_SIZE_ELEMENTS(full_app_name)); NULL_TERMINATE_BUFFER(full_app_name); if (full_app_name[0] == '\0') { /* may need to append .exe, FIXME : other executable types */ char tmp_buf[MAXIMUM_PATH]; _snprintf(tmp_buf, BUFFER_SIZE_ELEMENTS(tmp_buf), "%s%s", app_name, ".exe"); NULL_TERMINATE_BUFFER(tmp_buf); search_env(tmp_buf, "PATH", full_app_name, BUFFER_SIZE_ELEMENTS(full_app_name)); } if (full_app_name[0] == '\0') { /* last try */ get_absolute_path(app_name, full_app_name, BUFFER_SIZE_ELEMENTS(full_app_name)); NULL_TERMINATE_BUFFER(full_app_name); } if (full_app_name[0] != '\0') app_name = full_app_name; info("targeting application: \"%s\"", app_name); # ifdef DRRUN } # endif /* note that we want target app name as part of cmd line * (hence &argv[i - 1]) * (FYI: if we were using WinMain, the pzsCmdLine passed in * does not have our own app name in it) */ app_argv = (const char **) &argv[i - 1]; if (verbose) { c = buf; for (i = 0; app_argv[i] != NULL; i++) { c += _snprintf(c, BUFFER_SIZE_ELEMENTS(buf) - (c - buf), " \"%s\"", app_argv[i]); } info("app cmdline: %s", buf); } # ifdef DRRUN if (native_tool[0] != '\0') { app_name = native_tool; inject = false; configure = false; app_argv = switch_to_native_tool(app_argv, native_tool, /* this will be changed, but we don't * need it again */ (char *)client_options[0]); tofree = (void *) app_argv; } # endif #else if (i < argc) usage(false, "%s", "invalid extra arguments specified"); #endif #ifdef WINDOWS /* FIXME i#900: This doesn't work on Linux, and doesn't do the right thing * on Windows. */ /* PR 244206: set the registry view before any registry access */ set_dr_platform(dr_platform); #endif /* support running out of a debug build dir */ if (!use_debug && !check_dr_root(dr_root, false, dr_platform, false/*!pre*/, false/*!report*/) && check_dr_root(dr_root, true, dr_platform, false/*!pre*/, false/*!report*/)) { info("debug build directory detected: switching to debug build"); use_debug = true; } #ifdef DRCONFIG if (verbose) { dr_get_config_dir(global, true/*use temp*/, buf, BUFFER_SIZE_ELEMENTS(buf)); info("configuration directory is \"%s\"", buf); } if (action == action_register) { if (!register_proc(process, 0, global, dr_root, dr_mode, use_debug, dr_platform, extra_ops)) die(); for (j=0; j<num_clients; j++) { if (!register_client(process, 0, global, dr_platform, client_ids[j], client_paths[j], client_options[j])) die(); } } else if (action == action_unregister) { if (!unregister_proc(process, 0, global, dr_platform)) die(); } # ifndef WINDOWS else { usage(false, "no action specified"); } # else /* WINDOWS */ /* FIXME i#840: Nudge NYI on Linux. 
*/ else if (action == action_nudge) { int count = 1; dr_config_status_t res = DR_SUCCESS; if (nudge_all) res = dr_nudge_all(nudge_id, nudge_arg, nudge_timeout, &count); else if (nudge_pid != 0) { res = dr_nudge_pid(nudge_pid, nudge_id, nudge_arg, nudge_timeout); if (res == DR_NUDGE_PID_NOT_INJECTED) printf("process %d is not running under DR\n", nudge_pid); if (res != DR_SUCCESS && res != DR_NUDGE_TIMEOUT) { count = 0; } } else res = dr_nudge_process(process, nudge_id, nudge_arg, nudge_timeout, &count); printf("%d processes nudged\n", count); if (res == DR_NUDGE_TIMEOUT) printf("timed out waiting for nudge to complete\n"); else if (res != DR_SUCCESS) printf("nudge operation failed, verify permissions and parameters.\n"); } # ifdef WINDOWS /* FIXME i#840: Process iterator NYI for Linux. */ else if (action == action_list) { if (!list_registered) list_process(process, global, dr_platform, NULL); else /* list all */ { dr_registered_process_iterator_t *iter = dr_registered_process_iterator_start(dr_platform, global); printf("Registered %s processes for %s\n", global ? "global" : "local", platform_name(dr_platform)); while (dr_registered_process_iterator_hasnext(iter)) list_process(NULL, global, dr_platform, iter); dr_registered_process_iterator_stop(iter); } } # endif else if (!syswide_on && !syswide_off) { usage(false, "no action specified"); } if (syswide_on) { DWORD platform; if (get_platform(&platform) != ERROR_SUCCESS) platform = PLATFORM_UNKNOWN; if (platform >= PLATFORM_WIN_8 && IF_X64_ELSE(dr_platform != DR_PLATFORM_32BIT, (dr_platform == DR_PLATFORM_64BIT || !is_wow64(GetCurrentProcess())))) { /* FIXME i#1522: enable AppInit for non-WOW64 on win8+ */ error("syswide_on is not yet supported on Windows 8+ non-WOW64"); die(); } if (!check_dr_root(dr_root, false, dr_platform, true/*pre*/, true/*report*/)) die(); /* If this is the first setting of AppInit on NT, warn about reboot */ if (!dr_syswide_is_on(dr_platform, dr_root)) { if (platform == PLATFORM_WIN_NT_4) { warn("on Windows NT, applications will not be taken over until reboot"); } else if (platform >= PLATFORM_WIN_7) { /* i#323 will fix this but good to warn the user */ warn("on Windows 7+, syswide_on relaxes system security by removing certain code signing requirements"); } } if (dr_register_syswide(dr_platform, dr_root) != ERROR_SUCCESS) { /* PR 233108: try to give more info on whether a privilege failure */ warn("syswide set failed: re-run as administrator"); } } if (syswide_off) { if (dr_unregister_syswide(dr_platform, dr_root) != ERROR_SUCCESS) { /* PR 233108: try to give more info on whether a privilege failure */ warn("syswide set failed: re-run as administrator"); } } # endif /* WINDOWS */ exitcode = 0; goto cleanup; #else /* DRCONFIG */ if (!global) { /* i#939: attempt to work w/o any HOME/USERPROFILE by using a temp dir */ dr_get_config_dir(global, true/*use temp*/, buf, BUFFER_SIZE_ELEMENTS(buf)); info("configuration directory is \"%s\"", buf); } # ifdef UNIX /* i#1676: detect whether under gdb */ _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "/proc/%d/exe", getppid()); NULL_TERMINATE_BUFFER(buf); i = readlink(buf, buf, BUFFER_SIZE_ELEMENTS(buf)); if (i > 0) { if (i < BUFFER_SIZE_ELEMENTS(buf)) buf[i] = '\0'; else NULL_TERMINATE_BUFFER(buf); } /* On Linux, we use exec by default to create the app process. This matches * our drrun shell script and makes scripting easier for the user. 
*/ if (limit == 0 && !use_ptrace && !kill_group) { info("will exec %s", app_name); errcode = dr_inject_prepare_to_exec(app_name, app_argv, &inject_data); } else # endif /* UNIX */ { errcode = dr_inject_process_create(app_name, app_argv, &inject_data); info("created child with pid %d for %s", dr_inject_get_process_id(inject_data), app_name); } # ifdef UNIX if (limit != 0 && kill_group) { /* Move the child to its own process group. */ process_id_t child_pid = dr_inject_get_process_id(inject_data); int res = setpgid(child_pid, child_pid); if (res < 0) { perror("ERROR in setpgid"); goto error; } } # endif if (errcode == ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE /* Check whether -32/64 is specified, but only for Linux as we do * not support cross-arch on Windows yet (i#803). */ IF_UNIX(&& dr_platform != IF_X64_ELSE(DR_PLATFORM_32BIT, DR_PLATFORM_64BIT))) { if (nocheck) { /* Allow override for cases like i#1224 */ warn("Target process %s appears to be for the wrong architecture.", app_name); warn("Attempting to run anyway, but it may run natively if injection fails."); errcode = 0; } else { /* For Windows, better error message than the FormatMessage */ error("Target process %s is for the wrong architecture", app_name); goto error; /* the process was still created */ } } if (errcode != 0 IF_UNIX(&& errcode != ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE)) { IF_WINDOWS(int sofar =) _snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "Failed to create process for \"%s\": ", app_name); # ifdef WINDOWS if (sofar > 0) { FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) buf + sofar, BUFFER_SIZE_ELEMENTS(buf) - sofar*sizeof(char), NULL); } # endif /* WINDOWS */ NULL_TERMINATE_BUFFER(buf); error("%s", buf); goto error; } /* i#200/PR 459481: communicate child pid via file */ if (pidfile != NULL) write_pid_to_file(pidfile, dr_inject_get_process_id(inject_data)); # ifdef DRRUN /* even if !inject we create a config file, for use running standalone API * apps. if user doesn't want a config file, should use "drinject -noinject". */ if (configure) { process = dr_inject_get_image_name(inject_data); if (!register_proc(process, dr_inject_get_process_id(inject_data), global, dr_root, dr_mode, use_debug, dr_platform, extra_ops)) goto error; for (j=0; j<num_clients; j++) { if (!register_client(process, dr_inject_get_process_id(inject_data), global, dr_platform, client_ids[j], client_paths[j], client_options[j])) goto error; } } # endif # ifdef UNIX if (use_ptrace) { if (!dr_inject_prepare_to_ptrace(inject_data)) { error("unable to use ptrace"); goto error; } else { info("using ptrace to inject"); } } if (kill_group) { /* Move the child to its own process group. 
*/ bool res = dr_inject_prepare_new_process_group(inject_data); if (!res) { error("error moving child to new process group"); goto error; } } # endif if (inject && !dr_inject_process_inject(inject_data, force_injection, drlib_path)) { # ifdef DRRUN error("unable to inject: exec of |%s| failed", drlib_path); # else error("unable to inject: did you forget to run drconfig first?"); # endif goto error; } IF_WINDOWS(start_time = time(NULL);) if (!dr_inject_process_run(inject_data)) { error("unable to run"); goto error; } # ifdef WINDOWS if (limit == 0 && dr_inject_using_debug_key(inject_data)) { info("%s", "Using debugger key injection"); limit = -1; /* no wait */ } # endif if (limit >= 0) { # ifdef WINDOWS double wallclock; # endif uint64 limit_millis = limit * 1000; info("waiting %sfor app to exit...", (limit <= 0) ? "forever " : ""); success = dr_inject_wait_for_child(inject_data, limit_millis); # ifdef WINDOWS end_time = time(NULL); wallclock = difftime(end_time, start_time); if (showstats || showmem) dr_inject_print_stats(inject_data, (int) wallclock, showstats, showmem); # endif if (!success) info("timeout after %d seconds\n", limit); } else { success = true; /* Don't kill the child if we're not waiting. */ } exitcode = dr_inject_process_exit(inject_data, !success/*kill process*/); if (limit < 0) exitcode = 0; /* Return success if we didn't wait. */ if (exit0) exitcode = 0; goto cleanup; error: /* we created the process suspended so if we later had an error be sure * to kill it instead of leaving it hanging */ if (inject_data != NULL) dr_inject_process_exit(inject_data, true/*kill process*/); # ifdef DRRUN if (tofree != NULL) free(tofree); # endif exitcode = 1; #endif /* !DRCONFIG */ cleanup: sc = drfront_cleanup_args(argv, argc); if (sc != DRFRONT_SUCCESS) fatal("failed to free memory for args: %d", sc); /* FIXME i#840: We can't actually match exit status on Linux perfectly * since the kernel reserves most of the bits for signal codes. At the * very least, we should ensure if the app exits with a signal we exit * non-zero. */ return exitcode; }
1
13,378
What is the problem here? The commit message has strange wording "readlink is defined with restrict pointers": do you mean "restricted"? Is that a well-defined language term? I'm not sure what it means.
DynamoRIO-dynamorio
c
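The review question above is about the C99 `restrict` qualifier: POSIX declares readlink with restrict-qualified pointer parameters, which matters for the frontend code above because it calls readlink(buf, buf, ...) with the same buffer as both the path and the output. The sketch below is illustrative only and not part of the frontend source (the /proc path used is just an example); it shows what the restrict contract promises and a call that honors it by using two distinct buffers.

/*
 * Minimal illustrative sketch of what "restrict pointers" means in C99.
 * POSIX declares readlink roughly as:
 *     ssize_t readlink(const char *restrict pathname,
 *                      char *restrict buf, size_t bufsiz);
 * The restrict qualifier is the caller's promise that, during the call, the
 * memory reached through one restrict-qualified pointer is not also accessed
 * through the other. Passing the same buffer for both arguments, as the
 * readlink(buf, buf, ...) call above does, breaks that promise, so the result
 * is not guaranteed by the standard even if it happens to work on Linux.
 */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char path[PATH_MAX];
    char target[PATH_MAX];

    /* Hypothetical symlink to resolve; mirrors the /proc/<pid>/exe idea above. */
    snprintf(path, sizeof(path), "/proc/%d/exe", (int)getppid());

    /* Distinct source and destination buffers honor the restrict contract. */
    ssize_t n = readlink(path, target, sizeof(target) - 1);
    if (n >= 0) {
        target[n] = '\0';            /* readlink does not NUL-terminate */
        printf("%s -> %s\n", path, target);
    } else {
        perror("readlink");
    }
    return 0;
}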
@@ -17,6 +17,7 @@ package com.google.api.codegen.util.ruby; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.NameFormatter; import com.google.api.codegen.util.NamePath; +import com.google.common.collect.ImmutableSet; /** * The NameFormatter for Ruby.
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.util.ruby; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.NameFormatter; import com.google.api.codegen.util.NamePath; /** * The NameFormatter for Ruby. */ public class RubyNameFormatter implements NameFormatter { @Override public String className(Name name) { return name.toUpperCamel(); } @Override public String localVarName(Name name) { return name.toLowerUnderscore(); } @Override public String privateFieldName(Name name) { return name.toLowerUnderscore(); } @Override public String publicFieldName(Name name) { return name.toLowerUnderscore(); } @Override public String varReference(Name name) { return "@" + name.toLowerUnderscore(); } @Override public String publicMethodName(Name name) { return name.toLowerUnderscore(); } @Override public String privateMethodName(Name name) { return name.toLowerUnderscore(); } @Override public String staticFunctionName(Name name) { return name.toLowerUnderscore(); } @Override public String inittedConstantName(Name name) { return name.toUpperCamel(); } @Override public String keyName(Name name) { return name.toLowerUnderscore(); } @Override public String qualifiedName(NamePath namePath) { return namePath.withUpperPieces().toDoubleColoned(); } @Override public String packageFilePathPiece(Name name) { return name.toLowerUnderscore(); } @Override public String classFileNameBase(Name name) { return name.toLowerUnderscore(); } }
1
18,876
Why are we not wrapping here?
googleapis-gapic-generator
java
@@ -2,12 +2,10 @@ namespace Shopsys\FrameworkBundle\Model\Product\Filter; -use Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter; - class ParameterFilterData { /** - * @var \Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter + * @var \Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter|null */ public $parameter;
1
<?php namespace Shopsys\FrameworkBundle\Model\Product\Filter; use Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter; class ParameterFilterData { /** * @var \Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter */ public $parameter; /** * @var \Shopsys\FrameworkBundle\Model\Product\Parameter\ParameterValue[] */ public $values = []; /** * @param \Shopsys\FrameworkBundle\Model\Product\Parameter\Parameter $parameter * @param \Shopsys\FrameworkBundle\Model\Product\Parameter\ParameterValue[] $values */ public function __construct( Parameter $parameter = null, array $values = [] ) { $this->parameter = $parameter; $this->values = $values; } }
1
10,714
I thought that all data objects were being unified so that all default values are initialized in the constructor, aren't they?
shopsys-shopsys
php
@@ -22,16 +22,16 @@ var sourceHashes = map[string]string{ "libflux/flux-core/src/ast/flatbuffers/mod.rs": "00c75dc1da14487953a4a017616fb8a237fe3da437c876f1328532dd7906f015", "libflux/flux-core/src/ast/flatbuffers/monotype.rs": "4e82aae8e7edb8e230302241fe702ece9ae84fc708003c24709c2ea944cfaae8", "libflux/flux-core/src/ast/flatbuffers/tests.rs": "ea85b78049f0c3d79daedcf9e171ccb00d0a596d1a643aa218590c99e6b160bc", - "libflux/flux-core/src/ast/mod.rs": "676e22d24cd7a97525688ed391a980d08b4bee40c5b5067d187f3ae6d66210ca", + "libflux/flux-core/src/ast/mod.rs": "29b25b75a01cf1e1ac2b52d99b91986cec32011dc55af1ffd7f145838fce858d", "libflux/flux-core/src/ast/tests.rs": "bc7f77d569d8bbd4b9d00653f48bacd47eed46f53024dce836d3c8bbb6a80555", "libflux/flux-core/src/ast/walk/mod.rs": "e8a03023d7426bcf6dfdb1a61ac3263f5cf9194a595a1584dff7c899d06562f1", "libflux/flux-core/src/ast/walk/tests.rs": "f7b2d7dd5643bb795a86c04b6979b136b0de46b52b213caff094aed6d204a05d", "libflux/flux-core/src/formatter/mod.rs": "945736a4ad87adb6a3f359b6f2da6e5492b3cb0efd282e277303e2acae137763", "libflux/flux-core/src/formatter/tests.rs": "b0a10998a65fc4b54a8f68b3a0ed186d8548ba3d7638f911eb188d2ce485206f", "libflux/flux-core/src/lib.rs": "d19b7054e07f234c107d457030a0031374c123fe14a84a5b8e35537d138bac7a", - "libflux/flux-core/src/parser/mod.rs": "e3f11fe29f47271b5c04accc2d7efa35e1dc62c6de036bf0cc0cccda5e4742e8", + "libflux/flux-core/src/parser/mod.rs": "daf24b2c439f4c7a6f138e2b25064a205aee73bb6cffc898df22dd0c962ca5df", "libflux/flux-core/src/parser/strconv.rs": "748c82f6efc2eafaafb872db5b4185ce29aafa8f03ba02c4b84f4a9614e832d2", - "libflux/flux-core/src/parser/tests.rs": "e3a7c9222f90323a7ea9b319bd84f96f66c6f115af6d199a0da332c894713ae4", + "libflux/flux-core/src/parser/tests.rs": "43a1e5d3de85d76da2107544e417f583a1f66c20910286285b9645a309c42425", "libflux/flux-core/src/scanner/mod.rs": "2e15c9e0a73d0936d2eaeec030b636786db6dbe7aab673045de3a3e815c49f8a", "libflux/flux-core/src/scanner/scanner.rl": "530c062363b66daded3c521af44111b99ffee0eeb2a12ccffa8b8f349002d852", "libflux/flux-core/src/scanner/scanner_generated.rs": "d65dcc216ccbb88d8f2e261a994ba2789075e434cb7a0f5e39e390bee2821e5b",
1
// Generated by buildinfo // // DO NOT EDIT! package libflux // sourceHashes is the hash of the build sources for // the rust components used by cgo. // This gets generated from the libflux sources // and forces the cgo library to rebuild and relink // the sources. This is because non-C/C++ sources // are not tracked by Go's build system.' //lint:ignore U1000 generated code var sourceHashes = map[string]string{ "libflux/Cargo.lock": "d9f23baf919497006ca09811dbdda9fbc3f1bc612ed45043102d98c02acb076f", "libflux/Cargo.toml": "91ac4e8b467440c6e8a9438011de0e7b78c2732403bb067d4dd31539ac8a90c1", "libflux/flux-core/Cargo.toml": "49902a1bd6c4aca947e54ee9db785ff50ba935bc22f836acdadcfbe3496055f6", "libflux/flux-core/benches/scanner.rs": "da73723114b3a22ef3f497fbc7bf9b93b4ddc1e38d31d0fee974e027fb45f199", "libflux/flux-core/src/ast/check/mod.rs": "066040a5d7284deb05121370511dbe69b153550120db7d9925c35e13acf301a5", "libflux/flux-core/src/ast/check/tests.rs": "dfa6dc9b877a72825c4b6669606c5b79de1bc11602c4259f712af1a252c92820", "libflux/flux-core/src/ast/flatbuffers/ast_generated.rs": "895e09284030a8ea05883fbb71d67282a6b567678126f2e2aadfc5bca72ed47d", "libflux/flux-core/src/ast/flatbuffers/mod.rs": "00c75dc1da14487953a4a017616fb8a237fe3da437c876f1328532dd7906f015", "libflux/flux-core/src/ast/flatbuffers/monotype.rs": "4e82aae8e7edb8e230302241fe702ece9ae84fc708003c24709c2ea944cfaae8", "libflux/flux-core/src/ast/flatbuffers/tests.rs": "ea85b78049f0c3d79daedcf9e171ccb00d0a596d1a643aa218590c99e6b160bc", "libflux/flux-core/src/ast/mod.rs": "676e22d24cd7a97525688ed391a980d08b4bee40c5b5067d187f3ae6d66210ca", "libflux/flux-core/src/ast/tests.rs": "bc7f77d569d8bbd4b9d00653f48bacd47eed46f53024dce836d3c8bbb6a80555", "libflux/flux-core/src/ast/walk/mod.rs": "e8a03023d7426bcf6dfdb1a61ac3263f5cf9194a595a1584dff7c899d06562f1", "libflux/flux-core/src/ast/walk/tests.rs": "f7b2d7dd5643bb795a86c04b6979b136b0de46b52b213caff094aed6d204a05d", "libflux/flux-core/src/formatter/mod.rs": "945736a4ad87adb6a3f359b6f2da6e5492b3cb0efd282e277303e2acae137763", "libflux/flux-core/src/formatter/tests.rs": "b0a10998a65fc4b54a8f68b3a0ed186d8548ba3d7638f911eb188d2ce485206f", "libflux/flux-core/src/lib.rs": "d19b7054e07f234c107d457030a0031374c123fe14a84a5b8e35537d138bac7a", "libflux/flux-core/src/parser/mod.rs": "e3f11fe29f47271b5c04accc2d7efa35e1dc62c6de036bf0cc0cccda5e4742e8", "libflux/flux-core/src/parser/strconv.rs": "748c82f6efc2eafaafb872db5b4185ce29aafa8f03ba02c4b84f4a9614e832d2", "libflux/flux-core/src/parser/tests.rs": "e3a7c9222f90323a7ea9b319bd84f96f66c6f115af6d199a0da332c894713ae4", "libflux/flux-core/src/scanner/mod.rs": "2e15c9e0a73d0936d2eaeec030b636786db6dbe7aab673045de3a3e815c49f8a", "libflux/flux-core/src/scanner/scanner.rl": "530c062363b66daded3c521af44111b99ffee0eeb2a12ccffa8b8f349002d852", "libflux/flux-core/src/scanner/scanner_generated.rs": "d65dcc216ccbb88d8f2e261a994ba2789075e434cb7a0f5e39e390bee2821e5b", "libflux/flux-core/src/scanner/tests.rs": "e8a07af293311388a4c83f3b3160b465bdaa0b43272ede70bdc4558a567bbeb7", "libflux/flux-core/src/scanner/token.rs": "5090c2ac4b341566a85f7d9ed9b48746dd2084ec2f3effdb4a7dc16cf34d44b9", "libflux/flux-core/src/scanner/unicode.rl": "f923f3b385ddfa65c74427b11971785fc25ea806ca03d547045de808e16ef9a1", "libflux/flux-core/src/scanner/unicode.rl.COPYING": "6cf2d5d26d52772ded8a5f0813f49f83dfa76006c5f398713be3854fe7bc4c7e", "libflux/flux-core/src/semantic/bootstrap.rs": "1c59f8a931cb4da2df3b39294cb9408f156bbb89c2e6e96d240406f63c784814", "libflux/flux-core/src/semantic/check.rs": 
"476db74a4479baff7091bb85a5ac2b8e9ffc07883bec92f9543a1024366ff9db", "libflux/flux-core/src/semantic/convert.rs": "456baba801b9bccbacaabc0de21232a0230c439134ee5e23b7b2edd855a98a24", "libflux/flux-core/src/semantic/doc.rs": "494895503b8d411be3c4ca088ce71c043f5ac3f8300f239bac79c253b215c30c", "libflux/flux-core/src/semantic/env.rs": "4691fb31b4aeeba4159d009367bc93a25564ed4bdefba3fc7ec41a51cac6d84b", "libflux/flux-core/src/semantic/flatbuffers/mod.rs": "ed53628573204f3849f9dfa08b4aaf7b6ee0bcbb06d13109e559d385cc38ba7f", "libflux/flux-core/src/semantic/flatbuffers/semantic_generated.rs": "57ee3dd0e73e156a0004fa22b4dc6e5356dea412b04b1187ec6e8ef024c42a60", "libflux/flux-core/src/semantic/flatbuffers/tests.rs": "1b9c3ae26c230ed9b36b21130f2db82ae5b89ddcbf79eeb3357b268df0273178", "libflux/flux-core/src/semantic/flatbuffers/types.rs": "f32c5239a01407a6f3b83746ddd2277134beb93be3d5689b14ab5d3515c4d153", "libflux/flux-core/src/semantic/fresh.rs": "24b68843d5ff8927eb14b376c508c0fbf8374c4d18cfdb936001a15e590c4db0", "libflux/flux-core/src/semantic/import.rs": "e291110f42b35a09973c5820d143231b1eeda7800a7e91638bfdc489096c79b9", "libflux/flux-core/src/semantic/infer.rs": "76e0340c8ab89f45aa385febecd06ee599cdc020aca53cab5b434640018417c4", "libflux/flux-core/src/semantic/mod.rs": "3cb4b9cbd51cf450e671338f7cb8114e701ff3f7f817f83b3cc80f1ebca85e55", "libflux/flux-core/src/semantic/nodes.rs": "5026a61d4a9c684be4d18cf6aa42961c3ab1f72d23d5eb8391b7eae635fdc4b6", "libflux/flux-core/src/semantic/sub.rs": "75800e4d426840d0d536e4f5a39400527568e8ad56abe5c5be2d7a4ae64aad10", "libflux/flux-core/src/semantic/tests.rs": "9c66626d17b86d5444afa745c213263f3d61663fcea7f4ca065573c0f477311a", "libflux/flux-core/src/semantic/types.rs": "5544dde6bc8cb11a7157a2e05bf6bada30c124b37ce676c87cb02ea3d8fe93ab", "libflux/flux-core/src/semantic/walk/_walk.rs": "0e8739d439fc26112c654a3c90264d14cdb4f21faf69c5bd3845e70a4c925c5f", "libflux/flux-core/src/semantic/walk/mod.rs": "f422c0fadd953e587096f9d72141b1fd9c76aaf7ca997e212279ee17bfc39e32", "libflux/flux-core/src/semantic/walk/test_utils.rs": "ffec273fc2e54c640ba5f2ce5c1faa1516a3d98b1365d6f4cab762f582102650", "libflux/flux-core/src/semantic/walk/walk_mut.rs": "845e0b706c90fd84b1f7ef08ab85d86a5af24392d7973415ed6d1c778a67e012", "libflux/flux-core/tests/analyze_test.rs": "537fcf7722f940794646d5916e3484262769fd90a7fa5e74e8095fb732136be6", "libflux/flux/Cargo.toml": "c8ae34c66a95d5d1de16e7b20ff618c97ec5a41ad1200a29566ce4904d558426", "libflux/flux/FLUXDOC.md": "92e6dd8043bd87b4924e09aa28fb5346630aee1214de28ea2c8fc0687cad0785", "libflux/flux/benches/basic.rs": "bb1e655d303f2e6e75e930dd98417e46946e72e1456d8cea8ecb215a62b6d107", "libflux/flux/benches/builtins.rs": "625b3de708d49a3cb758f331a01f41805ac59d22bbadfee417922504f2d5797e", "libflux/flux/benches/everything.flux": "085d66a7c698a91b6188526a64b136c414e4faf39b6c2aea1883bfd4a757900e", "libflux/flux/benches/formatter.rs": "e4838c1428d45205292a9dda1759f804779a4e142db4360ec2a6be1c9ca741b8", "libflux/flux/build.rs": "06924d7c98c84ea5e38d446716c7e12cbb60981f21b1eba1e97f278e366a1173", "libflux/flux/src/bin/flux-dump-docs.rs": "73c64a6cd9a4dd280fed402e5bdae52e22b0625d500943e1666f3ba7e3c36cde", "libflux/flux/src/lib.rs": "ac2364f5f219338b5fd00331f302100cdb3c19f48eca68e6664cd0fc10282cfb", "libflux/flux/templates/base.html": "a818747b9621828bb96b94291c60922db54052bbe35d5e354f8e589d2a4ebd02", "libflux/flux/templates/home.html": "f9927514dd42ca7271b4817ad1ca33ec79c03a77a783581b4dcafabd246ebf3f", "libflux/flux/templates/package.html": 
"635e1d638ad1a430f3894ff0a458572ca3c4dc6b9cec2c57fe771443849e1402", "libflux/flux/templates/value.html": "2af699af2535f83635acbc75c92d2ee941c2335350fc7e82ceb2dccf479360bf", "libflux/include/influxdata/flux.h": "9e057fd9abf8eafe6e9589941d198d9977a50a562a959cae35f25847e415e1d3", "stdlib/array/array.flux": "12adc5b12f82fd4468f37404f65deb5839b70b3cd0e88927e2c0f6cd76b777bf", "stdlib/array/from_group_test.flux": "aef4800416e47fe41329531a243865e5570df6c3070e22b81e0612bde91638a9", "stdlib/array/from_test.flux": "fd3b11ff0ae463ce012275036874bff1cacbadddb1836635c3543e9808110b5e", "stdlib/contrib/RohanSreerama5/naiveBayesClassifier/bayes_test.flux": "358c5d320a801994f411c0ff5424f2302231586e2c856a5ed5858c6e7d4fbb56", "stdlib/contrib/RohanSreerama5/naiveBayesClassifier/naiveBayesClassifier.flux": "563721d5520f4ab1eb17df27f9a45d1b3bbd78abcce44ac095771312c57f7220", "stdlib/contrib/anaisdg/anomalydetection/mad.flux": "ce22598b07c34389caf499c472570c384de125d1bea2555c82559bc14f54d9d9", "stdlib/contrib/anaisdg/anomalydetection/mad_test.flux": "ba55de9927f0ba9bc065bc359090a35675812181def2aaa9a4fd466bcd335826", "stdlib/contrib/anaisdg/statsmodels/linearreg.flux": "6e1452091ea325c83219c860dbea694ca492feb340aadc04c6381dff12dfc0b8", "stdlib/contrib/anaisdg/statsmodels/linearreg_test.flux": "347f8d80901d6f2fa4bb66a1a560fd71a4ea73ea9c4215213f076d12f7c33920", "stdlib/contrib/bonitoo-io/alerta/alerta.flux": "cbcae77dc0fce47c4501882bbea5d7ffca5b81f9aa61de7e34ccb179a5cb8cff", "stdlib/contrib/bonitoo-io/hex/hex.flux": "9460d41054683e01b6310c7f93616712652b255919001c72715d56559083c559", "stdlib/contrib/bonitoo-io/tickscript/alert_test.flux": "5077a1a4939b4907991b4431c634a1da4b33678ba78316bc9fe3ac6cceccbb35", "stdlib/contrib/bonitoo-io/tickscript/alert_with_topic_test.flux": "993114277638a0403c7372adb501888003f67d669a830a4620cd259df7f7e298", "stdlib/contrib/bonitoo-io/tickscript/deadman_empty_test.flux": "c3daa5f29cd2f1e3c971c848cfb3b029e2e13318862c07ac3f3e1aca4242fd96", "stdlib/contrib/bonitoo-io/tickscript/deadman_threshold_test.flux": "47ddb4e30b526f57883ced6e06088a8d0391b607684800e32b09552c7bc3b5b9", "stdlib/contrib/bonitoo-io/tickscript/tickscript.flux": "f59bb38e416b28363b7bcd24d899efcb5dbde317782bb97115474a408b7e95aa", "stdlib/contrib/bonitoo-io/victorops/victorops.flux": "1518d4bee92bdff1d17631c6ad9508d143ae222253744670f6c650780835b278", "stdlib/contrib/bonitoo-io/zenoss/zenoss.flux": "0c47e4cf9675b3e6bee4cfcb69417bdf4a927c1b2ee905cf32084fcb39588585", "stdlib/contrib/chobbs/discord/discord.flux": "7784fb09e7edbceed894c9340f9f7f7aa56ce7cfec858924b4a96ceb92ae5cea", "stdlib/contrib/jsternberg/aggregate/aggregate.flux": "90c412ad540aa3be93ab1a4389306c2876db40a53c5ab56f2c68106dc05a7314", "stdlib/contrib/jsternberg/aggregate/table_test.flux": "9ddba05c01264f2d9d813f515214a408d2118ad85a048b31cd5d0501f7c76b5f", "stdlib/contrib/jsternberg/aggregate/window_test.flux": "fd4c2c5d3275c7ec65b2e75f746559f0f53db97085f2e41487a34397ef844926", "stdlib/contrib/jsternberg/influxdb/influxdb.flux": "503ed93b6591f03646383844c031307e3e49996e2642dd9f2c1bb715b7b0b9af", "stdlib/contrib/jsternberg/math/math.flux": "560cb4a26ce31fa1a279f1863d28ba932fd9ccac310eb028a70bd2dcfa047d6e", "stdlib/contrib/jsternberg/rows/map_drop_value_test.flux": "8370e93460f81b1854d5cb3c7f027bcd445eb4680cde748a217686b6e430b83a", "stdlib/contrib/jsternberg/rows/map_override_type_test.flux": "9ba928e6e1520cb3b8614f9f922bfae168eb16100b34f3f5c8b3855e26238fcb", "stdlib/contrib/jsternberg/rows/map_test.flux": 
"7e1983468c55bdef47867110a065052727ae38cd13c7f7f08ded22f423612efc", "stdlib/contrib/jsternberg/rows/map_with_test.flux": "75ea8f08060287685ffd6f8c1e905595ff84aad7a8182e3e6f9cf9bab53d3926", "stdlib/contrib/jsternberg/rows/rows.flux": "9be29b5fe3b5eca1df4e873494920c02b0e6bb4f761c5ff6bb0eb4e637b5c3c6", "stdlib/contrib/rhajek/bigpanda/bigpanda.flux": "477af67ff428e4c79ce18013f142444d78ad8d42b5b6e66c09221f92ea5d413b", "stdlib/contrib/sranka/opsgenie/opsgenie.flux": "85a8c9cef2a2eb3c0e173c739d49fcfe9197dbe74303eaa6dfaf146c712cbb9d", "stdlib/contrib/sranka/sensu/sensu.flux": "b81b947b8e5d516fdb60b5976174dda9120a79bb7202d515c712d135274411f4", "stdlib/contrib/sranka/teams/teams.flux": "e3891dd9c6847ef6ba004838bfbcaadc74c961fd114b146e777b44e0fbd57967", "stdlib/contrib/sranka/telegram/telegram.flux": "3a904d436df3c76a287439577739024650b430745db071db03ff8ead6aace30b", "stdlib/contrib/sranka/webexteams/webexteams.flux": "c7523f7e120bbf1834222414a754920b6cfaadc35fa31db100d3fc0534603372", "stdlib/contrib/tomhollingworth/events/duration.flux": "f3f45369e4ae6888285ace1980fecb5f1ce32851bf8c8bd11a15f5b34ba11f28", "stdlib/contrib/tomhollingworth/events/duration_test.flux": "c819d173ebb9ebe7b972ffe35ed00766d43638437806b0b7c96f3326394e0bbc", "stdlib/contrib/tomhollingworth/events/duration_with_stop_test.flux": "a8bb67d9c1dda6dd3d4d56095251e4cea01f393910145f74b5ef4d95febc0345", "stdlib/csv/csv.flux": "57585de485f811940a6723665a61c3c4f48aae1d1db3674a13de8ca39a8fce67", "stdlib/csv/csv_test.flux": "f5a9adc0936c4b8779cb1dfe4fc823a85d70306b62bdcadd6dd14cd46b7485bc", "stdlib/date/date.flux": "aa72c9ee5b6d622762b9b1a041545e1700ceb34c8757c91b9b7d10445e6e0b83", "stdlib/date/hour_duration_test.flux": "8790700f07b9c0b0cb9b50b660199471e330aad12522b1bcde76b6f9b5d51142", "stdlib/date/hour_time_test.flux": "de5d871b71edc41e0ad62fdbc7ef36ace100376320c4c5e823030c3f311673a7", "stdlib/date/microsecond_duration_test.flux": "d1dfa065fa3860abf13fd1aa9b365f114743381455c5fd88d3ee1eaba17e5a99", "stdlib/date/microsecond_time_test.flux": "1a75501eda82dc254f4a0cbb1b003c7ad9afe1843fd915efcf556fe859c66075", "stdlib/date/millisecond_duration_test.flux": "73d61e574b7fe62eea55815eeb316043d85b75d20d6487639b8360ab0b9ac12b", "stdlib/date/millisecond_test.flux": "4cc1ac0ee55b614edbdbf906707fd622b734484df448bf8e37b5728b39a506c3", "stdlib/date/minute_duration_test.flux": "0ea75356594c2a4b72e49f1559bada422b595beee7e1457d6e1b1114f0d9acc3", "stdlib/date/minute_time_test.flux": "d9cb2f330293e42b531e2d4a8ea0057f22873f1a296c30fc04cddb72a046a4ae", "stdlib/date/month_day_duration_test.flux": "1654d7fb9ee53688742d4ea6582d91f0f1bf2a47adaa3888febd63b9c0d71af5", "stdlib/date/month_day_time_test.flux": "9e526dec845ef81245f460f49da58291e1b5d6eae3e5620656921ef79211b9f0", "stdlib/date/month_duration_test.flux": "5e69ff5786aaaa62fc05e8dea78616910e549e5744d6610858f063f330e1c412", "stdlib/date/month_time_test.flux": "1e3b5223bec1909d3f012e013941781ea637cf46694d3871972d8f29c7acabfc", "stdlib/date/nanosecond_duration_test.flux": "1a9d8b110d20a895eb9315397c447c8000c25326be6b3dd9a89e7ce027edd42b", "stdlib/date/nanosecond_time_test.flux": "50b15737e335a9a5dc98567cde367e6d9d57beb1a5d44a02e8cb025e05d19203", "stdlib/date/quarter_duration_test.flux": "fa66bfa33fce0cb03c8311f793804d05770b26e51ff2c69f3da7cb9a66a1e220", "stdlib/date/quarter_time_test.flux": "46942050e6f0b06e61ca0bf04d8e8005cf0accc1a13c7067450e18c71d98e624", "stdlib/date/second_duration_test.flux": "02eba8a396ed8dc7d8f2750b142903b9fe4284b3be6a9814aec34fe716d8ba69", "stdlib/date/second_time_test.flux": 
"9af6da405f301a596421a1a055cc376037664d4f5ed2a437b6ac1c6442439bda", "stdlib/date/truncate_duration_test.flux": "540b3cdbfa482c2e80e2a5549426599caa33d3cf8ff44b6e824cd5c33ebf8c95", "stdlib/date/truncate_time_test.flux": "8ba9f8b0a9bf1d9c010992d7cd6ade39097bc48592863c69ef2d7ba1ca8e7436", "stdlib/date/week_day_duration_test.flux": "8359762952c36180d008ba5a0a68f0d929c6f080ebf0de31672309387189b6c7", "stdlib/date/week_day_time_test.flux": "e74f006edaf47c9961f234177a219898392a0f73483b5b235afee8ca9f02053f", "stdlib/date/week_duration_test.flux": "206ad4b79699884255d914fba6a5d46ce95d4c799c7b1a4c8d7bd2cd20f0dbd0", "stdlib/date/week_time_test.flux": "a9eeb6bb505c5d227df95aefdead4794f75eef6020b3d64cbb9fa56aba1924cd", "stdlib/date/year_day_duration_test.flux": "09cb3e88ab68da0f6c0e6a945c6d7665d806f8e0bc976ca064e4ad6354b58081", "stdlib/date/year_day_time_test.flux": "9348ef55089bf351c0104a4d924aa45b64f1c9d0109deceef94837a74203558c", "stdlib/date/year_duration_test.flux": "c8f4038524aa6c4fc77a31e41d20f5973b5913562dd9c3877e692e16260c0ade", "stdlib/date/year_time_test.flux": "37e9663f63485dac2f3215dd30387fec1f3f287a9a5fd64b013f53fc9f349db6", "stdlib/dict/dict.flux": "d4122c921fe85de1d093f8b6b2a0ad9c3ee94a488e5f9326c1c1637cd4fa6688", "stdlib/dict/dict_insert_remove_test.flux": "1986d4677df4f72532f47fed6f009d71a3ee9fdc4302662210f6b5405acd87ff", "stdlib/dict/dict_lit_lambda_test.flux": "196220dd9ee726c44d17a93ea194ad52413574aaa54622efbd44282a48f0a170", "stdlib/dict/dict_lit_test.flux": "a50151d9613ea9622b23d8f9292ee97e4464938457584c1acf70c2bcbc67a825", "stdlib/dict/dict_test.flux": "5cd03edc9524353e1d28a4d432d5d13cfdb8edab5dabf1139a53c24511bad71b", "stdlib/dict/empty_dict_lambda_test.flux": "db4aa61af0de5bfe3298dd008f3ad2f9c0b638e653b0675f5dca6ba82c7d959b", "stdlib/dict/empty_dict_lit_test.flux": "29b9201207425c9ea419658e52700661fc1dbe0a2d6c9c912e84cf038eeea766", "stdlib/experimental/aggregate/aggregate.flux": "a5a57185cd8ca25dbbfbfc3253a901df027a15c907fccbb8926056614bbd2f83", "stdlib/experimental/aggregate/aggregate_test.flux": "5f96400a2f07153562338f38261d998cee36ccfae94ead5dccf98a7127f94588", "stdlib/experimental/alignTime_test.flux": "fda718da4d7764798808598fd41435c661061f1dde79d15cb2c8e1bf4c43f932", "stdlib/experimental/array/array.flux": "b5c4e06267d25f34ce56428009b424f6aa7e3a4267201d4e53e31856f8dfcd6e", "stdlib/experimental/bigtable/bigtable.flux": "eaab5de294dca7f9ba94df6546c2e9835558df7da59226c2d5779eeed8b10fc7", "stdlib/experimental/count_test.flux": "403a41883d97ac4a94ba590a878b32a68cfa6d6a5ce219c957d66e27010d9ab6", "stdlib/experimental/csv/csv.flux": "67f9d7b7bda2972d5e280d498c6b3c165fddcbbf26da7c51dec6ddcf345258bc", "stdlib/experimental/distinct_test.flux": "ebac6be0e4fe108ab9b79844788f8c00c0418e069437abeee3a28eb63210c145", "stdlib/experimental/experimental.flux": "19dfa3dff55186a3accbc1b36ccdb614e882a5548a94601c5500a34f9cf9de60", "stdlib/experimental/fill_test.flux": "e050cdacf8a46817cea4c481e288ac9a252af33d0d179cd3c0be9cec3d6e052c", "stdlib/experimental/first_test.flux": "9af76cef19d2cf070147edc18650875454c5b95c168f67eed8aaeb24bdd31ea9", "stdlib/experimental/geo/asTracks_test.flux": "5749bae4d2c6e520fd1fc09f6adf589266e489a656b7419295e39e4aace32f9d", "stdlib/experimental/geo/filterRowsNotStrict_test.flux": "13468503f3d515fbfe135a6bfb0f80a7443494b0b90c76ea14ccaf6de1b2e926", "stdlib/experimental/geo/filterRowsPivoted_test.flux": "f86c3597ae733a57bdf55913a94f2247ff6a1ee325a9960e446020ca1ae0812c", "stdlib/experimental/geo/filterRowsStrict_test.flux": 
"775d0e03afc5b2e0547e455192b3e4150a4bde3f5e89d16f49e2514725137635", "stdlib/experimental/geo/geo.flux": "8402e043897e8c797669bc09d6aa9f58710a97a2c7183924ff277df4e0209774", "stdlib/experimental/geo/gridFilterLevel_test.flux": "7e31336d04a67f67b02ce1d04e7a31eb46b30c3ca9e020fdf79ff712707e53d4", "stdlib/experimental/geo/gridFilter_test.flux": "0725a69323300c4ae2207bac61cd3665652d3b9c307db2f5a0c8176ca934ce3b", "stdlib/experimental/geo/groupByArea_test.flux": "50ad1aeaaf39016a58acd7ea4bddeb7129619cea21b8f728809e1a3ddf9e2caf", "stdlib/experimental/geo/shapeDataWithFilter_test.flux": "d3499e3f181f1aee29110d8a044c14d8ff1015862ca3b87890d160288b54c994", "stdlib/experimental/geo/shapeData_test.flux": "78ad82c4570f09ec4fa46ac9601e2a24a594d8b7e330abe32397a461f4570bca", "stdlib/experimental/geo/st_contains_linestring_test.flux": "1669b2d46f1e02c09fa2b2883d1d6a672e2de6727802527b449330c51631feee", "stdlib/experimental/geo/st_contains_test.flux": "772f8f9fd5af1c1feb2a93db18e5e9fe76d8451051b84a8d4d8eb8f35e7a5b76", "stdlib/experimental/geo/st_distance_linestring_test.flux": "6fe22c6281255714628970ce1a86a864cada048493ad29610cb2d84417e44fbd", "stdlib/experimental/geo/st_distance_test.flux": "67389bd20004eb7b30ce011a5b4589832d8a07e1b46e37936c9e20625e7aa224", "stdlib/experimental/geo/st_dwithin_linestring_test.flux": "55bbed72fd888fbf0ef9656bc8385422357d8c4883cc50accadce45fe069278a", "stdlib/experimental/geo/st_dwithin_test.flux": "35c3c3431009f82f1c0b09bcc23401ef4be4694bc2c6077526d76884dc9e62ea", "stdlib/experimental/geo/st_intersects_linestring_test.flux": "4ed99640e89f0476f76c0166d1a04d437d16262a4874603497b3c190b3b3dd40", "stdlib/experimental/geo/st_length_linestring_test.flux": "92e0754a252c68a27dd9d4bc7f1d0fd762bacd69a0d5357b0618f399259e6174", "stdlib/experimental/geo/strictFilter_test.flux": "10ad801dbc071ba9b4aad5814bd6d0f06b846a596ae997a3613a842a795474ac", "stdlib/experimental/geo/units_miles_test.flux": "c0d5af1e9b1f2b6c69794e203bba507e4b0baf64a08ad34340178cda7e11f793", "stdlib/experimental/group_test.flux": "fea2cdc5ae084cb292e6de1753f61779308dbeb105bcef186ef39876d7d801b4", "stdlib/experimental/histogram_quantile_test.flux": "2fd5c3e3bc5fb768e974f6a85ebc2f0a406b9648642aebcedbe0a3d2b89db86d", "stdlib/experimental/histogram_test.flux": "6ea809191ea7ca11e35f96a9ecb5c55a00cb6bbc6a94b5f1eb8c3f23b438b91e", "stdlib/experimental/http/http.flux": "98016c860bea9de338285c564c0fadb9ba2f5e90ed26738b2e24b428542654db", "stdlib/experimental/influxdb/influxdb.flux": "6c34174f44b754146510f4c19d31d43c3f5ec653d627cc1b2757b8b3de8fdd95", "stdlib/experimental/integral_test.flux": "61d2833678f785fb08d8d92db9777de78200aa8dd25bc8862d0449e9a62bafc7", "stdlib/experimental/join_right_side_more_cols_test.flux": "fab0cd9485269e1267ca7a86a354fa5863a72d94e03aa16460ffa616efb3dbc5", "stdlib/experimental/join_test.flux": "c1b66df73a0fad627ade769126fa81b1cffaee649b63166f4f565bd60dd635d5", "stdlib/experimental/json/json.flux": "28f1e669887d92fb21a495825b3d966a2370cdd6a2c4d1bf9ebb52e3c9ebfb7c", "stdlib/experimental/json/json_test.flux": "56ee6f5ec0eab036a89bcd74840e75ffc647477877dae25b8c73e54a96f69f67", "stdlib/experimental/kama_test.flux": "ce1ea5bdfc653e1020515de8957a3d8664e968444306f8c5fe9d831695b7b371", "stdlib/experimental/last_test.flux": "acccd34180a34ee95df164d9b77ec03c776fe543d9f45625abbe9fac4bf7e8fd", "stdlib/experimental/max_test.flux": "6d7eed1be23fdbcc98955e19454209f1146477e80dcba8ba87e08570b1deb063", "stdlib/experimental/mean_test.flux": "f0b1307dd1d0f4d34445bd5b166c596800be158415043c29224f255197ab9686", 
"stdlib/experimental/min_test.flux": "e38f1fbe199118fb081ee8173ae6deb5680f9aa63e634a0f14e5f91d92fdce4b", "stdlib/experimental/mode_test.flux": "bee8cfeefccb3ba1654ce22f0a043e7cfda4230a90bd9eee9d0a3a1de80a90a8", "stdlib/experimental/mqtt/mqtt.flux": "654a4a9a163464e7a23e6a3dfd1f8e81c6a164516372c241cdb18860065e9708", "stdlib/experimental/oee/apq_test.flux": "b84f79dc34d491e21937ce1f46ad7985254dcb782aff7ace578b1a0a70b182e5", "stdlib/experimental/oee/computeapq_test.flux": "93e7be1f222d7c4ae73f79187f171fbb3f3201eb432e9e388ccbada298a82a42", "stdlib/experimental/oee/oee.flux": "35e7bc400e530716f87e15f7ea2333d549c03ba01d1693518633ae574fb8e5c8", "stdlib/experimental/prometheus/prometheus.flux": "6c8234657c1f70469f87e2f81729efd7f00f6977bd5dff437b900eb0a6c0cbc5", "stdlib/experimental/quantile_test.flux": "bd9820e444fdbf4042236dad4e8f45eeeae6170563155f3f26cd671cff161490", "stdlib/experimental/query/from.flux": "5b3c21aa711551fa9ec102f0bcc3ab79efc70ee6434e4025336f991312e742cd", "stdlib/experimental/record/record.flux": "262c695f9fa71330d5e6ea50867531bd7f7452e234911f2a9ca2d43f5793bd48", "stdlib/experimental/record/record_test.flux": "0d5f04d61a427f54be6741bf744c1d4f1885215165c795c3ca8ef17359ae712f", "stdlib/experimental/set_test.flux": "4f6fce8e5db6601076b0c1f65d246ed621232ec0d705d2f5988bf121e161f8a2", "stdlib/experimental/skew_test.flux": "26fbb16fbe1114df87dbc59e03faa4c43a9fa9fe0daf87296f35bd798f4afbd6", "stdlib/experimental/spread_test.flux": "ba59e6f87f4a97e9de2c9716bc41eb4b653181327cb1f3e387a58381acb98611", "stdlib/experimental/stddev_test.flux": "8d1ac201a4e1e3bcd65d2e97a2746a8aceab8d8c877742048dd4254d1b70e9d9", "stdlib/experimental/sum_test.flux": "4dee647bd3ca6f33514e18b8f1aff3c470608e6ac29fdc89a590559de118eba3", "stdlib/experimental/table/fill_test.flux": "84fef4b34f9101736ffed1316afae7fbbe1829dd5873f212052a7a1da23af15f", "stdlib/experimental/table/table.flux": "5d0c9282d1fec1e6b6596550069f510e155ad3a02fa1cb4611217a893accfb37", "stdlib/experimental/unique_test.flux": "52ad71f18bf7fe517cf0024f31947155186fb73a4d3c9aca88a6a4e9a5b9095d", "stdlib/experimental/usage/usage.flux": "a2d409ef565fafe98e5a514eb9a963b82a0d98d74e1b5162b0b144c990fd25c7", "stdlib/experimental/window_test.flux": "9e26c21b78d937d98c771a7ccb1e60aa20af37f8312014f75d136ae01464aee8", "stdlib/generate/generate.flux": "fece60e20ab28408b9ca8dbe6ea09970426e3e31983219e503b1cfb9d07edf5d", "stdlib/http/http.flux": "9d374b9ef6118b965a1ea860d6d775346be22c9617d81aaa3259e86600d579bd", "stdlib/http/http_endpoint_test.flux": "4c7b8c80e3ecc0143c0bd3b554470c5c0d47b7109dfc5eb6d699872c0293c428", "stdlib/http/http_path_encode_endpoint_test.flux": "c1586a07e9ef4d57a50b688d8fb07909493e798be2b5124a3a20d659fbcf607a", "stdlib/influxdata/influxdb/influxdb.flux": "493a34ea763fd889decb08537736be69bcf19457c50500a805e6b6c4aab99c33", "stdlib/influxdata/influxdb/monitor/check_test.flux": "a63adf3ed9a5e3e8ad83b4478e8a39d31f2bbaf5005ea0bbb84b301aa1325950", "stdlib/influxdata/influxdb/monitor/deadman_add_test.flux": "dab8f711f5673ec836e6b0bfb451500998d4a963e1e066c4c5a937eb7897d20e", "stdlib/influxdata/influxdb/monitor/deadman_sub_test.flux": "68a38adc0989b573656ec4dce762181b127d65c8b150d22b0683ac01bc5cad7c", "stdlib/influxdata/influxdb/monitor/monitor.flux": "c68c05584301442b9595c92eaa95a77d8843902f2b3cda5d676163031a03fe6c", "stdlib/influxdata/influxdb/monitor/notify_test.flux": "1af9476f8d85ff2af16238a79695b4b2805c42757f5880e3940074870e3c31f4", "stdlib/influxdata/influxdb/monitor/state_changes_any_to_any_same_source_timestamp_test.flux": 
"aac5c5b9ba3a3a958d5e5376a20b55eca75b253a39422b4d68e11903e3dde1f4", "stdlib/influxdata/influxdb/monitor/state_changes_any_to_any_test.flux": "43467b488ebd3fd89dc42586835a6ec687fe6354941d7c9eeec841cb056a855c", "stdlib/influxdata/influxdb/monitor/state_changes_big_any_to_any_test.flux": "e8d232452111fda58dd87d984c7ec82d2e0207136584d0d99187547ba2cc9dca", "stdlib/influxdata/influxdb/monitor/state_changes_big_info_to_ok_test.flux": "d08f880fa7ebec882ed25cd945fee1400696e672cff630a1f9bb76d72be981d0", "stdlib/influxdata/influxdb/monitor/state_changes_big_ok_to_info_test.flux": "f79ba67274a6533465be9d1451a6d7c9fb58321ae408ac89dc50615b662e5a33", "stdlib/influxdata/influxdb/monitor/state_changes_custom_any_to_any_test.flux": "2d55b1a62c838610df67c28886499aec9bf10f132a167f3f73971a1fd2e66512", "stdlib/influxdata/influxdb/monitor/state_changes_info_to_any_test.flux": "79b8e0c0ae1ef56d35428e035b55b85f95ca54683afd5f536283c6002831b558", "stdlib/influxdata/influxdb/monitor/state_changes_invalid_any_to_any_test.flux": "795ffb294049b67a9a1da2e92957e028bf74900870c7f0c715cc182f38b9a2f7", "stdlib/influxdata/influxdb/monitor/state_changes_test.flux": "285e8cc3a818c24688432c80ea5fb118b7d74479c5bd49e26b3375466b255f1c", "stdlib/influxdata/influxdb/sample/alignToNow_test.flux": "68f4428ceab3396707f9dbcc2f5ed97d28bd0c4a160d51cbee72459a64cfa328", "stdlib/influxdata/influxdb/sample/list_test.flux": "c1c7d5ef934fb0cfff19d8c3b32fdcbee321a7af82a81fff075fd5874e3d246a", "stdlib/influxdata/influxdb/sample/sample.flux": "cd8ab2eef3a7fbf73b7e8dece8a7ad8429014cd35a9a18f665321f5a10ca5af1", "stdlib/influxdata/influxdb/schema/influxFieldsAsCols_test.flux": "df849dac66af2cb8d91398cb009e09cfa0028037b893c2e1a1811ba12959bf98", "stdlib/influxdata/influxdb/schema/influxRawQuery_test.flux": "46e6b4670162c92b8ebe73eab99aa3ee84ae19b097a761a68b88fe6913a1bf1b", "stdlib/influxdata/influxdb/schema/schema.flux": "1a7a83a4923fc71d28500bdc789d2a781e304f949707955476071a9685df86e9", "stdlib/influxdata/influxdb/schema/show_fields_test.flux": "fa3774d7fa544d49dd407b2e349a813aac86827dbaaf2cc72e92af2d7c1e5a7b", "stdlib/influxdata/influxdb/schema/show_fields_with_meas_pred_test.flux": "ce4eb3c7f9833be2eb5d7c85d1bb68d12344933a36915612b7d36903b096448e", "stdlib/influxdata/influxdb/schema/show_fields_with_pred_test.flux": "5edfadc36b9683c7d048d368cc29cf71aff6fe68ab02fb328cf9f5bb420cd33c", "stdlib/influxdata/influxdb/schema/show_measurements_test.flux": "7b5c2f54db839ad319c9bcd339affcd1bcadc3fc7f52a042567356776ab412fe", "stdlib/influxdata/influxdb/schema/show_measurements_with_many_preds_test.flux": "fc251f958943f77399e2e3ed2e159966d02673c7b0c9cf291dd16136df6c4461", "stdlib/influxdata/influxdb/schema/show_measurements_with_pred_test.flux": "29ffcd930dd587a18155f9670dbf4752ee0c797e65049d425f7b82d3ac59f189", "stdlib/influxdata/influxdb/schema/show_tag_keys_test.flux": "e7fbc9b2a51539bde61153f81c1dd090f72d14d9a3f064d276984c5c818987e1", "stdlib/influxdata/influxdb/schema/show_tag_keys_with_pred_test.flux": "ec81c53de89e57af7ac1a5147a3805db2b6e8fed77c0fb608c8f49ade0fde967", "stdlib/influxdata/influxdb/schema/show_tag_values_empty_test.flux": "93baa172562c75ed21bc6eb1780341844233ccac3cb09d09ed0c18c81b988c95", "stdlib/influxdata/influxdb/schema/show_tag_values_measurement_or_predicate_test.flux": "cb10723984afe0062985f3ebb715f33c02ea2d9e1a300803a8128d019f1bd35e", "stdlib/influxdata/influxdb/schema/show_tag_values_test.flux": "1c0f7b42dbfd86ce00eff863ed8c66d15a735127e37ca9fd8a86ec14fe8c5555", "stdlib/influxdata/influxdb/schema/show_tag_values_with_pred_test.flux": 
"ec643b6c1b149c18a9a17a6025245a80e8fb076dfd38acd65558d4c879358d4f", "stdlib/influxdata/influxdb/secrets/secrets.flux": "d4aca3c5ff3186665d37ba487e8de0ed13235b3c7802912b34e5502f6dff76e5", "stdlib/influxdata/influxdb/secrets/secrets_test.flux": "89d900a77a9754100794bb26af41df3d830815463e45b0ef659ade44dc1269e6", "stdlib/influxdata/influxdb/tasks/last_success_duration_no_option_test.flux": "87a10b9ac635e16940ce5820bc100272984804b23ca4666195fe210644e48aae", "stdlib/influxdata/influxdb/tasks/last_success_duration_option_test.flux": "8754fd8fbf39bcfff4b33ab0b182217f0d3ef8406ff09146f0872d1d6df64621", "stdlib/influxdata/influxdb/tasks/last_success_with_option_test.flux": "6b3596711c1a9e860856d9d0e8c6a5d805b6b4f2ac3083df88a9a94696f2a549", "stdlib/influxdata/influxdb/tasks/last_success_without_option_test.flux": "10f8e40dbf3185315511bc445c4a3a461cb22dc8bf3279b05ce2362a89a819f8", "stdlib/influxdata/influxdb/tasks/tasks.flux": "63a9ab3a73baefcbb7d18a6ef8d783950c6c65b612316d68215970274e4e4a0a", "stdlib/influxdata/influxdb/to_test.flux": "68259c515b4f6df55e45ab1902723b8215a139d9033f00fee7f616a024705e4e", "stdlib/influxdata/influxdb/v1/v1.flux": "1555b6d792a424b0f73c82a635faa86804ab32d6fc3652c1b6fe6efd2dbc5e25", "stdlib/internal/boolean/boolean.flux": "1c0a1564911dd5f4c35c17efb32158ab31c51b20ac0f25175545e50aadc87c94", "stdlib/internal/debug/debug.flux": "6b9dfb6f1f751079f1c4082399c43ced81dedef0ea0580a11a1568cc9c00e13a", "stdlib/internal/debug/debug_test.flux": "ba5dcde99aed4aa788255157d52e4e6261a61f2752d3e17fe6138e3c38087226", "stdlib/internal/gen/gen.flux": "9af35dba81d7cc0215204b1f9a9d8ee541dd54909e4038d8c0007e8f80860b79", "stdlib/internal/influxql/influxql.flux": "9f7f871e5d0f2e1ffffe9c2ed8b1e2e16858e2d35b7dec90edcee1c74011af15", "stdlib/internal/promql/join_test.flux": "33eaa02598afbdbbf3c43711549d782b8d3e0144131ad2a074833748172bab09", "stdlib/internal/promql/promql.flux": "4f15c9c32eca0e6f41f7ab8023254bb73aa343601297aade2bb8f0c35fbfb935", "stdlib/internal/testutil/testutil.flux": "79f422a79ef1a8177c73c35b9001eb15201021857ea065e6aa5e99ec009ced58", "stdlib/interpolate/interpolate.flux": "a2513d8080619c64eb0fe414d42ac66cd7ef2151c8a2e13abb89f3718c5cb789", "stdlib/interpolate/interpolate_test.flux": "3e5963a5176e17f7dc2eb7f69c9485bb635793e8e93469d6d9ee569aaa30d492", "stdlib/json/json.flux": "3fe957f73bed7ff5b49fcae5f7b82853ebb2ff74e3eed2af8a2eb1f546df88fe", "stdlib/kafka/kafka.flux": "214fb699bcde2db9ecb9a6673bf9f29c481ec51306bcf6316cec23324431ea3d", "stdlib/math/math.flux": "8bd66b62ba1f27f353622aed751a99bbbbc42bda0ebd5cbea6b3ee9cdc388a74", "stdlib/math/math_test.flux": "c3332c3cf8cbd00d64ee0cf36cb58bd156511aa16ec6356023c304e63553241d", "stdlib/pagerduty/pagerduty.flux": "ea8f481ed8766ca6355a505b2b90cf3e55954bfe0dfa8850a8a13eabdee24544", "stdlib/pagerduty/pagerduty_test.flux": "d8f9bd6ffe2de7af4270aa51a2adbaeb77405bc52310fb3fc4f77090362b77c4", "stdlib/planner/aggregate_window_max_eval_test.flux": "207ffcee32ec5933fb6e2fcbccd65513b40b97f01dc590b63995be70ec35ee8d", "stdlib/planner/aggregate_window_max_push_test.flux": "801b39ec4a6665a4a2808fe6e5b0f21df53b9fe4b7440c966ca51607ff83b899", "stdlib/planner/aggregate_window_min_eval_test.flux": "4bd4455b17135fd44508697e7cdfd78ed09feb3aa8dca7a380bef748862a3a19", "stdlib/planner/aggregate_window_min_push_test.flux": "775b643aca6f0c993598fb7f14ece36aed0b1fc1593114ce53372a4feeac9daa", "stdlib/planner/bare_count_eval_test.flux": "199a9b3d769eb8bc46fd6729cf3c956ab73ee3cf13f3ddf3faa7e20b9844b72a", "stdlib/planner/bare_count_push_test.flux": 
"c42694e0428555424e948f3928a75bb81ffedb5987dce4543fcb3c2e4fba601d", "stdlib/planner/bare_last_test.flux": "36ce743f67489f4da7ff6708788d33d8f3f96db00b63fb6d965bf3edb2309c27", "stdlib/planner/bare_max_eval_test.flux": "acf1bbc28634a5852483712fa3cc9453013042bb55e44a5d81d220efd55b2273", "stdlib/planner/bare_max_push_test.flux": "a0d0983f7c892e0bd2f06463e16ae2c6bb18cf60c9000388fb8b8fcfb73dfc2d", "stdlib/planner/bare_mean_eval_test.flux": "26a39d56247abe3d0f7e940093ce44b4d630650864ba6bae79739ad708084745", "stdlib/planner/bare_mean_push_test.flux": "9c5b6c29e0f76f16d0690c89dc5e0cb179af5b8f47eb4c39d22f23936bf9824b", "stdlib/planner/bare_min_eval_test.flux": "a4556a31831aa712f18055df34ddbfe38a0f3703f5b8f4c925569b83df288dd6", "stdlib/planner/bare_min_push_test.flux": "1acac982cc0813569449ecf24afa1ea493d1d3dc8ed4f326d30f702ff40d9882", "stdlib/planner/bare_sum_eval_test.flux": "0c1e1267a57d5466f8e6ab20fd969a774058e859e1a9f76a1af6248f83c41cd6", "stdlib/planner/bare_sum_push_test.flux": "4d2d43bcdcf50e99568b89fa40c57de9673bdf90570bbb9ec46dcc7f67b19c90", "stdlib/planner/group_agg_test.flux": "cc75ddbaa436a81a92c5fe4bbc0791f9d2b0c581c9303b443e9569e77bc6c64f", "stdlib/planner/group_agg_uneven_keys_test.flux": "f2022ea25d81c30fb7371213a7731164372b7a3c9260d85c9bf9d52673609397", "stdlib/planner/group_count_eval_test.flux": "7b31ca39e4b3591ee92028ef1a4a76debbd9740a0d6a22ee726a5549f31c470d", "stdlib/planner/group_count_push_test.flux": "c00f2e4e1450e8173e51f37e8ea260366eb9003735f66d6cfe889617939f948f", "stdlib/planner/group_first_last_test.flux": "c8f449b05e61c165921fcb089f7bee070ca76913914985918be80c6032680ef8", "stdlib/planner/group_max_eval_test.flux": "abf9fa533a0f3da221662c2de1261ac5d4202a844b53713c5d386453a0715411", "stdlib/planner/group_max_push_test.flux": "37124c059cffbe1f73e362ab5bb3867db388eac7d22096b6c42ea5bd533c906c", "stdlib/planner/group_max_test.flux": "1fca802e902cb87c8ae9e1748a6afd0a798db463805c67a174d65f1bcccd0dd7", "stdlib/planner/group_min_eval_test.flux": "3de6e6e6509f109aac703cab7554e03e28e20199231672ee3863a258dce56df1", "stdlib/planner/group_min_max_table_test.flux": "3dc3d0b4bcca69d52846fbe355263a42a710ff9527f7468be171f734c2c82a06", "stdlib/planner/group_min_push_test.flux": "28cd4b14c89f2b6a9ad3c4299a3478954c8e60edad5317056106a6b04831f643", "stdlib/planner/group_min_test.flux": "e9570fb4df72eef1580fe7d37bc6671d5248e2a5dd00c64668797e2d90bff94f", "stdlib/planner/group_sum_eval_test.flux": "13673f10b7636c422a8d92d11bc236e0732db8b95da25c5bab00cf1d7eacabf7", "stdlib/planner/group_sum_push_test.flux": "9e6e04f6b2f4dec65288853b6c9caeb6a89ead4c3ca78ed3bfef85b36e04a05a", "stdlib/planner/planner.flux": "3d9cf3352c6d598d0fcdfa533b1646085d1ae1d4e7b6115bf607575f45e2d0e5", "stdlib/planner/window_count_eval_test.flux": "f11fbcfb6b49a2362fc568037a6f725bb00ad53315b6bcb752655315e7d45a05", "stdlib/planner/window_count_push_test.flux": "00ead94f41dd5c78d8bb25b26738623ddadacddf52a8a454def744f555035e7c", "stdlib/planner/window_eval_test.flux": "ee50d69bf6913485b890142da7b97481a4882257bc9ae49287af286436b05468", "stdlib/planner/window_group_agg_eval_test.flux": "661c4e4ef17f989d517947354cd6332846885292f0270a8a7008cccba302948b", "stdlib/planner/window_group_agg_push_test.flux": "85dd6264f0f680649c00de8a2e8cf9b4500395d72f9e57dc5eece80226503cdd", "stdlib/planner/window_max_eval_test.flux": "9db98b138b3abfdc7523d030f72664a3291b61a8230e9d5adbfa79392c06774a", "stdlib/planner/window_max_push_test.flux": "a62cadbf218e52ea404959fbad89033f08ba21115c0f3c7ade7dea4e682d3c17", "stdlib/planner/window_mean_eval_test.flux": 
"b8a5ec9bbae1e066be3ffbeda6b1fa7a5e8e4c424b3d00f8394ea6b6a2f5e243", "stdlib/planner/window_mean_push_test.flux": "ae3d14785e6139263ec231a69dccbdc4d253d05f07aaf733d967bfcfb63af879", "stdlib/planner/window_min_eval_test.flux": "374364538a0effa67fe2a849e50c8e5a8195125d0add59026467a03e53936b34", "stdlib/planner/window_min_push_test.flux": "7679903761694f561aca93ea7656b989dd1b83fa5edd83c2d2d62daee490c391", "stdlib/planner/window_push_test.flux": "032f9e0d9ca14e464d0fa59db1d72bbf607285ee964fb663fd08080c4e91fcc6", "stdlib/planner/window_sum_eval_test.flux": "60f5972f387352c3a2d25878ab3aff5e7a9beff50a703191bc3964174ff926c9", "stdlib/planner/window_sum_push_test.flux": "bda59a0978244e20adea8fee12e1fba0c31b3bb41deace05bd50d6bf774214e3", "stdlib/profiler/profiler.flux": "b50f4aa92dbad42e0786ff81f0436394f78de601be57a4adb5346be40711a4e2", "stdlib/pushbullet/pushbullet.flux": "c7b86140064b1c342233f200afd68a843e2fb8998a7ed2a6dd370263602fa1f2", "stdlib/regexp/regexp.flux": "e5caf06d2f2df570246e75080de1b774d872cbe1f1147d87495fd51bb9dc06fc", "stdlib/regexp/replaceAllString_test.flux": "e1628e446aad247c1c8a2cfa353ddabdf7e2e8593e1889a689550d5e83e0fde7", "stdlib/runtime/runtime.flux": "d9ad6c2327cf025de4ef46e5b2dbba596da9e5a90fb3d10a3f9cae254e4f0866", "stdlib/sampledata/sampledata.flux": "60aa15e3aedca550359848344d331d3de84fad7beb5a3fdb5919e0a70067a2c3", "stdlib/sampledata/sampledata_test.flux": "4d34ab45f116586ec9ed4847486622e270c3cc42f2843c00f23e095cee1fc5df", "stdlib/slack/slack.flux": "323c7d40a79d1dad8e53f58d75dd02cf26ba04769ebc82bc0b7a4882c1b9e2ae", "stdlib/socket/socket.flux": "6cba525239585709892d40fdf950d663775049288632c2c98cf9737b4e3a3003", "stdlib/sql/sql.flux": "a985baeb4189d377074053ef62dc02a5c17251eb74cda5e1bbf173fb544b2577", "stdlib/strings/length_test.flux": "8bcdacb15035942bdbfb244a2cf0ec2a1032697349e2b55efd61debe8a0e994f", "stdlib/strings/replaceAll_test.flux": "fbb65aec9062a94055fef4b1ebf33f7f0c2a2f9e49dbe989d7449bf912a6a077", "stdlib/strings/replace_test.flux": "0d6ac7f9327b222c25857d2d9aa20c642fbee6ddae0ee8b2e4554e141d696638", "stdlib/strings/strings.flux": "5f1ec156b66a64935950b79b3753c9ef019be83ae6cd424992caec3c1a36d9b4", "stdlib/strings/strings_test.flux": "5e69d1087f9c4a3eb363f0f0413bd4fc2729028f860f30afaa0b5a8246248fea", "stdlib/strings/subset_test.flux": "1e0c4988dba3547e22aa42ad4f28b57984e65f88133d2256be42750b2c7eefd5", "stdlib/strings/substring_test.flux": "273bc2a58219a0d5cdefa2ee4ff63aed49590e107bd664fd9f4919394fb5eebd", "stdlib/strings/title_test.flux": "c435a1b035e3d772b53d0917262e70a54c8a6276f6750f0341d49e6631dbcdc8", "stdlib/strings/toLower_test.flux": "c6dfa403f0c1d31e148787b5c7362c1b0885d90cccde528ba0f538ba7a6e8b1a", "stdlib/strings/toUpper_test.flux": "433418ef48fbc2fc418ff7ee8f8c2157cc607cb29d9f3959b7813d5d27eee741", "stdlib/strings/trim_test.flux": "3fa0483ea23cbcae3330b6d0bc57075709cff798685940388e505513fbcd4951", "stdlib/system/system.flux": "830cfadc3c127d1291a19fb684f5f1b6d2f24221b086d427e64b9e64e6ea6d10", "stdlib/testing/chronograf/aggregate_window_count_test.flux": "14d1037e7c084852e352452e37669c2fe6e3b56800517bdbba2bf188fdd38f6f", "stdlib/testing/chronograf/aggregate_window_mean_test.flux": "b4568e2bb015afa736e6decb80ceb647dcfdd4b9abccc1c093c7f242f0028c60", "stdlib/testing/chronograf/aggregate_window_median_test.flux": "ae995c3f4806bdf9e768a77de64443c2e4070cb1e56204b96fecfc13f8931d40", "stdlib/testing/chronograf/buckets_test.flux": "6c0d7d6af6f980c8b80c91bd861f7c3a0ae76d22a0b980dab09ac55ef0b257ab", "stdlib/testing/chronograf/measurement_tag_keys_test.flux": 
"f70314bf79c7320ab70fcbf9eb564a7944a9d3dd8f97750bb604d5aff7dca9bd", "stdlib/testing/expect/expect.flux": "b9f44704b8d8713943ec4ed2291a954329388c68f7e866d6e5434ddf5c49d3bf", "stdlib/testing/influxql/aggregate_group_by_time_test.flux": "fcb72a48fbce4f8743f850201e64e05c07586582b4366aac10fbc6675b408816", "stdlib/testing/influxql/cumulative_sum_test.flux": "40b81f9800a76eebf14b08b2180a6ab26ffa2871057ff82759ff449db4264c00", "stdlib/testing/influxql/elapsed_test.flux": "73b150a7f1975874c6f52aff557ba4d032a418e77c37b67fa8bfd501eb9ba736", "stdlib/testing/influxql/filter_by_regex_tag_test.flux": "98583461818d0870954e613531cfd06785f8a227517e7305c1573feb356fee53", "stdlib/testing/influxql/filter_by_values_with_and_test.flux": "915a9c95db26d60a24cbec5e1aae8c7f049d48a62b60ada61bfcd74fa0e66ce3", "stdlib/testing/influxql/regex_measurement_test.flux": "c0e042eb64b4c662a6c5eafadd2496e26fc6fa35fe39fff00a3b1d4d560a3958", "stdlib/testing/influxql/selector_test.flux": "7054626be8289947333e5f591a2da98e5f21b12da3c9308666aa8b8d4f3167b1", "stdlib/testing/influxql/series_agg_test.flux": "403bf45a55e71b4c99b0aec8d2f6f6817ac414e3c29346673f9139be20dafdf5", "stdlib/testing/kapacitor/combine_pivot_test.flux": "2725204fb7d806913af08b960feac9faa15873ad1bd51cc5e08b66edc8c1679f", "stdlib/testing/kapacitor/delete_drop_test.flux": "226b9788746abd63270a0c0b0dad891d00e1b701a109d700fa1c7ddbce901eac", "stdlib/testing/kapacitor/eval_map_with_test.flux": "4cc417ab271580edc1ad6f99fe0cf4dfe2038218e828f0752e79ccf3fadc7805", "stdlib/testing/kapacitor/fill_default_test.flux": "c58170d0e41aef14cdab6e36d5570255f9ccec670897b68efc26eb2afaa7b288", "stdlib/testing/kapacitor/flatten_pivot_test.flux": "8a91422b07518e813c9790e98078b8f04d4c7edb80ac66ef2a76daeb62042604", "stdlib/testing/kapacitor/noop_yield_test.flux": "d84190744228d284e8e44729ce474b73eb69e6827e68b7417563e34cb9765e52", "stdlib/testing/pandas/capitalize_strings_toUpper_test.flux": "139c0c6f9a319530584d3e7b684ea45e04d63102897417b41c6ca29cecfbe37c", "stdlib/testing/pandas/cat_strings_joinStr_test.flux": "d4af5aab8667d86ff33f8457725e1e17add2fca49ba9ed7e7ff0ddf59bb635dd", "stdlib/testing/pandas/center_strings_joinStr_test.flux": "bece7a98182660daf0460477846b334b2b621c401a4a0d5869591a27df488407", "stdlib/testing/pandas/contains_filter_by_regex_match_test.flux": "8e2c0ab8bfba1bb2c66307ce653640fa58297818dd200a33f0ae030bf8efb3a5", "stdlib/testing/pandas/count_string_countStr_test.flux": "ec9b1d59240ff2a91363b8654b1faca5f8ed911ee4c42e2d5853f45fd71f6e66", "stdlib/testing/pandas/endswith_strings_regexp_hasSuffix_test.flux": "dfeff59bb79e1b9a7ff56d3aae6ade14b3a1e8685bf47ade7b502edb3164718e", "stdlib/testing/pandas/extract_regexp_findStringIndex_test.flux": "a856dd8673ec6a8f5cefaf66d99d8887be4bd9e184c7dc4b7427735a07619118", "stdlib/testing/pandas/extract_regexp_findString_test.flux": "cbd50099266928fc4432bcbc2677a5c1ac1e2662c69883deface8e0576333bf8", "stdlib/testing/pandas/get_strings_substring_test.flux": "944ceaa8a9f455eba4f9c5fdaca569a961ca7267bc4a739cd721e000b671ee75", "stdlib/testing/pandas/ljust_string_joinStr_test.flux": "6a71cd6c686c1317778b57780f62f40b0b4a48771750b9b4b35a2a4ff67bf956", "stdlib/testing/pandas/lower_strings_toLower_test.flux": "0959e6f76a7dc96788c67249633be20c9bbfbef20480a04fdb8627f293d54412", "stdlib/testing/pandas/lstrip_string_trimLeft_test.flux": "00b17815b36b449ef7cfe719f7c63a8d2bed3b4e3ee0114bbdcc98a7ac1a0316", "stdlib/testing/pandas/partition_strings_splitN_test.flux": "03dca2bfd42b1e2337a572cbd4229182b012e093590c8864915da148449662cd", 
"stdlib/testing/pandas/repeat_strings_repeat_test.flux": "aa3ff8ef847bfeb97e764f9b45cede19499637478f059f2d09ee0342bf33775a", "stdlib/testing/pandas/replace_strings_replace_test.flux": "e621b872aae5f1462997387cd4bc48d278f2ed42875ed4474c959f7b3bcbe6bd", "stdlib/testing/pandas/rstrip_string_trimRight_test.flux": "71110b7b23ab635f58d907ee1b8fe6c6b491aec6b50bf5e8ccab411537be6b4e", "stdlib/testing/pandas/startswith_strings_regexp_hasPrefix_test.flux": "a3a119c4b98619b332d11b4e2fefa2d71fa2cc81ddc0ce26b3181e0a60445d8a", "stdlib/testing/pandas/strip_strings_trimLeftRight_test.flux": "2b8478acb300f8ae7ddc2c2ad07ca7134d00474a557182f823cca45da512e7a8", "stdlib/testing/pandas/title_strings_title_test.flux": "8e8fad3140e589b8dd90ef606dd41805475081230b410ba7bc9339ed79756c12", "stdlib/testing/prometheus/histogramQuantile_test.flux": "afeda5e3fba3d7ed26feea0c5f6b18733cd2439d0c48e018b626090b0d6ef403", "stdlib/testing/promql/changes_test.flux": "86268bc11fdb74327c6f0c63ed37c91b1885fd16798ed6ad6d484304dd1a97a9", "stdlib/testing/promql/dayOfMonth_test.flux": "04d2868ccb807f8efed6b54f31a0244ecc483f61481ca0025230ca4cae54947f", "stdlib/testing/promql/dayOfWeek_test.flux": "84947d568f217da8e1a829a58565a6e53049439579019632d04831bfd62a1790", "stdlib/testing/promql/daysInMonth_test.flux": "b03cf6023a05aceaf3b3093b90c0309e27090f2ec9e0f0db06754d83d248d06e", "stdlib/testing/promql/emptyTable_test.flux": "94b08f050a5741bd63141e2e7b19c2a3eda28f7981873576590a853b2f3ee4d0", "stdlib/testing/promql/extrapolatedRate_counter_rate_test.flux": "d92f6e960d0fcb0e2e7ed3f5625487379c4a42b1cd6d6848ce1b3be31a43148d", "stdlib/testing/promql/extrapolatedRate_nocounter_test.flux": "4cbf32d7fab2ff2ffd1773bd5890f40bcce2345f245e1764f17be30822c2a14c", "stdlib/testing/promql/extrapolatedRate_norate_test.flux": "d4fce2f430bce9c7a01b1b358a3c26b8051f1f9af15449458c3c0a391bc251d1", "stdlib/testing/promql/histogramQuantile_test.flux": "53a1e733ef85290a4c02beb267bc13b703516740ffe0bededba3fdb5894c55d4", "stdlib/testing/promql/holtWinters_test.flux": "bd4c6c2fef230c87129aa1f0ca3f56f793b9559597cabfb671c6eaa86713eab4", "stdlib/testing/promql/hour_test.flux": "6b60cc41956a852f519f1b702e464f298b63c8bc364c73c9432ae3a09c4547cb", "stdlib/testing/promql/instantRate_test.flux": "50faf496e39770b3d35c3a08c3f54dae1bd4f0557031543c1d57766b175797ed", "stdlib/testing/promql/labelReplace_empty_dst_test.flux": "d672db968855d3545eea21158ad84ffa89c153ae4a16ad763cad177834367693", "stdlib/testing/promql/labelReplace_full_string_match_test.flux": "2eecc880db706872f63f9710d48876fa6f69d4eb223166904ff28c0f991873d1", "stdlib/testing/promql/labelReplace_multiple_groups_test.flux": "c3caec2046b1eb78f83a2bda1369db1c5abc1b131f56a1c5b6f0f3ee0d1a2c43", "stdlib/testing/promql/labelReplace_src_empty_test.flux": "015682cf3a71ac6b75194865ee4babda3ced34f478da91ab70956cb3267646cf", "stdlib/testing/promql/labelReplace_src_nonexistent_test.flux": "c79124547f9b43eae551b83890668cf247b6406381814e2e466869834b192245", "stdlib/testing/promql/labelReplace_src_not_matched_test.flux": "36660c50c2c3a5d27681f10c7abd7f29c603b28f008142483daa1056173887a2", "stdlib/testing/promql/labelReplace_sub_string_match_test.flux": "6a095739fc9db88372d4a9203da3365715d5725c02b99d15a810a0a710a97871", "stdlib/testing/promql/linearRegression_nopredict_test.flux": "2ccdaf11eb8ba2a6a1fb581edb3334d5e87838bb7719ebe625053c4d7f29535f", "stdlib/testing/promql/linearRegression_predict_test.flux": "16507403e127f695f2921401133c4eaa434524b91362c12b306b2d0e768bc507", "stdlib/testing/promql/minute_test.flux": 
"6ba1488509868090ea7516fab1c4c440591715f72258971c60ad0a199e25442e", "stdlib/testing/promql/month_test.flux": "fbd1e5bb6e1c921b457f01b86ad010f9290bf7dd5ddbabc57aa7328d4afbcddf", "stdlib/testing/promql/quantile_neg_test.flux": "0db2ce6112d9b9b7790a36c2a4cb6a153ef330a7b1bda94a32218c78f8b21fc2", "stdlib/testing/promql/quantile_pos_test.flux": "f4d7c83a72d11fb81ba0e80c9df8c52a1269e88fddb02a452bc19d323a89aa81", "stdlib/testing/promql/quantile_test.flux": "93054c48a0594db8bcd9af58d449f35a972e4464c0e979f9f7ff756502390930", "stdlib/testing/promql/resets_test.flux": "64c99bb2b5ac4678e371878c38d4f52c856821ea3dffdcb819861ee4aa6f7adf", "stdlib/testing/promql/timestamp_test.flux": "99c0703dff1e3d39278b89bd309419866f5e0e236dba339791720dbd2e332d34", "stdlib/testing/promql/year_test.flux": "bcfec01ddbf2b04857b8eeed30d8076025d7b6710ca5339f0e0e1c79f49ed730", "stdlib/testing/testing.flux": "b04c8c00be3a1b99dfdf8a10e73f5086a5592533377159c9bec4def236db03c8", "stdlib/testing/usage/api_test.flux": "050c8ef74a489e3eae8d44134869ee80aa2280d0bcf4a2c2e55e3e7446b5952c", "stdlib/testing/usage/duration_test.flux": "44c33cb3fa3c33bf0c20007f11bcae6deb4565b2c183517ca0e6527138f9e0b4", "stdlib/testing/usage/reads_test.flux": "11c6041a1dd9d825004cb12928fe429adbf6f5e2fa2162dabec82ed8fe714a44", "stdlib/testing/usage/storage_test.flux": "dca86b9d7ab151d808ecbe5c6661e9a0d24e331455e222817cf70bfc71054a47", "stdlib/testing/usage/writes_test.flux": "ef158c8fe85d44e938e592c6c3dfadb992b7579fb9fd7e53905e858cec578d1a", "stdlib/universe/aggregate_empty_window_count_test.flux": "dc0c323d3b821d6f87fb8a29aff1aef920b5c818ff0bc532eab23c48979bcf6f", "stdlib/universe/aggregate_empty_window_first_test.flux": "844fc0a5a22d11b4023782ba12cf0f37db0de07a024a5b6f4bb905d88977249f", "stdlib/universe/aggregate_empty_window_last_test.flux": "ff4d169fb54dc75e05aa6f91aff617cf8e6002400a79d3b35b3bc670329f68a6", "stdlib/universe/aggregate_empty_window_max_test.flux": "3bd3667f858df50060c835c5e93130d1bf4236151353327debed4f607dbb9922", "stdlib/universe/aggregate_empty_window_mean_test.flux": "9c8531a217b80090b4afd2133ca25831eb93ec6d4e0b41aa01c298ff34df636b", "stdlib/universe/aggregate_empty_window_min_test.flux": "6efda61580933ea08747235a60addb055970da11feba09899ab48dd5d9f3d02c", "stdlib/universe/aggregate_empty_window_sum_test.flux": "f32929e9fbeb35425548e822cd206075281bea6532a69ce14c55f10d398a642e", "stdlib/universe/aggregate_fill_window_test.flux": "b0f913dc3242c8402e309604721ffb9b151b5eba314bea3be3159b070a3e7c60", "stdlib/universe/aggregate_window__offset_test.flux": "63aeb7d720093d231bfa129d14ccd3a7137300f7c4c5dcaa2023ed123310bd4d", "stdlib/universe/aggregate_window_max_test.flux": "02b052656f8eb28a620f420012443a81de9460d7964bf677c49a46fd2dea48f7", "stdlib/universe/aggregate_window_mean_test.flux": "9fea8f96e351219b9995b7cdd5df8bac19f47d3872ec45fbfa08743990b13739", "stdlib/universe/aggregate_window_median_test.flux": "907bb969a4df3d7cfd790cd22deda26061207516506707490e18104965e29f0a", "stdlib/universe/aggregate_window_test.flux": "40322386f6a3309092010a48bfd680d73d5600fd137bb0794331b0cd6214a66d", "stdlib/universe/cmo_test.flux": "3979f3ddf24ee2dae513ff6e62012dd0ab7e8cf7bd1922cee97998dae05916a7", "stdlib/universe/columns_test.flux": "40198be1aa62447586cea66ed13386fb47b72af999f1a1bd44bd1bf0478e0f6d", "stdlib/universe/count_test.flux": "f3131502ba9527a76e6aa27df918bb81ce8790410ddc2decd9e44bfc4ad8e61c", "stdlib/universe/cov_test.flux": "9a72b214c90b1af95804bc58d52e74c958da8acd31296e5014bd81b8b85ffb27", "stdlib/universe/covariance_missing_column_1_test.flux": 
"46f4105a9f82632c5c594a55a9154831269a782a63c82808e38cf48dd7713170", "stdlib/universe/covariance_missing_column_2_test.flux": "149b92ab3c361e3b8f2896fb569eeeda84813170c2ddb11ce08db929fada6a71", "stdlib/universe/covariance_test.flux": "4e98e5c715d4f2754cabad7f536cb1e7201798730e36c26a816df6da2b51b094", "stdlib/universe/cumulative_sum_default_test.flux": "b0255c385793d5789f0195d4a99eb3f8603c762f4a21612131b2783ef0fdf6ac", "stdlib/universe/cumulative_sum_noop_test.flux": "978fb401a07208ec88ed4dd1b579a9d94b0a25da4fa535fdf943ed43629d3b64", "stdlib/universe/cumulative_sum_test.flux": "6b784279023c0724aa616e8676f83dfaf7675fb942294a89871234b568afcb66", "stdlib/universe/derivative_test.flux": "3a9f7025e539efe83a31a4cb9a8e776a5d8c54761bedbe812736b2b2d23e7a4f", "stdlib/universe/difference_columns_test.flux": "6d71a13c5ca1a6a48b459401844c3e8e677e8363f86afbc95917db91364d4eaf", "stdlib/universe/difference_keepfirst_test.flux": "42b501c425f0486694735e7fe0cfb23fe5ae0c8018571ebb4b3b1a17699ade49", "stdlib/universe/difference_nonnegative_test.flux": "d68ec7c37768dae7829da8210e7441e02e44280cf619f28a069b8353d3bdb736", "stdlib/universe/difference_one_value_test.flux": "6f5f880761c17009657bac235a0d42ae16042d4013a5a8043186a04cbc910920", "stdlib/universe/difference_panic_test.flux": "630c44cab83bfec0911844816f90bd944c70de52f24b9841faf200864f568be0", "stdlib/universe/difference_test.flux": "badb73954fe9832ced4ed474322671e38a8c6e9f17e750f78ec23d5dd6c3303d", "stdlib/universe/distinct_test.flux": "e784b93ba8e702e828dcd0bb5c4047d0c5147c2557f2221a6d98e0411ebaf75e", "stdlib/universe/double_exponential_moving_average_test.flux": "01f4a233dff2148bc8e0b431299b89bac3fa219581e8946c772808568509fbc5", "stdlib/universe/drop_after_rename_test.flux": "d1d9fb2ab9bfc51eed0010ca229f10456e82cbf1a170d5658f854c1dbe94b89e", "stdlib/universe/drop_before_rename_test.flux": "a5d5d645effa9754566eaf1c49adb652fe7df9a3dd143c737290bb9ad03e473b", "stdlib/universe/drop_fn_test.flux": "c305976982f77b69007e266fa3340e6eb91baf15efafdfe2416ec9f264c007fb", "stdlib/universe/drop_newname_after_test.flux": "f8e837749dd17a08ad10ca32cb820bb0964ba92d09044f7c4e1e72ad3fca7d73", "stdlib/universe/drop_newname_before_test.flux": "2fe7cd959917dcf185a0a98b70311c81cf829fe81ab73cb85004db1256087e88", "stdlib/universe/drop_non_existent_test.flux": "751af09c3addf37d55277529f633ad973cc0413c296979fed4e1147d8c597b4c", "stdlib/universe/drop_referenced_test.flux": "90ea049b1c7becdb798d330ac60ed6c7adece1ee7ca6e574519f3a39fc200e31", "stdlib/universe/drop_unused_test.flux": "0fcd19d4320b3abd3781ff658bcd54db804e26cf71d021700b8455c1e95b3715", "stdlib/universe/duplicate_overwrite_test.flux": "2529a90deb963648bf2fa49dd17481a9d101f01bab94ddc815af0d6e7dcbf83a", "stdlib/universe/duplicate_test.flux": "5611ec3ed4586b7417bce54af6589033be692ac10ac6a4cac16b19f24089e1ec", "stdlib/universe/dynamic_query_test.flux": "4fd53b9c3f194fc9941ac0b59fb23586e610484db09004e076805aad1b1551e7", "stdlib/universe/elapsed_median_test.flux": "b2926ee7e31e44a2865e34434a7f95bb8b252becddd1f978f0027dce35be1d55", "stdlib/universe/elapsed_test.flux": "db5e1eed1dbbbab147ab3814addaf3890bcaa3e3fa313bedde3d966ad4611a2e", "stdlib/universe/exponential_moving_average_test.flux": "78b7df95dc25c1e537b2e5d71d16d7d92749b7e17f7b93addae9f5a51bc00218", "stdlib/universe/fill_bool_test.flux": "7fb53fe7d994f99562622e02faccf46bd272fa8429ca14e8bc8f38ae4984061a", "stdlib/universe/fill_float_test.flux": "9b9b8e0b165ba7d267a41c400b2edd25b8dd91438ee2f0419432274d89b96333", "stdlib/universe/fill_int_test.flux": 
"2729088396e5c732bf383f159850f738f9e0abeee73e772de9b75fd359c00b9d", "stdlib/universe/fill_previous_test.flux": "4195082bbbc39b940a542bee7b0cc773a44cf230efcd2bd9bd389b5e5367e896", "stdlib/universe/fill_string_test.flux": "ac85e7de36c89b68eafa049cad707f8811c7d152803d99b0bd68f38530628a16", "stdlib/universe/fill_time_test.flux": "1a5595a7377f2c2c76f3e7874b238d1f513b0389464ed9e768a099bbb8bcc344", "stdlib/universe/fill_uint_test.flux": "c1fb66072f49c6de66140b029083106e641e417d2bb49721b90aa7a54a39f5e5", "stdlib/universe/filter_by_regex_compile_test.flux": "a09ea839cdc535d09d08e1757665875929cad84f0186e2d74cda664b3b02da73", "stdlib/universe/filter_by_regex_function_test.flux": "1ea47a78fbfca97808579eaad61f15eadf0db1d051e739a6f64378ec84c57e58", "stdlib/universe/filter_by_regex_test.flux": "f872566c2fee06f09f3db32f59f8040a6d104a8673b7611149383bcd5ee46905", "stdlib/universe/filter_by_tags_test.flux": "9ea9ee3fd4bf7ded9ab2503f9848e1ed7e7a6cedfc361548215cd133121b9122", "stdlib/universe/filter_drop_empty_test.flux": "3cacf5dda42347f6f36c8e948b999c329d58c8d9d9ade56c804499b73ef6422a", "stdlib/universe/filter_keep_empty_test.flux": "0f2bcb40e94556633999f385df8b269c5a8de15b5f895d752c78545e44496a44", "stdlib/universe/filter_mixed_empty_test.flux": "790328e57fb7cc220e3c2fda6020e6136aa3c1c25f4caf9d92ab67227f49b50a", "stdlib/universe/filter_partial_pushdown_test.flux": "f4618dccf1fc39108049bbcdbbc370e219977e4c839cec078979dc6ae2da05a7", "stdlib/universe/first_test.flux": "ff178a025faf612a011a44d0205bb76fc0d1d6acb52949aa6005126acf448a8f", "stdlib/universe/group_by_field_test.flux": "67d5eb382cd5a67c672e8fe9f81b9d37fa42d9d6e531792cbe1def55c141e600", "stdlib/universe/group_by_irregular_test.flux": "b74944c8d826a930fcefa1f695d2ac22f9bbaa62f6b5dcc501c3c88001131d62", "stdlib/universe/group_except_test.flux": "67cd762fca4468e57b16a81119bbf6d413c3397cd841490014d25e9870dd91c1", "stdlib/universe/group_nulls_test.flux": "1ffe996153793de8f3ef96f88410dda67cabd788afdb357c9420eae376aee5cc", "stdlib/universe/group_test.flux": "f3d583650276343c119f37628c97182e8ea90821c44262b4ee65ab5a9cfae270", "stdlib/universe/group_ungroup_test.flux": "4bf56972b81aeb61e915148c00350a39873e71aa194fe2ba0823fa9f249ecd45", "stdlib/universe/highestAverage_test.flux": "19493504f36aaf8fc579339f22f61940b0343848cb893d862276f1937631968e", "stdlib/universe/highestCurrent_test.flux": "89504efb0dadd28b65e035e458a575360bf8071904aa6f000b4203070b61c373", "stdlib/universe/highestMax_test.flux": "9d6bf74836764bac33f18ccbe26f17a125d256619ad8437d7cc84855edf8fbd9", "stdlib/universe/histogram_normalize_test.flux": "c637aeaf8d8749fd7efbca7d63e4135f52d372916388fb7f8d956532129848b3", "stdlib/universe/histogram_quantile_minvalue_test.flux": "08efa0df7bc0e5b9dfa66f2f94a25bb043a32c339e9341727215e540fda2e3ba", "stdlib/universe/histogram_quantile_test.flux": "67f97571d576f183a53977323538eff2ccfa52e91a2655dd20f43a88b98c742d", "stdlib/universe/histogram_test.flux": "2005b97db044f6885d6eee79a8af8ee428c626168c3acd2327c9fe5898483226", "stdlib/universe/holt_winters_panic_test.flux": "5114247dfbc4cd1b8b8015b4e76e8f6e05a3aa5144fac73b10ddfb9b045e690a", "stdlib/universe/holt_winters_test.flux": "2276c960ba6baf6c539c0e6f16148b9e24b4b850cc61933b1e3a4e8a33d81d42", "stdlib/universe/hour_selection_test.flux": "6702470f67efeab149332876c4a02e78c86ce1f03c855f4fd4eb1cc0a09b6946", "stdlib/universe/increase_test.flux": "e3358175cdc2c0d6cc755093822f5c8685008efdf45919572179c437b848d77c", "stdlib/universe/integral_columns_test.flux": 
"f9922394511d31323a05ade5082e41926f6f0e3781705ba6166764664c6a8396", "stdlib/universe/integral_interpolate_test.flux": "dddda2dad8c07891025868b8a0c08fa6aab56e9fa9f83a27c7cf18fc983b8638", "stdlib/universe/integral_test.flux": "439ce6fb39d8e214445fc0e87963846af91efe0ca67300cd924a896423f6b96b", "stdlib/universe/join_across_measurements_test.flux": "f56e51db608f1b04f01215279d21affd83b5802068794cfd3730272306e5b5d6", "stdlib/universe/join_agg_test.flux": "bf89dbe3c400418488af058e8e5a56c7171678ee2028607a6310ddb7f0a76625", "stdlib/universe/join_mismatched_schema_test.flux": "78c5c1b3da6fa074122641b38166cfa6f06d8f3005fd2c28a4749c7721c6957c", "stdlib/universe/join_missing_on_col_test.flux": "66d8b8f0f9c4c02bf76af1424a8a03b0bbbda2cb06542c205ceafc564851ab16", "stdlib/universe/join_panic_test.flux": "165809f897023f15cf8cd9499e83e9ed9f2c63b330f984a418f2588c1606707f", "stdlib/universe/join_test.flux": "e5d894f43ba93b0cf56b010a9797be99ed938a403673c1ada627e46a500dc8eb", "stdlib/universe/join_two_same_sources_test.flux": "e5679c7111377d04ed76394e14c8ad06a693aa2fa68cebe227c7b13156e47fe1", "stdlib/universe/join_use_previous_test.flux": "b161efc97bf77fb7fea4f25c69408132c7e34e2b2fa6cc3b1d203c85f3d47801", "stdlib/universe/kama_test.flux": "6e8b63bf2a3dcfb797b96dcf2eeff3857740d8e8cf1b8ee56de7fc9a8af819b6", "stdlib/universe/kama_v2_test.flux": "49251c487c9957b724c83543d2c93a57b9a8efc82c69ebbc1a3f4196d7bdfef1", "stdlib/universe/keep_fn_test.flux": "a47cc685c0016b3d9284454d2c7b0fdcc83157e78413df209ed2576ef749af42", "stdlib/universe/keep_non_existent_only_test.flux": "fb8afa848afe609a25f5c7fcc745e97f46828c55a8684b25b9db401dcd78fe78", "stdlib/universe/keep_non_existent_test.flux": "dd472d41bd247dd85e0a01d7eba42c295b22ab6253a74080baf86904d645ab0d", "stdlib/universe/keep_test.flux": "3ff71046d81733cd586a453e6066ec06ec9bdfe873c406a2b2047f9201553c2e", "stdlib/universe/ker_test.flux": "49c70c8d8cc8199ee0785cac38a2dde9618fe10365e2e6fbb0dcb6b26eac1153", "stdlib/universe/key_values_host_name_test.flux": "a1df8df465fa2f2796e0000f0976934885766c4c7d008b12f60291956e086622", "stdlib/universe/key_values_test.flux": "f53a8dbaffba561c556502a4a2c268101f19445ecc47b1af92cc03fc58c01f7d", "stdlib/universe/keys_test.flux": "8e83c4b385c2751b9e04c78ef0ee02bc823943cfea83cec2d1851e2505be56e7", "stdlib/universe/last_test.flux": "633bb15ecb96685b07f37a96819765795a9baadedeb2d0dbf2f2db0f057db8e7", "stdlib/universe/limit_offset_test.flux": "a4e7752c03f6cd92386612034f1e382558576d663e305ac9b28d2d7fee39e212", "stdlib/universe/limit_test.flux": "b4aae75db042292c82f1809df60c4963bf98d641781d932f7cacf0de7268e147", "stdlib/universe/lowestAverage_test.flux": "aa0e8e9e9a282dc298fc33c12267605ada3ef081896fe3c2100cc4c962ce33fd", "stdlib/universe/lowestCurrent_test.flux": "d33ddffd0b9191dca6d9bd755276a26c2133574c60f4d30b16a357232d291baa", "stdlib/universe/lowestMin_test.flux": "6b0f07e5b7f27ad48411f210c9beb11f6c07026c0e25cfadcefc8778dc1410ad", "stdlib/universe/map_extension_with_test.flux": "e42b8e19fd5e6d0a42557c5d6ab2f7a4e74957fe3782d7b92f6b5b70b807c53e", "stdlib/universe/map_extern_dynamic_var_test.flux": "5ab589978494ebb053cc2eb410b47774d5c17f62c069f5bd50452abea865ad2e", "stdlib/universe/map_extern_var_test.flux": "d65d0fb624043fbddc0327de1d6f1f54e0b67ff5a29d92ddb43aec2906ee1d77", "stdlib/universe/map_field_type_change_test.flux": "be046fc8a5da2e94e32e359de0dcb086ec8fdae994b4b6e7cea1870d772e03ba", "stdlib/universe/map_local_var_test.flux": "98edf16916c7898d29262dcf9bc4def088a49a2ce2229fcff5132f5084cf3a9c", "stdlib/universe/map_nulls_test.flux": 
"a0f83e04b2f2b6c8fc6d9279a6b307e9480b18a7cb06a783a4ceb3e05933bd11", "stdlib/universe/map_polymorphism_test.flux": "5da0ed0d052e41afbce40635a5b0a728a7e873f7220f424be5ea3ff4b47ac9f2", "stdlib/universe/map_shadow_var_test.flux": "ce0e1cd6ee6568dceb40c0f5f28ec0a4ccb7f109d468ae332d0d908bc227b36e", "stdlib/universe/map_test.flux": "ca82fc25383ae080a0fb9c0f0a97cbaa5e596f04f11ea9ef2843cdbc5920c085", "stdlib/universe/map_with_obj_test.flux": "c293cd3b7ced888ec64491311f179b9f80e4f36f61dd6a265fa95e58268a6bed", "stdlib/universe/math_fn_constant_test.flux": "9256357ec8737ebaa3e69a76b5934bdadb397ca74a6606b7947d73862bf3f83b", "stdlib/universe/math_m_max_test.flux": "4d676644b6e31597b9cfd99b5599675ead75e9d9c7d5be27d0abe3210b9fac6d", "stdlib/universe/max_test.flux": "9ddecf204c5208a030cbaa121a3a72a5aeedcc9630853f6595af622366160e82", "stdlib/universe/max_time_test.flux": "f692f42908f5377a3963cc414d401e117bf51fe6f8e731fbe9f40d7f9f078d73", "stdlib/universe/mean_test.flux": "e86b0020cafca013d766b27c92761b77e70988b5679ec8efdb41409442b2379d", "stdlib/universe/median_column_test.flux": "3bb1b538bf583d3c1e4f9ea9092b0d2ff5da9c0de85852198fe65488c67af4b6", "stdlib/universe/median_compression_test.flux": "88de5d5f073c44cc31c4249974181a8390c4273d4ff871a8ccb1b2ecbbdc735f", "stdlib/universe/median_method_test.flux": "ed735a49da1b6245a5062082fdcaf7eb13bcca1208e97971f08aebf52755aaf4", "stdlib/universe/median_test.flux": "dd5894305a34ad1e1fcfe99e28410bf837b8d5debc7045162ec5a60b28abe115", "stdlib/universe/merge_filter_flag_off_test.flux": "aa63881c5444cd4f5e0117c53d2bd16f4abe9d82b3b8e7873bb78d48d23ed56a", "stdlib/universe/merge_filter_flag_on_test.flux": "1e503bd584894d977f417fd915f65d2fba81f12f920b8950d73ec6ed61c25e8d", "stdlib/universe/merge_filter_test.flux": "828b44ced02359b2e7d8785c16393f8b59beece6bf177e8f614608e88a4d5bee", "stdlib/universe/meta_query_keys_test.flux": "b569d8a901f996cab0f17f0cae46e92d55c3bc6519ea6caa1bf273b47bdabde0", "stdlib/universe/min_test.flux": "863907f99186e67cbe957cbdfc33033cb4245e2cfb94c8ae7527118076ad421f", "stdlib/universe/mode_string_test.flux": "d1dfc9ef936e63ee90222f0adb3b5b9cfdb0eb9b24ba4d5e439211f33f04a888", "stdlib/universe/mode_test.flux": "b566fb6638c56e9874327321feac3eadc410ae7e6de4659053b9f68e5c23b347", "stdlib/universe/moving_average_test.flux": "c8aae21fc434b0feea75a1379f11d07362ed1bf868f279a8265adb04f522bfe0", "stdlib/universe/multiple_range_test.flux": "22e1511b1d36e5191f7df0b9b8930b032882fda93973410177697e6fbfd27a34", "stdlib/universe/null_as_value_test.flux": "a4d58d2f95d52fe682879096dd8d53f791f394aa3b4360995524918588bc8e72", "stdlib/universe/parse_regex_test.flux": "77d40062e65cae97e9afc495fd328e8450f57bc9e4f5314afb952b0b36647df9", "stdlib/universe/pivot_col_order_test.flux": "251daa82288406a4bc89c3db8a466d1c6d10c6aadc888ed26c9ff82e1c42918d", "stdlib/universe/pivot_fields_test.flux": "73559500bd49bc7f4792dc672544d0bd8c1ad6f0ce7073531e66e4705efab10a", "stdlib/universe/pivot_mean_test.flux": "6791ac07a36c0f761c625f3e6c6b18f45cd99f724fdfd1adea8968b7f1b6d0cf", "stdlib/universe/pivot_table_test.flux": "cd31c461f16f11fff0455f696b290aac70833eab64be99f907de15ec581aeed9", "stdlib/universe/pivot_task_test_test.flux": "c835cd8c74220c3352c7540f5f5fc53b9cad14ecaedafb25f438267c65aa3666", "stdlib/universe/pivot_test.flux": "4568aa37ad4d203a4c72270f851b015a4eadc9a8baf255cb6f088c7ff0dcc63e", "stdlib/universe/quantile_aggregate_test.flux": "596256940c3a3f011f09abb322bfb6c3a666cc08cd09518e5a54b6512c6ed221", "stdlib/universe/quantile_defaults_test.flux": 
"18bf66ecd7af89aa303c3094ad3591dc38cfee1c65729b9e646fe601e4558efe", "stdlib/universe/quantile_tdigest_test.flux": "92821a1cabb67d8a0b5b72e284047792617f29d5efd9192570f7bf4bd3fd1711", "stdlib/universe/quantile_test.flux": "91a77faea93ab5cb4df0f289f01a256f8bd0593d89bb877a2b735bb3ab023a89", "stdlib/universe/range_nsecs_test.flux": "9cf01a93a08b7ff5dd715d80968db9857140fc859f631fedb32c6287cc13357b", "stdlib/universe/range_stop_test.flux": "462bb39ce7d3acd0d59673e5826abf3c71eb6046299cb79160f12d430edfec57", "stdlib/universe/range_test.flux": "9b768ecc5bcaa36f47802a9e7f0e6e88ded27704b02946400bba8a3dceb4c05b", "stdlib/universe/reduce_noref_test.flux": "38a043e41c9994328c44c165f54c8a32ca8123242ee290050391839ac5b7d127", "stdlib/universe/reduce_test.flux": "2f5b4a8fa04661b6ec52b2110be588cfbe05dd60a6c71f2d2f60f27e0404ed9d", "stdlib/universe/relative_strength_index_test.flux": "d9b8ed3f1702aa4f9c267ac1d47022fd450c91bee7ba0c00ca905a88dc2d1a4b", "stdlib/universe/rename_fn_test.flux": "62ef2c7035c903a560700255e0532603ac134631eaedceba5937a2229e003460", "stdlib/universe/rename_multiple_test.flux": "2010a4820161b0d1b7b31136a29e4dfea01dd2d56d76d04608a5c918619b3dcc", "stdlib/universe/rename_test.flux": "22b9c2b606a9eac5cd07c1a6f28d37c0ddc6dc03573b36c7c821de77dfb72c03", "stdlib/universe/rowfn_with_import_test.flux": "669fa847dafc3fed17f5a230376b410a76d9483fcf855eacaaa6d6a2f9008936", "stdlib/universe/sample_test.flux": "a0637e7332d7a7891655a79ded94457c93f502fa10f25cb1cec5362874581339", "stdlib/universe/selector_preserve_time_test.flux": "8182a9799250cb1270870b87d3b3a4366e41ea7a995923abe547977408df5822", "stdlib/universe/set_new_column_test.flux": "3ed3acb98adf8e8b7b3baab0db466f2e491d7a20f74561321d14bcb2f21b7840", "stdlib/universe/set_test.flux": "a7927d11f839db953831ab9290daac8d538ca47eba5a14c48a0d590aad8cd746", "stdlib/universe/shift_negative_duration_test.flux": "8bac53ecde1ee2d175bde57a62254aa195177b7f05b2d9b7d18d597dcf0e86f9", "stdlib/universe/shift_test.flux": "1fd4b01522ac3efcdcc8dc10a94c65605018cd4b646db6be247e9ebdc9fd188f", "stdlib/universe/show_all_tag_keys_test.flux": "809b35913149fd7a1c00f7b995ff5541054e9736d0700233d83646f77dafe86d", "stdlib/universe/simple_max_test.flux": "4425e3033c0db63f273a852011accae0ffa68169b391807e54ec7339947acc60", "stdlib/universe/skew_test.flux": "aa790e67511f21c2eecc957656bcb7604970276fa0433d2f78b84da97097e4c9", "stdlib/universe/sort2_test.flux": "8555a673a0a9f33986ede6cb6a86097e37d410585e37d93169d8c6cea261f628", "stdlib/universe/sort_test.flux": "231121ef03f52827ee1231035159d8c8585a0f5a3f1e4325b6798f16a143d437", "stdlib/universe/spread_test.flux": "320dde43106a0011b3305e7cf29838ac6a6209e69826c11fa67b2cc37eb7bc7e", "stdlib/universe/state_count_test.flux": "cb3e418a1f7407b824401e028897ad605e2ec653c10379ea83af44ac155bd8a7", "stdlib/universe/state_duration_test.flux": "ebf5eec548d2dda937c5bcca8f4be6b65f8ef77395fe8de4438d031f0ad0ddab", "stdlib/universe/stddev_test.flux": "10717ac6a0e9f1606f31c66839db056dc5d77635d6e400a68c9f791387c6db2e", "stdlib/universe/string_interp_test.flux": "5761893d84eaa963caff41d9cf916a27119e1d6694cbafe3efdb38f077af2424", "stdlib/universe/string_max_test.flux": "48d1c761df1e2fb624b8f454aea8d709c67a3038287097fedee8bb21d26bc6c7", "stdlib/universe/string_sort_test.flux": "201a80f54f6b8dcbafb4736d689915928655a06e9b3dd88edb376a66a427cb4e", "stdlib/universe/sum_test.flux": "7648246347f100a41f56f7ddf73130aecd00d9ce8b235d3e08203c4e11236a0a", "stdlib/universe/table_fns_findcolumn_map_test.flux": "682b344d6a24eaba85994012dafefe02899e777a97c870179deeeee97926a483", 
"stdlib/universe/table_fns_findrecord_map_test.flux": "0e3d641c78eb934518641378eba21444942a5c8335e6c638510506724a46f1aa", "stdlib/universe/table_fns_test.flux": "7a7efd821463718a698cccb3379501585d96266b6bcfcf048e498b41e3d4d56f", "stdlib/universe/tail_offset_test.flux": "0a8034bd3fd8fd86b52f939677ec0cef3136b4d378b18bf6f7b2bfed29c669ef", "stdlib/universe/tail_test.flux": "938afb9f20a22029a3068fae0daacf77ddb83943ed70ef66b8bbc7a0eb79070d", "stdlib/universe/task_per_line_test.flux": "7542b5bf93d9e20af2faa6e63baf5a5cb7116e3dc921ab4db68c4f180ad61e56", "stdlib/universe/time_weighted_avg_test.flux": "228156abd1f91b1588cec556175576afdcfb4f1f5b0f8cff771aac39995f56fe", "stdlib/universe/timed_moving_average_test.flux": "b488427680e31d6385b3c0c058ed47f4f747d30c927fe17966dd6d2fe6d862e4", "stdlib/universe/to_convert_test.flux": "0086cb1260ca8669268a2ce4e739426e9dfc9da49f23eef2df5bb76fb8d92a0d", "stdlib/universe/today_test.flux": "514c988f2d4452c2130f11be734fcb29c8b8242f260c62039324476376d58ee4", "stdlib/universe/top_test.flux": "396222f5e5dacb217c7adbb81cc25656f6da2993fed2f258b24aaffdb1346e1a", "stdlib/universe/triple_exponential_derivative_test.flux": "37f08c4e9c5017d360ac45671296cb7a0d12cfec19cf3b78bb53a2837ed13e7c", "stdlib/universe/triple_exponential_moving_average_test.flux": "334cd641d2bb409d843c1647f50fdfce60912a0e1c90f87415d2531c8c52111f", "stdlib/universe/union_heterogeneous_test.flux": "8b5f3dc3937b7051f8a84635f984950d091f7b214e9c9f7e8d01ae367b12680e", "stdlib/universe/union_test.flux": "c2a3059856caeace8cf8e69008e31fc06bd907450df3cdee14fc2146539fef73", "stdlib/universe/unique_test.flux": "d8d84060b14ef61e64cb003c95648a1ce7c3a4e71cc7ed8a54869e1093ec625d", "stdlib/universe/universe.flux": "16789b61cefd467f0616f8f40769e22bd8db3a7e0748262313a2ffd3e4216111", "stdlib/universe/universe_truncateTimeColumn_test.flux": "a1502106ef7c8d0cc138044fa81a1feeab266a3a247b7fe44aec6ec305e524e5", "stdlib/universe/window_aggregate_test.flux": "d521c5708e6fc02c99774ed56b31aa9d38038d5c534bce59f0b051d14b7a8969", "stdlib/universe/window_default_start_align_test.flux": "58062a508284be8992df90b5a40d9356264358cef20e6fec23813a305aab6433", "stdlib/universe/window_default_test.flux": "498abfd9ef15964cb425521d36c1324e02d416a71004d8954f1106d2a9ec0abc", "stdlib/universe/window_generate_empty_test.flux": "9c0ab9048145d6c967f03242348101571b92e67f0bfd3f591f65553c34c90691", "stdlib/universe/window_group_mean_ungroup_test.flux": "4ad43dd3ddaaa69e069a225e11310ced6e3cde266dc200cb691a3c4ec6238a89", "stdlib/universe/window_null_test.flux": "7facbcee778f33ef733017b39c82d7b10eb339fa299f84f02502549e9f065127", "stdlib/universe/window_start_bound_test.flux": "9df0b6c5b286762bb36d7900e507c02fb6882bd0af56361a62058cced25ca854", "stdlib/universe/window_test.flux": "3a7267214389c94a90defa625a896527a3c1f79cf1e26bb82f549f92aacb3fe0", "stdlib/universe/yield_test.flux": "0484f5afea429ca8ca55ec9498cdb1692f5425f41355be8992bc5024a9254d00", }
1
16,859
Does this file actually need to be committed to git? Having to run `make generate` is rather tedious on each PR and it conflicts easily.
influxdata-flux
go
@@ -440,18 +440,6 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID") } - if cfg.JSONKeyPath == "" { - if path := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); path != "" { - // Check read access - if _, err := ioutil.ReadFile(path); err != nil { - return nil, errors.Fatalf("Failed to read google credential from file %v: %v", path, err) - } - cfg.JSONKeyPath = path - } else { - return nil, errors.Fatal("No credential file path is set") - } - } - if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err }
1
package main import ( "context" "fmt" "io" "io/ioutil" "os" "path/filepath" "runtime" "strings" "syscall" "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/local" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/rest" "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/backend/sftp" "github.com/restic/restic/internal/backend/swift" "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/limiter" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/errors" "golang.org/x/crypto/ssh/terminal" ) var version = "compiled manually" // GlobalOptions hold all global options for restic. type GlobalOptions struct { Repo string PasswordFile string Quiet bool NoLock bool JSON bool CacheDir string NoCache bool CACerts []string TLSClientCert string CleanupCache bool LimitUploadKb int LimitDownloadKb int ctx context.Context password string stdout io.Writer stderr io.Writer Options []string extended options.Options } var globalOptions = GlobalOptions{ stdout: os.Stdout, stderr: os.Stderr, } func init() { var cancel context.CancelFunc globalOptions.ctx, cancel = context.WithCancel(context.Background()) AddCleanupHandler(func() error { cancel() return nil }) f := cmdRoot.PersistentFlags() f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)") f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report") f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos") f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it") f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory") f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache") f.StringSliceVar(&globalOptions.CACerts, "cacert", nil, "path to load root certificates from (default: use system certificates)") f.StringVar(&globalOptions.TLSClientCert, "tls-client-cert", "", "path to a file containing PEM encoded TLS client certificate and private key") f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories") f.IntVar(&globalOptions.LimitUploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)") f.IntVar(&globalOptions.LimitDownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. (default: unlimited)") f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") restoreTerminal() } // checkErrno returns nil when err is set to syscall.Errno(0), since this is no // error condition. 
func checkErrno(err error) error { e, ok := err.(syscall.Errno) if !ok { return err } if e == 0 { return nil } return err } func stdinIsTerminal() bool { return terminal.IsTerminal(int(os.Stdin.Fd())) } func stdoutIsTerminal() bool { return terminal.IsTerminal(int(os.Stdout.Fd())) } func stdoutTerminalWidth() int { w, _, err := terminal.GetSize(int(os.Stdout.Fd())) if err != nil { return 0 } return w } // restoreTerminal installs a cleanup handler that restores the previous // terminal state on exit. func restoreTerminal() { if !stdoutIsTerminal() { return } fd := int(os.Stdout.Fd()) state, err := terminal.GetState(fd) if err != nil { fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) return } AddCleanupHandler(func() error { err := checkErrno(terminal.Restore(fd, state)) if err != nil { fmt.Fprintf(os.Stderr, "unable to get restore terminal state: %#+v\n", err) } return err }) } // ClearLine creates a platform dependent string to clear the current // line, so it can be overwritten. ANSI sequences are not supported on // current windows cmd shell. func ClearLine() string { if runtime.GOOS == "windows" { if w := stdoutTerminalWidth(); w > 0 { return strings.Repeat(" ", w-1) + "\r" } return "" } return "\x1b[2K" } // Printf writes the message to the configured stdout stream. func Printf(format string, args ...interface{}) { _, err := fmt.Fprintf(globalOptions.stdout, format, args...) if err != nil { fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err) Exit(100) } } // Verbosef calls Printf to write the message when the verbose flag is set. func Verbosef(format string, args ...interface{}) { if globalOptions.Quiet { return } Printf(format, args...) } // PrintProgress wraps fmt.Printf to handle the difference in writing progress // information to terminals and non-terminal stdout func PrintProgress(format string, args ...interface{}) { var ( message string carriageControl string ) message = fmt.Sprintf(format, args...) if !(strings.HasSuffix(message, "\r") || strings.HasSuffix(message, "\n")) { if stdoutIsTerminal() { carriageControl = "\r" } else { carriageControl = "\n" } message = fmt.Sprintf("%s%s", message, carriageControl) } if stdoutIsTerminal() { message = fmt.Sprintf("%s%s", ClearLine(), message) } fmt.Print(message) } // Warnf writes the message to the configured stderr stream. func Warnf(format string, args ...interface{}) { _, err := fmt.Fprintf(globalOptions.stderr, format, args...) if err != nil { fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err) Exit(100) } } // Exitf uses Warnf to write the message and then terminates the process with // the given exit code. func Exitf(exitcode int, format string, args ...interface{}) { if format[len(format)-1] != '\n' { format += "\n" } Warnf(format, args...) Exit(exitcode) } // resolvePassword determines the password to be used for opening the repository. func resolvePassword(opts GlobalOptions, env string) (string, error) { if opts.PasswordFile != "" { s, err := ioutil.ReadFile(opts.PasswordFile) if os.IsNotExist(err) { return "", errors.Fatalf("%s does not exist", opts.PasswordFile) } return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") } if pwd := os.Getenv(env); pwd != "" { return pwd, nil } return "", nil } // readPassword reads the password from the given reader directly. 
func readPassword(in io.Reader) (password string, err error) { buf := make([]byte, 1000) n, err := io.ReadFull(in, buf) buf = buf[:n] if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF { return "", errors.Wrap(err, "ReadFull") } return strings.TrimRight(string(buf), "\r\n"), nil } // readPasswordTerminal reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the // password. func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { fmt.Fprint(out, prompt) buf, err := terminal.ReadPassword(int(in.Fd())) fmt.Fprintln(out) if err != nil { return "", errors.Wrap(err, "ReadPassword") } password = string(buf) return password, nil } // ReadPassword reads the password from a password file, the environment // variable RESTIC_PASSWORD or prompts the user. func ReadPassword(opts GlobalOptions, prompt string) (string, error) { if opts.password != "" { return opts.password, nil } var ( password string err error ) if stdinIsTerminal() { password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) } if err != nil { return "", errors.Wrap(err, "unable to read password") } if len(password) == 0 { return "", errors.Fatal("an empty password is not a password") } return password, nil } // ReadPasswordTwice calls ReadPassword two times and returns an error when the // passwords don't match. func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { pw1, err := ReadPassword(gopts, prompt1) if err != nil { return "", err } pw2, err := ReadPassword(gopts, prompt2) if err != nil { return "", err } if pw1 != pw2 { return "", errors.Fatal("passwords do not match") } return pw1, nil } const maxKeys = 20 // OpenRepository reads the password and opens the repository. 
func OpenRepository(opts GlobalOptions) (*repository.Repository, error) { if opts.Repo == "" { return nil, errors.Fatal("Please specify repository location (-r)") } be, err := open(opts.Repo, opts, opts.extended) if err != nil { return nil, err } be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) { Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) }) s := repository.New(be) opts.password, err = ReadPassword(opts, "enter password for repository: ") if err != nil { return nil, err } err = s.SearchKey(opts.ctx, opts.password, maxKeys) if err != nil { return nil, err } if stdoutIsTerminal() { Verbosef("password is correct\n") } if opts.NoCache { return s, nil } c, err := cache.New(s.Config().ID, opts.CacheDir) if err != nil { Warnf("unable to open cache: %v\n", err) return s, nil } // start using the cache s.UseCache(c) oldCacheDirs, err := cache.Old(c.Base) if err != nil { Warnf("unable to find old cache directories: %v", err) } // nothing more to do if no old cache dirs could be found if len(oldCacheDirs) == 0 { return s, nil } // cleanup old cache dirs if instructed to do so if opts.CleanupCache { Printf("removing %d old cache dirs from %v\n", len(oldCacheDirs), c.Base) for _, item := range oldCacheDirs { dir := filepath.Join(c.Base, item) err = fs.RemoveAll(dir) if err != nil { Warnf("unable to remove %v: %v\n", dir, err) } } } else { if stdoutIsTerminal() { Verbosef("found %d old cache directories in %v, pass --cleanup-cache to remove them\n", len(oldCacheDirs), c.Base) } } return s, nil } func parseConfig(loc location.Location, opts options.Options) (interface{}, error) { // only apply options for a particular backend here opts = opts.Extract(loc.Scheme) switch loc.Scheme { case "local": cfg := loc.Config.(local.Config) if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening local repository at %#v", cfg) return cfg, nil case "sftp": cfg := loc.Config.(sftp.Config) if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening sftp repository at %#v", cfg) return cfg, nil case "s3": cfg := loc.Config.(s3.Config) if cfg.KeyID == "" { cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID") } if cfg.Secret == "" { cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") } if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening s3 repository at %#v", cfg) return cfg, nil case "gs": cfg := loc.Config.(gs.Config) if cfg.ProjectID == "" { cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID") } if cfg.JSONKeyPath == "" { if path := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); path != "" { // Check read access if _, err := ioutil.ReadFile(path); err != nil { return nil, errors.Fatalf("Failed to read google credential from file %v: %v", path, err) } cfg.JSONKeyPath = path } else { return nil, errors.Fatal("No credential file path is set") } } if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening gs repository at %#v", cfg) return cfg, nil case "azure": cfg := loc.Config.(azure.Config) if cfg.AccountName == "" { cfg.AccountName = os.Getenv("AZURE_ACCOUNT_NAME") } if cfg.AccountKey == "" { cfg.AccountKey = os.Getenv("AZURE_ACCOUNT_KEY") } if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening gs repository at %#v", cfg) return cfg, nil case "swift": cfg := loc.Config.(swift.Config) if err := swift.ApplyEnvironment("", &cfg); err != nil { return nil, err } if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, 
err } debug.Log("opening swift repository at %#v", cfg) return cfg, nil case "b2": cfg := loc.Config.(b2.Config) if cfg.AccountID == "" { cfg.AccountID = os.Getenv("B2_ACCOUNT_ID") } if cfg.AccountID == "" { return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty") } if cfg.Key == "" { cfg.Key = os.Getenv("B2_ACCOUNT_KEY") } if cfg.Key == "" { return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty") } if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening b2 repository at %#v", cfg) return cfg, nil case "rest": cfg := loc.Config.(rest.Config) if err := opts.Apply(loc.Scheme, &cfg); err != nil { return nil, err } debug.Log("opening rest repository at %#v", cfg) return cfg, nil } return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } // Open the backend specified by a location config. func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) { debug.Log("parsing location %v", s) loc, err := location.Parse(s) if err != nil { return nil, errors.Fatalf("parsing repository location failed: %v", err) } var be restic.Backend cfg, err := parseConfig(loc, opts) if err != nil { return nil, err } tropts := backend.TransportOptions{ RootCertFilenames: globalOptions.CACerts, TLSClientCertKeyFilename: globalOptions.TLSClientCert, } rt, err := backend.Transport(tropts) if err != nil { return nil, err } // wrap the transport so that the throughput via HTTP is limited rt = limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb).Transport(rt) switch loc.Scheme { case "local": be, err = local.Open(cfg.(local.Config)) // wrap the backend in a LimitBackend so that the throughput is limited be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb)) case "sftp": be, err = sftp.Open(cfg.(sftp.Config)) // wrap the backend in a LimitBackend so that the throughput is limited be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb)) case "s3": be, err = s3.Open(cfg.(s3.Config), rt) case "gs": be, err = gs.Open(cfg.(gs.Config), rt) case "azure": be, err = azure.Open(cfg.(azure.Config), rt) case "swift": be, err = swift.Open(cfg.(swift.Config), rt) case "b2": be, err = b2.Open(globalOptions.ctx, cfg.(b2.Config), rt) case "rest": be, err = rest.Open(cfg.(rest.Config), rt) default: return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } if err != nil { return nil, errors.Fatalf("unable to open repo at %v: %v", s, err) } // check if config is there fi, err := be.Stat(globalOptions.ctx, restic.Handle{Type: restic.ConfigFile}) if err != nil { return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, s) } if fi.Size == 0 { return nil, errors.New("config file has zero size, invalid repository?") } return be, nil } // Create the backend specified by URI. 
func create(s string, opts options.Options) (restic.Backend, error) { debug.Log("parsing location %v", s) loc, err := location.Parse(s) if err != nil { return nil, err } cfg, err := parseConfig(loc, opts) if err != nil { return nil, err } tropts := backend.TransportOptions{ RootCertFilenames: globalOptions.CACerts, TLSClientCertKeyFilename: globalOptions.TLSClientCert, } rt, err := backend.Transport(tropts) if err != nil { return nil, err } switch loc.Scheme { case "local": return local.Create(cfg.(local.Config)) case "sftp": return sftp.Create(cfg.(sftp.Config)) case "s3": return s3.Create(cfg.(s3.Config), rt) case "gs": return gs.Create(cfg.(gs.Config), rt) case "azure": return azure.Create(cfg.(azure.Config), rt) case "swift": return swift.Open(cfg.(swift.Config), rt) case "b2": return b2.Create(globalOptions.ctx, cfg.(b2.Config), rt) case "rest": return rest.Create(cfg.(rest.Config), rt) } debug.Log("invalid repository scheme: %v", s) return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) }
1
9,251
We don't need this anymore, as Google's library handles the various auth mechanisms for us. We'll be less explicit about why we're failing, but we gain support for several authentication methods - swings and roundabouts! (A hedged sketch of the default-credentials lookup follows this record.)
restic-restic
go
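The review note above leans on Google's client library resolving credentials by itself rather than restic validating GOOGLE_APPLICATION_CREDENTIALS by hand. Below is a minimal, hypothetical Go sketch of that Application Default Credentials lookup; it is not restic's actual gs backend code, and the scope string and variable names are assumptions.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials: checks GOOGLE_APPLICATION_CREDENTIALS,
	// the gcloud well-known credentials file, and the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx,
		"https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		log.Fatalf("no usable Google credentials: %v", err)
	}
	// The token source would be handed to the storage client
	// instead of a hand-validated JSON key path.
	_ = creds.TokenSource
}
```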
@@ -1,11 +1,12 @@ package plugin type Config struct { - SocketPath string `yaml:"path"` - Volumes VolumesConfig `yaml:"volume"` + SocketDir string `yaml:"socket_dir" default:"/run/docker/plugins"` + Volumes VolumesConfig `yaml:"volume"` + GPUs map[string]map[string]string `yaml:"GPUs"` } type VolumesConfig struct { - Root string + Root string `yaml:"root" default:"/var/lib/docker-volumes"` Volumes map[string]map[string]string }
1
package plugin type Config struct { SocketPath string `yaml:"path"` Volumes VolumesConfig `yaml:"volume"` } type VolumesConfig struct { Root string Volumes map[string]map[string]string }
1
6,282
No need to specify the attribute here. Moreover, in YAML lowercase names are the default (a minimal sketch follows this record).
sonm-io-core
go
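To illustrate the reviewer's point about YAML defaults: with a typical Go YAML library (gopkg.in/yaml.v2 assumed here), an untagged exported field is matched by its lowercased name, so a `root:` key binds to `Root` without a `yaml:"root"` tag. A minimal sketch, not taken from the sonm-io codebase:

```go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

type VolumesConfig struct {
	Root    string                       // no yaml tag needed: the key "root" maps here by default
	Volumes map[string]map[string]string
}

func main() {
	var cfg VolumesConfig
	data := []byte("root: /var/lib/docker-volumes\n")
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Root) // /var/lib/docker-volumes
}
```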
@@ -8,10 +8,12 @@ using System; using System.Collections.Generic; using System.Globalization; using System.IO; +using System.Linq; using System.Net; using System.Net.Http; using System.Text; using System.Threading; +using System.Threading.Tasks; using Task = System.Threading.Tasks.Task; namespace Microsoft.DotNet.Build.CloudTestTasks
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using Microsoft.Build.Framework; using Microsoft.Build.Utilities; using System; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Net; using System.Net.Http; using System.Text; using System.Threading; using Task = System.Threading.Tasks.Task; namespace Microsoft.DotNet.Build.CloudTestTasks { public class UploadClient { private TaskLoggingHelper log; public UploadClient(TaskLoggingHelper loggingHelper) { log = loggingHelper; } public string EncodeBlockIds(int numberOfBlocks, int lengthOfId) { string numberOfBlocksString = numberOfBlocks.ToString("D" + lengthOfId); if (Encoding.UTF8.GetByteCount(numberOfBlocksString) <= 64) { byte[] bytes = Encoding.UTF8.GetBytes(numberOfBlocksString); return Convert.ToBase64String(bytes); } else { throw new Exception("Task failed - Could not encode block id."); } } public async Task UploadBlockBlobAsync( CancellationToken ct, string AccountName, string AccountKey, string ContainerName, string filePath, string destinationBlob, string contentType, int uploadTimeout, string leaseId = "") { string resourceUrl = AzureHelper.GetContainerRestUrl(AccountName, ContainerName); string fileName = destinationBlob; fileName = fileName.Replace("\\", "/"); string blobUploadUrl = resourceUrl + "/" + fileName; int size = (int)new FileInfo(filePath).Length; int blockSize = 4 * 1024 * 1024; //4MB max size of a block blob int bytesLeft = size; List<string> blockIds = new List<string>(); int numberOfBlocks = (size / blockSize) + 1; int countForId = 0; using (FileStream fileStreamTofilePath = new FileStream(filePath, FileMode.Open)) { int offset = 0; while (bytesLeft > 0) { int nextBytesToRead = (bytesLeft < blockSize) ? bytesLeft : blockSize; byte[] fileBytes = new byte[blockSize]; int read = fileStreamTofilePath.Read(fileBytes, 0, nextBytesToRead); if (nextBytesToRead != read) { throw new Exception(string.Format( "Number of bytes read ({0}) from file {1} isn't equal to the number of bytes expected ({2}) .", read, fileName, nextBytesToRead)); } string blockId = EncodeBlockIds(countForId, numberOfBlocks.ToString().Length); blockIds.Add(blockId); string blockUploadUrl = blobUploadUrl + "?comp=block&blockid=" + WebUtility.UrlEncode(blockId); using (HttpClient client = new HttpClient()) { client.DefaultRequestHeaders.Clear(); // In random occassions the request fails if the network is slow and it takes more than 100 seconds to upload 4MB. 
client.Timeout = TimeSpan.FromMinutes(uploadTimeout); Func<HttpRequestMessage> createRequest = () => { DateTime dt = DateTime.UtcNow; var req = new HttpRequestMessage(HttpMethod.Put, blockUploadUrl); req.Headers.Add( AzureHelper.DateHeaderString, dt.ToString("R", CultureInfo.InvariantCulture)); req.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion); if (!string.IsNullOrWhiteSpace(leaseId)) { log.LogMessage($"Sending request: {leaseId} {blockUploadUrl}"); req.Headers.Add("x-ms-lease-id", leaseId); } req.Headers.Add( AzureHelper.AuthorizationHeaderString, AzureHelper.AuthorizationHeader( AccountName, AccountKey, "PUT", dt, req, string.Empty, string.Empty, nextBytesToRead.ToString(), string.Empty)); Stream postStream = new MemoryStream(); postStream.Write(fileBytes, 0, nextBytesToRead); postStream.Seek(0, SeekOrigin.Begin); req.Content = new StreamContent(postStream); return req; }; log.LogMessage(MessageImportance.Low, "Sending request to upload part {0} of file {1}", countForId, fileName); using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(log, client, createRequest)) { log.LogMessage( MessageImportance.Low, "Received response to upload part {0} of file {1}: Status Code:{2} Status Desc: {3}", countForId, fileName, response.StatusCode, await response.Content.ReadAsStringAsync()); } } offset += read; bytesLeft -= nextBytesToRead; countForId += 1; } } string blockListUploadUrl = blobUploadUrl + "?comp=blocklist"; using (HttpClient client = new HttpClient()) { Func<HttpRequestMessage> createRequest = () => { DateTime dt1 = DateTime.UtcNow; var req = new HttpRequestMessage(HttpMethod.Put, blockListUploadUrl); req.Headers.Add(AzureHelper.DateHeaderString, dt1.ToString("R", CultureInfo.InvariantCulture)); req.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion); if (string.IsNullOrEmpty(contentType)) { contentType = DetermineContentTypeBasedOnFileExtension(filePath); } if (!string.IsNullOrEmpty(contentType)) { req.Headers.Add(AzureHelper.ContentTypeString, contentType); } string cacheControl = DetermineCacheControlBasedOnFileExtension(filePath); if (!string.IsNullOrEmpty(cacheControl)) { req.Headers.Add(AzureHelper.CacheControlString, cacheControl); } var body = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?><BlockList>"); foreach (object item in blockIds) body.AppendFormat("<Latest>{0}</Latest>", item); body.Append("</BlockList>"); byte[] bodyData = Encoding.UTF8.GetBytes(body.ToString()); if (!string.IsNullOrWhiteSpace(leaseId)) { log.LogMessage($"Sending list request: {leaseId} {blockListUploadUrl}"); req.Headers.Add("x-ms-lease-id", leaseId); } req.Headers.Add( AzureHelper.AuthorizationHeaderString, AzureHelper.AuthorizationHeader( AccountName, AccountKey, "PUT", dt1, req, string.Empty, string.Empty, bodyData.Length.ToString(), string.Empty)); Stream postStream = new MemoryStream(); postStream.Write(bodyData, 0, bodyData.Length); postStream.Seek(0, SeekOrigin.Begin); req.Content = new StreamContent(postStream); return req; }; using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(log, client, createRequest)) { log.LogMessage( MessageImportance.Low, "Received response to combine block list for file {0}: Status Code:{1} Status Desc: {2}", fileName, response.StatusCode, await response.Content.ReadAsStringAsync()); } } } private string DetermineContentTypeBasedOnFileExtension(string filename) { if (Path.GetExtension(filename) == ".svg") { return "image/svg+xml"; } else if 
(Path.GetExtension(filename) == ".version") { return "text/plain"; } return string.Empty; } private string DetermineCacheControlBasedOnFileExtension(string filename) { if (Path.GetExtension(filename) == ".svg") { return "No-Cache"; } return string.Empty; } } }
1
14,434
In general I'm trying to understand the reason behind this PR. It looks like, if a blob (name) already exists, we check whether the contents are identical? In what scenarios is this not the case? When we want to publish a package/asset that has changed but we still want to use the same version?
dotnet-buildtools
.cs
@@ -151,7 +151,9 @@ class BlacklistRuleBook(bre.BaseRuleBook): lists: first one is IP addresses, second one is network blocks """ - data = urllib2.urlopen(url).read() + req = urllib2.build_opener() + req.addheaders = [('User-Agent', 'Forseti blacklist rules engine agent')] + data = req.open(url).read() ip_addresses = re.findall(r'^[0-9]+(?:\.[0-9]+){3}$', data, re.M) netblocks = re.findall(r'^[0-9]+(?:\.[0-9]+){0,3}/[0-9]{1,2}$', data, re.M)
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules engine for Blacklist of IP addresses.""" import itertools import re import urllib2 import struct import socket from collections import namedtuple from google.cloud.forseti.common.gcp_type import resource as resource_mod from google.cloud.forseti.common.util import logger from google.cloud.forseti.scanner.audit import base_rules_engine as bre LOGGER = logger.get_logger(__name__) class BlacklistRulesEngine(bre.BaseRulesEngine): """Rules engine for BlacklistRules.""" def __init__(self, rules_file_path, snapshot_timestamp=None): """Initialize. Args: rules_file_path (str): file location of rules snapshot_timestamp (str): timestamp for database. """ super(BlacklistRulesEngine, self).__init__(rules_file_path=rules_file_path) self.rule_book = None def build_rule_book(self, global_configs=None): """Build BlacklistRuleBook from rules definition file. Args: global_configs (dict): Global Configs """ self.rule_book = BlacklistRuleBook( self._load_rule_definitions()) def find_policy_violations(self, instance_network_interface, force_rebuild=False): """Determine whether the networks violates rules. Args: instance_network_interface (list): list of instance_network_interface force_rebuild (bool): set to false to not force a rebuiid Return: list: iterator of all violations """ violations = itertools.chain() if self.rule_book is None or force_rebuild: self.build_rule_book() resource_rules = self.rule_book.get_resource_rules() for rule in resource_rules: violations = itertools.chain(violations, rule.find_violations( instance_network_interface)) return violations def add_rules(self, rules): """Add rules to the rule book. Args: rules (dicts): rule definitions """ if self.rule_book is not None: self.rule_book.add_rules(rules) class BlacklistRuleBook(bre.BaseRuleBook): """The RuleBook for networks resources.""" def __init__(self, rule_defs=None): """Initialize. Args: rule_defs (dict): The parsed dictionary of rules from the YAML definition file. """ super(BlacklistRuleBook, self).__init__() self.resource_rules_map = {} if not rule_defs: self.rule_defs = {} else: self.rule_defs = rule_defs self.add_rules(rule_defs) def add_rules(self, rule_defs): """Add rules to the rule book. Args: rule_defs (dict): rules definitions """ for (i, rule) in enumerate(rule_defs.get('rules', [])): self.add_rule(rule, i) def add_rule(self, rule_def, rule_index): """Add a rule to the rule book. Args: rule_def (dict): A dictionary containing rule definition properties. rule_index (int): The index of the rule from the rule definitions. Assigned automatically when the rule book is built. 
""" ips, nets = self.get_and_parse_blacklist(rule_def.get('url')) rule_def_resource = { 'ips_list': ips, 'nets_list': nets } rule = Rule(rule_blacklist=rule_def.get('blacklist'), rule_index=rule_index, rules=rule_def_resource) resource_rules = self.resource_rules_map.get(rule_index) if not resource_rules: self.resource_rules_map[rule_index] = rule def get_resource_rules(self): """Get all the resource rules for (resource, RuleAppliesTo.*). Returns: list: A list of ResourceRules. """ resource_rules = [] for resource_rule in self.resource_rules_map: resource_rules.append(self.resource_rules_map[resource_rule]) return resource_rules @staticmethod def get_and_parse_blacklist(url): """Download blacklist and parse it into IPs and netblocks. Args: url (str): url to download blacklist from Returns: lists: first one is IP addresses, second one is network blocks """ data = urllib2.urlopen(url).read() ip_addresses = re.findall(r'^[0-9]+(?:\.[0-9]+){3}$', data, re.M) netblocks = re.findall(r'^[0-9]+(?:\.[0-9]+){0,3}/[0-9]{1,2}$', data, re.M) return ip_addresses, netblocks class Rule(object): """The rules class for instance_network_interface.""" def __init__(self, rule_blacklist, rule_index, rules): """Initialize. Args: rule_blacklist (str): Name of the loaded blacklist rule_index (int): The index of the rule from the definitions rules (dict): The resources associated with the rules like the whitelist """ self.rule_blacklist = rule_blacklist self.rule_index = rule_index self.rules = rules @staticmethod def address_in_network(ipaddr, net): """ Checks if ip address is in net Args: ipaddr (str): IP address to check net (str): network to check Returns: bool: True if ipaddr in net """ ipaddrb = struct.unpack('!I', socket.inet_aton(ipaddr))[0] netstr, bits = net.split('/') netaddr = struct.unpack('!I', socket.inet_aton(netstr))[0] mask = (0xffffffff << (32 - int(bits))) & 0xffffffff return (ipaddrb & mask) == (netaddr & mask) def is_blacklisted(self, ipaddr): """ Checks if ip address is in a blacklist Args: ipaddr (str): IP address to check Returns: bool: True if ipaddr is blacklisted """ if ipaddr: if ipaddr in self.rules['ips_list']: return True for ip_network in self.rules['nets_list']: if self.address_in_network(ipaddr, ip_network): return True return False def find_violations(self, instance_network_interface): """Raise violation if the IP is not in the whitelist. Args: instance_network_interface (InstanceNetworkInterface): object Yields: namedtuple: Returns RuleViolation named tuple """ for network_interface in instance_network_interface: network_and_project = re.search( r'compute/[a-zA-Z0-9]+/projects/([^/]*).*networks/([^/]*)', network_interface.network) project = network_and_project.group(1) network = network_and_project.group(2) if not network_interface.access_configs: LOGGER.warn('Unable to determine blacklist violation for ' 'network interface: %s, because it doesn\'t ' 'have external internet access.', network_interface.full_name) continue for access_config in network_interface.access_configs: ipaddr = access_config.get('natIP') if self.is_blacklisted(ipaddr): yield self.RuleViolation( resource_name=project, resource_type=resource_mod.ResourceType.INSTANCE, full_name=network_interface.full_name, rule_blacklist=self.rule_blacklist, rule_name=self.rule_blacklist, rule_index=self.rule_index, violation_type='BLACKLIST_VIOLATION', project=project, network=network, ip=access_config.get('natIP'), resource_data=network_interface.as_json()) # Rule violation. 
# resource_type: string # rule_blacklist: string # rule_name: string # rule_index: int # violation_type: BLACKLIST_VIOLATION # project: string # network: string # ip: string RuleViolation = namedtuple('RuleViolation', ['resource_type', 'full_name', 'resource_name', 'rule_blacklist', 'rule_name', 'rule_index', 'violation_type', 'project', 'network', 'ip', 'resource_data'])
1
32,108
Can you rename `req` to `opener`, since that is what the object actually is, and it wraps a request internally?
forseti-security-forseti-security
py
@@ -1405,6 +1405,8 @@ def getFormatFieldSpeech(attrs,attrsCache=None,formatConfig=None,unit=None,extra linePrefix=attrs.get("line-prefix") if linePrefix: textList.append(linePrefix) + breakpoint=attrs.get("breakpoint") + if breakpoint: textList.append(breakpoint) if attrsCache is not None: attrsCache.clear() attrsCache.update(attrs)
1
# -*- coding: UTF-8 -*- #speech.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2006-2014 NV Access Limited, Peter Vágner, Aleksey Sadovoy """High-level functions to speak information. """ import itertools import weakref import unicodedata import colors import globalVars from logHandler import log import api import controlTypes import config import tones import synthDriverHandler from synthDriverHandler import * import re import textInfos import queueHandler import speechDictHandler import characterProcessing import languageHandler speechMode_off=0 speechMode_beeps=1 speechMode_talk=2 #: How speech should be handled; one of speechMode_off, speechMode_beeps or speechMode_talk. speechMode=speechMode_talk speechMode_beeps_ms=15 beenCanceled=True isPaused=False curWordChars=[] #Set containing locale codes for languages supporting conjunct characters LANGS_WITH_CONJUNCT_CHARS = {'hi', 'as', 'bn', 'gu', 'kn', 'kok', 'ml', 'mni', 'mr', 'pa', 'te', 'ur', 'ta'} # The REASON_* constants in this module are deprecated and will be removed in a future release. # Use controlTypes.REASON_* instead. from controlTypes import REASON_FOCUS, REASON_FOCUSENTERED, REASON_MOUSE, REASON_QUERY, REASON_CHANGE, REASON_MESSAGE, REASON_SAYALL, REASON_CARET, REASON_ONLYCACHE #: The string used to separate distinct chunks of text when multiple chunks should be spoken without pauses. # #555: Use two spaces so that numbers from adjacent chunks aren't treated as a single number # for languages such as French and German which use space as a thousands separator. CHUNK_SEPARATOR = " " oldTreeLevel=None oldTableID=None oldRowNumber=None oldColumnNumber=None def initialize(): """Loads and sets the synth driver configured in nvda.ini.""" synthDriverHandler.initialize() setSynth(config.conf["speech"]["synth"]) def terminate(): setSynth(None) speechViewerObj=None #: If a chunk of text contains only these characters, it will be considered blank. BLANK_CHUNK_CHARS = frozenset((" ", "\n", "\r", "\0", u"\xa0")) def isBlank(text): """Determine whether text should be reported as blank. @param text: The text in question. @type text: str @return: C{True} if the text is blank, C{False} if not. @rtype: bool """ return not text or set(text) <= BLANK_CHUNK_CHARS RE_CONVERT_WHITESPACE = re.compile("[\0\r\n]") def processText(locale,text,symbolLevel): text = speechDictHandler.processText(text) text = characterProcessing.processSpeechSymbols(locale, text, symbolLevel) text = RE_CONVERT_WHITESPACE.sub(u" ", text) return text.strip() def getLastSpeechIndex(): """Gets the last index passed by the synthesizer. Indexing is used so that its possible to find out when a certain peace of text has been spoken yet. Usually the character position of the text is passed to speak functions as the index. @returns: the last index encountered @rtype: int """ return getSynth().lastIndex def cancelSpeech(): """Interupts the synthesizer from currently speaking""" global beenCanceled, isPaused, _speakSpellingGenerator # Import only for this function to avoid circular import. 
import sayAllHandler sayAllHandler.stop() speakWithoutPauses._pendingSpeechSequence=[] speakWithoutPauses.lastSentIndex=None if _speakSpellingGenerator: _speakSpellingGenerator.close() if beenCanceled: return elif speechMode==speechMode_off: return elif speechMode==speechMode_beeps: return getSynth().cancel() beenCanceled=True isPaused=False def pauseSpeech(switch): global isPaused, beenCanceled getSynth().pause(switch) isPaused=switch beenCanceled=False def speakMessage(text,index=None): """Speaks a given message. @param text: the message to speak @type text: string @param index: the index to mark this current text with, its best to use the character position of the text if you know it @type index: int """ speakText(text,index=index,reason=controlTypes.REASON_MESSAGE) def getCurrentLanguage(): try: language=getSynth().language if config.conf['speech']['trustVoiceLanguage'] else None except NotImplementedError: language=None if language: language=languageHandler.normalizeLanguage(language) if not language: language=languageHandler.getLanguage() return language def spellTextInfo(info,useCharacterDescriptions=False): """Spells the text from the given TextInfo, honouring any LangChangeCommand objects it finds if autoLanguageSwitching is enabled.""" if not config.conf['speech']['autoLanguageSwitching']: speakSpelling(info.text,useCharacterDescriptions=useCharacterDescriptions) return curLanguage=None for field in info.getTextWithFields({}): if isinstance(field,basestring): speakSpelling(field,curLanguage,useCharacterDescriptions=useCharacterDescriptions) elif isinstance(field,textInfos.FieldCommand) and field.command=="formatChange": curLanguage=field.field.get('language') _speakSpellingGenerator=None def speakSpelling(text,locale=None,useCharacterDescriptions=False): global beenCanceled, _speakSpellingGenerator import speechViewer if speechViewer.isActive: speechViewer.appendText(text) if speechMode==speechMode_off: return elif speechMode==speechMode_beeps: tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms) return if isPaused: cancelSpeech() beenCanceled=False defaultLanguage=getCurrentLanguage() if not locale or (not config.conf['speech']['autoDialectSwitching'] and locale.split('_')[0]==defaultLanguage.split('_')[0]): locale=defaultLanguage if not text: # Translators: This is spoken when NVDA moves to an empty line. return getSynth().speak((_("blank"),)) if not text.isspace(): text=text.rstrip() if _speakSpellingGenerator and _speakSpellingGenerator.gi_frame: _speakSpellingGenerator.send((text,locale,useCharacterDescriptions)) else: _speakSpellingGenerator=_speakSpellingGen(text,locale,useCharacterDescriptions) try: # Speak the first character before this function returns. next(_speakSpellingGenerator) except StopIteration: return queueHandler.registerGeneratorObject(_speakSpellingGenerator) def getCharDescListFromText(text,locale): """This method prepares a list, which contains character and its description for all characters the text is made up of, by checking the presence of character descriptions in characterDescriptions.dic of that locale for all possible combination of consecutive characters in the text. This is done to take care of conjunct characters present in several languages such as Hindi, Urdu, etc. 
""" charDescList = [] charDesc=None i = len(text) while i: subText = text[:i] charDesc = characterProcessing.getCharacterDescription(locale,subText) if charDesc or i==1: charDescList.append((subText,charDesc)) text = text[i:] i = len(text) else: i = i - 1 return charDescList def _speakSpellingGen(text,locale,useCharacterDescriptions): synth=getSynth() synthConfig=config.conf["speech"][synth.name] buf=[(text,locale,useCharacterDescriptions)] for text,locale,useCharacterDescriptions in buf: textLength=len(text) count = 0 localeHasConjuncts = True if locale.split('_',1)[0] in LANGS_WITH_CONJUNCT_CHARS else False charDescList = getCharDescListFromText(text,locale) if localeHasConjuncts else text for item in charDescList: if localeHasConjuncts: # item is a tuple containing character and its description char = item[0] charDesc = item[1] else: # item is just a character. char = item if useCharacterDescriptions: charDesc=characterProcessing.getCharacterDescription(locale,char.lower()) uppercase=char.isupper() if useCharacterDescriptions and charDesc: #Consider changing to multiple synth speech calls char=charDesc[0] if textLength>1 else u"\u3001".join(charDesc) else: char=characterProcessing.processSpeechSymbol(locale,char) if uppercase and synthConfig["sayCapForCapitals"]: # Translators: cap will be spoken before the given letter when it is capitalized. char=_("cap %s")%char if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]: oldPitch=synthConfig["pitch"] synth.pitch=max(0,min(oldPitch+synthConfig["capPitchChange"],100)) count = len(char) index=count+1 log.io("Speaking character %r"%char) speechSequence=[LangChangeCommand(locale)] if config.conf['speech']['autoLanguageSwitching'] else [] if len(char) == 1 and synthConfig["useSpellingFunctionality"]: speechSequence.append(CharacterModeCommand(True)) if index is not None: speechSequence.append(IndexCommand(index)) speechSequence.append(char) synth.speak(speechSequence) if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]: synth.pitch=oldPitch while textLength>1 and (isPaused or getLastSpeechIndex()!=index): for x in xrange(2): args=yield if args: buf.append(args) if uppercase and synthConfig["beepForCapitals"]: tones.beep(2000,50) args=yield if args: buf.append(args) def speakObjectProperties(obj,reason=controlTypes.REASON_QUERY,index=None,**allowedProperties): if speechMode==speechMode_off: return #Fetch the values for all wanted properties newPropertyValues={} positionInfo=None for name,value in allowedProperties.iteritems(): if name=="includeTableCellCoords": # This is verbosity info. 
newPropertyValues[name]=value elif name.startswith('positionInfo_') and value: if positionInfo is None: positionInfo=obj.positionInfo elif value: try: newPropertyValues[name]=getattr(obj,name) except NotImplementedError: pass if positionInfo: if allowedProperties.get('positionInfo_level',False) and 'level' in positionInfo: newPropertyValues['positionInfo_level']=positionInfo['level'] if allowedProperties.get('positionInfo_indexInGroup',False) and 'indexInGroup' in positionInfo: newPropertyValues['positionInfo_indexInGroup']=positionInfo['indexInGroup'] if allowedProperties.get('positionInfo_similarItemsInGroup',False) and 'similarItemsInGroup' in positionInfo: newPropertyValues['positionInfo_similarItemsInGroup']=positionInfo['similarItemsInGroup'] #Fetched the cached properties and update them with the new ones oldCachedPropertyValues=getattr(obj,'_speakObjectPropertiesCache',{}).copy() cachedPropertyValues=oldCachedPropertyValues.copy() cachedPropertyValues.update(newPropertyValues) obj._speakObjectPropertiesCache=cachedPropertyValues #If we should only cache we can stop here if reason==controlTypes.REASON_ONLYCACHE: return #If only speaking change, then filter out all values that havn't changed if reason==controlTypes.REASON_CHANGE: for name in set(newPropertyValues)&set(oldCachedPropertyValues): if newPropertyValues[name]==oldCachedPropertyValues[name]: del newPropertyValues[name] elif name=="states": #states need specific handling oldStates=oldCachedPropertyValues[name] newStates=newPropertyValues[name] newPropertyValues['states']=newStates-oldStates newPropertyValues['negativeStates']=oldStates-newStates #properties such as states need to know the role to speak properly, give it as a _ name newPropertyValues['_role']=newPropertyValues.get('role',obj.role) # The real states are needed also, as the states entry might be filtered. newPropertyValues['_states']=obj.states if "rowNumber" in newPropertyValues or "columnNumber" in newPropertyValues: # We're reporting table cell info, so pass the table ID. try: newPropertyValues["_tableID"]=obj.tableID except NotImplementedError: pass #Get the speech text for the properties we want to speak, and then speak it text=getSpeechTextForProperties(reason,**newPropertyValues) if text: speakText(text,index=index) def speakObject(obj,reason=controlTypes.REASON_QUERY,index=None): from NVDAObjects import NVDAObjectTextInfo role=obj.role isEditable=(reason!=controlTypes.REASON_FOCUSENTERED and obj.TextInfo!=NVDAObjectTextInfo and (role in (controlTypes.ROLE_EDITABLETEXT,controlTypes.ROLE_TERMINAL) or controlTypes.STATE_EDITABLE in obj.states)) allowProperties={'name':True,'role':True,'states':True,'value':True,'description':True,'keyboardShortcut':True,'positionInfo_level':True,'positionInfo_indexInGroup':True,'positionInfo_similarItemsInGroup':True,"cellCoordsText":True,"rowNumber":True,"columnNumber":True,"includeTableCellCoords":True,"columnCount":True,"rowCount":True,"rowHeaderText":True,"columnHeaderText":True} if reason==controlTypes.REASON_FOCUSENTERED: allowProperties["value"]=False allowProperties["keyboardShortcut"]=False allowProperties["positionInfo_level"]=False # Aside from excluding some properties, focus entered should be spoken like focus. 
reason=controlTypes.REASON_FOCUS if not config.conf["presentation"]["reportObjectDescriptions"]: allowProperties["description"]=False if not config.conf["presentation"]["reportKeyboardShortcuts"]: allowProperties["keyboardShortcut"]=False if not config.conf["presentation"]["reportObjectPositionInformation"]: allowProperties["positionInfo_level"]=False allowProperties["positionInfo_indexInGroup"]=False allowProperties["positionInfo_similarItemsInGroup"]=False if reason!=controlTypes.REASON_QUERY: allowProperties["rowCount"]=False allowProperties["columnCount"]=False formatConf=config.conf["documentFormatting"] if not formatConf["reportTableCellCoords"]: allowProperties["cellCoordsText"]=False # rowNumber and columnNumber might be needed even if we're not reporting coordinates. allowProperties["includeTableCellCoords"]=False if not formatConf["reportTableHeaders"]: allowProperties["rowHeaderText"]=False allowProperties["columnHeaderText"]=False if (not formatConf["reportTables"] or (not formatConf["reportTableCellCoords"] and not formatConf["reportTableHeaders"])): # We definitely aren't reporting any table info at all. allowProperties["rowNumber"]=False allowProperties["columnNumber"]=False if isEditable: allowProperties['value']=False speakObjectProperties(obj,reason=reason,index=index,**allowProperties) if reason==controlTypes.REASON_ONLYCACHE: return if isEditable: try: info=obj.makeTextInfo(textInfos.POSITION_SELECTION) if not info.isCollapsed: # Translators: This is spoken to indicate what has been selected. for example 'selected hello world' speakSelectionMessage(_("selected %s"),info.text) else: info.expand(textInfos.UNIT_LINE) speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET) except: newInfo=obj.makeTextInfo(textInfos.POSITION_ALL) speakTextInfo(newInfo,unit=textInfos.UNIT_PARAGRAPH,reason=controlTypes.REASON_CARET) elif role==controlTypes.ROLE_MATH: import mathPres mathPres.ensureInit() if mathPres.speechProvider: try: speak(mathPres.speechProvider.getSpeechForMathMl(obj.mathMl)) except (NotImplementedError, LookupError): pass def speakText(text,index=None,reason=controlTypes.REASON_MESSAGE,symbolLevel=None): """Speaks some text. @param text: The text to speak. @type text: str @param index: The index to mark this text with, which can be used later to determine whether this piece of text has been spoken. @type index: int @param reason: The reason for this speech; one of the controlTypes.REASON_* constants. @param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration. """ speechSequence=[] if index is not None: speechSequence.append(IndexCommand(index)) if text is not None: if isBlank(text): # Translators: This is spoken when the line is considered blank. text=_("blank") speechSequence.append(text) speak(speechSequence,symbolLevel=symbolLevel) RE_INDENTATION_SPLIT = re.compile(r"^([^\S\r\n\f\v]*)(.*)$", re.UNICODE | re.DOTALL) def splitTextIndentation(text): """Splits indentation from the rest of the text. @param text: The text to split. @type text: basestring @return: Tuple of indentation and content. @rtype: (basestring, basestring) """ return RE_INDENTATION_SPLIT.match(text).groups() RE_INDENTATION_CONVERT = re.compile(r"(?P<char>\s)(?P=char)*", re.UNICODE) def getIndentationSpeech(indentation): """Retrieves the phrase to be spoken for a given string of indentation. @param indentation: The string of indentation. @type indentation: basestring @return: The phrase to be spoken. 
@rtype: unicode """ # Translators: no indent is spoken when the user moves from a line that has indentation, to one that # does not. if not indentation: # Translators: This is spoken when the given line has no indentation. return _("no indent") res = [] locale=languageHandler.getLanguage() for m in RE_INDENTATION_CONVERT.finditer(indentation): raw = m.group() symbol = characterProcessing.processSpeechSymbol(locale, raw[0]) count = len(raw) if symbol == raw[0]: # There is no replacement for this character, so do nothing. res.append(raw) elif count == 1: res.append(symbol) else: res.append(u"{count} {symbol}".format(count=count, symbol=symbol)) return " ".join(res) def speak(speechSequence,symbolLevel=None): """Speaks a sequence of text and speech commands @param speechSequence: the sequence of text and L{SpeechCommand} objects to speak @param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration. """ if not speechSequence: #Pointless - nothing to speak return import speechViewer if speechViewer.isActive: for item in speechSequence: if isinstance(item,basestring): speechViewer.appendText(item) global beenCanceled, curWordChars curWordChars=[] if speechMode==speechMode_off: return elif speechMode==speechMode_beeps: tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms) return if isPaused: cancelSpeech() beenCanceled=False #Filter out redundant LangChangeCommand objects #And also fill in default values autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching'] autoDialectSwitching=config.conf['speech']['autoDialectSwitching'] curLanguage=defaultLanguage=getCurrentLanguage() prevLanguage=None defaultLanguageRoot=defaultLanguage.split('_')[0] oldSpeechSequence=speechSequence speechSequence=[] for item in oldSpeechSequence: if isinstance(item,LangChangeCommand): if not autoLanguageSwitching: continue curLanguage=item.lang if not curLanguage or (not autoDialectSwitching and curLanguage.split('_')[0]==defaultLanguageRoot): curLanguage=defaultLanguage elif isinstance(item,basestring): if not item: continue if autoLanguageSwitching and curLanguage!=prevLanguage: speechSequence.append(LangChangeCommand(curLanguage)) prevLanguage=curLanguage speechSequence.append(item) else: speechSequence.append(item) if not speechSequence: # After normalisation, the sequence is empty. # There's nothing to speak. return log.io("Speaking %r" % speechSequence) if symbolLevel is None: symbolLevel=config.conf["speech"]["symbolLevel"] curLanguage=defaultLanguage inCharacterMode=False for index in xrange(len(speechSequence)): item=speechSequence[index] if isinstance(item,CharacterModeCommand): inCharacterMode=item.state if autoLanguageSwitching and isinstance(item,LangChangeCommand): curLanguage=item.lang if isinstance(item,basestring): speechSequence[index]=processText(curLanguage,item,symbolLevel) if not inCharacterMode: speechSequence[index]+=CHUNK_SEPARATOR getSynth().speak(speechSequence) def speakSelectionMessage(message,text): if len(text) < 512: speakMessage(message % text) else: # Translators: This is spoken when the user has selected a large portion of text. Example output "1000 characters" speakMessage(message % _("%d characters") % len(text)) def speakSelectionChange(oldInfo,newInfo,speakSelected=True,speakUnselected=True,generalize=False): """Speaks a change in selection, either selected or unselected text. 
@param oldInfo: a TextInfo instance representing what the selection was before @type oldInfo: L{textInfos.TextInfo} @param newInfo: a TextInfo instance representing what the selection is now @type newInfo: L{textInfos.TextInfo} @param generalize: if True, then this function knows that the text may have changed between the creation of the oldInfo and newInfo objects, meaning that changes need to be spoken more generally, rather than speaking the specific text, as the bounds may be all wrong. @type generalize: boolean """ selectedTextList=[] unselectedTextList=[] if newInfo.isCollapsed and oldInfo.isCollapsed: return startToStart=newInfo.compareEndPoints(oldInfo,"startToStart") startToEnd=newInfo.compareEndPoints(oldInfo,"startToEnd") endToStart=newInfo.compareEndPoints(oldInfo,"endToStart") endToEnd=newInfo.compareEndPoints(oldInfo,"endToEnd") if speakSelected and oldInfo.isCollapsed: selectedTextList.append(newInfo.text) elif speakUnselected and newInfo.isCollapsed: unselectedTextList.append(oldInfo.text) else: if startToEnd>0 or endToStart<0: if speakSelected and not newInfo.isCollapsed: selectedTextList.append(newInfo.text) if speakUnselected and not oldInfo.isCollapsed: unselectedTextList.append(oldInfo.text) else: if speakSelected and startToStart<0 and not newInfo.isCollapsed: tempInfo=newInfo.copy() tempInfo.setEndPoint(oldInfo,"endToStart") selectedTextList.append(tempInfo.text) if speakSelected and endToEnd>0 and not newInfo.isCollapsed: tempInfo=newInfo.copy() tempInfo.setEndPoint(oldInfo,"startToEnd") selectedTextList.append(tempInfo.text) if startToStart>0 and not oldInfo.isCollapsed: tempInfo=oldInfo.copy() tempInfo.setEndPoint(newInfo,"endToStart") unselectedTextList.append(tempInfo.text) if endToEnd<0 and not oldInfo.isCollapsed: tempInfo=oldInfo.copy() tempInfo.setEndPoint(newInfo,"startToEnd") unselectedTextList.append(tempInfo.text) locale=languageHandler.getLanguage() if speakSelected: if not generalize: for text in selectedTextList: if len(text)==1: text=characterProcessing.processSpeechSymbol(locale,text) # Translators: This is spoken while the user is in the process of selecting something, For example: "selecting hello" speakSelectionMessage(_("selecting %s"),text) elif len(selectedTextList)>0: text=newInfo.text if len(text)==1: text=characterProcessing.processSpeechSymbol(locale,text) # Translators: This is spoken to indicate what has been selected. for example 'selected hello world' speakSelectionMessage(_("selected %s"),text) if speakUnselected: if not generalize: for text in unselectedTextList: if len(text)==1: text=characterProcessing.processSpeechSymbol(locale,text) # Translators: This is spoken to indicate what has been unselected. for example 'unselecting hello' speakSelectionMessage(_("unselecting %s"),text) elif len(unselectedTextList)>0: # Translators: Reported when selection is removed. speakMessage(_("selection removed")) if not newInfo.isCollapsed: text=newInfo.text if len(text)==1: text=characterProcessing.processSpeechSymbol(locale,text) # Translators: This is spoken to indicate what has been selected. for example 'selected hello world' speakSelectionMessage(_("selected %s"),text) def speakTypedCharacters(ch): global curWordChars; typingIsProtected=api.isTypingProtected() if typingIsProtected: realChar="*" else: realChar=ch if unicodedata.category(ch)[0] in "LMN": curWordChars.append(realChar) elif ch=="\b": # Backspace, so remove the last character from our buffer. 
del curWordChars[-1:] elif ch==u'\u007f': # delete character produced in some apps with control+backspace return elif len(curWordChars)>0: typedWord="".join(curWordChars) curWordChars=[] if log.isEnabledFor(log.IO): log.io("typed word: %s"%typedWord) if config.conf["keyboard"]["speakTypedWords"] and not typingIsProtected: speakText(typedWord) if config.conf["keyboard"]["speakTypedCharacters"] and ord(ch)>=32: speakSpelling(realChar) class SpeakTextInfoState(object): """Caches the state of speakTextInfo such as the current controlField stack, current formatfield and indentation.""" __slots__=[ 'objRef', 'controlFieldStackCache', 'formatFieldAttributesCache', 'indentationCache', ] def __init__(self,obj): if isinstance(obj,SpeakTextInfoState): oldState=obj self.objRef=oldState.objRef else: self.objRef=weakref.ref(obj) oldState=getattr(obj,'_speakTextInfoState',None) self.controlFieldStackCache=list(oldState.controlFieldStackCache) if oldState else [] self.formatFieldAttributesCache=oldState.formatFieldAttributesCache if oldState else {} self.indentationCache=oldState.indentationCache if oldState else "" def updateObj(self): obj=self.objRef() if obj: obj._speakTextInfoState=self.copy() def copy(self): return self.__class__(self) def _speakTextInfo_addMath(speechSequence, info, field): import mathPres mathPres.ensureInit() if not mathPres.speechProvider: return try: speechSequence.extend(mathPres.speechProvider.getSpeechForMathMl(info.getMathMl(field))) except (NotImplementedError, LookupError): return def speakTextInfo(info,useCache=True,formatConfig=None,unit=None,reason=controlTypes.REASON_QUERY,index=None,onlyInitialFields=False,suppressBlanks=False): if isinstance(useCache,SpeakTextInfoState): speakTextInfoState=useCache elif useCache: speakTextInfoState=SpeakTextInfoState(info.obj) else: speakTextInfoState=None autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching'] extraDetail=unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD) if not formatConfig: formatConfig=config.conf["documentFormatting"] if extraDetail: formatConfig=formatConfig.copy() formatConfig['extraDetail']=True reportIndentation=unit==textInfos.UNIT_LINE and formatConfig["reportLineIndentation"] speechSequence=[] #Fetch the last controlFieldStack, or make a blank one controlFieldStackCache=speakTextInfoState.controlFieldStackCache if speakTextInfoState else [] formatFieldAttributesCache=speakTextInfoState.formatFieldAttributesCache if speakTextInfoState else {} textWithFields=info.getTextWithFields(formatConfig) # We don't care about node bounds, especially when comparing fields. # Remove them. 
for command in textWithFields: if not isinstance(command,textInfos.FieldCommand): continue field=command.field if not field: continue try: del field["_startOfNode"] except KeyError: pass try: del field["_endOfNode"] except KeyError: pass #Make a new controlFieldStack and formatField from the textInfo's initialFields newControlFieldStack=[] newFormatField=textInfos.FormatField() initialFields=[] for field in textWithFields: if isinstance(field,textInfos.FieldCommand) and field.command in ("controlStart","formatChange"): initialFields.append(field.field) else: break if len(initialFields)>0: del textWithFields[0:len(initialFields)] endFieldCount=0 for field in reversed(textWithFields): if isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd": endFieldCount+=1 else: break if endFieldCount>0: del textWithFields[0-endFieldCount:] for field in initialFields: if isinstance(field,textInfos.ControlField): newControlFieldStack.append(field) elif isinstance(field,textInfos.FormatField): newFormatField.update(field) else: raise ValueError("unknown field: %s"%field) #Calculate how many fields in the old and new controlFieldStacks are the same commonFieldCount=0 for count in xrange(min(len(newControlFieldStack),len(controlFieldStackCache))): # #2199: When comparing controlFields try using uniqueID if it exists before resorting to compairing the entire dictionary oldUniqueID=controlFieldStackCache[count].get('uniqueID') newUniqueID=newControlFieldStack[count].get('uniqueID') if ((oldUniqueID is not None or newUniqueID is not None) and newUniqueID==oldUniqueID) or (newControlFieldStack[count]==controlFieldStackCache[count]): commonFieldCount+=1 else: break #Get speech text for any fields in the old controlFieldStack that are not in the new controlFieldStack endingBlock=False for count in reversed(xrange(commonFieldCount,len(controlFieldStackCache))): text=info.getControlFieldSpeech(controlFieldStackCache[count],controlFieldStackCache[0:count],"end_removedFromControlFieldStack",formatConfig,extraDetail,reason=reason) if text: speechSequence.append(text) if not endingBlock and reason==controlTypes.REASON_SAYALL: endingBlock=bool(int(controlFieldStackCache[count].get('isBlock',0))) if endingBlock: speechSequence.append(SpeakWithoutPausesBreakCommand()) # The TextInfo should be considered blank if we are only exiting fields (i.e. we aren't entering any new fields and there is no text). isTextBlank=True # Even when there's no speakable text, we still need to notify the synth of the index. 
if index is not None: speechSequence.append(IndexCommand(index)) #Get speech text for any fields that are in both controlFieldStacks, if extra detail is not requested if not extraDetail: for count in xrange(commonFieldCount): field=newControlFieldStack[count] text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_inControlFieldStack",formatConfig,extraDetail,reason=reason) if text: speechSequence.append(text) isTextBlank=False if field.get("role")==controlTypes.ROLE_MATH: isTextBlank=False _speakTextInfo_addMath(speechSequence,info,field) #Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack for count in xrange(commonFieldCount,len(newControlFieldStack)): field=newControlFieldStack[count] text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_addedToControlFieldStack",formatConfig,extraDetail,reason=reason) if text: speechSequence.append(text) isTextBlank=False if field.get("role")==controlTypes.ROLE_MATH: isTextBlank=False _speakTextInfo_addMath(speechSequence,info,field) commonFieldCount+=1 #Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField. text=getFormatFieldSpeech(newFormatField,formatFieldAttributesCache,formatConfig,unit=unit,extraDetail=extraDetail) if text: speechSequence.append(text) if autoLanguageSwitching: language=newFormatField.get('language') speechSequence.append(LangChangeCommand(language)) lastLanguage=language if onlyInitialFields or (unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD) and len(textWithFields)>0 and len(textWithFields[0])==1 and all((isinstance(x,textInfos.FieldCommand) and x.command=="controlEnd") for x in itertools.islice(textWithFields,1,None) )): if onlyInitialFields or any(isinstance(x,basestring) for x in speechSequence): speak(speechSequence) if not onlyInitialFields: speakSpelling(textWithFields[0],locale=language if autoLanguageSwitching else None) if useCache: speakTextInfoState.controlFieldStackCache=newControlFieldStack speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache if not isinstance(useCache,SpeakTextInfoState): speakTextInfoState.updateObj() return #Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands #But also keep newControlFieldStack up to date as we will need it for the ends # Add any text to a separate list, as it must be handled differently. #Also make sure that LangChangeCommand objects are added before any controlField or formatField speech relativeSpeechSequence=[] inTextChunk=False allIndentation="" indentationDone=False for command in textWithFields: if isinstance(command,basestring): if reportIndentation and not indentationDone: indentation,command=splitTextIndentation(command) # Combine all indentation into one string for later processing. allIndentation+=indentation if command: # There was content after the indentation, so there is no more indentation. indentationDone=True if command: if inTextChunk: relativeSpeechSequence[-1]+=command else: relativeSpeechSequence.append(command) inTextChunk=True elif isinstance(command,textInfos.FieldCommand): newLanguage=None if command.command=="controlStart": # Control fields always start a new chunk, even if they have no field text. 
inTextChunk=False fieldText=info.getControlFieldSpeech(command.field,newControlFieldStack,"start_relative",formatConfig,extraDetail,reason=reason) newControlFieldStack.append(command.field) elif command.command=="controlEnd": # Control fields always start a new chunk, even if they have no field text. inTextChunk=False fieldText=info.getControlFieldSpeech(newControlFieldStack[-1],newControlFieldStack[0:-1],"end_relative",formatConfig,extraDetail,reason=reason) del newControlFieldStack[-1] if commonFieldCount>len(newControlFieldStack): commonFieldCount=len(newControlFieldStack) elif command.command=="formatChange": fieldText=getFormatFieldSpeech(command.field,formatFieldAttributesCache,formatConfig,unit=unit,extraDetail=extraDetail) if fieldText: inTextChunk=False if autoLanguageSwitching: newLanguage=command.field.get('language') if lastLanguage!=newLanguage: # The language has changed, so this starts a new text chunk. inTextChunk=False if not inTextChunk: if fieldText: if autoLanguageSwitching and lastLanguage is not None: # Fields must be spoken in the default language. relativeSpeechSequence.append(LangChangeCommand(None)) lastLanguage=None relativeSpeechSequence.append(fieldText) if command.command=="controlStart" and command.field.get("role")==controlTypes.ROLE_MATH: _speakTextInfo_addMath(relativeSpeechSequence,info,command.field) if autoLanguageSwitching and newLanguage!=lastLanguage: relativeSpeechSequence.append(LangChangeCommand(newLanguage)) lastLanguage=newLanguage if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache: indentationSpeech=getIndentationSpeech(allIndentation) if autoLanguageSwitching and speechSequence[-1].lang is not None: # Indentation must be spoken in the default language, # but the initial format field specified a different language. # Insert the indentation before the LangChangeCommand. speechSequence.insert(-1, indentationSpeech) else: speechSequence.append(indentationSpeech) if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation # Don't add this text if it is blank. relativeBlank=True for x in relativeSpeechSequence: if isinstance(x,basestring) and not isBlank(x): relativeBlank=False break if not relativeBlank: speechSequence.extend(relativeSpeechSequence) isTextBlank=False #Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested if autoLanguageSwitching and lastLanguage is not None: speechSequence.append(LangChangeCommand(None)) lastLanguage=None if not extraDetail: for count in reversed(xrange(min(len(newControlFieldStack),commonFieldCount))): text=info.getControlFieldSpeech(newControlFieldStack[count],newControlFieldStack[0:count],"end_inControlFieldStack",formatConfig,extraDetail,reason=reason) if text: speechSequence.append(text) isTextBlank=False # If there is nothing that should cause the TextInfo to be considered non-blank, blank should be reported, unless we are doing a say all. if not suppressBlanks and reason != controlTypes.REASON_SAYALL and isTextBlank: # Translators: This is spoken when the line is considered blank. 
speechSequence.append(_("blank")) #Cache a copy of the new controlFieldStack for future use if useCache: speakTextInfoState.controlFieldStackCache=list(newControlFieldStack) speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache if not isinstance(useCache,SpeakTextInfoState): speakTextInfoState.updateObj() if speechSequence: if reason==controlTypes.REASON_SAYALL: speakWithoutPauses(speechSequence) else: speak(speechSequence) def getSpeechTextForProperties(reason=controlTypes.REASON_QUERY,**propertyValues): global oldTreeLevel, oldTableID, oldRowNumber, oldColumnNumber textList=[] name=propertyValues.get('name') if name: textList.append(name) if 'role' in propertyValues: role=propertyValues['role'] speakRole=True elif '_role' in propertyValues: speakRole=False role=propertyValues['_role'] else: speakRole=False role=controlTypes.ROLE_UNKNOWN value=propertyValues.get('value') if role not in controlTypes.silentValuesForRoles else None cellCoordsText=propertyValues.get('cellCoordsText') rowNumber=propertyValues.get('rowNumber') columnNumber=propertyValues.get('columnNumber') includeTableCellCoords=propertyValues.get('includeTableCellCoords',True) if speakRole and (reason not in (controlTypes.REASON_SAYALL,controlTypes.REASON_CARET,controlTypes.REASON_FOCUS) or not (name or value or cellCoordsText or rowNumber or columnNumber) or role not in controlTypes.silentRolesOnFocus) and (role!=controlTypes.ROLE_MATH or reason not in (controlTypes.REASON_CARET,controlTypes.REASON_SAYALL)): textList.append(controlTypes.roleLabels[role]) if value: textList.append(value) states=propertyValues.get('states') realStates=propertyValues.get('_states',states) if states is not None: positiveStates=controlTypes.processPositiveStates(role,realStates,reason,states) textList.extend([controlTypes.stateLabels[x] for x in positiveStates]) if 'negativeStates' in propertyValues: negativeStates=propertyValues['negativeStates'] else: negativeStates=None if negativeStates is not None or (reason != controlTypes.REASON_CHANGE and states is not None): negativeStates=controlTypes.processNegativeStates(role, realStates, reason, negativeStates) if controlTypes.STATE_DROPTARGET in negativeStates: # "not drop target" doesn't make any sense, so use a custom message. # Translators: Reported when drag and drop is finished. # This is only reported for objects which support accessible drag and drop. textList.append(_("done dragging")) negativeStates.discard(controlTypes.STATE_DROPTARGET) # Translators: Indicates that a particular state on an object is negated. # Separate strings have now been defined for commonly negated states (e.g. not selected and not checked), # but this still might be used in some other cases. # %s will be replaced with the negated state. textList.extend([controlTypes.negativeStateLabels.get(x, _("not %s")%controlTypes.stateLabels[x]) for x in negativeStates]) if 'description' in propertyValues: textList.append(propertyValues['description']) if 'keyboardShortcut' in propertyValues: textList.append(propertyValues['keyboardShortcut']) indexInGroup=propertyValues.get('positionInfo_indexInGroup',0) similarItemsInGroup=propertyValues.get('positionInfo_similarItemsInGroup',0) if 0<indexInGroup<=similarItemsInGroup: # Translators: Spoken to indicate the position of an item in a group of items (such as a list). # {number} is replaced with the number of the item in the group. # {total} is replaced with the total number of items in the group. 
textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup)) if 'positionInfo_level' in propertyValues: level=propertyValues.get('positionInfo_level',None) role=propertyValues.get('role',None) if level is not None: if role in (controlTypes.ROLE_TREEVIEWITEM,controlTypes.ROLE_LISTITEM) and level!=oldTreeLevel: textList.insert(0,_("level %s")%level) oldTreeLevel=level else: # Translators: Speaks the item level in treeviews (example output: level 2). textList.append(_('level %s')%propertyValues['positionInfo_level']) if cellCoordsText or rowNumber or columnNumber: tableID = propertyValues.get("_tableID") # Always treat the table as different if there is no tableID. sameTable = (tableID and tableID == oldTableID) # Don't update the oldTableID if no tableID was given. if tableID and not sameTable: oldTableID = tableID if rowNumber and (not sameTable or rowNumber != oldRowNumber): rowHeaderText = propertyValues.get("rowHeaderText") if rowHeaderText: textList.append(rowHeaderText) if includeTableCellCoords and not cellCoordsText: # Translators: Speaks current row number (example output: row 3). textList.append(_("row %s")%rowNumber) oldRowNumber = rowNumber if columnNumber and (not sameTable or columnNumber != oldColumnNumber): columnHeaderText = propertyValues.get("columnHeaderText") if columnHeaderText: textList.append(columnHeaderText) if includeTableCellCoords and not cellCoordsText: # Translators: Speaks current column number (example output: column 3). textList.append(_("column %s")%columnNumber) oldColumnNumber = columnNumber if includeTableCellCoords and cellCoordsText: textList.append(cellCoordsText) rowCount=propertyValues.get('rowCount',0) columnCount=propertyValues.get('columnCount',0) if rowCount and columnCount: # Translators: Speaks number of columns and rows in a table (example output: with 3 rows and 2 columns). textList.append(_("with {rowCount} rows and {columnCount} columns").format(rowCount=rowCount,columnCount=columnCount)) elif columnCount and not rowCount: # Translators: Speaks number of columns (example output: with 4 columns). textList.append(_("with %s columns")%columnCount) elif rowCount and not columnCount: # Translators: Speaks number of rows (example output: with 2 rows). textList.append(_("with %s rows")%rowCount) if rowCount or columnCount: # The caller is entering a table, so ensure that it is treated as a new table, even if the previous table was the same. 
oldTableID = None return CHUNK_SEPARATOR.join([x for x in textList if x]) def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraDetail=False,reason=None): if attrs.get('isHidden'): return u"" if not formatConfig: formatConfig=config.conf["documentFormatting"] presCat=attrs.getPresentationCategory(ancestorAttrs,formatConfig, reason=reason) childControlCount=int(attrs.get('_childcontrolcount',"0")) if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportName',False): name=attrs.get('name',"") else: name="" role=attrs.get('role',controlTypes.ROLE_UNKNOWN) states=attrs.get('states',set()) keyboardShortcut=attrs.get('keyboardShortcut', "") value=attrs.get('value',"") if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportDescription',False): description=attrs.get('description',"") else: description="" level=attrs.get('level',None) if presCat != attrs.PRESCAT_LAYOUT: tableID = attrs.get("table-id") else: tableID = None roleText=getSpeechTextForProperties(reason=reason,role=role) stateText=getSpeechTextForProperties(reason=reason,states=states,_role=role) keyboardShortcutText=getSpeechTextForProperties(reason=reason,keyboardShortcut=keyboardShortcut) if config.conf["presentation"]["reportKeyboardShortcuts"] else "" nameText=getSpeechTextForProperties(reason=reason,name=name) valueText=getSpeechTextForProperties(reason=reason,value=value) descriptionText=(getSpeechTextForProperties(reason=reason,description=description) if config.conf["presentation"]["reportObjectDescriptions"] else "") levelText=getSpeechTextForProperties(reason=reason,positionInfo_level=level) # Determine under what circumstances this node should be spoken. # speakEntry: Speak when the user enters the control. # speakWithinForLine: When moving by line, speak when the user is already within the control. # speakExitForLine: When moving by line, speak when the user exits the control. # speakExitForOther: When moving by word or character, speak when the user exits the control. speakEntry=speakWithinForLine=speakExitForLine=speakExitForOther=False if presCat == attrs.PRESCAT_SINGLELINE: speakEntry=True speakWithinForLine=True speakExitForOther=True elif presCat in (attrs.PRESCAT_MARKER, attrs.PRESCAT_CELL): speakEntry=True elif presCat == attrs.PRESCAT_CONTAINER: speakEntry=True speakExitForLine=True speakExitForOther=True # Determine the order of speech. # speakContentFirst: Speak the content before the control field info. speakContentFirst = reason == controlTypes.REASON_FOCUS and presCat != attrs.PRESCAT_CONTAINER and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX) and not tableID and controlTypes.STATE_EDITABLE not in states # speakStatesFirst: Speak the states before the role. speakStatesFirst=role==controlTypes.ROLE_LINK # Determine what text to speak. # Special cases if speakEntry and childControlCount and fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states: # List. # Translators: Speaks number of items in a list (example output: list with 5 items). return roleText+" "+_("with %s items")%childControlCount elif fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_TABLE and tableID: # Table. 
return " ".join((roleText, getSpeechTextForProperties(_tableID=tableID, rowCount=attrs.get("table-rowcount"), columnCount=attrs.get("table-columncount")),levelText)) elif fieldType in ("start_addedToControlFieldStack","start_relative") and role in (controlTypes.ROLE_TABLECELL,controlTypes.ROLE_TABLECOLUMNHEADER,controlTypes.ROLE_TABLEROWHEADER) and tableID: # Table cell. reportTableHeaders = formatConfig["reportTableHeaders"] reportTableCellCoords = formatConfig["reportTableCellCoords"] getProps = { 'rowNumber': attrs.get("table-rownumber"), 'columnNumber': attrs.get("table-columnnumber"), 'includeTableCellCoords': reportTableCellCoords } if reportTableHeaders: getProps['rowHeaderText'] = attrs.get("table-rowheadertext") getProps['columnHeaderText'] = attrs.get("table-columnheadertext") return (getSpeechTextForProperties(_tableID=tableID, **getProps) + (" %s" % stateText if stateText else "")) # General cases elif ( (speakEntry and ((speakContentFirst and fieldType in ("end_relative","end_inControlFieldStack")) or (not speakContentFirst and fieldType in ("start_addedToControlFieldStack","start_relative")))) or (speakWithinForLine and not speakContentFirst and not extraDetail and fieldType=="start_inControlFieldStack") ): return CHUNK_SEPARATOR.join([x for x in nameText,(stateText if speakStatesFirst else roleText),(roleText if speakStatesFirst else stateText),valueText,descriptionText,levelText,keyboardShortcutText if x]) elif fieldType in ("end_removedFromControlFieldStack","end_relative") and roleText and ((not extraDetail and speakExitForLine) or (extraDetail and speakExitForOther)): # Translators: Indicates end of something (example output: at the end of a list, speaks out of list). return _("out of %s")%roleText # Special cases elif not extraDetail and not speakEntry and fieldType in ("start_addedToControlFieldStack","start_relative") and controlTypes.STATE_CLICKABLE in states: # Clickable. return getSpeechTextForProperties(states=set([controlTypes.STATE_CLICKABLE])) else: return "" def getFormatFieldSpeech(attrs,attrsCache=None,formatConfig=None,unit=None,extraDetail=False): if not formatConfig: formatConfig=config.conf["documentFormatting"] textList=[] if formatConfig["reportTables"]: tableInfo=attrs.get("table-info") oldTableInfo=attrsCache.get("table-info") if attrsCache is not None else None text=getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=extraDetail) if text: textList.append(text) if formatConfig["reportPage"]: pageNumber=attrs.get("page-number") oldPageNumber=attrsCache.get("page-number") if attrsCache is not None else None if pageNumber and pageNumber!=oldPageNumber: # Translators: Indicates the page number in a document. # %s will be replaced with the page number. text=_("page %s")%pageNumber textList.append(text) if formatConfig["reportHeadings"]: headingLevel=attrs.get("heading-level") oldHeadingLevel=attrsCache.get("heading-level") if attrsCache is not None else None if headingLevel and headingLevel!=oldHeadingLevel: # Translators: Speaks the heading level (example output: heading level 2). text=_("heading level %d")%headingLevel textList.append(text) if formatConfig["reportStyle"]: style=attrs.get("style") oldStyle=attrsCache.get("style") if attrsCache is not None else None if style!=oldStyle: if style: # Translators: Indicates the style of text. # A style is a collection of formatting settings and depends on the application. # %s will be replaced with the name of the style. 
text=_("style %s")%style else: # Translators: Indicates that text has reverted to the default style. # A style is a collection of formatting settings and depends on the application. text=_("default style") textList.append(text) if formatConfig["reportFontName"]: fontFamily=attrs.get("font-family") oldFontFamily=attrsCache.get("font-family") if attrsCache is not None else None if fontFamily and fontFamily!=oldFontFamily: textList.append(fontFamily) fontName=attrs.get("font-name") oldFontName=attrsCache.get("font-name") if attrsCache is not None else None if fontName and fontName!=oldFontName: textList.append(fontName) if formatConfig["reportFontSize"]: fontSize=attrs.get("font-size") oldFontSize=attrsCache.get("font-size") if attrsCache is not None else None if fontSize and fontSize!=oldFontSize: textList.append(fontSize) if formatConfig["reportColor"]: color=attrs.get("color") oldColor=attrsCache.get("color") if attrsCache is not None else None backgroundColor=attrs.get("background-color") oldBackgroundColor=attrsCache.get("background-color") if attrsCache is not None else None if color and backgroundColor and color!=oldColor and backgroundColor!=oldBackgroundColor: # Translators: Reported when both the text and background colors change. # {color} will be replaced with the text color. # {backgroundColor} will be replaced with the background color. textList.append(_("{color} on {backgroundColor}").format( color=color.name if isinstance(color,colors.RGB) else unicode(color), backgroundColor=backgroundColor.name if isinstance(backgroundColor,colors.RGB) else unicode(backgroundColor))) elif color and color!=oldColor: # Translators: Reported when the text color changes (but not the background color). # {color} will be replaced with the text color. textList.append(_("{color}").format(color=color.name if isinstance(color,colors.RGB) else unicode(color))) elif backgroundColor and backgroundColor!=oldBackgroundColor: # Translators: Reported when the background color changes (but not the text color). # {backgroundColor} will be replaced with the background color. textList.append(_("{backgroundColor} background").format(backgroundColor=backgroundColor.name if isinstance(backgroundColor,colors.RGB) else unicode(backgroundColor))) if formatConfig["reportLineNumber"]: lineNumber=attrs.get("line-number") oldLineNumber=attrsCache.get("line-number") if attrsCache is not None else None if lineNumber is not None and lineNumber!=oldLineNumber: # Translators: Indicates the line number of the text. # %s will be replaced with the line number. text=_("line %s")%lineNumber textList.append(text) if formatConfig["reportRevisions"]: # Insertion revision=attrs.get("revision-insertion") oldRevision=attrsCache.get("revision-insertion") if attrsCache is not None else None if (revision or oldRevision is not None) and revision!=oldRevision: # Translators: Reported when text is marked as having been inserted text=(_("inserted") if revision # Translators: Reported when text is no longer marked as having been inserted. else _("not inserted")) textList.append(text) revision=attrs.get("revision-deletion") oldRevision=attrsCache.get("revision-deletion") if attrsCache is not None else None if (revision or oldRevision is not None) and revision!=oldRevision: # Translators: Reported when text is marked as having been deleted text=(_("deleted") if revision # Translators: Reported when text is no longer marked as having been deleted. 
else _("not deleted")) textList.append(text) revision=attrs.get("revision") oldRevision=attrsCache.get("revision") if attrsCache is not None else None if (revision or oldRevision is not None) and revision!=oldRevision: # Translators: Reported when text is revised. text=(_("revised %s"%revision) if revision # Translators: Reported when text is not revised. else _("no revised %s")%oldRevision) textList.append(text) if formatConfig["reportEmphasis"]: # marked text marked=attrs.get("marked") oldMarked=attrsCache.get("marked") if attrsCache is not None else None if (marked or oldMarked is not None) and marked!=oldMarked: # Translators: Reported when text is marked text=(_("marked") if marked # Translators: Reported when text is no longer marked else _("not marked")) textList.append(text) # strong text strong=attrs.get("strong") oldStrong=attrsCache.get("strong") if attrsCache is not None else None if (strong or oldStrong is not None) and strong!=oldStrong: # Translators: Reported when text is marked as strong (e.g. bold) text=(_("strong") if strong # Translators: Reported when text is no longer marked as strong (e.g. bold) else _("not strong")) textList.append(text) # emphasised text emphasised=attrs.get("emphasised") oldEmphasised=attrsCache.get("emphasised") if attrsCache is not None else None if (emphasised or oldEmphasised is not None) and emphasised!=oldEmphasised: # Translators: Reported when text is marked as emphasised text=(_("emphasised") if emphasised # Translators: Reported when text is no longer marked as emphasised else _("not emphasised")) textList.append(text) if formatConfig["reportFontAttributes"]: bold=attrs.get("bold") oldBold=attrsCache.get("bold") if attrsCache is not None else None if (bold or oldBold is not None) and bold!=oldBold: # Translators: Reported when text is bolded. text=(_("bold") if bold # Translators: Reported when text is not bolded. else _("no bold")) textList.append(text) italic=attrs.get("italic") oldItalic=attrsCache.get("italic") if attrsCache is not None else None if (italic or oldItalic is not None) and italic!=oldItalic: # Translators: Reported when text is italicized. text=(_("italic") if italic # Translators: Reported when text is not italicized. else _("no italic")) textList.append(text) strikethrough=attrs.get("strikethrough") oldStrikethrough=attrsCache.get("strikethrough") if attrsCache is not None else None if (strikethrough or oldStrikethrough is not None) and strikethrough!=oldStrikethrough: # Translators: Reported when text is formatted with strikethrough. # See http://en.wikipedia.org/wiki/Strikethrough text=(_("strikethrough") if strikethrough # Translators: Reported when text is formatted without strikethrough. # See http://en.wikipedia.org/wiki/Strikethrough else _("no strikethrough")) textList.append(text) underline=attrs.get("underline") oldUnderline=attrsCache.get("underline") if attrsCache is not None else None if (underline or oldUnderline is not None) and underline!=oldUnderline: # Translators: Reported when text is underlined. text=(_("underlined") if underline # Translators: Reported when text is not underlined. else _("not underlined")) textList.append(text) textPosition=attrs.get("text-position") oldTextPosition=attrsCache.get("text-position") if attrsCache is not None else None if (textPosition or oldTextPosition is not None) and textPosition!=oldTextPosition: textPosition=textPosition.lower() if textPosition else textPosition if textPosition=="super": # Translators: Reported for superscript text. 
text=_("superscript") elif textPosition=="sub": # Translators: Reported for subscript text. text=_("subscript") else: # Translators: Reported for text which is at the baseline position; # i.e. not superscript or subscript. text=_("baseline") textList.append(text) if formatConfig["reportAlignment"]: textAlign=attrs.get("text-align") oldTextAlign=attrsCache.get("text-align") if attrsCache is not None else None if (textAlign or oldTextAlign is not None) and textAlign!=oldTextAlign: textAlign=textAlign.lower() if textAlign else textAlign if textAlign=="left": # Translators: Reported when text is left-aligned. text=_("align left") elif textAlign=="center": # Translators: Reported when text is centered. text=_("align center") elif textAlign=="right": # Translators: Reported when text is right-aligned. text=_("align right") elif textAlign=="justify": # Translators: Reported when text is justified. # See http://en.wikipedia.org/wiki/Typographic_alignment#Justified text=_("align justify") elif textAlign=="distribute": # Translators: Reported when text is justified with character spacing (Japanese etc) # See http://kohei.us/2010/01/21/distributed-text-justification/ text=_("align distributed") else: # Translators: Reported when text has reverted to default alignment. text=_("align default") textList.append(text) if formatConfig["reportParagraphIndentation"]: indentLabels={ 'left-indent':( # Translators: the label for paragraph format left indent _("left indent"), # Translators: the message when there is no paragraph format left indent _("no left indent"), ), 'right-indent':( # Translators: the label for paragraph format right indent _("right indent"), # Translators: the message when there is no paragraph format right indent _("no right indent"), ), 'hanging-indent':( # Translators: the label for paragraph format hanging indent _("hanging indent"), # Translators: the message when there is no paragraph format hanging indent _("no hanging indent"), ), 'first-line-indent':( # Translators: the label for paragraph format first line indent _("first line indent"), # Translators: the message when there is no paragraph format first line indent _("no first line indent"), ), } for attr,(label,noVal) in indentLabels.iteritems(): newVal=attrs.get(attr) oldVal=attrsCache.get(attr) if attrsCache else None if (newVal or oldVal is not None) and newVal!=oldVal: if newVal: textList.append(u"%s %s"%(label,newVal)) else: textList.append(noVal) verticalAlign=attrs.get("vertical-align") oldverticalAlign=attrsCache.get("vertical-align") if attrsCache is not None else None if (verticalAlign or oldverticalAlign is not None) and verticalAlign!=oldverticalAlign: verticalAlign=verticalAlign.lower() if verticalAlign else verticalAlign if verticalAlign=="top": # Translators: Reported when text is vertically top-aligned. text=_("vertical align top") elif verticalAlign in("center","middle"): # Translators: Reported when text is vertically middle aligned. text=_("vertical align middle") elif verticalAlign=="bottom": # Translators: Reported when text is vertically bottom-aligned. text=_("vertical align bottom") elif verticalAlign=="baseline": # Translators: Reported when text is vertically aligned on the baseline. text=_("vertical align baseline") elif verticalAlign=="justify": # Translators: Reported when text is vertically justified. text=_("vertical align justified") elif verticalAlign=="distributed": # Translators: Reported when text is vertically justified but with character spacing (For some Asian content). 
text=_("vertical align distributed") else: # Translators: Reported when text has reverted to default vertical alignment. text=_("vertical align default") textList.append(text) if formatConfig["reportLinks"]: link=attrs.get("link") oldLink=attrsCache.get("link") if attrsCache is not None else None if (link or oldLink is not None) and link!=oldLink: text=_("link") if link else _("out of %s")%_("link") textList.append(text) if formatConfig["reportComments"]: comment=attrs.get("comment") oldComment=attrsCache.get("comment") if attrsCache is not None else None if (comment or oldComment is not None) and comment!=oldComment: if comment: # Translators: Reported when text contains a comment. text=_("has comment") textList.append(text) elif extraDetail: # Translators: Reported when text no longer contains a comment. text=_("out of comment") textList.append(text) if formatConfig["reportSpellingErrors"]: invalidSpelling=attrs.get("invalid-spelling") oldInvalidSpelling=attrsCache.get("invalid-spelling") if attrsCache is not None else None if (invalidSpelling or oldInvalidSpelling is not None) and invalidSpelling!=oldInvalidSpelling: if invalidSpelling: # Translators: Reported when text contains a spelling error. text=_("spelling error") elif extraDetail: # Translators: Reported when moving out of text containing a spelling error. text=_("out of spelling error") else: text="" if text: textList.append(text) if unit in (textInfos.UNIT_LINE,textInfos.UNIT_SENTENCE,textInfos.UNIT_PARAGRAPH,textInfos.UNIT_READINGCHUNK): linePrefix=attrs.get("line-prefix") if linePrefix: textList.append(linePrefix) if attrsCache is not None: attrsCache.clear() attrsCache.update(attrs) return CHUNK_SEPARATOR.join(textList) def getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=False): if tableInfo is None and oldTableInfo is None: return "" if tableInfo is None and oldTableInfo is not None: # Translators: Indicates end of a table. return _("out of table") if not oldTableInfo or tableInfo.get("table-id")!=oldTableInfo.get("table-id"): newTable=True else: newTable=False textList=[] if newTable: columnCount=tableInfo.get("column-count",0) rowCount=tableInfo.get("row-count",0) # Translators: reports number of columns and rows in a table (example output: table with 3 columns and 5 rows). text=_("table with {columnCount} columns and {rowCount} rows").format(columnCount=columnCount,rowCount=rowCount) textList.append(text) oldColumnNumber=oldTableInfo.get("column-number",0) if oldTableInfo else 0 columnNumber=tableInfo.get("column-number",0) if columnNumber!=oldColumnNumber: textList.append(_("column %s")%columnNumber) oldRowNumber=oldTableInfo.get("row-number",0) if oldTableInfo else 0 rowNumber=tableInfo.get("row-number",0) if rowNumber!=oldRowNumber: textList.append(_("row %s")%rowNumber) return " ".join(textList) re_last_pause=re.compile(ur"^(.*(?<=[^\s.!?])[.!?][\"'”’)]?(?:\s+|$))(.*$)",re.DOTALL|re.UNICODE) def speakWithoutPauses(speechSequence,detectBreaks=True): """ Speaks the speech sequences given over multiple calls, only sending to the synth at acceptable phrase or sentence boundaries, or when given None for the speech sequence. 
""" lastStartIndex=0 #Break on all explicit break commands if detectBreaks and speechSequence: sequenceLen=len(speechSequence) for index in xrange(sequenceLen): if isinstance(speechSequence[index],SpeakWithoutPausesBreakCommand): if index>0 and lastStartIndex<index: speakWithoutPauses(speechSequence[lastStartIndex:index],detectBreaks=False) speakWithoutPauses(None) lastStartIndex=index+1 if lastStartIndex<sequenceLen: speakWithoutPauses(speechSequence[lastStartIndex:],detectBreaks=False) return finalSpeechSequence=[] #To be spoken now pendingSpeechSequence=[] #To be saved off for speaking later if speechSequence is None: #Requesting flush if speakWithoutPauses._pendingSpeechSequence: #Place the last incomplete phrase in to finalSpeechSequence to be spoken now finalSpeechSequence=speakWithoutPauses._pendingSpeechSequence speakWithoutPauses._pendingSpeechSequence=[] else: #Handling normal speech #Scan the given speech and place all completed phrases in finalSpeechSequence to be spoken, #And place the final incomplete phrase in pendingSpeechSequence for index in xrange(len(speechSequence)-1,-1,-1): item=speechSequence[index] if isinstance(item,basestring): m=re_last_pause.match(item) if m: before,after=m.groups() if after: pendingSpeechSequence.append(after) if before: finalSpeechSequence.extend(speakWithoutPauses._pendingSpeechSequence) speakWithoutPauses._pendingSpeechSequence=[] finalSpeechSequence.extend(speechSequence[0:index]) finalSpeechSequence.append(before) # Apply the last language change to the pending sequence. # This will need to be done for any other speech change commands introduced in future. for changeIndex in xrange(index-1,-1,-1): change=speechSequence[changeIndex] if not isinstance(change,LangChangeCommand): continue pendingSpeechSequence.append(change) break break else: pendingSpeechSequence.append(item) else: pendingSpeechSequence.append(item) if pendingSpeechSequence: pendingSpeechSequence.reverse() speakWithoutPauses._pendingSpeechSequence.extend(pendingSpeechSequence) #Scan the final speech sequence backwards for item in reversed(finalSpeechSequence): if isinstance(item,IndexCommand): speakWithoutPauses.lastSentIndex=item.index break if finalSpeechSequence: speak(finalSpeechSequence) speakWithoutPauses.lastSentIndex=None speakWithoutPauses._pendingSpeechSequence=[] class SpeechCommand(object): """ The base class for objects that can be inserted between string of text for parituclar speech functions that convey things such as indexing or voice parameter changes. """ class IndexCommand(SpeechCommand): """Represents an index within some speech.""" def __init__(self,index): """ @param index: the value of this index @type index: integer """ if not isinstance(index,int): raise ValueError("index must be int, not %s"%type(index)) self.index=index def __repr__(self): return "IndexCommand(%r)" % self.index class CharacterModeCommand(object): """Turns character mode on and off for speech synths.""" def __init__(self,state): """ @param state: if true character mode is on, if false its turned off. @type state: boolean """ if not isinstance(state,bool): raise ValueError("state must be boolean, not %s"%type(state)) self.state=state def __repr__(self): return "CharacterModeCommand(%r)" % self.state class LangChangeCommand(SpeechCommand): """A command to switch the language within speech.""" def __init__(self,lang): """ @param lang: the language to switch to: If None then the NVDA locale will be used. 
@type lang: string """ self.lang=lang # if lang else languageHandler.getLanguage() def __repr__(self): return "LangChangeCommand (%r)"%self.lang class SpeakWithoutPausesBreakCommand(SpeechCommand): """Forces speakWithoutPauses to flush its buffer and therefore break the sentence at this point. This should only be used with the L{speakWithoutPauses} function. This will be removed during processing. """ class BreakCommand(SpeechCommand): """Insert a break between words. """ def __init__(self, time=0): """ @param time: The duration of the pause to be inserted in milliseconds. @param time: int """ self.time = time def __repr__(self): return "BreakCommand(time=%d)" % self.time class PitchCommand(SpeechCommand): """Change the pitch of the voice. """ def __init__(self, multiplier=1): """ @param multiplier: The number by which to multiply the current pitch setting; e.g. 0.5 is half, 1 returns to the current pitch setting. @param multiplier: int/float """ self.multiplier = multiplier def __repr__(self): return "PitchCommand(multiplier=%g)" % self.multiplier class VolumeCommand(SpeechCommand): """Change the volume of the voice. """ def __init__(self, multiplier=1): """ @param multiplier: The number by which to multiply the current volume setting; e.g. 0.5 is half, 1 returns to the current volume setting. @param multiplier: int/float """ self.multiplier = multiplier def __repr__(self): return "VolumeCommand(multiplier=%g)" % self.multiplier class RateCommand(SpeechCommand): """Change the rate of the voice. """ def __init__(self, multiplier=1): """ @param multiplier: The number by which to multiply the current rate setting; e.g. 0.5 is half, 1 returns to the current rate setting. @param multiplier: int/float """ self.multiplier = multiplier def __repr__(self): return "RateCommand(multiplier=%g)" % self.multiplier class PhonemeCommand(SpeechCommand): """Insert a specific pronunciation. This command accepts Unicode International Phonetic Alphabet (IPA) characters. Note that this is not well supported by synthesizers. """ def __init__(self, ipa, text=None): """ @param ipa: Unicode IPA characters. @type ipa: unicode @param text: Text to speak if the synthesizer does not support some or all of the specified IPA characters, C{None} to ignore this command instead. @type text: unicode """ self.ipa = ipa self.text = text def __repr__(self): out = "PhonemeCommand(%r" % self.ipa if self.text: out += ", text=%r" % self.text return out + ")"
1
17,525
@MichaelDCurran, thoughts on adding a breakpoint format field attribute? It seems almost app specific, but I guess it does seem odd abusing line-prefix. Is there any more generic concept here?
nvaccess-nvda
py
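The reviewer's question above is whether NVDA should grow a dedicated format field attribute instead of reusing line-prefix for breakpoints. Purely as a sketch of what that could look like, the fragment below follows the same pattern getFormatFieldSpeech already uses for attributes such as comment and invalid-spelling; the attribute name "breakpoint", the config key "reportBreakpoints", and the translated messages are hypothetical, not existing NVDA identifiers.

	# Hypothetical sketch only: a dedicated "breakpoint" format field attribute,
	# mirroring the existing comment / invalid-spelling handling above.
	if formatConfig.get("reportBreakpoints"):
		breakpoint=attrs.get("breakpoint")
		oldBreakpoint=attrsCache.get("breakpoint") if attrsCache is not None else None
		if (breakpoint or oldBreakpoint is not None) and breakpoint!=oldBreakpoint:
			if breakpoint:
				# Translators: Reported when text is marked with a breakpoint (hypothetical message).
				textList.append(_("breakpoint"))
			elif extraDetail:
				# Translators: Reported when moving out of text marked with a breakpoint (hypothetical message).
				textList.append(_("out of breakpoint"))

Whether a per-app attribute like this or some more generic concept is preferable is exactly the open question in the comment.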
@@ -539,7 +539,10 @@ class WebElement(object): @property def rect(self): """A dictionary with the size and location of the element.""" - return self._execute(Command.GET_ELEMENT_RECT)['value'] + if self._w3c: + return self._execute(Command.GET_ELEMENT_RECT)['value'] + else: + return self.size, self.location @property def screenshot_as_base64(self):
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import base64 import hashlib import os import pkgutil import warnings import zipfile from selenium.common.exceptions import WebDriverException from selenium.webdriver.common.by import By from selenium.webdriver.common.utils import keys_to_typing from .command import Command # Python 3 imports try: str = basestring except NameError: pass try: from StringIO import StringIO as IOStream except ImportError: # 3+ from io import BytesIO as IOStream # not relying on __package__ here as it can be `None` in some situations (see #4558) _pkg = '.'.join(__name__.split('.')[:-1]) getAttribute_js = pkgutil.get_data(_pkg, 'getAttribute.js').decode('utf8') isDisplayed_js = pkgutil.get_data(_pkg, 'isDisplayed.js').decode('utf8') class WebElement(object): """Represents a DOM element. Generally, all interesting operations that interact with a document will be performed through this interface. All method calls will do a freshness check to ensure that the element reference is still valid. This essentially determines whether or not the element is still attached to the DOM. If this test fails, then an ``StaleElementReferenceException`` is thrown, and all future calls to this instance will fail.""" def __init__(self, parent, id_, w3c=False): self._parent = parent self._id = id_ self._w3c = w3c def __repr__(self): return '<{0.__module__}.{0.__name__} (session="{1}", element="{2}")>'.format( type(self), self._parent.session_id, self._id) @property def tag_name(self): """This element's ``tagName`` property.""" return self._execute(Command.GET_ELEMENT_TAG_NAME)['value'] @property def text(self): """The text of the element.""" return self._execute(Command.GET_ELEMENT_TEXT)['value'] def click(self): """Clicks the element.""" self._execute(Command.CLICK_ELEMENT) def submit(self): """Submits a form.""" if self._w3c: form = self.find_element(By.XPATH, "./ancestor-or-self::form") self._parent.execute_script( "var e = arguments[0].ownerDocument.createEvent('Event');" "e.initEvent('submit', true, true);" "if (arguments[0].dispatchEvent(e)) { arguments[0].submit() }", form) else: self._execute(Command.SUBMIT_ELEMENT) def clear(self): """Clears the text if it's a text entry element.""" self._execute(Command.CLEAR_ELEMENT) def get_property(self, name): """ Gets the given property of the element. :Args: - name - Name of the property to retrieve. 
Example:: text_length = target_element.get_property("text_length") """ try: return self._execute(Command.GET_ELEMENT_PROPERTY, {"name": name})["value"] except WebDriverException: # if we hit an end point that doesnt understand getElementProperty lets fake it return self.parent.execute_script('return arguments[0][arguments[1]]', self, name) def get_attribute(self, name): """Gets the given attribute or property of the element. This method will first try to return the value of a property with the given name. If a property with that name doesn't exist, it returns the value of the attribute with the same name. If there's no attribute with that name, ``None`` is returned. Values which are considered truthy, that is equals "true" or "false", are returned as booleans. All other non-``None`` values are returned as strings. For attributes or properties which do not exist, ``None`` is returned. :Args: - name - Name of the attribute/property to retrieve. Example:: # Check if the "active" CSS class is applied to an element. is_active = "active" in target_element.get_attribute("class") """ attributeValue = '' if self._w3c: attributeValue = self.parent.execute_script( "return (%s).apply(null, arguments);" % getAttribute_js, self, name) else: resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name}) attributeValue = resp.get('value') if attributeValue is not None: if name != 'value' and attributeValue.lower() in ('true', 'false'): attributeValue = attributeValue.lower() return attributeValue def is_selected(self): """Returns whether the element is selected. Can be used to check if a checkbox or radio button is selected. """ return self._execute(Command.IS_ELEMENT_SELECTED)['value'] def is_enabled(self): """Returns whether the element is enabled.""" return self._execute(Command.IS_ELEMENT_ENABLED)['value'] def find_element_by_id(self, id_): """Finds element within this element's children by ID. :Args: - id\_ - ID of child element to locate. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: foo_element = element.find_element_by_id('foo') """ return self.find_element(by=By.ID, value=id_) def find_elements_by_id(self, id_): """Finds a list of elements within this element's children by ID. Will return a list of webelements if found, or an empty list if not. :Args: - id\_ - Id of child element to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: elements = element.find_elements_by_id('foo') """ return self.find_elements(by=By.ID, value=id_) def find_element_by_name(self, name): """Finds element within this element's children by name. :Args: - name - name property of the element to find. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_name('foo') """ return self.find_element(by=By.NAME, value=name) def find_elements_by_name(self, name): """Finds a list of elements within this element's children by name. :Args: - name - name property to search for. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: elements = element.find_elements_by_name('foo') """ return self.find_elements(by=By.NAME, value=name) def find_element_by_link_text(self, link_text): """Finds element within this element's children by visible link text. :Args: - link_text - Link text string to search for. 
:Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_link_text('Sign In') """ return self.find_element(by=By.LINK_TEXT, value=link_text) def find_elements_by_link_text(self, link_text): """Finds a list of elements within this element's children by visible link text. :Args: - link_text - Link text string to search for. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: elements = element.find_elements_by_link_text('Sign In') """ return self.find_elements(by=By.LINK_TEXT, value=link_text) def find_element_by_partial_link_text(self, link_text): """Finds element within this element's children by partially visible link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_partial_link_text('Sign') """ return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_elements_by_partial_link_text(self, link_text): """Finds a list of elements within this element's children by link text. :Args: - link_text: The text of the element to partial match on. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: elements = element.find_elements_by_partial_link_text('Sign') """ return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_element_by_tag_name(self, name): """Finds element within this element's children by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_tag_name('h1') """ return self.find_element(by=By.TAG_NAME, value=name) def find_elements_by_tag_name(self, name): """Finds a list of elements within this element's children by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: elements = element.find_elements_by_tag_name('h1') """ return self.find_elements(by=By.TAG_NAME, value=name) def find_element_by_xpath(self, xpath): """Finds element by xpath. :Args: - xpath - xpath of element to locate. "//input[@class='myelement']" Note: The base path will be relative to this element's location. This will select the first link under this element. :: myelement.find_element_by_xpath(".//a") However, this will select the first link on the page. :: myelement.find_element_by_xpath("//a") :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_xpath('//div/td[1]') """ return self.find_element(by=By.XPATH, value=xpath) def find_elements_by_xpath(self, xpath): """Finds elements within the element by xpath. :Args: - xpath - xpath locator string. Note: The base path will be relative to this element's location. This will select all links under this element. :: myelement.find_elements_by_xpath(".//a") However, this will select all links in the page itself. :: myelement.find_elements_by_xpath("//a") :Returns: - list of WebElement - a list with elements if any was found. 
An empty list if not :Usage: elements = element.find_elements_by_xpath("//div[contains(@class, 'foo')]") """ return self.find_elements(by=By.XPATH, value=xpath) def find_element_by_class_name(self, name): """Finds element within this element's children by class name. :Args: - name: The class name of the element to find. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_class_name('foo') """ return self.find_element(by=By.CLASS_NAME, value=name) def find_elements_by_class_name(self, name): """Finds a list of elements within this element's children by class name. :Args: - name: The class name of the elements to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: elements = element.find_elements_by_class_name('foo') """ return self.find_elements(by=By.CLASS_NAME, value=name) def find_element_by_css_selector(self, css_selector): """Finds element within this element's children by CSS selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: element = element.find_element_by_css_selector('#foo') """ return self.find_element(by=By.CSS_SELECTOR, value=css_selector) def find_elements_by_css_selector(self, css_selector): """Finds a list of elements within this element's children by CSS selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: elements = element.find_elements_by_css_selector('.foo') """ return self.find_elements(by=By.CSS_SELECTOR, value=css_selector) def send_keys(self, *value): """Simulates typing into the element. :Args: - value - A string for typing, or setting form fields. For setting file inputs, this could be a local file path. Use this to send simple key events or to fill out form fields:: form_textfield = driver.find_element_by_name('username') form_textfield.send_keys("admin") This can also be used to set file inputs. :: file_input = driver.find_element_by_name('profilePic') file_input.send_keys("path/to/profilepic.gif") # Generally it's better to wrap the file path in one of the methods # in os.path to return the actual path to support cross OS testing. # file_input.send_keys(os.path.abspath("path/to/profilepic.gif")) """ # transfer file to another machine only if remote driver is used # the same behaviour as for java binding if self.parent._is_remote: local_file = self.parent.file_detector.is_local_file(*value) if local_file is not None: value = self._upload(local_file) self._execute(Command.SEND_KEYS_TO_ELEMENT, {'text': "".join(keys_to_typing(value)), 'value': keys_to_typing(value)}) # RenderedWebElement Items def is_displayed(self): """Whether the element is visible to a user.""" # Only go into this conditional for browsers that don't use the atom themselves if self._w3c and self.parent.capabilities['browserName'] == 'safari': return self.parent.execute_script( "return (%s).apply(null, arguments);" % isDisplayed_js, self) else: return self._execute(Command.IS_ELEMENT_DISPLAYED)['value'] @property def location_once_scrolled_into_view(self): """THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover where on the screen an element is so that we can click it. This method should cause the element to be scrolled into view. 
Returns the top lefthand corner location on the screen, or ``None`` if the element is not visible. """ if self._w3c: old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, { 'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()", 'args': [self]})['value'] return {"x": round(old_loc['x']), "y": round(old_loc['y'])} else: return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value'] @property def size(self): """The size of the element.""" size = {} if self._w3c: size = self._execute(Command.GET_ELEMENT_RECT)['value'] else: size = self._execute(Command.GET_ELEMENT_SIZE)['value'] new_size = {"height": size["height"], "width": size["width"]} return new_size def value_of_css_property(self, property_name): """The value of a CSS property.""" return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY, { 'propertyName': property_name})['value'] @property def location(self): """The location of the element in the renderable canvas.""" if self._w3c: old_loc = self._execute(Command.GET_ELEMENT_RECT)['value'] else: old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value'] new_loc = {"x": round(old_loc['x']), "y": round(old_loc['y'])} return new_loc @property def rect(self): """A dictionary with the size and location of the element.""" return self._execute(Command.GET_ELEMENT_RECT)['value'] @property def screenshot_as_base64(self): """ Gets the screenshot of the current element as a base64 encoded string. :Usage: img_b64 = element.screenshot_as_base64 """ return self._execute(Command.ELEMENT_SCREENSHOT)['value'] @property def screenshot_as_png(self): """ Gets the screenshot of the current element as a binary data. :Usage: element_png = element.screenshot_as_png """ return base64.b64decode(self.screenshot_as_base64.encode('ascii')) def screenshot(self, filename): """ Saves a screenshot of the current element to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. :Usage: element.screenshot('/Screenshots/foo.png') """ if not filename.lower().endswith('.png'): warnings.warn("name used for saved screenshot does not match file " "type. It should end with a `.png` extension", UserWarning) png = self.screenshot_as_png try: with open(filename, 'wb') as f: f.write(png) except IOError: return False finally: del png return True @property def parent(self): """Internal reference to the WebDriver instance this element was found from.""" return self._parent @property def id(self): """Internal ID used by selenium. This is mainly for internal use. Simple use cases such as checking if 2 webelements refer to the same element, can be done using ``==``:: if element1 == element2: print("These 2 are equal") """ return self._id def __eq__(self, element): return hasattr(element, 'id') and self._id == element.id def __ne__(self, element): return not self.__eq__(element) # Private Methods def _execute(self, command, params=None): """Executes a command against the underlying HTML element. Args: command: The name of the command to _execute as a string. params: A dictionary of named parameters to send with the command. Returns: The command's JSON response loaded into a dictionary object. """ if not params: params = {} params['id'] = self._id return self._parent.execute(command, params) def find_element(self, by=By.ID, value=None): """ Find an element given a By strategy and locator. 
Prefer the find_element_by_* methods when possible. :Usage: element = element.find_element(By.ID, 'foo') :rtype: WebElement """ if self._w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value return self._execute(Command.FIND_CHILD_ELEMENT, {"using": by, "value": value})['value'] def find_elements(self, by=By.ID, value=None): """ Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when possible. :Usage: element = element.find_elements(By.CLASS_NAME, 'foo') :rtype: list of WebElement """ if self._w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value return self._execute(Command.FIND_CHILD_ELEMENTS, {"using": by, "value": value})['value'] def __hash__(self): return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16) def _upload(self, filename): fp = IOStream() zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED) zipped.write(filename, os.path.split(filename)[1]) zipped.close() content = base64.encodestring(fp.getvalue()) if not isinstance(content, str): content = content.decode('utf-8') try: return self._execute(Command.UPLOAD_FILE, {'file': content})['value'] except WebDriverException as e: if "Unrecognized command: POST" in e.__str__(): return filename elif "Command not found: POST " in e.__str__(): return filename elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__(): return filename else: raise e
1
15,812
This would return a tuple of two dictionaries. You need to combine them and return a single dictionary.
SeleniumHQ-selenium
rb
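The review comment above notes that the proposed non-W3C branch, return self.size, self.location, yields a tuple of two dictionaries, while the rect property is documented to return a single dictionary. A minimal sketch of the combined form, assuming only the size and location properties already defined in this file:

    @property
    def rect(self):
        """A dictionary with the size and location of the element."""
        if self._w3c:
            return self._execute(Command.GET_ELEMENT_RECT)['value']
        else:
            # Merge the two dictionaries so callers always get one dict
            # with height, width, x and y keys, matching the W3C branch.
            rect = dict(self.size)       # {'height': ..., 'width': ...}
            rect.update(self.location)   # adds {'x': ..., 'y': ...}
            return rect

This keeps the return shape identical across both branches, which is what the reviewer is asking for.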
@@ -7,10 +7,14 @@ import ( "errors" "fmt" + "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" + "github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest" + "github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/secretsmanager" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/ssm" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/prompt" + "github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace" "github.com/spf13/cobra" )
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/ssm" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/prompt" "github.com/spf13/cobra" ) const ( pipelineAddEnvPrompt = "Would you like to add an environment to your pipeline?" pipelineSelectEnvPrompt = "Which environment would you like to add to your pipeline?" pipelineEnterGitHubRepoPrompt = "What is your application's GitHub repository?" ) var errNoEnvsInProject = errors.New("There were no more environments found that can be added to your pipeline. Please run `archer env init` to create a new environment.") // InitPipelineOpts holds the configuration needed to create a new pipeilne type InitPipelineOpts struct { // Fields with matching flags. Environments []string GitHubRepo string GitHubAccessToken string EnableCD bool Deploy bool // Interfaces to interact with dependencies. prompt prompter // Outputs stored on successful actions. manifestPath string // Caches environments projectEnvs []string *GlobalOpts } func NewInitPipelineOpts() *InitPipelineOpts { return &InitPipelineOpts{ GlobalOpts: NewGlobalOpts(), prompt: prompt.New(), } } // Ask prompts for fields that are required but not passed in. func (opts *InitPipelineOpts) Ask() error { if len(opts.Environments) == 0 { if err := opts.selectEnvironments(true); err != nil { return err } } if opts.GitHubRepo == "" { if err := opts.selectGitHubRepo(); err != nil { return err } } if opts.GitHubAccessToken == "" { if err := opts.getGitHubAccessToken(); err != nil { return err } } // if err := opts.askEnableCD(); err != nil { // return err // } // TODO ask this after pipeline.yml is written // if err := opts.askDeploy(); err != nil { // return err // } return nil } // Validate returns an error if the flag values passed by the user are invalid. func (opts *InitPipelineOpts) Validate() error { // TODO if opts.ProjectName() == "" { return errNoProjectInWorkspace } if len(opts.projectEnvs) == 0 { return errNoEnvsInProject } return nil } // Execute writes the pipline manifest file. func (opts *InitPipelineOpts) Execute() error { opts.manifestPath = "pipeline.yml" // TODO: placeholder log.Infoln() log.Successf("Wrote the pipeline for %s app at '%s'\n", color.HighlightUserInput(opts.GitHubRepo), color.HighlightResource(opts.manifestPath)) log.Infoln("Your pipeline manifest contains configurations for your CodePipeline resources, such as your pipeline stages and build steps.") log.Infoln() return nil } func (opts *InitPipelineOpts) selectEnvironments(addMore bool) error { if addMore == false { return nil } addEnv, err := opts.prompt.Confirm( pipelineAddEnvPrompt, "Adds an environment that corresponds to a deployment stage in your pipeline. 
Environments are added sequentially.", ) if err != nil { return fmt.Errorf("failed to confirm adding an environment: %w", err) } var selectMoreEnvs bool if addEnv { selectMore, err := opts.selectEnvironment() if err != nil { return err } selectMoreEnvs = selectMore } return opts.selectEnvironments(selectMoreEnvs) } func (opts *InitPipelineOpts) listAvailableEnvironments() []string { envs := []string{} for _, env := range opts.projectEnvs { // Check if environment has already been added to pipeline if opts.envCanBeAdded(env) { envs = append(envs, env) } } return envs } func (opts *InitPipelineOpts) envCanBeAdded(selectedEnv string) bool { for _, env := range opts.Environments { if selectedEnv == env { return false } } return true } func (opts *InitPipelineOpts) selectEnvironment() (bool, error) { selectMoreEnvs := false envs := opts.listAvailableEnvironments() if len(envs) == 0 && len(opts.Environments) != 0 { log.Infoln("There are no more environments to add.") return selectMoreEnvs, nil } env, err := opts.prompt.SelectOne( pipelineSelectEnvPrompt, "Environment to be added as the next stage in your pipeline.", envs, ) if err != nil { return selectMoreEnvs, fmt.Errorf("failed to add environment: %w", err) } opts.Environments = append(opts.Environments, env) selectMoreEnvs = true return selectMoreEnvs, nil } // TODO: Nice-to-have: have an opts.listRemoteRepos() method that execs out to `git remote -v` and parse repo name to offer select menu func (opts *InitPipelineOpts) selectGitHubRepo() error { repo, err := opts.prompt.Get( pipelineEnterGitHubRepoPrompt, fmt.Sprintf(`The GitHub repository linked to your workspace. Pushing to this repository will trigger your pipeline build stage.`), nil) if err != nil { return fmt.Errorf("failed to get GitHub repository: %w", err) } opts.GitHubRepo = repo // TODO validate github repo? return nil } func (opts *InitPipelineOpts) getGitHubAccessToken() error { token, err := opts.prompt.GetSecret( fmt.Sprintf("Please enter your GitHub Personal Access Token for your repository: %s", opts.GitHubRepo), fmt.Sprintf(`The personal access token for the GitHub repository linked to your workspace. 
For more information on how to create a personal access token, please refer to: https://help.github.com/en/enterprise/2.17/user/authenticating-to-github/creating-a-personal-access-token-for-the-command-line.`), ) if err != nil { return fmt.Errorf("failed to get GitHub access token: %w", err) } opts.GitHubAccessToken = token return nil } func (opts *InitPipelineOpts) askEnableCD() error { enable, err := opts.prompt.Confirm( "Would you like to automatically enable deploying to production?", "Enables the transition to your production environment automatically through your pipeline.", ) if err != nil { return fmt.Errorf("failed to confirm enabling CD: %w", err) } opts.EnableCD = enable return nil } func (opts *InitPipelineOpts) askDeploy() error { deploy, err := opts.prompt.Confirm( "Would you like to deploy your pipeline?", "Deploys your pipeline through CloudFormation.", ) if err != nil { return fmt.Errorf("failed to confirm deploying pipeline: %w", err) } opts.Deploy = deploy return nil } func (opts *InitPipelineOpts) getEnvNames() ([]string, error) { store, err := ssm.NewStore() if err != nil { return nil, fmt.Errorf("couldn't connect to environment datastore: %w", err) } envs, err := store.ListEnvironments(opts.ProjectName()) if err != nil { return nil, fmt.Errorf("could not list environments for project %s: %w", opts.ProjectName(), err) } if len(envs) == 0 { return nil, errNoEnvsInProject } envNames := []string{} for _, env := range envs { envNames = append(envNames, env.Name) } return envNames, nil } // BuildPipelineInitCmd build the command for creating a new pipeline. func BuildPipelineInitCmd() *cobra.Command { opts := NewInitPipelineOpts() cmd := &cobra.Command{ Use: "init", Short: "Creates a pipeline for applications in your workspace.", Long: `Creates a pipeline for the applications in your workspace, using the environments associated with the applications.`, Example: ` Create a pipeline for the applications in your workspace: /code $ archer pipeline init \ --github-repo "gitHubUserName/myFrontendApp" \ --github-access-token file://myGitHubToken \ --environments "stage,prod" \ --deploy`, PreRunE: func(cmd *cobra.Command, args []string) error { projectEnvs, err := opts.getEnvNames() if err != nil { return fmt.Errorf("couldn't get environments: %w", err) } opts.projectEnvs = projectEnvs return opts.Validate() }, RunE: func(cmd *cobra.Command, args []string) error { if err := opts.Ask(); err != nil { return err } if err := opts.Validate(); err != nil { // validate flags return err } return opts.Execute() }, } cmd.Flags().StringVarP(&opts.GitHubRepo, githubRepoFlag, githubRepoFlagShort, "", githubRepoFlagDescription) cmd.Flags().StringVarP(&opts.GitHubAccessToken, githubAccessTokenFlag, githubAccessTokenFlagShort, "", githubAccessTokenFlagDescription) cmd.Flags().BoolVar(&opts.Deploy, deployFlag, false, deployPipelineFlagDescription) cmd.Flags().BoolVar(&opts.EnableCD, enableCDFlag, false, enableCDFlagDescription) cmd.Flags().StringSliceVarP(&opts.Environments, envsFlag, envsFlagShort, []string{}, pipelineEnvsFlagDescription) return cmd }
1
11,000
What do you think of moving the example to the help text of the prompt? And the prompt itself can be "What is your application's GitHub repository URL?"
aws-copilot-cli
go
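To make the suggestion above concrete, here is one possible shape of selectGitHubRepo with the prompt reworded to ask for the repository URL and the example folded into the help text. This is an illustration only; the wording and the "gitHubUserName/myFrontendApp" example (taken from the command's Example string) are placeholders, not the project's final strings.

// Sketch only: prompt wording per the review suggestion above.
const pipelineEnterGitHubRepoPrompt = "What is your application's GitHub repository URL?"

func (opts *InitPipelineOpts) selectGitHubRepo() error {
	repo, err := opts.prompt.Get(
		pipelineEnterGitHubRepoPrompt,
		`The GitHub repository linked to your workspace, for example "gitHubUserName/myFrontendApp".
Pushing to this repository will trigger your pipeline build stage.`,
		nil)
	if err != nil {
		return fmt.Errorf("failed to get GitHub repository: %w", err)
	}
	opts.GitHubRepo = repo
	return nil
}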
@@ -58,9 +58,12 @@ public class TracerTest { public void shouldBeAbleToCreateATracer() { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); + long timeStamp = 1593493828L; try (Span span = tracer.getCurrentContext().createSpan("parent")) { span.setAttribute("cheese", "gouda"); + span.addEvent("Grating cheese"); + span.addEvent("Melting cheese", timeStamp); span.setStatus(Status.NOT_FOUND); }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.remote.tracing.opentelemetry; import io.opentelemetry.OpenTelemetry; import io.opentelemetry.sdk.OpenTelemetrySdk; import io.opentelemetry.sdk.trace.TracerSdkProvider; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SimpleSpansProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import org.junit.Test; import org.openqa.selenium.grid.web.CombinedHandler; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.remote.http.Routable; import org.openqa.selenium.remote.http.Route; import org.openqa.selenium.remote.tracing.HttpTracing; import org.openqa.selenium.remote.tracing.Span; import org.openqa.selenium.remote.tracing.Status; import org.openqa.selenium.remote.tracing.Tracer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.stream.Collectors; import static org.assertj.core.api.Assertions.assertThat; import static org.openqa.selenium.remote.http.HttpMethod.GET; import static org.openqa.selenium.remote.tracing.HttpTracing.newSpanAsChildOf; public class TracerTest { @Test public void shouldBeAbleToCreateATracer() { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); try (Span span = tracer.getCurrentContext().createSpan("parent")) { span.setAttribute("cheese", "gouda"); span.setStatus(Status.NOT_FOUND); } Set<SpanData> values = allSpans.stream() .filter(data -> data.getAttributes().containsKey("cheese")) .collect(Collectors.toSet()); assertThat(values).hasSize(1); assertThat(values).element(0) .extracting(SpanData::getStatus).isEqualTo(io.opentelemetry.trace.Status.NOT_FOUND); assertThat(values).element(0) .extracting(el -> el.getAttributes().get("cheese").getStringValue()).isEqualTo("gouda"); } @Test public void nestingSpansInTheSameThreadShouldWork() { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); try (Span parent = tracer.getCurrentContext().createSpan("parent")) { try (Span child = parent.createSpan("child")) { child.setAttribute("cheese", "camembert"); } } SpanData parent = allSpans.stream().filter(data -> data.getName().equals("parent")) .findFirst().orElseThrow(NoSuchElementException::new); SpanData child = allSpans.stream().filter(data -> data.getName().equals("child")) 
.findFirst().orElseThrow(NoSuchElementException::new); assertThat(child.getParentSpanId()).isEqualTo(parent.getSpanId()); } @Test public void nestingSpansFromDifferentThreadsIsFineToo() throws ExecutionException, InterruptedException { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); try (Span parent = tracer.getCurrentContext().createSpan("parent")) { Future<?> future = Executors.newSingleThreadExecutor().submit(() -> { try (Span child = parent.createSpan("child")) { child.setAttribute("cheese", "gruyere"); } }); future.get(); } SpanData parent = allSpans.stream().filter(data -> data.getName().equals("parent")) .findFirst().orElseThrow(NoSuchElementException::new); SpanData child = allSpans.stream().filter(data -> data.getName().equals("child")) .findFirst().orElseThrow(NoSuchElementException::new); assertThat(child.getParentSpanId()).isEqualTo(parent.getSpanId()); } @Test public void currentSpanIsKeptOnTracerCorrectlyWithinSameThread() { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); try (Span parent = tracer.getCurrentContext().createSpan("parent")) { assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId()); try (Span child = parent.createSpan("child")) { assertThat(child.getId()).isEqualTo(tracer.getCurrentContext().getId()); } assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId()); } } @Test public void currentSpanIsKeptOnTracerCorrectlyBetweenThreads() throws ExecutionException, InterruptedException { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); try (Span parent = tracer.getCurrentContext().createSpan("parent")) { assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId()); Future<?> future = Executors.newSingleThreadExecutor().submit(() -> { Span child = null; try { child = parent.createSpan("child"); assertThat(child.getId()).isEqualTo(tracer.getCurrentContext().getId()); } finally { assert child != null; child.close(); } // At this point, the parent span is undefind, but shouldn't be null assertThat(parent.getId()).isNotEqualTo(tracer.getCurrentContext().getId()); assertThat(child.getId()).isNotEqualTo(tracer.getCurrentContext().getId()); assertThat(tracer.getCurrentContext().getId()).isNotNull(); }); future.get(); assertThat(parent.getId()).isEqualTo(tracer.getCurrentContext().getId()); } } @Test public void cleverShenanigansRepresentingWhatWeSeeInTheRouter() { List<SpanData> allSpans = new ArrayList<>(); Tracer tracer = createTracer(allSpans); CombinedHandler handler = new CombinedHandler(); ExecutorService executors = Executors.newCachedThreadPool(); handler.addHandler(Route.get("/status").to(() -> req -> { try (Span span = HttpTracing.newSpanAsChildOf(tracer, req, "status")) { executors.submit(span.wrap(() -> new HashSet<>(Arrays.asList("cheese", "peas")))).get(); CompletableFuture<String> toReturn = new CompletableFuture<>(); executors.submit(() -> { try { HttpRequest cheeseReq = new HttpRequest(GET, "/cheeses"); HttpTracing.inject(tracer, span, cheeseReq); handler.execute(cheeseReq); toReturn.complete("nom, nom, nom"); } catch (RuntimeException e) { toReturn.completeExceptionally(e); } }); toReturn.get(); } catch (Exception e) { throw new RuntimeException(e); } return new HttpResponse(); })); handler.addHandler(Route.get("/cheeses").to(() -> req -> new HttpResponse())); Routable routable = handler.with(delegate -> req -> { try (Span span = newSpanAsChildOf(tracer, req, "httpclient.execute")) { return 
delegate.execute(req); } }); routable.execute(new HttpRequest(GET, "/")); } private Tracer createTracer(List<SpanData> exportTo) { TracerSdkProvider provider = OpenTelemetrySdk.getTracerProvider(); provider.addSpanProcessor(SimpleSpansProcessor.create(new SpanExporter() { @Override public ResultCode export(Collection<SpanData> spans) { exportTo.addAll(spans); return ResultCode.SUCCESS; } @Override public ResultCode flush() { return ResultCode.SUCCESS; } @Override public void shutdown() { } })); io.opentelemetry.trace.Tracer otTracer = provider.get("get"); return new OpenTelemetryTracer( otTracer, OpenTelemetry.getPropagators().getHttpTextFormat()); } }
1
17,762
Break out tests for events into their own tests rather than placing them in other ones. That makes it easier for us to figure out where problems lie and to do a TDD-driven implementation over new APIs.
SeleniumHQ-selenium
js
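Following the review comment above, event coverage would live in its own test instead of being appended to shouldBeAbleToCreateATracer. A minimal sketch is below; it reuses the createTracer helper and the addEvent calls introduced by the patch, and deliberately stops at a name-based assertion because the exact accessor for reading events back from SpanData depends on the OpenTelemetry SDK version in use.

  // Sketch only: a dedicated test for span events, per the review comment above.
  @Test
  public void shouldBeAbleToRecordEventsOnASpan() {
    List<SpanData> allSpans = new ArrayList<>();
    Tracer tracer = createTracer(allSpans);

    long timeStamp = 1593493828L;
    try (Span span = tracer.getCurrentContext().createSpan("parent")) {
      span.addEvent("Grating cheese");
      span.addEvent("Melting cheese", timeStamp);
    }

    Set<SpanData> values = allSpans.stream()
        .filter(data -> data.getName().equals("parent"))
        .collect(Collectors.toSet());
    assertThat(values).hasSize(1);
    // Event-level assertions would go here once the SpanData event accessor is settled.
  }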
@@ -266,7 +266,7 @@ bool parse_it(Iterator &first, Iterator last, RDKit::RWMol &mol) { } else { if (!parse_atom_labels(first, last, mol)) return false; } - } else if ((first + 9) < last && + } else if (std::distance(first, last) > 9 && std::string(first, first + 9) == "atomProp:") { first += 9; if (!parse_atom_props(first, last, mol)) return false;
1
// // Copyright (C) 2016 Greg Landrum // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <RDGeneral/BoostStartInclude.h> #include <boost/algorithm/string.hpp> #include <boost/foreach.hpp> #include <boost/lexical_cast.hpp> #include <RDGeneral/BoostEndInclude.h> #include <GraphMol/RDKitBase.h> #include <GraphMol/RDKitQueries.h> #include <iostream> #include "SmilesParse.h" #include "SmilesParseOps.h" namespace SmilesParseOps { using namespace RDKit; namespace parser { template <typename Iterator> bool read_int(Iterator &first, Iterator last, unsigned int &res) { std::string num = ""; while (first != last && *first >= '0' && *first <= '9') { num += *first; ++first; } if (num == "") { return false; } res = boost::lexical_cast<unsigned int>(num); return true; } template <typename Iterator> bool read_int_pair(Iterator &first, Iterator last, unsigned int &n1, unsigned int &n2, char sep = '.') { if (!read_int(first, last, n1)) return false; if (first >= last || *first != sep) return false; ++first; return read_int(first, last, n2); } template <typename Iterator> std::string read_text_to(Iterator &first, Iterator last, std::string delims) { std::string res = ""; Iterator start = first; // EFF: there are certainly faster ways to do this while (first != last && delims.find_first_of(*first) == std::string::npos) { if (*first == '&' && first + 2 < last && *(first + 1) == '#') { // escaped char if (start != first) { res += std::string(start, first); } Iterator next = first + 2; while (next != last && *next >= '0' && *next <= '9') { ++next; } if (next == last || *next != ';') throw RDKit::SmilesParseException( "failure parsing CXSMILES extensions: quoted block not terminated " "with ';'"); if (next > first + 2) { std::string blk = std::string(first + 2, next); res += (char)(boost::lexical_cast<int>(blk)); } first = next + 1; start = first; } else { ++first; } } if (start != first) res += std::string(start, first); return res; } template <typename Iterator> bool parse_atom_values(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last || *first != ':') return false; ++first; unsigned int atIdx = 0; while (first != last && *first != '$') { std::string tkn = read_text_to(first, last, ";$"); if (tkn != "") { mol.getAtomWithIdx(atIdx)->setProp(RDKit::common_properties::molFileValue, tkn); } ++atIdx; if (first != last && *first != '$') ++first; } if (first == last || *first != '$') return false; ++first; return true; } template <typename Iterator> bool parse_atom_props(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last) return false; while (first != last && *first != '|' && *first != ',') { unsigned int atIdx; if (read_int(first, last, atIdx)) { if (first == last || *first != '.') return false; ++first; std::string pname = read_text_to(first, last, "."); if (pname != "") { if (first == last || *first != '.') return false; ++first; std::string pval = read_text_to(first, last, ":|,"); if (pval != "") { mol.getAtomWithIdx(atIdx)->setProp(pname, pval); } } } if (first != last && *first != '|' && *first != ',') ++first; } if (first != last && *first != '|' && *first != ',') return false; if (*first != '|') ++first; return true; } template <typename Iterator> bool parse_atom_labels(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last || *first != '$') return false; ++first; unsigned int 
atIdx = 0; while (first != last && *first != '$') { std::string tkn = read_text_to(first, last, ";$"); if (tkn != "") { mol.getAtomWithIdx(atIdx)->setProp(RDKit::common_properties::atomLabel, tkn); } ++atIdx; if (first != last && *first != '$') ++first; } if (first == last || *first != '$') return false; ++first; return true; } template <typename Iterator> bool parse_coords(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last || *first != '(') return false; RDKit::Conformer *conf = new Conformer(mol.getNumAtoms()); mol.addConformer(conf); ++first; unsigned int atIdx = 0; while (first != last && *first != ')') { RDGeom::Point3D pt; std::string tkn = read_text_to(first, last, ";)"); if (tkn != "") { std::vector<std::string> tokens; boost::split(tokens, tkn, boost::is_any_of(std::string(","))); if (tokens.size() >= 1 && tokens[0].size()) pt.x = boost::lexical_cast<double>(tokens[0]); if (tokens.size() >= 2 && tokens[1].size()) pt.y = boost::lexical_cast<double>(tokens[1]); if (tokens.size() >= 3 && tokens[2].size()) pt.z = boost::lexical_cast<double>(tokens[2]); } conf->setAtomPos(atIdx, pt); ++atIdx; if (first != last && *first != ')') ++first; } if (first == last || *first != ')') return false; ++first; return true; } template <typename Iterator> bool parse_coordinate_bonds(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last || *first != 'C') return false; ++first; if (first >= last || *first != ':') return false; ++first; while (first != last && *first >= '0' && *first <= '9') { unsigned int aidx; unsigned int bidx; if (read_int_pair(first, last, aidx, bidx)) { Bond *bnd = mol.getBondWithIdx(bidx); if (bnd->getBeginAtomIdx() != aidx && bnd->getEndAtomIdx() != aidx) { std::cerr << "BOND NOT FOUND! " << bidx << " involving atom " << aidx << std::endl; return false; } bnd->setBondType(Bond::DATIVE); if (bnd->getBeginAtomIdx() != aidx) { unsigned int tmp = bnd->getBeginAtomIdx(); bnd->setBeginAtomIdx(aidx); bnd->setEndAtomIdx(tmp); } } else { return false; } if (first < last && *first == ',') ++first; } return true; } template <typename Iterator> bool processRadicalSection(Iterator &first, Iterator last, RDKit::RWMol &mol, unsigned int numRadicalElectrons) { if (first >= last) return false; ++first; if (first >= last || *first != ':') return false; ++first; unsigned int atIdx; if (!read_int(first, last, atIdx)) return false; mol.getAtomWithIdx(atIdx)->setNumRadicalElectrons(numRadicalElectrons); while (first < last && *first == ',') { ++first; if (first < last && (*first < '0' || *first > '9')) return true; if (!read_int(first, last, atIdx)) return false; mol.getAtomWithIdx(atIdx)->setNumRadicalElectrons(numRadicalElectrons); } if (first >= last) return false; return true; } template <typename Iterator> bool parse_radicals(Iterator &first, Iterator last, RDKit::RWMol &mol) { if (first >= last || *first != '^') return false; while (*first == '^') { ++first; if (first >= last) return false; if (*first < '1' || *first > '7') return false; // these are the values that are allowed to be there switch (*first) { case '1': if (!processRadicalSection(first, last, mol, 1)) return false; break; case '2': case '3': case '4': if (!processRadicalSection(first, last, mol, 2)) return false; break; case '5': case '6': case '7': if (!processRadicalSection(first, last, mol, 3)) return false; break; default: BOOST_LOG(rdWarningLog) << "Radical specification " << *first << " ignored."; } } return true; } template <typename Iterator> bool parse_it(Iterator &first, Iterator last, 
RDKit::RWMol &mol) { if (first >= last || *first != '|') return false; ++first; while (first < last && *first != '|') { if (*first == '(') { if (!parse_coords(first, last, mol)) return false; } else if (*first == '$') { if ((first + 4) < last && *(first + 1) == '_' && *(first + 2) == 'A' && *(first + 3) == 'V' && *(first + 4) == ':') { first += 4; if (!parse_atom_values(first, last, mol)) return false; } else { if (!parse_atom_labels(first, last, mol)) return false; } } else if ((first + 9) < last && std::string(first, first + 9) == "atomProp:") { first += 9; if (!parse_atom_props(first, last, mol)) return false; } else if (*first == 'C') { if (!parse_coordinate_bonds(first, last, mol)) return false; } else if (*first == '^') { if (!parse_radicals(first, last, mol)) return false; } else { ++first; } // if(first < last && *first != '|') ++first; } if (first >= last || *first != '|') return false; ++first; // step past the last '|' return true; } } // end of namespace parser namespace { template <typename Q> void addquery(Q *qry, std::string symbol, RDKit::RWMol &mol, unsigned int idx) { PRECONDITION(qry, "bad query"); QueryAtom *qa = new QueryAtom(0); qa->setQuery(qry); qa->setNoImplicit(true); mol.replaceAtom(idx, qa); if (symbol != "") mol.getAtomWithIdx(idx)->setProp(RDKit::common_properties::atomLabel, symbol); delete qa; } void processCXSmilesLabels(RDKit::RWMol &mol) { for (RDKit::ROMol::AtomIterator atIt = mol.beginAtoms(); atIt != mol.endAtoms(); ++atIt) { std::string symb = ""; if ((*atIt)->getPropIfPresent(RDKit::common_properties::atomLabel, symb)) { if (symb.size() > 3 && symb[0] == '_' && symb[1] == 'A' && symb[2] == 'P') { unsigned int mapNum = boost::lexical_cast<unsigned int>(symb.substr(3, symb.size() - 3)); (*atIt)->setAtomMapNum(mapNum); } else if (symb == "star_e") { /* according to the MDL spec, these match anything, but in MARVIN they are "unspecified end groups" for polymers */ addquery(makeAtomNullQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "Q_e") { addquery(makeQAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "QH_p") { addquery(makeQHAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "AH_p") { // this seems wrong... /* According to the MARVIN Sketch, AH is "any atom, including H" - this would be "*" in SMILES - and "A" is "any atom except H". The CXSMILES docs say that "A" can be represented normally in SMILES and that "AH" needs to be written out as AH_p. I'm going to assume that this is a Marvin internal thing and just parse it as they describe it. This means that "*" in the SMILES itself needs to be treated differently, which we do below. 
*/ addquery(makeAHAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "X_p") { addquery(makeXAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "XH_p") { addquery(makeXHAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "M_p") { addquery(makeMAtomQuery(), symb, mol, (*atIt)->getIdx()); } else if (symb == "MH_p") { addquery(makeMHAtomQuery(), symb, mol, (*atIt)->getIdx()); } } else if ((*atIt)->getAtomicNum() == 0 && (*atIt)->getSymbol() == "*") { addquery(makeAAtomQuery(), "", mol, (*atIt)->getIdx()); } } } } // end of anonymous namespace void parseCXExtensions(RDKit::RWMol &mol, const std::string &extText, std::string::const_iterator &first) { // std::cerr << "parseCXNExtensions: " << extText << std::endl; if (!extText.size() || extText[0] != '|') return; first = extText.begin(); bool ok = parser::parse_it(first, extText.end(), mol); if (!ok) throw RDKit::SmilesParseException("failure parsing CXSMILES extensions"); if (ok) { processCXSmilesLabels(mol); } } } // end of namespace SmilesParseOps
1
16,896
Nice use of std::distance. I'm a bit worried about first += 9 though.
rdkit-rdkit
cpp
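The comment above is worried about first += 9: it compiles only for random-access iterators and hard-codes the prefix length as a magic number, separate from the std::distance guard. As a sketch of a more defensive shape (an illustration, not RDKit's actual code), a small helper can do the guard, the comparison, and the advance together; it assumes <algorithm>, <iterator> and <string> are available.

// Sketch only: consume a literal prefix from [first, last) without relying on
// random-access iterator arithmetic. The std::distance guard keeps the advance in range.
template <typename Iterator>
bool consume_prefix(Iterator &first, Iterator last, const std::string &tag) {
  if (std::distance(first, last) < static_cast<std::ptrdiff_t>(tag.size()) ||
      !std::equal(tag.begin(), tag.end(), first)) {
    return false;
  }
  std::advance(first, tag.size());
  return true;
}

// parse_it could then read roughly as below (the original additionally requires at
// least one character to remain after the prefix, hence its "> 9" rather than ">= 9"):
//   } else if (consume_prefix(first, last, "atomProp:")) {
//     if (!parse_atom_props(first, last, mol)) return false;
//   }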
@@ -578,8 +578,9 @@ func (c *Operator) syncNodeEndpoints(ctx context.Context) error { ObjectMeta: metav1.ObjectMeta{ Name: c.kubeletObjectName, Labels: c.config.Labels.Merge(map[string]string{ - "k8s-app": "kubelet", - "app.kubernetes.io/name": "kubelet", + "k8s-app": "kubelet", + "app.kubernetes.io/name": "kubelet", + "app.kubernetes.io/part-of": "prometheus-operator", }), }, Subsets: []v1.EndpointSubset{
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "bytes" "compress/gzip" "context" "fmt" "reflect" "regexp" "strings" "time" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" "github.com/prometheus-operator/prometheus-operator/pkg/informers" "github.com/prometheus-operator/prometheus-operator/pkg/k8sutil" "github.com/prometheus-operator/prometheus-operator/pkg/listwatch" "github.com/prometheus-operator/prometheus-operator/pkg/operator" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/mitchellh/hashstructure" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) const ( resyncPeriod = 5 * time.Minute ) // Operator manages life cycle of Prometheus deployments and // monitoring configurations. type Operator struct { kclient kubernetes.Interface mclient monitoringclient.Interface logger log.Logger nsPromInf cache.SharedIndexInformer nsMonInf cache.SharedIndexInformer promInfs *informers.ForResource smonInfs *informers.ForResource pmonInfs *informers.ForResource probeInfs *informers.ForResource ruleInfs *informers.ForResource cmapInfs *informers.ForResource secrInfs *informers.ForResource ssetInfs *informers.ForResource queue workqueue.RateLimitingInterface metrics *operator.Metrics nodeAddressLookupErrors prometheus.Counter nodeEndpointSyncs prometheus.Counter nodeEndpointSyncErrors prometheus.Counter host string kubeletObjectName string kubeletObjectNamespace string kubeletSyncEnabled bool config operator.Config configGenerator *configGenerator } // New creates a new controller. 
func New(ctx context.Context, conf operator.Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) { cfg, err := k8sutil.NewClusterConfig(conf.Host, conf.TLSInsecure, &conf.TLSConfig) if err != nil { return nil, errors.Wrap(err, "instantiating cluster config failed") } client, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, errors.Wrap(err, "instantiating kubernetes client failed") } mclient, err := monitoringclient.NewForConfig(cfg) if err != nil { return nil, errors.Wrap(err, "instantiating monitoring client failed") } if _, err := labels.Parse(conf.PromSelector); err != nil { return nil, errors.Wrap(err, "can not parse prometheus selector value") } secretListWatchSelector, err := fields.ParseSelector(conf.SecretListWatchSelector) if err != nil { return nil, errors.Wrap(err, "can not parse secrets selector value") } kubeletObjectName := "" kubeletObjectNamespace := "" kubeletSyncEnabled := false if conf.KubeletObject != "" { parts := strings.Split(conf.KubeletObject, "/") if len(parts) != 2 { return nil, fmt.Errorf("malformatted kubelet object string, must be in format \"namespace/name\"") } kubeletObjectNamespace = parts[0] kubeletObjectName = parts[1] kubeletSyncEnabled = true } c := &Operator{ kclient: client, mclient: mclient, logger: logger, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "prometheus"), host: cfg.Host, kubeletObjectName: kubeletObjectName, kubeletObjectNamespace: kubeletObjectNamespace, kubeletSyncEnabled: kubeletSyncEnabled, config: conf, configGenerator: newConfigGenerator(logger), metrics: operator.NewMetrics("prometheus", r), nodeAddressLookupErrors: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_operator_node_address_lookup_errors_total", Help: "Number of times a node IP address could not be determined", }), nodeEndpointSyncs: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_operator_node_syncs_total", Help: "Number of node endpoints synchronisations", }), nodeEndpointSyncErrors: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_operator_node_syncs_failed_total", Help: "Number of node endpoints synchronisation failures", }), } c.metrics.MustRegister(c.nodeAddressLookupErrors, c.nodeEndpointSyncs, c.nodeEndpointSyncErrors) c.promInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, mclient, resyncPeriod, func(options *metav1.ListOptions) { options.LabelSelector = c.config.PromSelector }, ), monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.PrometheusName), ) if err != nil { return nil, errors.Wrap(err, "error creating prometheus informers") } var promStores []cache.Store for _, informer := range c.promInfs.GetInformers() { promStores = append(promStores, informer.Informer().GetStore()) } c.metrics.MustRegister(NewPrometheusCollectorForStores(promStores...)) c.smonInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, mclient, resyncPeriod, nil, ), monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ServiceMonitorName), ) if err != nil { return nil, errors.Wrap(err, "error creating servicemonitor informers") } c.pmonInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, mclient, resyncPeriod, nil, ), 
monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.PodMonitorName), ) if err != nil { return nil, errors.Wrap(err, "error creating podmonitor informers") } c.probeInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, mclient, resyncPeriod, nil, ), monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ProbeName), ) if err != nil { return nil, errors.Wrap(err, "error creating probe informers") } c.ruleInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, mclient, resyncPeriod, nil, ), monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.PrometheusRuleName), ) if err != nil { return nil, errors.Wrap(err, "error creating prometheusrule informers") } c.cmapInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod, func(options *metav1.ListOptions) { options.LabelSelector = labelPrometheusName }, ), v1.SchemeGroupVersion.WithResource(string(v1.ResourceConfigMaps)), ) if err != nil { return nil, errors.Wrap(err, "error creating configmap informers") } c.secrInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod, func(options *metav1.ListOptions) { options.FieldSelector = secretListWatchSelector.String() }, ), v1.SchemeGroupVersion.WithResource(string(v1.ResourceSecrets)), ) if err != nil { return nil, errors.Wrap(err, "error creating secrets informers") } c.ssetInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod, nil, ), appsv1.SchemeGroupVersion.WithResource("statefulsets"), ) if err != nil { return nil, errors.Wrap(err, "error creating statefulset informers") } newNamespaceInformer := func(o *Operator, allowList map[string]struct{}) cache.SharedIndexInformer { // nsResyncPeriod is used to control how often the namespace informer // should resync. If the unprivileged ListerWatcher is used, then the // informer must resync more often because it cannot watch for // namespace changes. nsResyncPeriod := 15 * time.Second // If the only namespace is v1.NamespaceAll, then the client must be // privileged and a regular cache.ListWatch will be used. In this case // watching works and we do not need to resync so frequently. if listwatch.IsAllNamespaces(allowList) { nsResyncPeriod = resyncPeriod } nsInf := cache.NewSharedIndexInformer( o.metrics.NewInstrumentedListerWatcher( listwatch.NewUnprivilegedNamespaceListWatchFromClient(ctx, o.logger, o.kclient.CoreV1().RESTClient(), allowList, o.config.Namespaces.DenyList, fields.Everything()), ), &v1.Namespace{}, nsResyncPeriod, cache.Indexers{}, ) return nsInf } c.nsMonInf = newNamespaceInformer(c, c.config.Namespaces.AllowList) if listwatch.IdenticalNamespaces(c.config.Namespaces.AllowList, c.config.Namespaces.PrometheusAllowList) { c.nsPromInf = c.nsMonInf } else { c.nsPromInf = newNamespaceInformer(c, c.config.Namespaces.PrometheusAllowList) } return c, nil } // waitForCacheSync waits for the informers' caches to be synced. 
func (c *Operator) waitForCacheSync(ctx context.Context) error { ok := true for _, infs := range []struct { name string informersForResource *informers.ForResource }{ {"Prometheus", c.promInfs}, {"ServiceMonitor", c.smonInfs}, {"PodMonitor", c.pmonInfs}, {"PrometheusRule", c.ruleInfs}, {"Probe", c.probeInfs}, {"ConfigMap", c.cmapInfs}, {"Secret", c.secrInfs}, {"StatefulSet", c.ssetInfs}, } { for _, inf := range infs.informersForResource.GetInformers() { if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.logger, "informer", infs.name), inf.Informer()) { ok = false } } } for _, inf := range []struct { name string informer cache.SharedIndexInformer }{ {"PromNamespace", c.nsPromInf}, {"MonNamespace", c.nsMonInf}, } { if !operator.WaitForNamedCacheSync(ctx, "prometheus", log.With(c.logger, "informer", inf.name), inf.informer) { ok = false } } if !ok { return errors.New("failed to sync caches") } level.Info(c.logger).Log("msg", "successfully synced all caches") return nil } // addHandlers adds the eventhandlers to the informers. func (c *Operator) addHandlers() { c.promInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handlePrometheusAdd, DeleteFunc: c.handlePrometheusDelete, UpdateFunc: c.handlePrometheusUpdate, }) c.smonInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleSmonAdd, DeleteFunc: c.handleSmonDelete, UpdateFunc: c.handleSmonUpdate, }) c.pmonInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handlePmonAdd, DeleteFunc: c.handlePmonDelete, UpdateFunc: c.handlePmonUpdate, }) c.probeInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleBmonAdd, UpdateFunc: c.handleBmonUpdate, DeleteFunc: c.handleBmonDelete, }) c.ruleInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleRuleAdd, DeleteFunc: c.handleRuleDelete, UpdateFunc: c.handleRuleUpdate, }) c.cmapInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleConfigMapAdd, DeleteFunc: c.handleConfigMapDelete, UpdateFunc: c.handleConfigMapUpdate, }) c.secrInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleSecretAdd, DeleteFunc: c.handleSecretDelete, UpdateFunc: c.handleSecretUpdate, }) c.ssetInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleStatefulSetAdd, DeleteFunc: c.handleStatefulSetDelete, UpdateFunc: c.handleStatefulSetUpdate, }) } // Run the controller. 
func (c *Operator) Run(ctx context.Context) error { defer c.queue.ShutDown() errChan := make(chan error) go func() { v, err := c.kclient.Discovery().ServerVersion() if err != nil { errChan <- errors.Wrap(err, "communicating with server failed") return } level.Info(c.logger).Log("msg", "connection established", "cluster-version", v) errChan <- nil }() select { case err := <-errChan: if err != nil { return err } level.Info(c.logger).Log("msg", "CRD API endpoints ready") case <-ctx.Done(): return nil } go c.worker(ctx) go c.promInfs.Start(ctx.Done()) go c.smonInfs.Start(ctx.Done()) go c.pmonInfs.Start(ctx.Done()) go c.probeInfs.Start(ctx.Done()) go c.ruleInfs.Start(ctx.Done()) go c.cmapInfs.Start(ctx.Done()) go c.secrInfs.Start(ctx.Done()) go c.ssetInfs.Start(ctx.Done()) go c.nsMonInf.Run(ctx.Done()) if c.nsPromInf != c.nsMonInf { go c.nsPromInf.Run(ctx.Done()) } if err := c.waitForCacheSync(ctx); err != nil { return err } c.addHandlers() if c.kubeletSyncEnabled { go c.reconcileNodeEndpoints(ctx) } c.metrics.Ready().Set(1) <-ctx.Done() return nil } func (c *Operator) keyFunc(obj interface{}) (string, bool) { k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { level.Error(c.logger).Log("msg", "creating key failed", "err", err) return k, false } return k, true } func (c *Operator) handlePrometheusAdd(obj interface{}) { key, ok := c.keyFunc(obj) if !ok { return } level.Debug(c.logger).Log("msg", "Prometheus added", "key", key) c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "add").Inc() checkPrometheusSpecDeprecation(key, obj.(*monitoringv1.Prometheus), c.logger) c.enqueue(key) } func (c *Operator) handlePrometheusDelete(obj interface{}) { key, ok := c.keyFunc(obj) if !ok { return } level.Debug(c.logger).Log("msg", "Prometheus deleted", "key", key) c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "delete").Inc() c.enqueue(key) } func (c *Operator) handlePrometheusUpdate(old, cur interface{}) { if old.(*monitoringv1.Prometheus).ResourceVersion == cur.(*monitoringv1.Prometheus).ResourceVersion { return } key, ok := c.keyFunc(cur) if !ok { return } level.Debug(c.logger).Log("msg", "Prometheus updated", "key", key) c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "update").Inc() checkPrometheusSpecDeprecation(key, cur.(*monitoringv1.Prometheus), c.logger) c.enqueue(key) } func (c *Operator) reconcileNodeEndpoints(ctx context.Context) { c.syncNodeEndpointsWithLogError(ctx) ticker := time.NewTicker(3 * time.Minute) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: c.syncNodeEndpointsWithLogError(ctx) } } } // nodeAddresses returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. 
NodeExternalIP // // Copied from github.com/prometheus/prometheus/discovery/kubernetes/node.go func nodeAddress(node v1.Node) (string, map[v1.NodeAddressType][]string, error) { m := map[v1.NodeAddressType][]string{} for _, a := range node.Status.Addresses { m[a.Type] = append(m[a.Type], a.Address) } if addresses, ok := m[v1.NodeInternalIP]; ok { return addresses[0], m, nil } if addresses, ok := m[v1.NodeExternalIP]; ok { return addresses[0], m, nil } return "", m, fmt.Errorf("host address unknown") } func getNodeAddresses(nodes *v1.NodeList) ([]v1.EndpointAddress, []error) { addresses := make([]v1.EndpointAddress, 0) errs := make([]error, 0) for _, n := range nodes.Items { address, _, err := nodeAddress(n) if err != nil { errs = append(errs, errors.Wrapf(err, "failed to determine hostname for node (%s)", n.Name)) continue } addresses = append(addresses, v1.EndpointAddress{ IP: address, TargetRef: &v1.ObjectReference{ Kind: "Node", Name: n.Name, UID: n.UID, APIVersion: n.APIVersion, }, }) } return addresses, errs } func (c *Operator) syncNodeEndpointsWithLogError(ctx context.Context) { level.Debug(c.logger).Log("msg", "Syncing nodes into Endpoints object") c.nodeEndpointSyncs.Inc() err := c.syncNodeEndpoints(ctx) if err != nil { c.nodeEndpointSyncErrors.Inc() level.Error(c.logger).Log("msg", "Syncing nodes into Endpoints object failed", "err", err) } } func (c *Operator) syncNodeEndpoints(ctx context.Context) error { logger := log.With(c.logger, "operation", "syncNodeEndpoints") eps := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: c.kubeletObjectName, Labels: c.config.Labels.Merge(map[string]string{ "k8s-app": "kubelet", "app.kubernetes.io/name": "kubelet", }), }, Subsets: []v1.EndpointSubset{ { Ports: []v1.EndpointPort{ { Name: "https-metrics", Port: 10250, }, { Name: "http-metrics", Port: 10255, }, { Name: "cadvisor", Port: 4194, }, }, }, }, } nodes, err := c.kclient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return errors.Wrap(err, "listing nodes failed") } level.Debug(logger).Log("msg", "Nodes retrieved from the Kubernetes API", "num_nodes", len(nodes.Items)) addresses, errs := getNodeAddresses(nodes) if len(errs) > 0 { for _, err := range errs { level.Warn(logger).Log("err", err) } c.nodeAddressLookupErrors.Add(float64(len(errs))) } level.Debug(logger).Log("msg", "Nodes converted to endpoint addresses", "num_addresses", len(addresses)) eps.Subsets[0].Addresses = addresses svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: c.kubeletObjectName, Labels: c.config.Labels.Merge(map[string]string{ "k8s-app": "kubelet", "app.kubernetes.io/name": "kubelet", }), }, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, ClusterIP: "None", Ports: []v1.ServicePort{ { Name: "https-metrics", Port: 10250, }, { Name: "http-metrics", Port: 10255, }, { Name: "cadvisor", Port: 4194, }, }, }, } level.Debug(logger).Log("msg", "Updating Kubernetes service", "service", c.kubeletObjectName, "ns", c.kubeletObjectNamespace) err = k8sutil.CreateOrUpdateService(ctx, c.kclient.CoreV1().Services(c.kubeletObjectNamespace), svc) if err != nil { return errors.Wrap(err, "synchronizing kubelet service object failed") } level.Debug(logger).Log("msg", "Updating Kubernetes endpoint", "endpoint", c.kubeletObjectName, "ns", c.kubeletObjectNamespace) err = k8sutil.CreateOrUpdateEndpoints(ctx, c.kclient.CoreV1().Endpoints(c.kubeletObjectNamespace), eps) if err != nil { return errors.Wrap(err, "synchronizing kubelet endpoints object failed") } return nil } // TODO: Don't enqueue just for the 
namespace func (c *Operator) handleSmonAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "ServiceMonitor added") c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "add").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleSmonUpdate(old, cur interface{}) { if old.(*monitoringv1.ServiceMonitor).ResourceVersion == cur.(*monitoringv1.ServiceMonitor).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "ServiceMonitor updated") c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "update").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleSmonDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "ServiceMonitor delete") c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "delete").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handlePmonAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "PodMonitor added") c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "add").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handlePmonUpdate(old, cur interface{}) { if old.(*monitoringv1.PodMonitor).ResourceVersion == cur.(*monitoringv1.PodMonitor).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "PodMonitor updated") c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "update").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handlePmonDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "PodMonitor delete") c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "delete").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleBmonAdd(obj interface{}) { if o, ok := c.getObject(obj); ok { level.Debug(c.logger).Log("msg", "Probe added") c.metrics.TriggerByCounter(monitoringv1.ProbesKind, "add").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleBmonUpdate(old, cur interface{}) { if old.(*monitoringv1.Probe).ResourceVersion == cur.(*monitoringv1.Probe).ResourceVersion { return } if o, ok := c.getObject(cur); ok { level.Debug(c.logger).Log("msg", "Probe updated") c.metrics.TriggerByCounter(monitoringv1.ProbesKind, "update") c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleBmonDelete(obj interface{}) { if o, ok := c.getObject(obj); ok { level.Debug(c.logger).Log("msg", "Probe delete") c.metrics.TriggerByCounter(monitoringv1.ProbesKind, "delete").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleRuleAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "PrometheusRule added") c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "add").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleRuleUpdate(old, cur interface{}) { if 
old.(*monitoringv1.PrometheusRule).ResourceVersion == cur.(*monitoringv1.PrometheusRule).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "PrometheusRule updated") c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "update").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Don't enqueue just for the namespace func (c *Operator) handleRuleDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "PrometheusRule deleted") c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "delete").Inc() c.enqueueForMonitorNamespace(o.GetNamespace()) } } // TODO: Do we need to enqueue secrets just for the namespace or in general? func (c *Operator) handleSecretDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "Secret deleted") c.metrics.TriggerByCounter("Secret", "delete").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } func (c *Operator) handleSecretUpdate(old, cur interface{}) { if old.(*v1.Secret).ResourceVersion == cur.(*v1.Secret).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "Secret updated") c.metrics.TriggerByCounter("Secret", "update").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } func (c *Operator) handleSecretAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "Secret added") c.metrics.TriggerByCounter("Secret", "add").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } // TODO: Do we need to enqueue configmaps just for the namespace or in general? func (c *Operator) handleConfigMapAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "ConfigMap added") c.metrics.TriggerByCounter("ConfigMap", "add").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } func (c *Operator) handleConfigMapDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "ConfigMap deleted") c.metrics.TriggerByCounter("ConfigMap", "delete").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } func (c *Operator) handleConfigMapUpdate(old, cur interface{}) { if old.(*v1.ConfigMap).ResourceVersion == cur.(*v1.ConfigMap).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "ConfigMap updated") c.metrics.TriggerByCounter("ConfigMap", "update").Inc() c.enqueueForPrometheusNamespace(o.GetNamespace()) } } func (c *Operator) getObject(obj interface{}) (metav1.Object, bool) { ts, ok := obj.(cache.DeletedFinalStateUnknown) if ok { obj = ts.Obj } o, err := meta.Accessor(obj) if err != nil { level.Error(c.logger).Log("msg", "get object failed", "err", err) return nil, false } return o, true } // enqueue adds a key to the queue. If obj is a key already it gets added // directly. Otherwise, the key is extracted via keyFunc. func (c *Operator) enqueue(obj interface{}) { if obj == nil { return } key, ok := obj.(string) if !ok { key, ok = c.keyFunc(obj) if !ok { return } } c.queue.Add(key) } func (c *Operator) enqueueForPrometheusNamespace(nsName string) { c.enqueueForNamespace(c.nsPromInf.GetStore(), nsName) } func (c *Operator) enqueueForMonitorNamespace(nsName string) { c.enqueueForNamespace(c.nsMonInf.GetStore(), nsName) } // enqueueForNamespace enqueues all Prometheus object keys that belong to the // given namespace or select objects in the given namespace. 
func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) { nsObject, exists, err := store.GetByKey(nsName) if err != nil { level.Error(c.logger).Log( "msg", "get namespace to enqueue Prometheus instances failed", "err", err, ) return } if !exists { level.Error(c.logger).Log( "msg", fmt.Sprintf("get namespace to enqueue Prometheus instances failed: namespace %q does not exist", nsName), ) return } ns := nsObject.(*v1.Namespace) err = c.promInfs.ListAll(labels.Everything(), func(obj interface{}) { // Check for Prometheus instances in the namespace. p := obj.(*monitoringv1.Prometheus) if p.Namespace == nsName { c.enqueue(p) return } // Check for Prometheus instances selecting ServiceMonitors in // the namespace. smNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorNamespaceSelector) if err != nil { level.Error(c.logger).Log( "msg", fmt.Sprintf("failed to convert ServiceMonitorNamespaceSelector of %q to selector", p.Name), "err", err, ) return } if smNSSelector.Matches(labels.Set(ns.Labels)) { c.enqueue(p) return } // Check for Prometheus instances selecting PodMonitors in the NS. pmNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorNamespaceSelector) if err != nil { level.Error(c.logger).Log( "msg", fmt.Sprintf("failed to convert PodMonitorNamespaceSelector of %q to selector", p.Name), "err", err, ) return } if pmNSSelector.Matches(labels.Set(ns.Labels)) { c.enqueue(p) return } // Check for Prometheus instances selecting Probes in the NS. bmNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ProbeNamespaceSelector) if err != nil { level.Error(c.logger).Log( "msg", fmt.Sprintf("failed to convert ProbeNamespaceSelector of %q to selector", p.Name), "err", err, ) return } if bmNSSelector.Matches(labels.Set(ns.Labels)) { c.enqueue(p) return } // Check for Prometheus instances selecting PrometheusRules in // the NS. ruleNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleNamespaceSelector) if err != nil { level.Error(c.logger).Log( "msg", fmt.Sprintf("failed to convert RuleNamespaceSelector of %q to selector", p.Name), "err", err, ) return } if ruleNSSelector.Matches(labels.Set(ns.Labels)) { c.enqueue(p) return } }) if err != nil { level.Error(c.logger).Log( "msg", "listing all Prometheus instances from cache failed", "err", err, ) } } // worker runs a worker thread that just dequeues items, processes them, and // marks them done. It enforces that the syncHandler is never invoked // concurrently with the same key. 
func (c *Operator) worker(ctx context.Context) { for c.processNextWorkItem(ctx) { } } func (c *Operator) processNextWorkItem(ctx context.Context) bool { key, quit := c.queue.Get() if quit { return false } defer c.queue.Done(key) c.metrics.ReconcileCounter().Inc() err := c.sync(ctx, key.(string)) c.metrics.SetSyncStatus(key.(string), err == nil) if err == nil { c.queue.Forget(key) return true } c.metrics.ReconcileErrorsCounter().Inc() utilruntime.HandleError(errors.Wrap(err, fmt.Sprintf("Sync %q failed", key))) c.queue.AddRateLimited(key) return true } func (c *Operator) prometheusForStatefulSet(sset interface{}) *monitoringv1.Prometheus { key, ok := c.keyFunc(sset) if !ok { return nil } match, promKey := statefulSetKeyToPrometheusKey(key) if !match { level.Debug(c.logger).Log("msg", "StatefulSet key did not match a Prometheus key format", "key", key) return nil } p, err := c.promInfs.Get(promKey) if apierrors.IsNotFound(err) { return nil } if err != nil { level.Error(c.logger).Log("msg", "Prometheus lookup failed", "err", err) return nil } return p.(*monitoringv1.Prometheus) } func statefulSetNameFromPrometheusName(name string, shard int) string { if shard == 0 { return fmt.Sprintf("prometheus-%s", name) } return fmt.Sprintf("prometheus-%s-shard-%d", name, shard) } var prometheusKeyInShardStatefulSet = regexp.MustCompile("^(.+)/prometheus-(.+)-shard-[1-9][0-9]*$") var prometheusKeyInStatefulSet = regexp.MustCompile("^(.+)/prometheus-(.+)$") func statefulSetKeyToPrometheusKey(key string) (bool, string) { r := prometheusKeyInStatefulSet if prometheusKeyInShardStatefulSet.MatchString(key) { r = prometheusKeyInShardStatefulSet } matches := r.FindAllStringSubmatch(key, 2) if len(matches) != 1 { return false, "" } if len(matches[0]) != 3 { return false, "" } return true, matches[0][1] + "/" + matches[0][2] } func prometheusKeyToStatefulSetKey(key string, shard int) string { keyParts := strings.Split(key, "/") return fmt.Sprintf("%s/%s", keyParts[0], statefulSetNameFromPrometheusName(keyParts[1], shard)) } func (c *Operator) handleStatefulSetDelete(obj interface{}) { if ps := c.prometheusForStatefulSet(obj); ps != nil { level.Debug(c.logger).Log("msg", "StatefulSet delete") c.metrics.TriggerByCounter("StatefulSet", "delete").Inc() c.enqueue(ps) } } func (c *Operator) handleStatefulSetAdd(obj interface{}) { if ps := c.prometheusForStatefulSet(obj); ps != nil { level.Debug(c.logger).Log("msg", "StatefulSet added") c.metrics.TriggerByCounter("StatefulSet", "add").Inc() c.enqueue(ps) } } func (c *Operator) handleStatefulSetUpdate(oldo, curo interface{}) { old := oldo.(*appsv1.StatefulSet) cur := curo.(*appsv1.StatefulSet) level.Debug(c.logger).Log("msg", "update handler", "old", old.ResourceVersion, "cur", cur.ResourceVersion) // Periodic resync may resend the StatefulSet without changes // in-between. Also breaks loops created by updating the resource // ourselves. 
if old.ResourceVersion == cur.ResourceVersion { return } if ps := c.prometheusForStatefulSet(cur); ps != nil { level.Debug(c.logger).Log("msg", "StatefulSet updated") c.metrics.TriggerByCounter("StatefulSet", "update").Inc() c.enqueue(ps) } } func (c *Operator) sync(ctx context.Context, key string) error { pobj, err := c.promInfs.Get(key) if apierrors.IsNotFound(err) { c.metrics.ForgetObject(key) // Dependent resources are cleaned up by K8s via OwnerReferences return nil } if err != nil { return err } p := pobj.(*monitoringv1.Prometheus) p = p.DeepCopy() p.APIVersion = monitoringv1.SchemeGroupVersion.String() p.Kind = monitoringv1.PrometheusesKind if p.Spec.Paused { return nil } level.Info(c.logger).Log("msg", "sync prometheus", "key", key) ruleConfigMapNames, err := c.createOrUpdateRuleConfigMaps(ctx, p) if err != nil { return err } assetStore := assets.NewStore(c.kclient.CoreV1(), c.kclient.CoreV1()) if err := c.createOrUpdateConfigurationSecret(ctx, p, ruleConfigMapNames, assetStore); err != nil { return errors.Wrap(err, "creating config failed") } if err := c.createOrUpdateTLSAssetSecret(ctx, p, assetStore); err != nil { return errors.Wrap(err, "creating tls asset secret failed") } // Create governing service if it doesn't exist. svcClient := c.kclient.CoreV1().Services(p.Namespace) if err := k8sutil.CreateOrUpdateService(ctx, svcClient, makeStatefulSetService(p, c.config)); err != nil { return errors.Wrap(err, "synchronizing governing service failed") } ssetClient := c.kclient.AppsV1().StatefulSets(p.Namespace) // Ensure we have a StatefulSet running Prometheus deployed and that StatefulSet names are created correctly. expected := expectedStatefulSetShardNames(p) for shard, ssetName := range expected { level.Debug(c.logger).Log("msg", "reconciling statefulset", "statefulset", ssetName, "shard", fmt.Sprintf("%d", shard)) obj, err := c.ssetInfs.Get(prometheusKeyToStatefulSetKey(key, shard)) exists := !apierrors.IsNotFound(err) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "retrieving statefulset failed") } spec := appsv1.StatefulSetSpec{} if obj != nil { ss := obj.(*appsv1.StatefulSet) spec = ss.Spec } newSSetInputHash, err := createSSetInputHash(*p, c.config, ruleConfigMapNames, spec) if err != nil { return err } sset, err := makeStatefulSet(ssetName, *p, &c.config, ruleConfigMapNames, newSSetInputHash, int32(shard)) if err != nil { return errors.Wrap(err, "making statefulset failed") } operator.SanitizeSTS(sset) if !exists { level.Debug(c.logger).Log("msg", "no current Prometheus statefulset found") level.Debug(c.logger).Log("msg", "creating Prometheus statefulset") if _, err := ssetClient.Create(ctx, sset, metav1.CreateOptions{}); err != nil { return errors.Wrap(err, "creating statefulset failed") } return nil } oldSSetInputHash := obj.(*appsv1.StatefulSet).ObjectMeta.Annotations[sSetInputHashName] if newSSetInputHash == oldSSetInputHash { level.Debug(c.logger).Log("msg", "new statefulset generation inputs match current, skipping any actions") return nil } level.Debug(c.logger).Log("msg", "updating current Prometheus statefulset") err = k8sutil.UpdateStatefulSet(ctx, ssetClient, sset) sErr, ok := err.(*apierrors.StatusError) if ok && sErr.ErrStatus.Code == 422 && sErr.ErrStatus.Reason == metav1.StatusReasonInvalid { c.metrics.StsDeleteCreateCounter().Inc() level.Info(c.logger).Log("msg", "resolving illegal update of Prometheus StatefulSet", "details", sErr.ErrStatus.Details) propagationPolicy := metav1.DeletePropagationForeground if err := 
ssetClient.Delete(ctx, sset.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") } return nil } if err != nil { return errors.Wrap(err, "updating StatefulSet failed") } } ssets := map[string]struct{}{} for _, ssetName := range expected { ssets[ssetName] = struct{}{} } err = c.ssetInfs.ListAllByNamespace(p.Namespace, labels.SelectorFromSet(labels.Set{prometheusNameLabelName: p.Name}), func(obj interface{}) { s := obj.(*appsv1.StatefulSet) if _, ok := ssets[s.Name]; ok { // Do not delete statefulsets that we still expect to exist. This // is to cleanup StatefulSets when shards are reduced. return } propagationPolicy := metav1.DeletePropagationForeground if err := ssetClient.Delete(context.TODO(), s.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { level.Error(c.logger).Log("failed to delete StatefulSet to cleanup") } }) return nil } //checkPrometheusSpecDeprecation checks for deprecated fields in the prometheus spec and logs a warning if applicable func checkPrometheusSpecDeprecation(key string, p *monitoringv1.Prometheus, logger log.Logger) { deprecationWarningf := "prometheus key=%v, field %v is deprecated, '%v' field should be used instead" if p.Spec.BaseImage != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.baseImage", "spec.image")) } if p.Spec.Tag != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.tag", "spec.image")) } if p.Spec.SHA != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.sha", "spec.image")) } if p.Spec.Thanos != nil { if p.Spec.BaseImage != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.baseImage", "spec.thanos.image")) } if p.Spec.Tag != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.tag", "spec.thanos.image")) } if p.Spec.SHA != "" { level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.sha", "spec.thanos.image")) } } if p.Spec.ServiceMonitorSelector == nil && p.Spec.PodMonitorSelector == nil && p.Spec.ProbeSelector == nil { level.Warn(logger).Log("msg", "neither serviceMonitorSelector nor podMonitorSelector, nor probeSelector specified. Custom configuration is deprecated, use additionalScrapeConfigs instead") } } func createSSetInputHash(p monitoringv1.Prometheus, c operator.Config, ruleConfigMapNames []string, ss interface{}) (string, error) { hash, err := hashstructure.Hash(struct { P monitoringv1.Prometheus C operator.Config S interface{} R []string `hash:"set"` }{p, c, ss, ruleConfigMapNames}, nil, ) if err != nil { return "", errors.Wrap( err, "failed to calculate combined hash of Prometheus StatefulSet, Prometheus CRD, config and"+ " rule ConfigMap names", ) } return fmt.Sprintf("%d", hash), nil } func ListOptions(name string) metav1.ListOptions { return metav1.ListOptions{ LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{ "app": "prometheus", "prometheus": name, })).String(), } } // PrometheusStatus evaluates the current status of a Prometheus deployment with // respect to its specified resource object. It return the status and a list of // pods that are not updated. 
func PrometheusStatus(ctx context.Context, kclient kubernetes.Interface, p *monitoringv1.Prometheus) (*monitoringv1.PrometheusStatus, []v1.Pod, error) { res := &monitoringv1.PrometheusStatus{Paused: p.Spec.Paused} pods, err := kclient.CoreV1().Pods(p.Namespace).List(ctx, ListOptions(p.Name)) if err != nil { return nil, nil, errors.Wrap(err, "retrieving pods of failed") } var oldPods []v1.Pod expected := expectedStatefulSetShardNames(p) for _, ssetName := range expected { sset, err := kclient.AppsV1().StatefulSets(p.Namespace).Get(context.TODO(), ssetName, metav1.GetOptions{}) if err != nil { return nil, nil, errors.Wrap(err, "retrieving stateful set failed") } res.Replicas = int32(len(pods.Items)) for _, pod := range pods.Items { ready, err := k8sutil.PodRunningAndReady(pod) if err != nil { return nil, nil, errors.Wrap(err, "cannot determine pod ready state") } if ready { res.AvailableReplicas++ if needsUpdate(&pod, sset.Spec.Template) { oldPods = append(oldPods, pod) } else { res.UpdatedReplicas++ } continue } res.UnavailableReplicas++ } } return res, oldPods, nil } // needsUpdate checks whether the given pod conforms with the pod template spec // for various attributes that are influenced by the Prometheus CRD settings. func needsUpdate(pod *v1.Pod, tmpl v1.PodTemplateSpec) bool { c1 := pod.Spec.Containers[0] c2 := tmpl.Spec.Containers[0] if c1.Image != c2.Image { return true } if !reflect.DeepEqual(c1.Args, c2.Args) { return true } return false } func (c *Operator) loadAdditionalScrapeConfigsSecret(additionalScrapeConfigs *v1.SecretKeySelector, s *v1.SecretList) ([]byte, error) { if additionalScrapeConfigs != nil { for _, secret := range s.Items { if secret.Name == additionalScrapeConfigs.Name { if c, ok := secret.Data[additionalScrapeConfigs.Key]; ok { return c, nil } return nil, fmt.Errorf("key %v could not be found in Secret %v", additionalScrapeConfigs.Key, additionalScrapeConfigs.Name) } } if additionalScrapeConfigs.Optional == nil || !*additionalScrapeConfigs.Optional { return nil, fmt.Errorf("secret %v could not be found", additionalScrapeConfigs.Name) } level.Debug(c.logger).Log("msg", fmt.Sprintf("secret %v could not be found", additionalScrapeConfigs.Name)) } return nil, nil } func gzipConfig(buf *bytes.Buffer, conf []byte) error { w := gzip.NewWriter(buf) defer w.Close() if _, err := w.Write(conf); err != nil { return err } return nil } func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *monitoringv1.Prometheus, ruleConfigMapNames []string, store *assets.Store) error { // If no service or pod monitor selectors are configured, the user wants to // manage configuration themselves. Do create an empty Secret if it doesn't // exist. 
if p.Spec.ServiceMonitorSelector == nil && p.Spec.PodMonitorSelector == nil && p.Spec.ProbeSelector == nil { level.Debug(c.logger).Log("msg", "neither ServiceMonitor nor PodMonitor, nor Probe selector specified, leaving configuration unmanaged", "prometheus", p.Name, "namespace", p.Namespace) s, err := makeEmptyConfigurationSecret(p, c.config) if err != nil { return errors.Wrap(err, "generating empty config secret failed") } sClient := c.kclient.CoreV1().Secrets(p.Namespace) _, err = sClient.Get(ctx, s.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { if _, err := c.kclient.CoreV1().Secrets(p.Namespace).Create(ctx, s, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "creating empty config file failed") } } if !apierrors.IsNotFound(err) && err != nil { return err } return nil } smons, err := c.selectServiceMonitors(ctx, p, store) if err != nil { return errors.Wrap(err, "selecting ServiceMonitors failed") } pmons, err := c.selectPodMonitors(ctx, p, store) if err != nil { return errors.Wrap(err, "selecting PodMonitors failed") } bmons, err := c.selectProbes(p) if err != nil { return errors.Wrap(err, "selecting Probes failed") } sClient := c.kclient.CoreV1().Secrets(p.Namespace) SecretsInPromNS, err := sClient.List(ctx, metav1.ListOptions{}) if err != nil { return err } for i, remote := range p.Spec.RemoteRead { if err := store.AddBasicAuth(ctx, p.GetNamespace(), remote.BasicAuth, fmt.Sprintf("remoteRead/%d", i)); err != nil { return errors.Wrapf(err, "remote read %d", i) } if err := store.AddTLSConfig(ctx, p.GetNamespace(), remote.TLSConfig); err != nil { return errors.Wrapf(err, "remote read %d", i) } } for i, remote := range p.Spec.RemoteWrite { if err := store.AddBasicAuth(ctx, p.GetNamespace(), remote.BasicAuth, fmt.Sprintf("remoteWrite/%d", i)); err != nil { return errors.Wrapf(err, "remote write %d", i) } if err := store.AddTLSConfig(ctx, p.GetNamespace(), remote.TLSConfig); err != nil { return errors.Wrapf(err, "remote write %d", i) } } if p.Spec.APIServerConfig != nil { if err := store.AddBasicAuth(ctx, p.GetNamespace(), p.Spec.APIServerConfig.BasicAuth, "apiserver"); err != nil { return errors.Wrap(err, "apiserver config") } } additionalScrapeConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalScrapeConfigs, SecretsInPromNS) if err != nil { return errors.Wrap(err, "loading additional scrape configs from Secret failed") } additionalAlertRelabelConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalAlertRelabelConfigs, SecretsInPromNS) if err != nil { return errors.Wrap(err, "loading additional alert relabel configs from Secret failed") } additionalAlertManagerConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalAlertManagerConfigs, SecretsInPromNS) if err != nil { return errors.Wrap(err, "loading additional alert manager configs from Secret failed") } // Update secret based on the most recent configuration. 
conf, err := c.configGenerator.generateConfig( p, smons, pmons, bmons, store.BasicAuthAssets, store.BearerTokenAssets, additionalScrapeConfigs, additionalAlertRelabelConfigs, additionalAlertManagerConfigs, ruleConfigMapNames, ) if err != nil { return errors.Wrap(err, "generating config failed") } s := makeConfigSecret(p, c.config) s.ObjectMeta.Annotations = map[string]string{ "generated": "true", } // Compress config to avoid 1mb secret limit for a while var buf bytes.Buffer if err = gzipConfig(&buf, conf); err != nil { return errors.Wrap(err, "couldn't gzip config") } s.Data[configFilename] = buf.Bytes() curSecret, err := sClient.Get(ctx, s.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { level.Debug(c.logger).Log("msg", "creating configuration") _, err = sClient.Create(ctx, s, metav1.CreateOptions{}) return err } var ( generatedConf = s.Data[configFilename] curConfig, curConfigFound = curSecret.Data[configFilename] ) if curConfigFound { if bytes.Equal(curConfig, generatedConf) { level.Debug(c.logger).Log("msg", "updating Prometheus configuration secret skipped, no configuration change") return nil } level.Debug(c.logger).Log("msg", "current Prometheus configuration has changed") } else { level.Debug(c.logger).Log("msg", "no current Prometheus configuration secret found", "currentConfigFound", curConfigFound) } level.Debug(c.logger).Log("msg", "updating Prometheus configuration secret") return k8sutil.UpdateSecret(ctx, sClient, s) } func (c *Operator) createOrUpdateTLSAssetSecret(ctx context.Context, p *monitoringv1.Prometheus, store *assets.Store) error { boolTrue := true sClient := c.kclient.CoreV1().Secrets(p.Namespace) tlsAssetsSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: tlsAssetsSecretName(p.Name), Labels: c.config.Labels.Merge(managedByOperatorLabels), OwnerReferences: []metav1.OwnerReference{ { APIVersion: p.APIVersion, BlockOwnerDeletion: &boolTrue, Controller: &boolTrue, Kind: p.Kind, Name: p.Name, UID: p.UID, }, }, }, Data: map[string][]byte{}, } for key, asset := range store.TLSAssets { tlsAssetsSecret.Data[key.String()] = []byte(asset) } _, err := sClient.Get(ctx, tlsAssetsSecret.Name, metav1.GetOptions{}) if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf( err, "failed to check whether tls assets secret already exists for Prometheus %v in namespace %v", p.Name, p.Namespace, ) } _, err = sClient.Create(ctx, tlsAssetsSecret, metav1.CreateOptions{}) level.Debug(c.logger).Log("msg", "created tlsAssetsSecret", "secretname", tlsAssetsSecret.Name) } else { err = k8sutil.UpdateSecret(ctx, sClient, tlsAssetsSecret) level.Debug(c.logger).Log("msg", "updated tlsAssetsSecret", "secretname", tlsAssetsSecret.Name) } if err != nil { return errors.Wrapf(err, "failed to create TLS assets secret for Prometheus %v in namespace %v", p.Name, p.Namespace) } return nil } func (c *Operator) selectServiceMonitors(ctx context.Context, p *monitoringv1.Prometheus, store *assets.Store) (map[string]*monitoringv1.ServiceMonitor, error) { namespaces := []string{} // Selectors (<namespace>/<name>) might overlap. Deduplicate them along the keyFunc. serviceMonitors := make(map[string]*monitoringv1.ServiceMonitor) servMonSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorSelector) if err != nil { return nil, err } // If 'ServiceMonitorNamespaceSelector' is nil only check own namespace. 
if p.Spec.ServiceMonitorNamespaceSelector == nil { namespaces = append(namespaces, p.Namespace) } else { servMonNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorNamespaceSelector) if err != nil { return nil, err } namespaces, err = c.listMatchingNamespaces(servMonNSSelector) if err != nil { return nil, err } } level.Debug(c.logger).Log("msg", "filtering namespaces to select ServiceMonitors from", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name) for _, ns := range namespaces { c.smonInfs.ListAllByNamespace(ns, servMonSelector, func(obj interface{}) { k, ok := c.keyFunc(obj) if ok { serviceMonitors[k] = obj.(*monitoringv1.ServiceMonitor) } }) } var rejected int res := make(map[string]*monitoringv1.ServiceMonitor, len(serviceMonitors)) for namespaceAndName, sm := range serviceMonitors { var err error for i, endpoint := range sm.Spec.Endpoints { // If denied by Prometheus spec, filter out all service monitors that access // the file system. if p.Spec.ArbitraryFSAccessThroughSMs.Deny { if err = testForArbitraryFSAccess(endpoint); err != nil { break } } smKey := fmt.Sprintf("serviceMonitor/%s/%s/%d", sm.GetNamespace(), sm.GetName(), i) if err = store.AddBearerToken(ctx, sm.GetNamespace(), endpoint.BearerTokenSecret, smKey); err != nil { break } if err = store.AddBasicAuth(ctx, sm.GetNamespace(), endpoint.BasicAuth, smKey); err != nil { break } if endpoint.TLSConfig != nil { if err = store.AddTLSConfig(ctx, sm.GetNamespace(), endpoint.TLSConfig); err != nil { break } } } if err != nil { rejected++ level.Warn(c.logger).Log( "msg", "skipping servicemonitor", "error", err.Error(), "servicemonitor", namespaceAndName, "namespace", p.Namespace, "prometheus", p.Name, ) continue } res[namespaceAndName] = sm } smKeys := []string{} for k := range res { smKeys = append(smKeys, k) } level.Debug(c.logger).Log("msg", "selected ServiceMonitors", "servicemonitors", strings.Join(smKeys, ","), "namespace", p.Namespace, "prometheus", p.Name) if pKey, ok := c.keyFunc(p); ok { c.metrics.SetSelectedResources(pKey, monitoringv1.ServiceMonitorsKind, len(res)) c.metrics.SetRejectedResources(pKey, monitoringv1.ServiceMonitorsKind, rejected) } return res, nil } func (c *Operator) selectPodMonitors(ctx context.Context, p *monitoringv1.Prometheus, store *assets.Store) (map[string]*monitoringv1.PodMonitor, error) { namespaces := []string{} // Selectors (<namespace>/<name>) might overlap. Deduplicate them along the keyFunc. podMonitors := make(map[string]*monitoringv1.PodMonitor) podMonSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorSelector) if err != nil { return nil, err } // If 'PodMonitorNamespaceSelector' is nil only check own namespace. 
if p.Spec.PodMonitorNamespaceSelector == nil { namespaces = append(namespaces, p.Namespace) } else { podMonNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorNamespaceSelector) if err != nil { return nil, err } namespaces, err = c.listMatchingNamespaces(podMonNSSelector) if err != nil { return nil, err } } level.Debug(c.logger).Log("msg", "filtering namespaces to select PodMonitors from", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name) for _, ns := range namespaces { c.pmonInfs.ListAllByNamespace(ns, podMonSelector, func(obj interface{}) { k, ok := c.keyFunc(obj) if ok { podMonitors[k] = obj.(*monitoringv1.PodMonitor) } }) } var rejected int res := make(map[string]*monitoringv1.PodMonitor, len(podMonitors)) for namespaceAndName, pm := range podMonitors { var err error for i, endpoint := range pm.Spec.PodMetricsEndpoints { pmKey := fmt.Sprintf("podMonitor/%s/%s/%d", pm.GetNamespace(), pm.GetName(), i) if err = store.AddBearerToken(ctx, pm.GetNamespace(), endpoint.BearerTokenSecret, pmKey); err != nil { break } if err = store.AddBasicAuth(ctx, pm.GetNamespace(), endpoint.BasicAuth, pmKey); err != nil { break } if endpoint.TLSConfig != nil { if err = store.AddSafeTLSConfig(ctx, pm.GetNamespace(), &endpoint.TLSConfig.SafeTLSConfig); err != nil { break } } } if err != nil { rejected++ level.Warn(c.logger).Log( "msg", "skipping podmonitor", "error", err.Error(), "podmonitor", namespaceAndName, "namespace", p.Namespace, "prometheus", p.Name, ) continue } res[namespaceAndName] = pm } pmKeys := []string{} for k := range res { pmKeys = append(pmKeys, k) } level.Debug(c.logger).Log("msg", "selected PodMonitors", "podmonitors", strings.Join(pmKeys, ","), "namespace", p.Namespace, "prometheus", p.Name) if pKey, ok := c.keyFunc(p); ok { c.metrics.SetSelectedResources(pKey, monitoringv1.PodMonitorsKind, len(res)) c.metrics.SetRejectedResources(pKey, monitoringv1.PodMonitorsKind, rejected) } return res, nil } func (c *Operator) selectProbes(p *monitoringv1.Prometheus) (map[string]*monitoringv1.Probe, error) { namespaces := []string{} // Selectors might overlap. Deduplicate them along the keyFunc. probes := make(map[string]*monitoringv1.Probe) bMonSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ProbeSelector) if err != nil { return nil, err } // If 'ProbeNamespaceSelector' is nil only check own namespace. 
if p.Spec.ProbeNamespaceSelector == nil { namespaces = append(namespaces, p.Namespace) } else { bMonNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ProbeNamespaceSelector) if err != nil { return nil, err } namespaces, err = c.listMatchingNamespaces(bMonNSSelector) if err != nil { return nil, err } } level.Debug(c.logger).Log("msg", "filtering namespaces to select Probes from", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name) for _, ns := range namespaces { c.probeInfs.ListAllByNamespace(ns, bMonSelector, func(obj interface{}) { if k, ok := c.keyFunc(obj); ok { probes[k] = obj.(*monitoringv1.Probe) } }) } var rejected int res := make(map[string]*monitoringv1.Probe, len(probes)) for probeName, probe := range probes { if probe.Spec.Targets.StaticConfig == nil && probe.Spec.Targets.Ingress == nil { rejected++ level.Warn(c.logger).Log( "msg", "skipping probe", "error", "Probe needs at least one target of type staticConfig or ingress", "probe", probeName, "namespace", p.Namespace, "prometheus", p.Name, ) continue } res[probeName] = probe } probeKeys := make([]string, 0) for k := range res { probeKeys = append(probeKeys, k) } level.Debug(c.logger).Log("msg", "selected Probes", "probes", strings.Join(probeKeys, ","), "namespace", p.Namespace, "prometheus", p.Name) if pKey, ok := c.keyFunc(p); ok { c.metrics.SetSelectedResources(pKey, monitoringv1.ProbesKind, len(res)) c.metrics.SetRejectedResources(pKey, monitoringv1.ProbesKind, rejected) } return res, nil } func testForArbitraryFSAccess(e monitoringv1.Endpoint) error { if e.BearerTokenFile != "" { return errors.New("it accesses file system via bearer token file which Prometheus specification prohibits") } tlsConf := e.TLSConfig if tlsConf == nil { return nil } if tlsConf.CAFile != "" || tlsConf.CertFile != "" || tlsConf.KeyFile != "" { return errors.New("it accesses file system via tls config which Prometheus specification prohibits") } return nil } // listMatchingNamespaces lists all the namespaces that match the provided // selector. func (c *Operator) listMatchingNamespaces(selector labels.Selector) ([]string, error) { var ns []string err := cache.ListAll(c.nsMonInf.GetStore(), selector, func(obj interface{}) { ns = append(ns, obj.(*v1.Namespace).Name) }) if err != nil { return nil, errors.Wrap(err, "failed to list namespaces") } return ns, nil }
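The shard-aware naming scheme in the file above (`statefulSetNameFromPrometheusName`, `statefulSetKeyToPrometheusKey`, `prometheusKeyToStatefulSetKey`) is easiest to follow with concrete keys. The standalone sketch below reproduces the two regular expressions and the conversion helpers; the namespace and Prometheus names used are illustrative only.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same patterns as in the operator: "<ns>/prometheus-<name>" for shard 0 and
// "<ns>/prometheus-<name>-shard-N" for shards >= 1.
var (
	shardedKey = regexp.MustCompile("^(.+)/prometheus-(.+)-shard-[1-9][0-9]*$")
	plainKey   = regexp.MustCompile("^(.+)/prometheus-(.+)$")
)

// statefulSetKeyToPrometheusKey maps a StatefulSet cache key back to the
// owning Prometheus key, reporting false for unrelated StatefulSets.
func statefulSetKeyToPrometheusKey(key string) (bool, string) {
	r := plainKey
	if shardedKey.MatchString(key) {
		r = shardedKey
	}
	m := r.FindAllStringSubmatch(key, 2)
	if len(m) != 1 || len(m[0]) != 3 {
		return false, ""
	}
	return true, m[0][1] + "/" + m[0][2]
}

// prometheusKeyToStatefulSetKey is the reverse direction for a given shard.
func prometheusKeyToStatefulSetKey(key string, shard int) string {
	parts := strings.Split(key, "/")
	name := fmt.Sprintf("prometheus-%s", parts[1])
	if shard > 0 {
		name = fmt.Sprintf("prometheus-%s-shard-%d", parts[1], shard)
	}
	return fmt.Sprintf("%s/%s", parts[0], name)
}

func main() {
	for _, k := range []string{
		"monitoring/prometheus-k8s",         // shard 0
		"monitoring/prometheus-k8s-shard-2", // shard 2
		"monitoring/alertmanager-main",      // not owned by a Prometheus
	} {
		ok, pk := statefulSetKeyToPrometheusKey(k)
		fmt.Printf("%-40s -> match=%v prometheus=%q\n", k, ok, pk)
	}
	fmt.Println(prometheusKeyToStatefulSetKey("monitoring/k8s", 2)) // monitoring/prometheus-k8s-shard-2
}
```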
1
15,642
Shouldn't this be `app.kubernetes.io/managed-by`? prometheus-operator manages this resource, but the resource itself isn't part of prometheus-operator (see the label sketch after this entry).
prometheus-operator-prometheus-operator
go
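To make the suggestion above concrete: the kubelet Endpoints and Service created in `syncNodeEndpoints` currently carry `k8s-app: kubelet` and `app.kubernetes.io/name: kubelet`. The sketch below shows what adding the reviewer's proposed label could look like; the `merge` helper stands in for the operator's `Labels.Merge`, and the label value `prometheus-operator` is an assumption, not taken from an actual change.

```go
package main

import "fmt"

// merge stands in for the operator's config Labels.Merge helper (assumed
// behavior: per-object labels override the operator-wide defaults).
func merge(base, extra map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range base {
		out[k] = v
	}
	for k, v := range extra {
		out[k] = v
	}
	return out
}

func main() {
	kubeletLabels := merge(nil, map[string]string{
		"k8s-app":                "kubelet",
		"app.kubernetes.io/name": "kubelet",
		// Reviewer's suggestion: record that the operator manages the object
		// without implying it is part of the operator. Value is hypothetical.
		"app.kubernetes.io/managed-by": "prometheus-operator",
	})
	fmt.Println(kubeletLabels)
}
```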
@@ -41,8 +41,9 @@ namespace Datadog.Trace.ClrProfiler if (parent != null && parent.Type == SpanTypes.Http && - parent.GetTag(Tags.HttpMethod).Equals(httpMethod, StringComparison.OrdinalIgnoreCase) && - parent.GetTag(Tags.HttpUrl).Equals(UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false), StringComparison.OrdinalIgnoreCase)) + parent.GetTag(Tags.InstrumentationName) != null && + httpMethod.Equals(parent.GetTag(Tags.HttpMethod), StringComparison.OrdinalIgnoreCase) && + UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false).Equals(parent.GetTag(Tags.HttpUrl), StringComparison.OrdinalIgnoreCase)) { // we are already instrumenting this, // don't instrument nested methods that belong to the same stacktrace
1
using System; using System.Data; using System.Data.Common; using Datadog.Trace.ExtensionMethods; using Datadog.Trace.Logging; using Datadog.Trace.Util; namespace Datadog.Trace.ClrProfiler { /// <summary> /// Convenience class that creates scopes and populates them with some standard details. /// </summary> internal static class ScopeFactory { public const string OperationName = "http.request"; public const string ServiceName = "http-client"; private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(ScopeFactory)); /// <summary> /// Creates a scope for outbound http requests and populates some common details. /// </summary> /// <param name="tracer">The tracer instance to use to create the new scope.</param> /// <param name="httpMethod">The HTTP method used by the request.</param> /// <param name="requestUri">The URI requested by the request.</param> /// <param name="integrationName">The name of the integration creating this scope.</param> /// <returns>A new pre-populated scope.</returns> public static Scope CreateOutboundHttpScope(Tracer tracer, string httpMethod, Uri requestUri, string integrationName) { if (!tracer.Settings.IsIntegrationEnabled(integrationName)) { // integration disabled, don't create a scope, skip this trace return null; } Scope scope = null; try { Span parent = tracer.ActiveScope?.Span; if (parent != null && parent.Type == SpanTypes.Http && parent.GetTag(Tags.HttpMethod).Equals(httpMethod, StringComparison.OrdinalIgnoreCase) && parent.GetTag(Tags.HttpUrl).Equals(UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false), StringComparison.OrdinalIgnoreCase)) { // we are already instrumenting this, // don't instrument nested methods that belong to the same stacktrace // e.g. HttpClientHandler.SendAsync() -> SocketsHttpHandler.SendAsync() return null; } scope = tracer.StartActive(OperationName, serviceName: $"{tracer.DefaultServiceName}-{ServiceName}"); var span = scope.Span; span.Type = SpanTypes.Http; span.ResourceName = string.Join( " ", httpMethod, UriHelpers.CleanUri(requestUri, removeScheme: true, tryRemoveIds: true)); span.SetTag(Tags.SpanKind, SpanKinds.Client); span.SetTag(Tags.HttpMethod, httpMethod?.ToUpperInvariant()); span.SetTag(Tags.HttpUrl, UriHelpers.CleanUri(requestUri, removeScheme: false, tryRemoveIds: false)); span.SetTag(Tags.InstrumentationName, integrationName); // set analytics sample rate if enabled var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(integrationName, enabledWithGlobalSetting: false); span.SetMetric(Tags.Analytics, analyticsSampleRate); } catch (Exception ex) { Log.Error(ex, "Error creating or populating scope."); } // always returns the scope, even if it's null because we couldn't create it, // or we couldn't populate it completely (some tags is better than no tags) return scope; } public static Scope CreateDbCommandScope(Tracer tracer, IDbCommand command, string integrationName) { if (!tracer.Settings.IsIntegrationEnabled(integrationName)) { // integration disabled, don't create a scope, skip this trace return null; } Scope scope = null; try { string dbType = GetDbType(command.GetType().Name); if (dbType == null) { // don't create a scope, skip this trace return null; } Span parent = tracer.ActiveScope?.Span; if (parent != null && parent.Type == SpanTypes.Sql && parent.GetTag(Tags.DbType) == dbType && parent.ResourceName == command.CommandText) { // we are already instrumenting this, // don't instrument nested methods that belong to the same stacktrace // e.g. 
ExecuteReader() -> ExecuteReader(commandBehavior) return null; } string serviceName = $"{tracer.DefaultServiceName}-{dbType}"; string operationName = $"{dbType}.query"; scope = tracer.StartActive(operationName, serviceName: serviceName); var span = scope.Span; span.SetTag(Tags.DbType, dbType); span.SetTag(Tags.InstrumentationName, integrationName); span.AddTagsFromDbCommand(command); // set analytics sample rate if enabled var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(integrationName, enabledWithGlobalSetting: false); span.SetMetric(Tags.Analytics, analyticsSampleRate); } catch (Exception ex) { Log.Error(ex, "Error creating or populating scope."); } return scope; } public static string GetDbType(string commandTypeName) { switch (commandTypeName) { case "SqlCommand": return "sql-server"; case "NpgsqlCommand": return "postgres"; case "InterceptableDbCommand": case "ProfiledDbCommand": // don't create spans for these return null; default: const string commandSuffix = "Command"; // remove "Command" suffix if present return commandTypeName.EndsWith(commandSuffix) ? commandTypeName.Substring(0, commandTypeName.Length - commandSuffix.Length).ToLowerInvariant() : commandTypeName.ToLowerInvariant(); } } } }
1
17,202
As long as we're being extra-vigilant about NREs, should we check that `httpMethod` and `requestUri` are not null?
DataDog-dd-trace-dotnet
.cs
@@ -19,6 +19,7 @@ import ( "github.com/iotexproject/iotex-core/cli/ioctl/cmd/node" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/update" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/version" + xrc20 "github.com/iotexproject/iotex-core/cli/ioctl/cmd/xrc20" ) // RootCmd represents the base command when called without any subcommands
1
// Copyright (c) 2019 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package cmd import ( "os" "github.com/spf13/cobra" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/account" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/action" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/alias" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/bc" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/config" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/node" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/update" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/version" ) // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "ioctl", Short: "Command-line interface for IoTeX blockchain", Long: `ioctl is a command-line interface for interacting with IoTeX blockchain.`, } // Execute adds all child commands to the root command and sets flags appropriately. func Execute() { if err := RootCmd.Execute(); err != nil { os.Exit(1) } } func init() { RootCmd.AddCommand(account.AccountCmd) RootCmd.AddCommand(action.ActionCmd) RootCmd.AddCommand(alias.AliasCmd) RootCmd.AddCommand(bc.BCCmd) RootCmd.AddCommand(config.ConfigCmd) RootCmd.AddCommand(node.NodeCmd) RootCmd.AddCommand(update.UpdateCmd) RootCmd.AddCommand(version.VersionCmd) }
1
18,080
No need for the `xrc20` import alias; the package's default name is already `xrc20`, so the plain import suffices (see the illustration after this entry).
iotexproject-iotex-core
go
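The point in the comment above is that the alias added by the patch (`xrc20 "github.com/iotexproject/iotex-core/cli/ioctl/cmd/xrc20"`) repeats the package's default name and can simply be dropped. A minimal, runnable illustration of the same situation using a standard-library package:

```go
package main

import (
	fmt "fmt" // redundant: the alias repeats the default package name,
	// exactly like `xrc20 "github.com/iotexproject/iotex-core/cli/ioctl/cmd/xrc20"`
	// in the patch above; plain `import "fmt"` (or the unaliased xrc20 import) is enough.
)

func main() {
	fmt.Println("drop the alias when it matches the package name")
}
```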
@@ -172,6 +172,19 @@ def define_environment_cls(pipeline_def): ) +def context_cls_inst(pipeline_def): + check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) + pipeline_name = camelcase(pipeline_def.name) + return SystemNamedDict( + name='{pipeline_name}.Context'.format(pipeline_name=pipeline_name), + fields={ + 'context': define_maybe_optional_selector_field( + define_context_context_cls(pipeline_name, pipeline_def.context_definitions) + ) + }, + ).inst() + + def define_expectations_config_cls(name): check.str_param(name, 'name')
1
from dagster import check from dagster.utils import camelcase, single_item from dagster.core.definitions import ( PipelineContextDefinition, PipelineDefinition, ResourceDefinition, Solid, SolidDefinition, SolidInputHandle, ) from dagster.core.types import Bool, Field, List, NamedDict, NamedSelector from dagster.core.types.config import ConfigType, ConfigTypeAttributes from dagster.core.types.default_applier import apply_default_values from dagster.core.types.field_utils import check_opt_field_param, FieldImpl from .objects import ( ContextConfig, EnvironmentConfig, ExecutionConfig, ExpectationsConfig, SolidConfig, ) def SystemNamedDict(name, fields, description=None): return NamedDict(name, fields, description, ConfigTypeAttributes(is_system_config=True)) def SystemNamedSelector(name, fields, description=None): return NamedSelector(name, fields, description, ConfigTypeAttributes(is_system_config=True)) def _is_selector_field_optional(config_type): check.inst_param(config_type, 'config_type', ConfigType) if len(config_type.fields) > 1: return False else: _name, field = single_item(config_type.fields) return field.is_optional def define_maybe_optional_selector_field(config_cls): is_optional = _is_selector_field_optional(config_cls.inst()) return ( Field( config_cls, is_optional=is_optional, default_value=apply_default_values(config_cls.inst(), None), ) if is_optional else Field(config_cls, is_optional=False) ) def define_resource_dictionary_cls(name, resources): check.str_param(name, 'name') check.dict_param(resources, 'resources', key_type=str, value_type=ResourceDefinition) fields = {} for resource_name, resource in resources.items(): if resource.config_field: fields[resource_name] = Field( SystemNamedDict(name + '.' + resource_name, {'config': resource.config_field}) ) return SystemNamedDict(name=name, fields=fields) def define_specific_context_config_cls(name, config_field, resources): check.str_param(name, 'name') check_opt_field_param(config_field, 'config_field') check.dict_param(resources, 'resources', key_type=str, value_type=ResourceDefinition) return SystemNamedDict( name, fields=remove_none_entries( { 'config': config_field, 'resources': Field( define_resource_dictionary_cls('{name}.Resources'.format(name=name), resources) ), } ), ) def define_context_context_cls(pipeline_name, context_definitions): check.str_param(pipeline_name, 'pipeline_name') check.dict_param( context_definitions, 'context_definitions', key_type=str, value_type=PipelineContextDefinition, ) full_type_name = '{pipeline_name}.ContextConfig'.format(pipeline_name=pipeline_name) field_dict = {} if len(context_definitions) == 1: context_name, context_definition = single_item(context_definitions) field_dict[context_name] = Field( define_specific_context_cls(pipeline_name, context_name, context_definition) ) else: for context_name, context_definition in context_definitions.items(): field_dict[context_name] = Field( define_specific_context_cls(pipeline_name, context_name, context_definition), is_optional=True, ) return SystemNamedSelector(full_type_name, field_dict) def define_specific_context_cls(pipeline_name, context_name, context_definition): return define_specific_context_config_cls( '{pipeline_name}.ContextDefinitionConfig.{context_name}'.format( pipeline_name=pipeline_name, context_name=camelcase(context_name) ), context_definition.config_field, context_definition.resources, ) def remove_none_entries(ddict): return {k: v for k, v in ddict.items() if v is not None} def define_solid_config_cls(name, config_field, 
inputs_field, outputs_field): check.str_param(name, 'name') check_opt_field_param(config_field, 'config_field') check_opt_field_param(inputs_field, 'inputs_field') check_opt_field_param(outputs_field, 'outputs_field') return NamedDict( name, remove_none_entries( {'config': config_field, 'inputs': inputs_field, 'outputs': outputs_field} ), type_attributes=ConfigTypeAttributes(is_system_config=True), ) def define_environment_cls(pipeline_def): check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) pipeline_name = camelcase(pipeline_def.name) return SystemNamedDict( name='{pipeline_name}.Environment'.format(pipeline_name=pipeline_name), fields={ 'context': define_maybe_optional_selector_field( define_context_context_cls(pipeline_name, pipeline_def.context_definitions) ), 'solids': Field( define_solid_dictionary_cls( '{pipeline_name}.SolidsConfigDictionary'.format(pipeline_name=pipeline_name), pipeline_def, ) ), 'expectations': Field( define_expectations_config_cls( '{pipeline_name}.ExpectationsConfig'.format(pipeline_name=pipeline_name) ) ), 'execution': Field( define_execution_config_cls( '{pipeline_name}.ExecutionConfig'.format(pipeline_name=pipeline_name) ) ), }, ) def define_expectations_config_cls(name): check.str_param(name, 'name') return SystemNamedDict( name, fields={'evaluate': Field(Bool, is_optional=True, default_value=True)} ) def solid_has_configurable_inputs(solid_def): check.inst_param(solid_def, 'solid_def', SolidDefinition) return any(map(lambda inp: inp.runtime_type.input_schema, solid_def.input_defs)) def solid_has_configurable_outputs(solid_def): check.inst_param(solid_def, 'solid_def', SolidDefinition) return any(map(lambda out: out.runtime_type.output_schema, solid_def.output_defs)) def get_inputs_field(pipeline_def, solid): check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.inst_param(solid, 'solid', Solid) if not solid_has_configurable_inputs(solid.definition): return None inputs_field_fields = {} for inp in [inp for inp in solid.definition.input_defs if inp.runtime_type.input_schema]: inp_handle = SolidInputHandle(solid, inp) # If this input is not satisfied by a dependency you must # provide it via config if not pipeline_def.dependency_structure.has_dep(inp_handle): inputs_field_fields[inp.name] = FieldImpl(inp.runtime_type.input_schema.schema_type) if not inputs_field_fields: return None return Field( SystemNamedDict( '{pipeline_name}.{solid_name}.Inputs'.format( pipeline_name=camelcase(pipeline_def.name), solid_name=camelcase(solid.name) ), inputs_field_fields, ) ) def get_outputs_field(pipeline_def, solid): check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.inst_param(solid, 'solid', Solid) solid_def = solid.definition if not solid_has_configurable_outputs(solid_def): return None output_dict_fields = {} for out in [out for out in solid_def.output_defs if out.runtime_type.output_schema]: output_dict_fields[out.name] = Field( type(out.runtime_type.output_schema.schema_type), is_optional=True ) output_entry_dict = SystemNamedDict( '{pipeline_name}.{solid_name}.Outputs'.format( pipeline_name=camelcase(pipeline_def.name), solid_name=camelcase(solid.name) ), output_dict_fields, ) return Field(List(output_entry_dict), is_optional=True) def solid_has_config_entry(solid_def): check.inst_param(solid_def, 'solid_def', SolidDefinition) return ( solid_def.config_field or solid_has_configurable_inputs(solid_def) or solid_has_configurable_outputs(solid_def) ) def define_solid_dictionary_cls(name, pipeline_def): 
check.str_param(name, 'name') check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) fields = {} for solid in pipeline_def.solids: if solid_has_config_entry(solid.definition): solid_config_type = define_solid_config_cls( '{pipeline_name}.SolidConfig.{solid_name}'.format( pipeline_name=camelcase(pipeline_def.name), solid_name=camelcase(solid.name) ), solid.definition.config_field, inputs_field=get_inputs_field(pipeline_def, solid), outputs_field=get_outputs_field(pipeline_def, solid), ) fields[solid.name] = Field(solid_config_type) return SystemNamedDict(name, fields) def define_execution_config_cls(name): check.str_param(name, 'name') return NamedDict(name, {}, type_attributes=ConfigTypeAttributes(is_system_config=True)) def construct_environment_config(config_value): return EnvironmentConfig( solids=construct_solid_dictionary(config_value['solids']), execution=ExecutionConfig(**config_value['execution']), expectations=ExpectationsConfig(**config_value['expectations']), context=construct_context_config(config_value['context']), ) def construct_context_config(config_value): context_name, context_value = single_item(config_value) return ContextConfig( name=context_name, config=context_value.get('config'), resources=context_value['resources'] ) def construct_solid_dictionary(solid_dict_value): return { key: SolidConfig( config=value.get('config'), inputs=value.get('inputs', {}), outputs=value.get('outputs', []), ) for key, value in solid_dict_value.items() }
1
12,142
the naming convention I've been adopting is `_type` for instances of these classes. So maybe `context_config_type` is a better name for this fn
dagster-io-dagster
py
@@ -54,6 +54,7 @@ public class BlockMiner<C, M extends AbstractBlockCreator<C>> implements Runnabl private final ProtocolSchedule<C> protocolSchedule; private final Subscribers<MinedBlockObserver> observers; private final AbstractBlockScheduler scheduler; + private Boolean gpuMining = false; public BlockMiner( final Function<BlockHeader, M> blockCreatorFactory,
1
/* * Copyright 2018 ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package org.hyperledger.besu.ethereum.blockcreation; import org.hyperledger.besu.ethereum.ProtocolContext; import org.hyperledger.besu.ethereum.chain.MinedBlockObserver; import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.BlockImporter; import org.hyperledger.besu.ethereum.core.Transaction; import org.hyperledger.besu.ethereum.mainnet.HeaderValidationMode; import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule; import org.hyperledger.besu.util.Subscribers; import java.util.List; import java.util.concurrent.CancellationException; import java.util.concurrent.TimeUnit; import java.util.function.Function; import com.google.common.base.Stopwatch; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; /** * Responsible for creating a block, and importing it to the blockchain. This is specifically a * mainnet capability (as IBFT would then use the block as part of a proposal round). * * <p>While the capability is largely functional, it has been wrapped in an object to allow it to be * cancelled safely. * * <p>This class is responsible for mining a single block only - the AbstractBlockCreator maintains * state so must be destroyed between block mining activities. */ public class BlockMiner<C, M extends AbstractBlockCreator<C>> implements Runnable { private static final Logger LOG = LogManager.getLogger(); protected final Function<BlockHeader, M> blockCreatorFactory; protected final M minerBlockCreator; protected final ProtocolContext<C> protocolContext; protected final BlockHeader parentHeader; private final ProtocolSchedule<C> protocolSchedule; private final Subscribers<MinedBlockObserver> observers; private final AbstractBlockScheduler scheduler; public BlockMiner( final Function<BlockHeader, M> blockCreatorFactory, final ProtocolSchedule<C> protocolSchedule, final ProtocolContext<C> protocolContext, final Subscribers<MinedBlockObserver> observers, final AbstractBlockScheduler scheduler, final BlockHeader parentHeader) { this.blockCreatorFactory = blockCreatorFactory; this.minerBlockCreator = blockCreatorFactory.apply(parentHeader); this.protocolContext = protocolContext; this.protocolSchedule = protocolSchedule; this.observers = observers; this.scheduler = scheduler; this.parentHeader = parentHeader; } @Override public void run() { boolean blockMined = false; while (!blockMined && !minerBlockCreator.isCancelled()) { try { blockMined = mineBlock(); } catch (final CancellationException ex) { LOG.debug("Block creation process cancelled."); break; } catch (final InterruptedException ex) { LOG.debug("Block mining was interrupted.", ex); Thread.currentThread().interrupt(); } catch (final Exception ex) { LOG.error("Block mining threw an unhandled exception.", ex); } } } /** * Create a block with the given transactions and ommers. 
The list of transactions are validated * as they are processed, and are not guaranteed to be included in the final block. If * transactions must match exactly, the caller must verify they were all able to be included. * * @param parentHeader The header of the parent of the block to be produced * @param transactions The list of transactions which may be included. * @param ommers The list of ommers to include. * @return the newly created block. */ public Block createBlock( final BlockHeader parentHeader, final List<Transaction> transactions, final List<BlockHeader> ommers) { final BlockCreator blockCreator = this.blockCreatorFactory.apply(parentHeader); final long timestamp = scheduler.getNextTimestamp(parentHeader).getTimestampForHeader(); return blockCreator.createBlock(transactions, ommers, timestamp); } protected boolean mineBlock() throws InterruptedException { // Ensure the block is allowed to be mined - i.e. the timestamp on the new block is sufficiently // ahead of the parent, and still within allowable clock tolerance. LOG.trace("Started a mining operation."); final long newBlockTimestamp = scheduler.waitUntilNextBlockCanBeMined(parentHeader); final Stopwatch stopwatch = Stopwatch.createStarted(); LOG.trace("Mining a new block with timestamp {}", newBlockTimestamp); final Block block = minerBlockCreator.createBlock(newBlockTimestamp); LOG.trace( "Block created, importing to local chain, block includes {} transactions", block.getBody().getTransactions().size()); final BlockImporter<C> importer = protocolSchedule.getByBlockNumber(block.getHeader().getNumber()).getBlockImporter(); final boolean blockImported = importer.importBlock(protocolContext, block, HeaderValidationMode.FULL); if (blockImported) { notifyNewBlockListeners(block); final double taskTimeInSec = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0; LOG.info( String.format( "Produced and imported block #%,d / %d tx / %d om / %,d (%01.1f%%) gas / (%s) in %01.3fs", block.getHeader().getNumber(), block.getBody().getTransactions().size(), block.getBody().getOmmers().size(), block.getHeader().getGasUsed(), (block.getHeader().getGasUsed() * 100.0) / block.getHeader().getGasLimit(), block.getHash(), taskTimeInSec)); } else { LOG.error("Illegal block mined, could not be imported to local chain."); } return blockImported; } public void cancel() { minerBlockCreator.cancel(); } private void notifyNewBlockListeners(final Block block) { observers.forEach(obs -> obs.blockMined(block)); } public BlockHeader getParentHeader() { return parentHeader; } }
1
19,640
Don't call it GPU mining, call it `externalMining`, here and throughout.
hyperledger-besu
java
@@ -252,6 +252,15 @@ return [ | folder - a folder prefix for storing all generated files inside. | path - the public path relative to the application base URL, | or you can specify a full URL path. + | + | For the 'media' resource you can also specify: + | + | imageMaxWidth - Resize all media manager image uploads to be + | within this with (in pixels, 0 = ignore) + | imageMaxHeight - Resize all media manager image uploads to be + | within this height (in pixels, 0 = ignore) + | imageQuality - Set this quality to all media manager image uploads + | (1-100) */ 'storage' => [
1
<?php return [ /* |-------------------------------------------------------------------------- | Specifies the default CMS theme. |-------------------------------------------------------------------------- | | This parameter value can be overridden by the CMS back-end settings. | */ 'activeTheme' => 'demo', /* |-------------------------------------------------------------------------- | Bleeding edge updates |-------------------------------------------------------------------------- | | If you are developing with October, it is important to have the latest | code base. Set this value to 'true' to tell the platform to download | and use the development copies of core files and plugins. | */ 'edgeUpdates' => false, /* |-------------------------------------------------------------------------- | Back-end URI prefix |-------------------------------------------------------------------------- | | Specifies the URL name used for accessing back-end pages. | For example: backend -> http://localhost/backend | */ 'backendUri' => 'backend', /* |-------------------------------------------------------------------------- | Back-end force HTTPS security |-------------------------------------------------------------------------- | | Use this setting to force a secure protocol when accessing any back-end | pages, including the authentication pages. If set to null, this setting | is enabled when debug mode (app.debug) is disabled. | */ 'backendForceSecure' => null, /* |-------------------------------------------------------------------------- | Back-end login remember |-------------------------------------------------------------------------- | | Define live duration of backend sessions : | | true - session never expire (cookie expiration in 5 years) | | false - session have a limited time (see session.lifetime) | | null - The form login display a checkbox that allow user to choose | wanted behavior | */ 'backendForceRemember' => true, /* |-------------------------------------------------------------------------- | Back-end timezone |-------------------------------------------------------------------------- | | This acts as the default setting for a back-end user's timezone. This can | be changed by the user at any time using the backend preferences. All | dates displayed in the back-end will be converted to this timezone. | */ 'backendTimezone' => 'UTC', /* |-------------------------------------------------------------------------- | Back-end Skin |-------------------------------------------------------------------------- | | Specifies the back-end skin to use. | */ 'backendSkin' => 'Backend\Skins\Standard', /* |-------------------------------------------------------------------------- | Determines which modules to load |-------------------------------------------------------------------------- | | Specify which modules should be registered when using the application. | */ 'loadModules' => ['System', 'Backend', 'Cms'], /* |-------------------------------------------------------------------------- | Prevents application updates |-------------------------------------------------------------------------- | | If using composer or git to download updates to the core files, set this | value to 'true' to prevent the update gateway from trying to download | these files again as part of the application update process. Plugins | and themes will still be downloaded. 
| */ 'disableCoreUpdates' => false, /* |-------------------------------------------------------------------------- | Specific plugins to disable |-------------------------------------------------------------------------- | | Specify plugin codes which will always be disabled in the application. | */ 'disablePlugins' => [], /* |-------------------------------------------------------------------------- | Determines if the routing caching is enabled. |-------------------------------------------------------------------------- | | If the caching is enabled, the page URL map is saved in the cache. If a page | URL was changed on the disk, the old URL value could be still saved in the cache. | To update the cache the back-end Clear Cache feature should be used. It is recommended | to disable the caching during the development, and enable it in the production mode. | */ 'enableRoutesCache' => false, /* |-------------------------------------------------------------------------- | Time to live for the URL map. |-------------------------------------------------------------------------- | | The URL map used in the CMS page routing process. By default | the map is updated every time when a page is saved in the back-end or when the | interval, in minutes, specified with the urlMapCacheTTL parameter expires. | */ 'urlCacheTtl' => 10, /* |-------------------------------------------------------------------------- | Time to live for parsed CMS objects. |-------------------------------------------------------------------------- | | Specifies the number of minutes the CMS object cache lives. After the interval | is expired item are re-cached. Note that items are re-cached automatically when | the corresponding template file is modified. | */ 'parsedPageCacheTTL' => 10, /* |-------------------------------------------------------------------------- | Determines if the asset caching is enabled. |-------------------------------------------------------------------------- | | If the caching is enabled, combined assets are cached. If a asset file | is changed on the disk, the old file contents could be still saved in the cache. | To update the cache the back-end Clear Cache feature should be used. It is recommended | to disable the caching during the development, and enable it in the production mode. | */ 'enableAssetCache' => false, /* |-------------------------------------------------------------------------- | Determines if the asset minification is enabled. |-------------------------------------------------------------------------- | | If the minification is enabled, combined assets are compressed (minified). | It is recommended to disable the minification during development, and | enable it in production mode. If set to null, assets are minified | when debug mode (app.debug) is disabled. | */ 'enableAssetMinify' => null, /* |-------------------------------------------------------------------------- | Check import timestamps when combining assets |-------------------------------------------------------------------------- | | If deep hashing is enabled, the combiner cache will be reset when a change | is detected on imported files, in addition to those referenced directly. | This will cause slower page performance. If set to null, deep hashing | is used when debug mode (app.debug) is enabled. 
| */ 'enableAssetDeepHashing' => null, /* |-------------------------------------------------------------------------- | Public plugins path |-------------------------------------------------------------------------- | | Specifies the public plugins path relative to the application base URL, | or you can specify a full URL path. | */ 'pluginsPath' => '/plugins', /* |-------------------------------------------------------------------------- | Public themes path |-------------------------------------------------------------------------- | | Specifies the public themes path relative to the application base URL, | or you can specify a full URL path. | */ 'themesPath' => '/themes', /* |-------------------------------------------------------------------------- | Resource storage |-------------------------------------------------------------------------- | | Specifies the configuration for resource storage, such as media and | upload files. These resources are used: | | media - generated by the media manager. | uploads - generated by attachment model relationships. | | For each resource you can specify: | | disk - filesystem disk, as specified in filesystems.php config. | folder - a folder prefix for storing all generated files inside. | path - the public path relative to the application base URL, | or you can specify a full URL path. */ 'storage' => [ 'uploads' => [ 'disk' => 'local', 'folder' => 'uploads', 'path' => '/storage/app/uploads', ], 'media' => [ 'disk' => 'local', 'folder' => 'media', 'path' => '/storage/app/media', ], ], /* |-------------------------------------------------------------------------- | Convert Line Endings |-------------------------------------------------------------------------- | | Determines if October should convert line endings from the windows style | \r\n to the unix style \n. | */ 'convertLineEndings' => false, /* |-------------------------------------------------------------------------- | Linking policy |-------------------------------------------------------------------------- | | Controls how URL links are generated throughout the application. | | detect - detect hostname and use the current schema | secure - detect hostname and force HTTPS schema | insecure - detect hostname and force HTTP schema | force - force hostname and schema using app.url config value | */ 'linkPolicy' => 'detect', /* |-------------------------------------------------------------------------- | Default permission mask |-------------------------------------------------------------------------- | | Specifies a default file and folder permission for newly created objects. | */ 'defaultMask' => ['file' => null, 'folder' => null], /* |-------------------------------------------------------------------------- | Safe mode |-------------------------------------------------------------------------- | | If safe mode is enabled, the PHP code section is disabled in the CMS | for security reasons. If set to null, safe mode is enabled when | debug mode (app.debug) is disabled. | */ 'enableSafeMode' => null, /* |-------------------------------------------------------------------------- | Cross Site Request Forgery (CSRF) Protection |-------------------------------------------------------------------------- | | If the CSRF protection is enabled, all "postback" requests are checked | for a valid security token. 
| */ 'enableCsrfProtection' => true, /* |-------------------------------------------------------------------------- | Force bytecode invalidation |-------------------------------------------------------------------------- | | When using OPcache with opcache.validate_timestamps set to 0 or APC | with apc.stat set to 0 and Twig cache enabled, clearing the template | cache won't update the cache, set to true to get around this. | */ 'forceBytecodeInvalidation' => true, /* |-------------------------------------------------------------------------- | Twig Strict Variables |-------------------------------------------------------------------------- | | If strict_variables is disabled, Twig will silently ignore invalid | variables (variables and or attributes/methods that do not exist) and | replace them with a null value. When enabled, Twig throws an exception | instead. If set to null, it is enabled when debug mode (app.debug) is | enabled. | */ 'enableTwigStrictVariables' => false, ];
1
13,247
`within this with` typo, should be `within this width`
octobercms-october
php
@@ -22,10 +22,12 @@ import ( ) // Prometheus defines a Prometheus deployment. +// +k8s:openapi-gen=true type Prometheus struct { metav1.TypeMeta `json:",inline"` // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata + // +k8s:openapi-gen=false metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1 import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) // Prometheus defines a Prometheus deployment. type Prometheus struct { metav1.TypeMeta `json:",inline"` // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status Spec PrometheusSpec `json:"spec"` // Most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status Status *PrometheusStatus `json:"status,omitempty"` } // PrometheusList is a list of Prometheuses. type PrometheusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Prometheuses Items []*Prometheus `json:"items"` } // Specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status type PrometheusSpec struct { // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata // Metadata Labels and Annotations gets propagated to the prometheus pods. PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` // ServiceMonitors to be selected for target discovery. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` // Version of Prometheus to be deployed. Version string `json:"version,omitempty"` // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` // Base image to use for a Prometheus deployment. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Number of instances to deploy for a Prometheus deployment. Replicas *int32 `json:"replicas,omitempty"` // Time duration Prometheus shall retain data for. Retention string `json:"retention,omitempty"` // Log level for Prometheus be configured in. 
LogLevel string `json:"logLevel,omitempty"` // Interval between consecutive scrapes. ScrapeInterval string `json:"scrapeInterval,omitempty"` // Interval between consecutive evaluations. EvaluationInterval string `json:"evaluationInterval,omitempty"` // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). ExternalLabels map[string]string `json:"externalLabels,omitempty"` // The external URL the Prometheus instances will be available under. This is // necessary to generate correct URLs. This is necessary if Prometheus is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Prometheus registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // Storage spec to specify how storage shall be used. Storage *StorageSpec `json:"storage,omitempty"` // A selector to select which ConfigMaps to mount for loading rule files from. RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` // Define details regarding alerting. Alerting AlertingSpec `json:"alerting,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // Secrets is a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // The Secrets are mounted into /etc/prometheus/secrets/<secret-name>. // Secrets changes after initial creation of a Prometheus object are not // reflected in the running Pods. To change the secrets mounted into the // Prometheus Pods, the object must be deleted and recreated with the new list // of secrets. Secrets []string `json:"secrets,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. Tolerations []v1.Toleration `json:"tolerations,omitempty"` // If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` // If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and // default PodSecurityContext for other versions. SecurityContext *v1.PodSecurityContext } // Most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status type PrometheusStatus struct { // Represents whether any actions on the underlaying managed objects are // being performed. 
Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Prometheus deployment // (their labels match the selector). Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Prometheus deployment // that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Prometheus deployment. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Prometheus deployment. UnavailableReplicas int32 `json:"unavailableReplicas"` } // AlertingSpec defines parameters for alerting configuration of Prometheus servers. type AlertingSpec struct { // AlertmanagerEndpoints Prometheus should fire alerts against. Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` } // StorageSpec defines the configured storage for a group Prometheus servers. type StorageSpec struct { // Name of the StorageClass to use when requesting storage provisioning. More // info: https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses // DEPRECATED Class string `json:"class"` // EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` // A label query over volumes to consider for binding. // DEPRECATED Selector *metav1.LabelSelector `json:"selector"` // Resources represents the minimum resources the volume should have. More // info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources // DEPRECATED Resources v1.ResourceRequirements `json:"resources"` // A PVC spec to be used by the Prometheus StatefulSets. VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` } // RemoteWriteSpec defines the remote_write configuration for prometheus. type RemoteWriteSpec struct { //The URL of the endpoint to send samples to. URL string `json:"url"` //Timeout for requests to the remote write endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` //The list of remote write relabel configurations. WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` //BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File to read bearer token for remote write. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote write. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote write. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` //Optional ProxyURL ProxyURL string `json:"proxy_url,omitempty"` } // RemoteReadSpec defines the remote_read configuration for prometheus. type RemoteReadSpec struct { //The URL of the endpoint to send samples to. URL string `json:"url"` //Timeout for requests to the remote write endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` //BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // bearer token for remote write. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote write. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote write. 
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` //Optional ProxyURL ProxyURL string `json:"proxy_url,omitempty"` } // RelabelConfig allows dynamic rewriting of the label set. type RelabelConfig struct { //The source labels select values from existing labels. Their content is concatenated //using the configured separator and matched against the configured regular expression //for the replace, keep, and drop actions. SourceLabels []string `json:"sourceLabels"` //Separator placed between concatenated source label values. default is ';'. Separator string `json:"separator,omitempty"` //Label to which the resulting value is written in a replace action. //It is mandatory for replace actions. Regex capture groups are available. TargetLabel string `json:"targetLabel,omitempty"` //Regular expression against which the extracted value is matched. defailt is '(.*)' Regex string `json:"regex,omitempty"` // Modulus to take of the hash of the source label values. Modulus uint64 `json:"modulus,omitempty"` //Replacement value against which a regex replace is performed if the //regular expression matches. Regex capture groups are available. Default is '$1' Replacement string `json:"replacement"` // Action to perform based on regex matching. Default is 'replace' Action string `json:"action,omitempty"` } // AlertmanagerEndpoints defines a selection of a single Endpoints object // containing alertmanager IPs to fire alerts against. type AlertmanagerEndpoints struct { // Namespace of Endpoints object. Namespace string `json:"namespace"` // Name of Endpoints object in Namespace. Name string `json:"name"` // Port the Alertmanager API is exposed on. Port intstr.IntOrString `json:"port"` // Scheme to use when firing alerts. Scheme string `json:"scheme"` // Prefix for the HTTP path alerts are pushed to. PathPrefix string `json:"pathPrefix"` } // ServiceMonitor defines monitoring for a set of services. type ServiceMonitor struct { metav1.TypeMeta `json:",inline"` // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Service selection for target discrovery by // Prometheus. Spec ServiceMonitorSpec `json:"spec"` } // ServiceMonitorSpec contains specification parameters for a ServiceMonitor. type ServiceMonitorSpec struct { // The label to use to retrieve the job name from. JobLabel string `json:"jobLabel,omitempty"` // A list of endpoints allowed as part of this ServiceMonitor. Endpoints []Endpoint `json:"endpoints"` // Selector to select Endpoints objects. Selector metav1.LabelSelector `json:"selector"` // Selector to select which namespaces the Endpoints objects are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` } // Endpoint defines a scrapeable endpoint serving Prometheus metrics. type Endpoint struct { // Name of the service port this endpoint refers to. Mutually exclusive with targetPort. Port string `json:"port,omitempty"` // Name or number of the target port of the endpoint. Mutually exclusive with port. TargetPort intstr.IntOrString `json:"targetPort,omitempty"` // HTTP path to scrape for metrics. Path string `json:"path,omitempty"` // HTTP scheme to use for scraping. 
Scheme string `json:"scheme,omitempty"` // Optional HTTP URL parameters Params map[string][]string `json:"params,omitempty"` // Interval at which metrics should be scraped Interval string `json:"interval,omitempty"` // Timeout after which the scrape is ended ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // File to read bearer token for scraping targets. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. HonorLabels bool `json:"honorLabels,omitempty"` // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` } // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints type BasicAuth struct { // The secret that contains the username for authenticate Username v1.SecretKeySelector `json:"username,omitempty"` // The secret that contains the password for authenticate Password v1.SecretKeySelector `json:"password,omitempty"` } // TLSConfig specifies TLS configuration parameters. type TLSConfig struct { // The CA cert to use for the targets. CAFile string `json:"caFile,omitempty"` // The client cert file for the targets. CertFile string `json:"certFile,omitempty"` // The client key file for the targets. KeyFile string `json:"keyFile,omitempty"` // Used to verify the hostname for the targets. ServerName string `json:"serverName,omitempty"` // Disable target certificate validation. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` } // A list of ServiceMonitors. type ServiceMonitorList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of ServiceMonitors Items []*ServiceMonitor `json:"items"` } // Describes an Alertmanager cluster. type Alertmanager struct { metav1.TypeMeta `json:",inline"` // Standard object’s metadata. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status Spec AlertmanagerSpec `json:"spec"` // Most recent observed status of the Alertmanager cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status Status *AlertmanagerStatus `json:"status,omitempty"` } // Specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status type AlertmanagerSpec struct { // Standard object’s metadata. 
More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata // Metadata Labels and Annotations gets propagated to the prometheus pods. PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` // Version the cluster should be on. Version string `json:"version,omitempty"` // Base image that is used to deploy pods. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected // size. Replicas *int32 `json:"replicas,omitempty"` // Storage is the definition of how storage will be used by the Alertmanager // instances. Storage *StorageSpec `json:"storage,omitempty"` // The external URL the Alertmanager instances will be available under. This is // necessary to generate correct URLs. This is necessary if Alertmanager is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Alertmanager registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // If set to true all actions on the underlaying managed objects are not // goint to be performed, except for delete actions. Paused bool `json:"paused,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. Tolerations []v1.Toleration `json:"tolerations,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to non root user with uid 1000 and gid 2000. SecurityContext *v1.PodSecurityContext // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` } // A list of Alertmanagers. type AlertmanagerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Alertmanagers Items []Alertmanager `json:"items"` } // Most recent observed status of the Alertmanager cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status type AlertmanagerStatus struct { // Represents whether any actions on the underlaying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Alertmanager // cluster (their labels match the selector). 
Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Alertmanager // cluster that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Alertmanager cluster. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Alertmanager cluster. UnavailableReplicas int32 `json:"unavailableReplicas"` } // A selector for selecting namespaces either selecting all namespaces or a // list of namespaces. type NamespaceSelector struct { // Boolean describing whether all namespaces are selected in contrast to a // list restricting them. Any bool `json:"any,omitempty"` // List of namespace names. MatchNames []string `json:"matchNames,omitempty"` // TODO(fabxc): this should embed metav1.LabelSelector eventually. // Currently the selector is only used for namespaces which require more complex // implementation to support label selections. } func (l *Alertmanager) DeepCopyObject() runtime.Object { return l.DeepCopy() } func (l *AlertmanagerList) DeepCopyObject() runtime.Object { return l.DeepCopy() } func (l *Prometheus) DeepCopyObject() runtime.Object { return l.DeepCopy() } func (l *PrometheusList) DeepCopyObject() runtime.Object { return l.DeepCopy() } func (l *ServiceMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() }
1
9,582
how come this is false?
prometheus-operator-prometheus-operator
go
@@ -24,6 +24,19 @@ function node_require(module) { return require(module); } +function typeOf(obj) { + return ({}).toString.call(obj).match(/\s(\w+)/)[1].toLowerCase(); +} + +function checkTypes(args, types) { + args = [].slice.call(args); + for (var i = 0; i < types.length; ++i) { + if (typeOf(args[i]) !== types[i]) { + throw new TypeError('param ' + i + ' must be of type ' + types[i]); + } + } +} + const performFetch = typeof fetch === 'undefined' ? node_require('node-fetch') : fetch; const url_parse = require('url-parse');
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; const AuthError = require('./errors').AuthError; function node_require(module) { return require(module); } const performFetch = typeof fetch === 'undefined' ? node_require('node-fetch') : fetch; const url_parse = require('url-parse'); const postHeaders = { 'content-type': 'application/json;charset=utf-8', 'accept': 'application/json' }; function auth_url(server) { if (server.charAt(server.length-1) != '/') { return server + '/auth'; } return server + 'auth'; } function scheduleAccessTokenRefresh(user, localRealmPath, realmUrl, expirationDate) { const refreshBuffer = 10 * 1000; const timeout = expirationDate - Date.now() - refreshBuffer; setTimeout(() => refreshAccessToken(user, localRealmPath, realmUrl), timeout); } function refreshAccessToken(user, localRealmPath, realmUrl) { let parsedRealmUrl = url_parse(realmUrl); const url = auth_url(user.server); const options = { method: 'POST', body: JSON.stringify({ data: user.token, path: parsedRealmUrl.pathname, provider: 'realm', app_id: '' }), headers: postHeaders }; performFetch(url, options) // in case something lower in the HTTP stack breaks, try again in 10 seconds .catch(() => setTimeout(() => refreshAccessToken(user, localRealmPath, realmUrl), 10 * 1000)) .then((response) => response.json().then((json) => { return { response, json }; })) .then((responseAndJson) => { const response = responseAndJson.response; const json = responseAndJson.json; // Look up a fresh instance of the user. 
// We do this because in React Native Remote Debugging // `Realm.clearTestState()` will have invalidated the user object let newUser = user.constructor.all[user.identity]; if (newUser) { let session = newUser._sessionForOnDiskPath(localRealmPath); if (session) { if (response.status != 200) { let errorHandler = session.config.error; let error = new AuthError(json); if (errorHandler) { errorHandler(session, error); } else { (console.error || console.log).call(console, `Unhandled session token refresh error: ${error}`); } } else if (session.state !== 'invalid') { parsedRealmUrl.set('pathname', json.access_token.token_data.path); session._refreshAccessToken(json.access_token.token, parsedRealmUrl.href); const tokenExpirationDate = new Date(json.access_token.token_data.expires * 1000); scheduleAccessTokenRefresh(newUser, localRealmPath, realmUrl, tokenExpirationDate); } } } }); } function _authenticate(userConstructor, server, json, callback) { json.app_id = ''; const url = auth_url(server); const options = { method: 'POST', body: JSON.stringify(json), headers: postHeaders, open_timeout: 5000 }; performFetch(url, options) .then((response) => { if (response.status !== 200) { return response.json().then((body) => callback(new AuthError(body))); } else { return response.json().then(function (body) { // TODO: validate JSON const token = body.refresh_token.token; const identity = body.refresh_token.token_data.identity; callback(undefined, userConstructor.createUser(server, identity, token, false)); }) } }) .catch(callback); } module.exports = { static: { get current() { const allUsers = this.all; const keys = Object.keys(allUsers); if (keys.length === 0) { return undefined; } else if (keys.length > 1) { throw new Error("Multiple users are logged in"); } return allUsers[keys[0]]; }, adminUser(token) { var uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) { var r = Math.random()*16|0, v = c == 'x' ? r : (r&0x3|0x8); return v.toString(16); }); var user = this.createUser('', uuid, token, true); return user; }, register(server, username, password, callback) { _authenticate(this, server, { provider: 'password', user_info: { password: password, register: true }, data: username }, callback); }, login(server, username, password, callback) { _authenticate(this, server, { provider: 'password', user_info: { password: password }, data: username }, callback); }, registerWithProvider(server, options, callback) { // Compatibility with previous signature: // registerWithProvider(server, provider, providerToken, callback) if (arguments.length === 4) { options = { provider: arguments[1], providerToken: arguments[2] }; callback = arguments[3]; } let reqOptions = { provider: options.provider, data: options.providerToken, }; if (options.userInfo) { reqOptions.user_info = options.userInfo; } _authenticate(this, server, reqOptions, callback); }, _refreshAccessToken: refreshAccessToken }, instance: { openManagementRealm() { let url = url_parse(this.server); if (url.protocol === 'http:') { url.set('protocol', 'realm:'); } else if (url.protocol === 'https:') { url.set('protocol', 'realms:'); } else { throw new Error(`Unexpected user auth url: ${this.server}`); } url.set('pathname', '/~/__management'); return new this.constructor._realmConstructor({ schema: require('./management-schema'), sync: { user: this, url: url.href } }); } } };
1
15,935
How about `Object.prototype.toString`? Or using the `typeof` operator?
realm-realm-js
js
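The review above asks whether the `typeOf`/`checkTypes` helpers from the realm-js diff could use `Object.prototype.toString` or the `typeof` operator instead of `({}).toString`. Purely as an illustration of that suggestion (this is a hypothetical sketch, not code from the realm-js patch), the same argument validation might look like this:

```js
// Sketch only: reuses the typeOf/checkTypes names from the diff, but the
// combined typeof/toString dispatch below is an assumption, not the patch.
function typeOf(obj) {
  // Object.prototype.toString gives e.g. "[object Array]" -> "array"
  return Object.prototype.toString.call(obj).match(/\s(\w+)/)[1].toLowerCase();
}

function checkTypes(args, types) {
  args = [].slice.call(args);
  for (var i = 0; i < types.length; ++i) {
    // typeof is enough for primitives and functions; fall back to
    // Object.prototype.toString for objects (arrays, dates, null, ...).
    var actual = typeof args[i] === 'object' ? typeOf(args[i]) : typeof args[i];
    if (actual !== types[i]) {
      throw new TypeError('param ' + i + ' must be of type ' + types[i]);
    }
  }
}

// usage: checkTypes(arguments, ['string', 'function']);
```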
@@ -83,6 +83,7 @@ type ReporterKBPKI struct { notifySyncBuffer chan *keybase1.FSPathSyncStatus suppressCh chan time.Duration canceler func() + ctx context.Context } // NewReporterKBPKI creates a new ReporterKBPKI.
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "fmt" "strconv" "strings" "time" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/tlf" "github.com/pkg/errors" "golang.org/x/net/context" ) const ( // error param keys errorParamTlf = "tlf" errorParamMode = "mode" errorParamFeature = "feature" errorParamUsername = "username" errorParamExternal = "external" errorParamRekeySelf = "rekeyself" errorParamUsageBytes = "usageBytes" errorParamLimitBytes = "limitBytes" errorParamUsageFiles = "usageFiles" errorParamLimitFiles = "limitFiles" errorParamRenameOldFilename = "oldFilename" errorParamFoldersCreated = "foldersCreated" errorParamFolderLimit = "folderLimit" errorParamApplicationExecPath = "applicationExecPath" // error operation modes errorModeRead = "read" errorModeWrite = "write" // features that aren't ready yet errorFeatureFileLimit = "2gbFileLimit" errorFeatureDirLimit = "512kbDirLimit" ) const connectionStatusConnected keybase1.FSStatusCode = keybase1.FSStatusCode_START const connectionStatusDisconnected keybase1.FSStatusCode = keybase1.FSStatusCode_ERROR // noErrorNames are lookup names that should not result in an error // notification. These should all be reserved or illegal Keybase // usernames that will never be associated with a real account. var noErrorNames = map[string]bool{ "objects": true, // git shells "gemfile": true, // rvm "Gemfile": true, // rvm "devfs": true, // lsof? KBFS-823 "_mtn": true, // emacs on Linux "_MTN": true, // emacs on Linux "docker-machine": true, // docker shell stuff "HEAD": true, // git shell "Keybase.app": true, // some OSX mount thing "DCIM": true, // looking for digital pic folder "Thumbs.db": true, // Windows mounts "config": true, // Windows, possibly 7-Zip? "m4root": true, // OS X, iMovie? "BDMV": true, // OS X, iMovie? "node_modules": true, // Some npm shell configuration "folder": true, // Dolphin? keybase/client#7304 "avchd": true, // Sony PlayMemories Home, keybase/client#6801 "avchd_bk": true, // Sony PlayMemories Home, keybase/client#6801 "sony": true, // Sony PlayMemories Home, keybase/client#6801 } // ReporterKBPKI implements the Notify function of the Reporter // interface in addition to embedding ReporterSimple for error // tracking. Notify will make RPCs to the keybase daemon. type ReporterKBPKI struct { *ReporterSimple config Config log logger.Logger notifyBuffer chan *keybase1.FSNotification notifySyncBuffer chan *keybase1.FSPathSyncStatus suppressCh chan time.Duration canceler func() } // NewReporterKBPKI creates a new ReporterKBPKI. func NewReporterKBPKI(config Config, maxErrors, bufSize int) *ReporterKBPKI { r := &ReporterKBPKI{ ReporterSimple: NewReporterSimple(config.Clock(), maxErrors), config: config, log: config.MakeLogger(""), notifyBuffer: make(chan *keybase1.FSNotification, bufSize), notifySyncBuffer: make(chan *keybase1.FSPathSyncStatus, bufSize), suppressCh: make(chan time.Duration, 1), } var ctx context.Context ctx, r.canceler = context.WithCancel(context.Background()) go r.send(ctx) return r } // ReportErr implements the Reporter interface for ReporterKBPKI. 
func (r *ReporterKBPKI) ReportErr(ctx context.Context, tlfName tlf.CanonicalName, t tlf.Type, mode ErrorModeType, err error) { r.ReporterSimple.ReportErr(ctx, tlfName, t, mode, err) // Fire off error popups params := make(map[string]string) filename := "" var code keybase1.FSErrorType = -1 switch e := errors.Cause(err).(type) { case ReadAccessError: code = keybase1.FSErrorType_ACCESS_DENIED params[errorParamMode] = errorModeRead filename = e.Filename case WriteAccessError: code = keybase1.FSErrorType_ACCESS_DENIED params[errorParamUsername] = e.User.String() params[errorParamMode] = errorModeWrite filename = e.Filename case WriteUnsupportedError: code = keybase1.FSErrorType_ACCESS_DENIED params[errorParamMode] = errorModeWrite filename = e.Filename case NoSuchUserError: if !noErrorNames[e.Input] { code = keybase1.FSErrorType_USER_NOT_FOUND params[errorParamUsername] = e.Input if strings.ContainsAny(e.Input, "@:") { params[errorParamExternal] = "true" } else { params[errorParamExternal] = "false" } } case UnverifiableTlfUpdateError: code = keybase1.FSErrorType_REVOKED_DATA_DETECTED case NoCurrentSessionError: code = keybase1.FSErrorType_NOT_LOGGED_IN case NeedSelfRekeyError: code = keybase1.FSErrorType_REKEY_NEEDED params[errorParamRekeySelf] = "true" case NeedOtherRekeyError: code = keybase1.FSErrorType_REKEY_NEEDED params[errorParamRekeySelf] = "false" case FileTooBigError: code = keybase1.FSErrorType_NOT_IMPLEMENTED params[errorParamFeature] = errorFeatureFileLimit case FileTooBigForCRError: code = keybase1.FSErrorType_NOT_IMPLEMENTED params[errorParamFeature] = errorFeatureFileLimit case DirTooBigError: code = keybase1.FSErrorType_NOT_IMPLEMENTED params[errorParamFeature] = errorFeatureDirLimit case kbfsmd.NewMetadataVersionError: code = keybase1.FSErrorType_OLD_VERSION err = OutdatedVersionError{} case kbfsmd.NewMerkleVersionError: code = keybase1.FSErrorType_OLD_VERSION err = OutdatedVersionError{} case NewDataVersionError: code = keybase1.FSErrorType_OLD_VERSION err = OutdatedVersionError{} case OverQuotaWarning: code = keybase1.FSErrorType_OVER_QUOTA params[errorParamUsageBytes] = strconv.FormatInt(e.UsageBytes, 10) params[errorParamLimitBytes] = strconv.FormatInt(e.LimitBytes, 10) case *ErrDiskLimitTimeout: if !e.reportable { return } code = keybase1.FSErrorType_DISK_LIMIT_REACHED params[errorParamUsageBytes] = strconv.FormatInt(e.usageBytes, 10) params[errorParamLimitBytes] = strconv.FormatFloat(e.limitBytes, 'f', 0, 64) params[errorParamUsageFiles] = strconv.FormatInt(e.usageFiles, 10) params[errorParamLimitFiles] = strconv.FormatFloat(e.limitFiles, 'f', 0, 64) case NoSigChainError: code = keybase1.FSErrorType_NO_SIG_CHAIN params[errorParamUsername] = e.User.String() case kbfsmd.ServerErrorTooManyFoldersCreated: code = keybase1.FSErrorType_TOO_MANY_FOLDERS params[errorParamFolderLimit] = strconv.FormatUint(e.Limit, 10) params[errorParamFoldersCreated] = strconv.FormatUint(e.Created, 10) case RenameAcrossDirsError: if len(e.ApplicationExecPath) > 0 { code = keybase1.FSErrorType_EXDEV_NOT_SUPPORTED params[errorParamApplicationExecPath] = e.ApplicationExecPath } } if code < 0 && err == context.DeadlineExceeded { code = keybase1.FSErrorType_TIMEOUT // Workaround for DESKTOP-2442 filename = string(tlfName) } if code >= 0 { n := errorNotification(err, code, tlfName, t, mode, filename, params) r.Notify(ctx, n) } } // Notify implements the Reporter interface for ReporterKBPKI. 
// // TODO: might be useful to get the debug tags out of ctx and store // them in the notifyBuffer as well so that send() can put // them back in its context. func (r *ReporterKBPKI) Notify(ctx context.Context, notification *keybase1.FSNotification) { select { case r.notifyBuffer <- notification: default: r.log.CDebugf(ctx, "ReporterKBPKI: notify buffer full, dropping %+v", notification) } } // NotifySyncStatus implements the Reporter interface for ReporterKBPKI. // // TODO: might be useful to get the debug tags out of ctx and store // them in the notifyBuffer as well so that send() can put // them back in its context. func (r *ReporterKBPKI) NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus) { select { case r.notifySyncBuffer <- status: default: r.log.CDebugf(ctx, "ReporterKBPKI: notify sync buffer full, "+ "dropping %+v", status) } } // SuppressNotifications implements the Reporter interface for ReporterKBPKI. func (r *ReporterKBPKI) SuppressNotifications( ctx context.Context, suppressDuration time.Duration) { r.suppressCh <- suppressDuration } // Shutdown implements the Reporter interface for ReporterKBPKI. func (r *ReporterKBPKI) Shutdown() { r.canceler() close(r.notifyBuffer) close(r.notifySyncBuffer) close(r.suppressCh) } // send takes notifications out of notifyBuffer and notifySyncBuffer // and sends them to the keybase daemon. func (r *ReporterKBPKI) send(ctx context.Context) { suppressTimer := time.NewTimer(0) suppressed := false var stagedNotification *keybase1.FSNotification var stagedStatus *keybase1.FSPathSyncStatus for { select { case notification, ok := <-r.notifyBuffer: if !ok { return } if suppressed { stagedNotification = notification } else if err := r.config.KeybaseService().Notify(ctx, notification); err != nil { r.log.CDebugf(ctx, "ReporterDaemon: error sending "+ "notification: %s", err) } case status, ok := <-r.notifySyncBuffer: if !ok { return } if suppressed { stagedStatus = status } else if err := r.config.KeybaseService().NotifySyncStatus(ctx, status); err != nil { r.log.CDebugf(ctx, "ReporterDaemon: error sending "+ "sync status: %s", err) } case suppressFor, ok := <-r.suppressCh: if !ok { return } suppressTimer.Reset(suppressFor) suppressed = true case <-suppressTimer.C: if stagedNotification != nil { if err := r.config.KeybaseService().Notify(ctx, stagedNotification); err != nil { r.log.CDebugf(ctx, "ReporterDaemon: error sending "+ "notification: %s", err) } stagedNotification = nil } if stagedStatus != nil { if err := r.config.KeybaseService().NotifySyncStatus(ctx, stagedStatus); err != nil { r.log.CDebugf(ctx, "ReporterDaemon: error sending "+ "sync status: %s", err) } stagedStatus = nil } suppressed = false } } } // writeNotification creates FSNotifications from paths for file // write events. func writeNotification(file path, finish bool) *keybase1.FSNotification { n := baseNotification(file, finish) if file.Tlf.Type() == tlf.Public { n.NotificationType = keybase1.FSNotificationType_SIGNING } else { n.NotificationType = keybase1.FSNotificationType_ENCRYPTING } return n } // readNotification creates FSNotifications from paths for file // read events. func readNotification(file path, finish bool) *keybase1.FSNotification { n := baseNotification(file, finish) if file.Tlf.Type() == tlf.Public { n.NotificationType = keybase1.FSNotificationType_VERIFYING } else { n.NotificationType = keybase1.FSNotificationType_DECRYPTING } return n } // rekeyNotification creates FSNotifications from TlfHandles for rekey // events. 
func rekeyNotification(ctx context.Context, config Config, handle *TlfHandle, finish bool) *keybase1.FSNotification { code := keybase1.FSStatusCode_START if finish { code = keybase1.FSStatusCode_FINISH } return &keybase1.FSNotification{ FolderType: handle.Type().FolderType(), Filename: string(handle.GetCanonicalPath()), StatusCode: code, NotificationType: keybase1.FSNotificationType_REKEYING, } } func baseFileEditNotification(file path, writer keybase1.UID, localTime time.Time) *keybase1.FSNotification { n := baseNotification(file, true) n.WriterUid = writer n.LocalTime = keybase1.ToTime(localTime) return n } // fileCreateNotification creates FSNotifications from paths for file // create events. func fileCreateNotification(file path, writer keybase1.UID, localTime time.Time) *keybase1.FSNotification { n := baseFileEditNotification(file, writer, localTime) n.NotificationType = keybase1.FSNotificationType_FILE_CREATED return n } // fileModifyNotification creates FSNotifications from paths for file // modification events. func fileModifyNotification(file path, writer keybase1.UID, localTime time.Time) *keybase1.FSNotification { n := baseFileEditNotification(file, writer, localTime) n.NotificationType = keybase1.FSNotificationType_FILE_MODIFIED return n } // fileDeleteNotification creates FSNotifications from paths for file // delete events. func fileDeleteNotification(file path, writer keybase1.UID, localTime time.Time) *keybase1.FSNotification { n := baseFileEditNotification(file, writer, localTime) n.NotificationType = keybase1.FSNotificationType_FILE_DELETED return n } // fileRenameNotification creates FSNotifications from paths for file // rename events. func fileRenameNotification(oldFile path, newFile path, writer keybase1.UID, localTime time.Time) *keybase1.FSNotification { n := baseFileEditNotification(newFile, writer, localTime) n.NotificationType = keybase1.FSNotificationType_FILE_RENAMED n.Params = map[string]string{errorParamRenameOldFilename: oldFile.CanonicalPathString()} return n } // connectionNotification creates FSNotifications based on whether // or not KBFS is online. func connectionNotification(status keybase1.FSStatusCode) *keybase1.FSNotification { // TODO finish placeholder return &keybase1.FSNotification{ NotificationType: keybase1.FSNotificationType_CONNECTION, StatusCode: status, } } // baseNotification creates a basic FSNotification without a // NotificationType from a path. func baseNotification(file path, finish bool) *keybase1.FSNotification { code := keybase1.FSStatusCode_START if finish { code = keybase1.FSStatusCode_FINISH } return &keybase1.FSNotification{ Filename: file.CanonicalPathString(), StatusCode: code, } } // errorNotification creates FSNotifications for errors. 
func errorNotification(err error, errType keybase1.FSErrorType, tlfName tlf.CanonicalName, t tlf.Type, mode ErrorModeType, filename string, params map[string]string) *keybase1.FSNotification { if tlfName != "" { params[errorParamTlf] = string(tlfName) } var nType keybase1.FSNotificationType switch mode { case ReadMode: params[errorParamMode] = errorModeRead if t == tlf.Public { nType = keybase1.FSNotificationType_VERIFYING } else { nType = keybase1.FSNotificationType_DECRYPTING } case WriteMode: params[errorParamMode] = errorModeWrite if t == tlf.Public { nType = keybase1.FSNotificationType_SIGNING } else { nType = keybase1.FSNotificationType_ENCRYPTING } default: panic(fmt.Sprintf("Unknown mode: %v", mode)) } return &keybase1.FSNotification{ FolderType: t.FolderType(), Filename: filename, StatusCode: keybase1.FSStatusCode_ERROR, Status: err.Error(), ErrorType: errType, Params: params, NotificationType: nType, } } func mdReadSuccessNotification(handle *TlfHandle, public bool) *keybase1.FSNotification { params := make(map[string]string) if handle != nil { params[errorParamTlf] = string(handle.GetCanonicalName()) } return &keybase1.FSNotification{ FolderType: handle.Type().FolderType(), Filename: string(handle.GetCanonicalPath()), StatusCode: keybase1.FSStatusCode_START, NotificationType: keybase1.FSNotificationType_MD_READ_SUCCESS, Params: params, } }
1
19,862
I know @jzila suggested this, but I disagree: you're really not supposed to save a context in a struct. Can you get by with just saving the `Done()` channel instead? The `ctx` should continue to be passed around explicitly.
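For context, a minimal Go sketch of the pattern this comment seems to be asking for (the reporter type and its field names below are invented for illustration, not taken from the KBFS code): only the context's Done() channel is saved on the struct for shutdown signalling, while contexts themselves keep being passed explicitly to the methods that need them.

// Hypothetical illustration of the reviewer's suggestion (type and field
// names are invented, not taken from KBFS): save only the Done() channel
// for shutdown signalling instead of the whole context, and keep passing
// ctx explicitly to the methods that need it.
package main

import (
	"context"
	"fmt"
	"time"
)

type reporter struct {
	shutdownCh <-chan struct{} // saved in place of a context.Context field
	canceler   func()
	notifyCh   chan string
}

func newReporter() *reporter {
	ctx, cancel := context.WithCancel(context.Background())
	r := &reporter{
		shutdownCh: ctx.Done(),
		canceler:   cancel,
		notifyCh:   make(chan string, 8),
	}
	go r.send()
	return r
}

// send only needs to know when to stop; per-call contexts would still be
// passed explicitly to any RPCs made from here.
func (r *reporter) send() {
	for {
		select {
		case <-r.shutdownCh:
			return
		case n := <-r.notifyCh:
			fmt.Println("notify:", n)
		}
	}
}

func (r *reporter) Shutdown() { r.canceler() }

func main() {
	r := newReporter()
	r.notifyCh <- "hello"
	time.Sleep(10 * time.Millisecond) // give send a chance to drain
	r.Shutdown()
}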
keybase-kbfs
go
@@ -12,12 +12,7 @@ namespace Microsoft.AspNet.Server.Kestrel.Networking { IsWindows = PlatformApis.IsWindows; - var isDarwinMono = -#if DNX451 - IsWindows ? false : PlatformApis.IsDarwin; -#else - false; -#endif + var isDarwinMono = !IsWindows && PlatformApis.IsDarwin; if (isDarwinMono) {
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Runtime.InteropServices; namespace Microsoft.AspNet.Server.Kestrel.Networking { public class Libuv { public Libuv() { IsWindows = PlatformApis.IsWindows; var isDarwinMono = #if DNX451 IsWindows ? false : PlatformApis.IsDarwin; #else false; #endif if (isDarwinMono) { _uv_loop_init = NativeDarwinMonoMethods.uv_loop_init; _uv_loop_close = NativeDarwinMonoMethods.uv_loop_close; _uv_run = NativeDarwinMonoMethods.uv_run; _uv_stop = NativeDarwinMonoMethods.uv_stop; _uv_ref = NativeDarwinMonoMethods.uv_ref; _uv_unref = NativeDarwinMonoMethods.uv_unref; _uv_close = NativeDarwinMonoMethods.uv_close; _uv_async_init = NativeDarwinMonoMethods.uv_async_init; _uv_async_send = NativeDarwinMonoMethods.uv_async_send; _uv_tcp_init = NativeDarwinMonoMethods.uv_tcp_init; _uv_tcp_bind = NativeDarwinMonoMethods.uv_tcp_bind; _uv_tcp_open = NativeDarwinMonoMethods.uv_tcp_open; _uv_tcp_nodelay = NativeDarwinMonoMethods.uv_tcp_nodelay; _uv_pipe_init = NativeDarwinMonoMethods.uv_pipe_init; _uv_pipe_bind = NativeDarwinMonoMethods.uv_pipe_bind; _uv_listen = NativeDarwinMonoMethods.uv_listen; _uv_accept = NativeDarwinMonoMethods.uv_accept; _uv_pipe_connect = NativeDarwinMonoMethods.uv_pipe_connect; _uv_pipe_pending_count = NativeDarwinMonoMethods.uv_pipe_pending_count; _uv_read_start = NativeDarwinMonoMethods.uv_read_start; _uv_read_stop = NativeDarwinMonoMethods.uv_read_stop; _uv_try_write = NativeDarwinMonoMethods.uv_try_write; unsafe { _uv_write = NativeDarwinMonoMethods.uv_write; _uv_write2 = NativeDarwinMonoMethods.uv_write2; } _uv_shutdown = NativeDarwinMonoMethods.uv_shutdown; _uv_err_name = NativeDarwinMonoMethods.uv_err_name; _uv_strerror = NativeDarwinMonoMethods.uv_strerror; _uv_loop_size = NativeDarwinMonoMethods.uv_loop_size; _uv_handle_size = NativeDarwinMonoMethods.uv_handle_size; _uv_req_size = NativeDarwinMonoMethods.uv_req_size; _uv_ip4_addr = NativeDarwinMonoMethods.uv_ip4_addr; _uv_ip6_addr = NativeDarwinMonoMethods.uv_ip6_addr; _uv_tcp_getpeername = NativeDarwinMonoMethods.uv_tcp_getpeername; _uv_tcp_getsockname = NativeDarwinMonoMethods.uv_tcp_getsockname; _uv_walk = NativeDarwinMonoMethods.uv_walk; } else { _uv_loop_init = NativeMethods.uv_loop_init; _uv_loop_close = NativeMethods.uv_loop_close; _uv_run = NativeMethods.uv_run; _uv_stop = NativeMethods.uv_stop; _uv_ref = NativeMethods.uv_ref; _uv_unref = NativeMethods.uv_unref; _uv_close = NativeMethods.uv_close; _uv_async_init = NativeMethods.uv_async_init; _uv_async_send = NativeMethods.uv_async_send; _uv_tcp_init = NativeMethods.uv_tcp_init; _uv_tcp_bind = NativeMethods.uv_tcp_bind; _uv_tcp_open = NativeMethods.uv_tcp_open; _uv_tcp_nodelay = NativeMethods.uv_tcp_nodelay; _uv_pipe_init = NativeMethods.uv_pipe_init; _uv_pipe_bind = NativeMethods.uv_pipe_bind; _uv_listen = NativeMethods.uv_listen; _uv_accept = NativeMethods.uv_accept; _uv_pipe_connect = NativeMethods.uv_pipe_connect; _uv_pipe_pending_count = NativeMethods.uv_pipe_pending_count; _uv_read_start = NativeMethods.uv_read_start; _uv_read_stop = NativeMethods.uv_read_stop; _uv_try_write = NativeMethods.uv_try_write; unsafe { _uv_write = NativeMethods.uv_write; _uv_write2 = NativeMethods.uv_write2; } _uv_shutdown = NativeMethods.uv_shutdown; _uv_err_name = NativeMethods.uv_err_name; _uv_strerror = NativeMethods.uv_strerror; _uv_loop_size = NativeMethods.uv_loop_size; _uv_handle_size = 
NativeMethods.uv_handle_size; _uv_req_size = NativeMethods.uv_req_size; _uv_ip4_addr = NativeMethods.uv_ip4_addr; _uv_ip6_addr = NativeMethods.uv_ip6_addr; _uv_tcp_getpeername = NativeMethods.uv_tcp_getpeername; _uv_tcp_getsockname = NativeMethods.uv_tcp_getsockname; _uv_walk = NativeMethods.uv_walk; } } public readonly bool IsWindows; public int Check(int statusCode) { Exception error; var result = Check(statusCode, out error); if (error != null) { throw error; } return statusCode; } public int Check(int statusCode, out Exception error) { if (statusCode < 0) { var errorName = err_name(statusCode); var errorDescription = strerror(statusCode); error = new UvException("Error " + statusCode + " " + errorName + " " + errorDescription); } else { error = null; } return statusCode; } protected Func<UvLoopHandle, int> _uv_loop_init; public void loop_init(UvLoopHandle handle) { Check(_uv_loop_init(handle)); } protected Func<IntPtr, int> _uv_loop_close; public void loop_close(UvLoopHandle handle) { handle.Validate(closed: true); Check(_uv_loop_close(handle.InternalGetHandle())); } protected Func<UvLoopHandle, int, int> _uv_run; public int run(UvLoopHandle handle, int mode) { handle.Validate(); return Check(_uv_run(handle, mode)); } protected Action<UvLoopHandle> _uv_stop; public void stop(UvLoopHandle handle) { handle.Validate(); _uv_stop(handle); } protected Action<UvHandle> _uv_ref; public void @ref(UvHandle handle) { handle.Validate(); _uv_ref(handle); } protected Action<UvHandle> _uv_unref; public void unref(UvHandle handle) { handle.Validate(); _uv_unref(handle); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_close_cb(IntPtr handle); protected Action<IntPtr, uv_close_cb> _uv_close; public void close(UvHandle handle, uv_close_cb close_cb) { handle.Validate(closed: true); _uv_close(handle.InternalGetHandle(), close_cb); } public void close(IntPtr handle, uv_close_cb close_cb) { _uv_close(handle, close_cb); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_async_cb(IntPtr handle); protected Func<UvLoopHandle, UvAsyncHandle, uv_async_cb, int> _uv_async_init; public void async_init(UvLoopHandle loop, UvAsyncHandle handle, uv_async_cb cb) { loop.Validate(); handle.Validate(); Check(_uv_async_init(loop, handle, cb)); } protected Func<UvAsyncHandle, int> _uv_async_send; public void async_send(UvAsyncHandle handle) { Check(_uv_async_send(handle)); } protected Func<UvLoopHandle, UvTcpHandle, int> _uv_tcp_init; public void tcp_init(UvLoopHandle loop, UvTcpHandle handle) { loop.Validate(); handle.Validate(); Check(_uv_tcp_init(loop, handle)); } protected delegate int uv_tcp_bind_func(UvTcpHandle handle, ref SockAddr addr, int flags); protected uv_tcp_bind_func _uv_tcp_bind; public void tcp_bind(UvTcpHandle handle, ref SockAddr addr, int flags) { handle.Validate(); Check(_uv_tcp_bind(handle, ref addr, flags)); } protected Func<UvTcpHandle, IntPtr, int> _uv_tcp_open; public void tcp_open(UvTcpHandle handle, IntPtr hSocket) { handle.Validate(); Check(_uv_tcp_open(handle, hSocket)); } protected Func<UvTcpHandle, int, int> _uv_tcp_nodelay; public void tcp_nodelay(UvTcpHandle handle, bool enable) { handle.Validate(); Check(_uv_tcp_nodelay(handle, enable ? 1 : 0)); } protected Func<UvLoopHandle, UvPipeHandle, int, int> _uv_pipe_init; public void pipe_init(UvLoopHandle loop, UvPipeHandle handle, bool ipc) { loop.Validate(); handle.Validate(); Check(_uv_pipe_init(loop, handle, ipc ? 
-1 : 0)); } protected Func<UvPipeHandle, string, int> _uv_pipe_bind; public void pipe_bind(UvPipeHandle handle, string name) { handle.Validate(); Check(_uv_pipe_bind(handle, name)); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_connection_cb(IntPtr server, int status); protected Func<UvStreamHandle, int, uv_connection_cb, int> _uv_listen; public void listen(UvStreamHandle handle, int backlog, uv_connection_cb cb) { handle.Validate(); Check(_uv_listen(handle, backlog, cb)); } protected Func<UvStreamHandle, UvStreamHandle, int> _uv_accept; public void accept(UvStreamHandle server, UvStreamHandle client) { server.Validate(); client.Validate(); Check(_uv_accept(server, client)); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_connect_cb(IntPtr req, int status); protected Action<UvConnectRequest, UvPipeHandle, string, uv_connect_cb> _uv_pipe_connect; unsafe public void pipe_connect(UvConnectRequest req, UvPipeHandle handle, string name, uv_connect_cb cb) { req.Validate(); handle.Validate(); _uv_pipe_connect(req, handle, name, cb); } protected Func<UvPipeHandle, int> _uv_pipe_pending_count; unsafe public int pipe_pending_count(UvPipeHandle handle) { handle.Validate(); return _uv_pipe_pending_count(handle); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_alloc_cb(IntPtr server, int suggested_size, out uv_buf_t buf); [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_read_cb(IntPtr server, int nread, ref uv_buf_t buf); protected Func<UvStreamHandle, uv_alloc_cb, uv_read_cb, int> _uv_read_start; public void read_start(UvStreamHandle handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb) { handle.Validate(); Check(_uv_read_start(handle, alloc_cb, read_cb)); } protected Func<UvStreamHandle, int> _uv_read_stop; public void read_stop(UvStreamHandle handle) { handle.Validate(); Check(_uv_read_stop(handle)); } protected Func<UvStreamHandle, uv_buf_t[], int, int> _uv_try_write; public int try_write(UvStreamHandle handle, uv_buf_t[] bufs, int nbufs) { handle.Validate(); return Check(_uv_try_write(handle, bufs, nbufs)); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_write_cb(IntPtr req, int status); unsafe protected delegate int uv_write_func(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb); unsafe protected uv_write_func _uv_write; unsafe public void write(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb) { req.Validate(); handle.Validate(); Check(_uv_write(req, handle, bufs, nbufs, cb)); } unsafe protected delegate int uv_write2_func(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb); unsafe protected uv_write2_func _uv_write2; unsafe public void write2(UvRequest req, UvStreamHandle handle, Libuv.uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb) { req.Validate(); handle.Validate(); Check(_uv_write2(req, handle, bufs, nbufs, sendHandle, cb)); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_shutdown_cb(IntPtr req, int status); protected Func<UvShutdownReq, UvStreamHandle, uv_shutdown_cb, int> _uv_shutdown; public void shutdown(UvShutdownReq req, UvStreamHandle handle, uv_shutdown_cb cb) { req.Validate(); handle.Validate(); Check(_uv_shutdown(req, handle, cb)); } protected Func<int, IntPtr> _uv_err_name; public unsafe string err_name(int err) { IntPtr ptr = _uv_err_name(err); return ptr == IntPtr.Zero 
? null : Marshal.PtrToStringAnsi(ptr); } protected Func<int, IntPtr> _uv_strerror; public unsafe string strerror(int err) { IntPtr ptr = _uv_strerror(err); return ptr == IntPtr.Zero ? null : Marshal.PtrToStringAnsi(ptr); } protected Func<int> _uv_loop_size; public int loop_size() { return _uv_loop_size(); } protected Func<HandleType, int> _uv_handle_size; public int handle_size(HandleType handleType) { return _uv_handle_size(handleType); } protected Func<RequestType, int> _uv_req_size; public int req_size(RequestType reqType) { return _uv_req_size(reqType); } protected delegate int uv_ip4_addr_func(string ip, int port, out SockAddr addr); protected uv_ip4_addr_func _uv_ip4_addr; public int ip4_addr(string ip, int port, out SockAddr addr, out Exception error) { return Check(_uv_ip4_addr(ip, port, out addr), out error); } protected delegate int uv_ip6_addr_func(string ip, int port, out SockAddr addr); protected uv_ip6_addr_func _uv_ip6_addr; public int ip6_addr(string ip, int port, out SockAddr addr, out Exception error) { return Check(_uv_ip6_addr(ip, port, out addr), out error); } [UnmanagedFunctionPointer(CallingConvention.Cdecl)] public delegate void uv_walk_cb(IntPtr handle, IntPtr arg); protected Func<UvLoopHandle, uv_walk_cb, IntPtr, int> _uv_walk; unsafe public void walk(UvLoopHandle loop, uv_walk_cb walk_cb, IntPtr arg) { loop.Validate(); _uv_walk(loop, walk_cb, arg); } public delegate int uv_tcp_getsockname_func(UvTcpHandle handle, out SockAddr addr, ref int namelen); protected uv_tcp_getsockname_func _uv_tcp_getsockname; public void tcp_getsockname(UvTcpHandle handle, out SockAddr addr, ref int namelen) { handle.Validate(); Check(_uv_tcp_getsockname(handle, out addr, ref namelen)); } public delegate int uv_tcp_getpeername_func(UvTcpHandle handle, out SockAddr addr, ref int namelen); protected uv_tcp_getpeername_func _uv_tcp_getpeername; public void tcp_getpeername(UvTcpHandle handle, out SockAddr addr, ref int namelen) { handle.Validate(); Check(_uv_tcp_getpeername(handle, out addr, ref namelen)); } public uv_buf_t buf_init(IntPtr memory, int len) { return new uv_buf_t(memory, len, IsWindows); } public struct uv_buf_t { // this type represents a WSABUF struct on Windows // https://msdn.microsoft.com/en-us/library/windows/desktop/ms741542(v=vs.85).aspx // and an iovec struct on *nix // http://man7.org/linux/man-pages/man2/readv.2.html // because the order of the fields in these structs is different, the field // names in this type don't have meaningful symbolic names. 
instead, they are // assigned in the correct order by the constructor at runtime private readonly IntPtr _field0; private readonly IntPtr _field1; public uv_buf_t(IntPtr memory, int len, bool IsWindows) { if (IsWindows) { _field0 = (IntPtr)len; _field1 = memory; } else { _field0 = memory; _field1 = (IntPtr)len; } } } public enum HandleType { Unknown = 0, ASYNC, CHECK, FS_EVENT, FS_POLL, HANDLE, IDLE, NAMED_PIPE, POLL, PREPARE, PROCESS, STREAM, TCP, TIMER, TTY, UDP, SIGNAL, } public enum RequestType { Unknown = 0, REQ, CONNECT, WRITE, SHUTDOWN, UDP_SEND, FS, WORK, GETADDRINFO, GETNAMEINFO, } private static class NativeMethods { [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_init(UvLoopHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_close(IntPtr a0); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_run(UvLoopHandle handle, int mode); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_stop(UvLoopHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_ref(UvHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_unref(UvHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_close(IntPtr handle, uv_close_cb close_cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_async_init(UvLoopHandle loop, UvAsyncHandle handle, uv_async_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_async_send(UvAsyncHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_init(UvLoopHandle loop, UvTcpHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_bind(UvTcpHandle handle, ref SockAddr addr, int flags); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_open(UvTcpHandle handle, IntPtr hSocket); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_nodelay(UvTcpHandle handle, int enable); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_pipe_init(UvLoopHandle loop, UvPipeHandle handle, int ipc); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_pipe_bind(UvPipeHandle loop, string name); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_listen(UvStreamHandle handle, int backlog, uv_connection_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_accept(UvStreamHandle server, UvStreamHandle client); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)] public static extern void uv_pipe_connect(UvConnectRequest req, UvPipeHandle handle, string name, uv_connect_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_pipe_pending_count(UvPipeHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_read_start(UvStreamHandle handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] 
public static extern int uv_read_stop(UvStreamHandle handle); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_try_write(UvStreamHandle handle, uv_buf_t[] bufs, int nbufs); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_write(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_write2(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_shutdown(UvShutdownReq req, UvStreamHandle handle, uv_shutdown_cb cb); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public extern static IntPtr uv_err_name(int err); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern IntPtr uv_strerror(int err); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_size(); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_handle_size(HandleType handleType); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_req_size(RequestType reqType); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_ip4_addr(string ip, int port, out SockAddr addr); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_ip6_addr(string ip, int port, out SockAddr addr); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_getsockname(UvTcpHandle handle, out SockAddr name, ref int namelen); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_getpeername(UvTcpHandle handle, out SockAddr name, ref int namelen); [DllImport("libuv", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_walk(UvLoopHandle loop, uv_walk_cb walk_cb, IntPtr arg); } private static class NativeDarwinMonoMethods { [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_init(UvLoopHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_close(IntPtr a0); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_run(UvLoopHandle handle, int mode); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_stop(UvLoopHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_ref(UvHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_unref(UvHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern void uv_close(IntPtr handle, uv_close_cb close_cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_async_init(UvLoopHandle loop, UvAsyncHandle handle, uv_async_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_async_send(UvAsyncHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_init(UvLoopHandle loop, 
UvTcpHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_bind(UvTcpHandle handle, ref SockAddr addr, int flags); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_open(UvTcpHandle handle, IntPtr hSocket); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_nodelay(UvTcpHandle handle, int enable); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_pipe_init(UvLoopHandle loop, UvPipeHandle handle, int ipc); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_pipe_bind(UvPipeHandle loop, string name); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_listen(UvStreamHandle handle, int backlog, uv_connection_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_accept(UvStreamHandle server, UvStreamHandle client); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)] public static extern void uv_pipe_connect(UvConnectRequest req, UvPipeHandle handle, string name, uv_connect_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_pipe_pending_count(UvPipeHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public extern static int uv_read_start(UvStreamHandle handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_read_stop(UvStreamHandle handle); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_try_write(UvStreamHandle handle, uv_buf_t[] bufs, int nbufs); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_write(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_write2(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_shutdown(UvShutdownReq req, UvStreamHandle handle, uv_shutdown_cb cb); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public extern static IntPtr uv_err_name(int err); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern IntPtr uv_strerror(int err); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_loop_size(); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_handle_size(HandleType handleType); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_req_size(RequestType reqType); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_ip4_addr(string ip, int port, out SockAddr addr); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_ip6_addr(string ip, int port, out SockAddr addr); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_getsockname(UvTcpHandle handle, out 
SockAddr name, ref int namelen); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] public static extern int uv_tcp_getpeername(UvTcpHandle handle, out SockAddr name, ref int namelen); [DllImport("__Internal", CallingConvention = CallingConvention.Cdecl)] unsafe public static extern int uv_walk(UvLoopHandle loop, uv_walk_cb walk_cb, IntPtr arg); } } }
1
7,613
This check isn't right, though.
aspnet-KestrelHttpServer
.cs
@@ -96,6 +96,8 @@ public abstract class DynamicLangXApiView implements ViewModel { return missingDefaultServiceAddress() || missingDefaultServiceScopes(); } + public abstract String codeGenVersion(); + @Override public String resourceRoot() { return SnippetSetRunner.SNIPPET_RESOURCE_ROOT;
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.viewmodel; import com.google.api.codegen.SnippetSetRunner; import com.google.auto.value.AutoValue; import java.util.List; import javax.annotation.Nullable; @AutoValue public abstract class DynamicLangXApiView implements ViewModel { public abstract String templateFileName(); public abstract FileHeaderView fileHeader(); public abstract String protoFilename(); public abstract ServiceDocView doc(); public abstract String name(); public abstract String serviceAddress(); public abstract Integer servicePort(); public abstract String serviceTitle(); public abstract Iterable<String> authScopes(); public abstract List<PathTemplateView> pathTemplates(); public abstract List<FormatResourceFunctionView> formatResourceFunctions(); public abstract List<ParseResourceFunctionView> parseResourceFunctions(); public abstract List<PathTemplateGetterFunctionView> pathTemplateGetterFunctions(); public abstract List<PageStreamingDescriptorView> pageStreamingDescriptors(); @Nullable public abstract List<BundlingDescriptorView> bundlingDescriptors(); public abstract List<LongRunningOperationDetailView> longRunningDescriptors(); public abstract List<GrpcStreamingDetailView> grpcStreamingDescriptors(); public abstract List<String> methodKeys(); public abstract String clientConfigPath(); public abstract String interfaceKey(); public abstract String grpcClientTypeName(); public abstract List<GrpcStubView> stubs(); public abstract String outputPath(); public abstract List<ApiMethodView> apiMethods(); public abstract boolean hasPageStreamingMethods(); public abstract boolean hasBundlingMethods(); public abstract boolean hasLongRunningOperations(); public boolean hasGrpcStreamingMethods() { return grpcStreamingDescriptors().size() > 0; } public abstract boolean hasDefaultServiceAddress(); public abstract boolean hasDefaultServiceScopes(); public boolean missingDefaultServiceAddress() { return !hasDefaultServiceAddress(); } public boolean missingDefaultServiceScopes() { return !hasDefaultServiceScopes(); } public boolean hasMissingDefaultOptions() { return missingDefaultServiceAddress() || missingDefaultServiceScopes(); } @Override public String resourceRoot() { return SnippetSetRunner.SNIPPET_RESOURCE_ROOT; } public static Builder newBuilder() { return new AutoValue_DynamicLangXApiView.Builder(); } @AutoValue.Builder public abstract static class Builder { public abstract Builder templateFileName(String val); public abstract Builder fileHeader(FileHeaderView val); public abstract Builder protoFilename(String simpleName); public abstract Builder doc(ServiceDocView doc); public abstract Builder name(String val); public abstract Builder serviceAddress(String val); public abstract Builder servicePort(Integer val); public abstract Builder serviceTitle(String val); public abstract Builder authScopes(Iterable<String> val); public abstract Builder pathTemplates(List<PathTemplateView> val); public abstract Builder 
formatResourceFunctions(List<FormatResourceFunctionView> val); public abstract Builder parseResourceFunctions(List<ParseResourceFunctionView> val); public abstract Builder pathTemplateGetterFunctions(List<PathTemplateGetterFunctionView> val); public abstract Builder pageStreamingDescriptors(List<PageStreamingDescriptorView> val); public abstract Builder bundlingDescriptors(List<BundlingDescriptorView> val); public abstract Builder longRunningDescriptors(List<LongRunningOperationDetailView> val); public abstract Builder grpcStreamingDescriptors(List<GrpcStreamingDetailView> val); public abstract Builder methodKeys(List<String> val); public abstract Builder clientConfigPath(String val); public abstract Builder interfaceKey(String val); public abstract Builder grpcClientTypeName(String val); public abstract Builder stubs(List<GrpcStubView> val); public abstract Builder outputPath(String val); public abstract Builder apiMethods(List<ApiMethodView> val); public abstract Builder hasPageStreamingMethods(boolean val); public abstract Builder hasBundlingMethods(boolean val); public abstract Builder hasLongRunningOperations(boolean val); public abstract Builder hasDefaultServiceAddress(boolean val); public abstract Builder hasDefaultServiceScopes(boolean val); public abstract DynamicLangXApiView build(); } }
1
21,324
Maybe `toolkitVersion` instead? It doesn't necessarily have to be called that in the generated code, but in the view model classes I think it makes it clearer that it is the version of toolkit itself.
googleapis-gapic-generator
java
@@ -230,15 +230,13 @@ module.exports = class XHRUpload extends Plugin { const body = opts.getResponseData(xhr.responseText, xhr) const uploadURL = body[opts.responseUrlFieldName] - const response = { + const uploadResp = { status: ev.target.status, body, uploadURL } - this.uppy.setFileState(file.id, { response }) - - this.uppy.emit('upload-success', file, body, uploadURL) + this.uppy.emit('upload-success', file, uploadResp) if (uploadURL) { this.uppy.log(`Download ${file.name} from ${file.uploadURL}`)
1
const { Plugin } = require('@uppy/core') const cuid = require('cuid') const Translator = require('@uppy/utils/lib/Translator') const { Provider, Socket } = require('@uppy/companion-client') const emitSocketProgress = require('@uppy/utils/lib/emitSocketProgress') const getSocketHost = require('@uppy/utils/lib/getSocketHost') const settle = require('@uppy/utils/lib/settle') const limitPromises = require('@uppy/utils/lib/limitPromises') function buildResponseError (xhr, error) { // No error message if (!error) error = new Error('Upload error') // Got an error message string if (typeof error === 'string') error = new Error(error) // Got something else if (!(error instanceof Error)) { error = Object.assign(new Error('Upload error'), { data: error }) } error.request = xhr return error } module.exports = class XHRUpload extends Plugin { constructor (uppy, opts) { super(uppy, opts) this.type = 'uploader' this.id = 'XHRUpload' this.title = 'XHRUpload' const defaultLocale = { strings: { timedOut: 'Upload stalled for %{seconds} seconds, aborting.' } } // Default options const defaultOptions = { formData: true, fieldName: 'files[]', method: 'post', metaFields: null, responseUrlFieldName: 'url', bundle: false, headers: {}, locale: defaultLocale, timeout: 30 * 1000, limit: 0, withCredentials: false, /** * @typedef respObj * @property {string} responseText * @property {number} status * @property {string} statusText * @property {Object.<string, string>} headers * * @param {string} responseText the response body string * @param {XMLHttpRequest | respObj} response the response object (XHR or similar) */ getResponseData (responseText, response) { let parsedResponse = {} try { parsedResponse = JSON.parse(responseText) } catch (err) { console.log(err) } return parsedResponse }, /** * * @param {string} responseText the response body string * @param {XMLHttpRequest | respObj} response the response object (XHR or similar) */ getResponseError (responseText, response) { return new Error('Upload error') } } // Merge default options with the ones set by user this.opts = Object.assign({}, defaultOptions, opts) // i18n this.translator = new Translator([ defaultLocale, this.uppy.locale, this.opts.locale ]) this.i18n = this.translator.translate.bind(this.translator) this.i18nArray = this.translator.translateArray.bind(this.translator) this.handleUpload = this.handleUpload.bind(this) // Simultaneous upload limiting is shared across all uploads with this plugin. if (typeof this.opts.limit === 'number' && this.opts.limit !== 0) { this.limitUploads = limitPromises(this.opts.limit) } else { this.limitUploads = (fn) => fn } if (this.opts.bundle && !this.opts.formData) { throw new Error('`opts.formData` must be true when `opts.bundle` is enabled.') } } getOptions (file) { const overrides = this.uppy.getState().xhrUpload const opts = Object.assign({}, this.opts, overrides || {}, file.xhrUpload || {} ) opts.headers = {} Object.assign(opts.headers, this.opts.headers) if (overrides) { Object.assign(opts.headers, overrides.headers) } if (file.xhrUpload) { Object.assign(opts.headers, file.xhrUpload.headers) } return opts } // Helper to abort upload requests if there has not been any progress for `timeout` ms. // Create an instance using `timer = createProgressTimeout(10000, onTimeout)` // Call `timer.progress()` to signal that there has been progress of any kind. // Call `timer.done()` when the upload has completed. 
createProgressTimeout (timeout, timeoutHandler) { const uppy = this.uppy const self = this let isDone = false function onTimedOut () { uppy.log(`[XHRUpload] timed out`) const error = new Error(self.i18n('timedOut', { seconds: Math.ceil(timeout / 1000) })) timeoutHandler(error) } let aliveTimer = null function progress () { // Some browsers fire another progress event when the upload is // cancelled, so we have to ignore progress after the timer was // told to stop. if (isDone) return if (timeout > 0) { if (aliveTimer) clearTimeout(aliveTimer) aliveTimer = setTimeout(onTimedOut, timeout) } } function done () { uppy.log(`[XHRUpload] timer done`) if (aliveTimer) { clearTimeout(aliveTimer) aliveTimer = null } isDone = true } return { progress, done } } createFormDataUpload (file, opts) { const formPost = new FormData() const metaFields = Array.isArray(opts.metaFields) ? opts.metaFields // Send along all fields by default. : Object.keys(file.meta) metaFields.forEach((item) => { formPost.append(item, file.meta[item]) }) if (file.name) { formPost.append(opts.fieldName, file.data, file.name) } else { formPost.append(opts.fieldName, file.data) } return formPost } createBareUpload (file, opts) { return file.data } upload (file, current, total) { const opts = this.getOptions(file) this.uppy.log(`uploading ${current} of ${total}`) return new Promise((resolve, reject) => { const data = opts.formData ? this.createFormDataUpload(file, opts) : this.createBareUpload(file, opts) const timer = this.createProgressTimeout(opts.timeout, (error) => { xhr.abort() this.uppy.emit('upload-error', file, error) reject(error) }) const xhr = new XMLHttpRequest() const id = cuid() xhr.upload.addEventListener('loadstart', (ev) => { this.uppy.log(`[XHRUpload] ${id} started`) // Begin checking for timeouts when loading starts. 
timer.progress() }) xhr.upload.addEventListener('progress', (ev) => { this.uppy.log(`[XHRUpload] ${id} progress: ${ev.loaded} / ${ev.total}`) timer.progress() if (ev.lengthComputable) { this.uppy.emit('upload-progress', file, { uploader: this, bytesUploaded: ev.loaded, bytesTotal: ev.total }) } }) xhr.addEventListener('load', (ev) => { this.uppy.log(`[XHRUpload] ${id} finished`) timer.done() if (ev.target.status >= 200 && ev.target.status < 300) { const body = opts.getResponseData(xhr.responseText, xhr) const uploadURL = body[opts.responseUrlFieldName] const response = { status: ev.target.status, body, uploadURL } this.uppy.setFileState(file.id, { response }) this.uppy.emit('upload-success', file, body, uploadURL) if (uploadURL) { this.uppy.log(`Download ${file.name} from ${file.uploadURL}`) } return resolve(file) } else { const body = opts.getResponseData(xhr.responseText, xhr) const error = buildResponseError(xhr, opts.getResponseError(xhr.responseText, xhr)) const response = { status: ev.target.status, body } this.uppy.setFileState(file.id, { response }) this.uppy.emit('upload-error', file, error) return reject(error) } }) xhr.addEventListener('error', (ev) => { this.uppy.log(`[XHRUpload] ${id} errored`) timer.done() const error = buildResponseError(xhr, opts.getResponseError(xhr.responseText, xhr)) this.uppy.emit('upload-error', file, error) return reject(error) }) xhr.open(opts.method.toUpperCase(), opts.endpoint, true) xhr.withCredentials = opts.withCredentials Object.keys(opts.headers).forEach((header) => { xhr.setRequestHeader(header, opts.headers[header]) }) xhr.send(data) this.uppy.on('file-removed', (removedFile) => { if (removedFile.id === file.id) { timer.done() xhr.abort() } }) this.uppy.on('cancel-all', () => { timer.done() xhr.abort() }) }) } uploadRemote (file, current, total) { const opts = this.getOptions(file) return new Promise((resolve, reject) => { const fields = {} const metaFields = Array.isArray(opts.metaFields) ? opts.metaFields // Send along all fields by default. : Object.keys(file.meta) metaFields.forEach((name) => { fields[name] = file.meta[name] }) const provider = new Provider(this.uppy, file.remote.providerOptions) provider.post( file.remote.url, Object.assign({}, file.remote.body, { endpoint: opts.endpoint, size: file.data.size, fieldname: opts.fieldName, metadata: fields, headers: opts.headers }) ) .then((res) => { const token = res.token const host = getSocketHost(file.remote.serverUrl) const socket = new Socket({ target: `${host}/api/${token}` }) socket.on('progress', (progressData) => emitSocketProgress(this, progressData, file)) socket.on('success', (data) => { const resp = opts.getResponseData(data.response.responseText, data.response) const uploadURL = resp[opts.responseUrlFieldName] this.uppy.emit('upload-success', file, resp, uploadURL) socket.close() return resolve() }) socket.on('error', (errData) => { const resp = errData.response const error = resp ? 
opts.getResponseError(resp.responseText, resp) : Object.assign(new Error(errData.error.message), { cause: errData.error }) this.uppy.emit('upload-error', file, error) reject(error) }) }) }) } uploadBundle (files) { return new Promise((resolve, reject) => { const endpoint = this.opts.endpoint const method = this.opts.method const formData = new FormData() files.forEach((file, i) => { const opts = this.getOptions(file) formData.append(opts.fieldName, file.data) }) const xhr = new XMLHttpRequest() xhr.withCredentials = this.opts.withCredentials const timer = this.createProgressTimeout(this.opts.timeout, (error) => { xhr.abort() emitError(error) reject(error) }) const emitError = (error) => { files.forEach((file) => { this.uppy.emit('upload-error', file, error) }) } xhr.upload.addEventListener('loadstart', (ev) => { this.uppy.log('[XHRUpload] started uploading bundle') timer.progress() }) xhr.upload.addEventListener('progress', (ev) => { timer.progress() if (!ev.lengthComputable) return files.forEach((file) => { this.uppy.emit('upload-progress', file, { uploader: this, bytesUploaded: ev.loaded / ev.total * file.size, bytesTotal: file.size }) }) }) xhr.addEventListener('load', (ev) => { timer.done() if (ev.target.status >= 200 && ev.target.status < 300) { const resp = this.opts.getResponseData(xhr.responseText, xhr) files.forEach((file) => { this.uppy.emit('upload-success', file, resp) }) return resolve() } const error = this.opts.getResponseError(xhr.responseText, xhr) || new Error('Upload error') error.request = xhr emitError(error) return reject(error) }) xhr.addEventListener('error', (ev) => { timer.done() const error = this.opts.getResponseError(xhr.responseText, xhr) || new Error('Upload error') emitError(error) return reject(error) }) this.uppy.on('cancel-all', () => { timer.done() xhr.abort() }) xhr.open(method.toUpperCase(), endpoint, true) xhr.withCredentials = this.opts.withCredentials Object.keys(this.opts.headers).forEach((header) => { xhr.setRequestHeader(header, this.opts.headers[header]) }) xhr.send(formData) files.forEach((file) => { this.uppy.emit('upload-started', file) }) }) } uploadFiles (files) { const actions = files.map((file, i) => { const current = parseInt(i, 10) + 1 const total = files.length if (file.error) { return () => Promise.reject(new Error(file.error)) } else if (file.isRemote) { // We emit upload-started here, so that it's also emitted for files // that have to wait due to the `limit` option. this.uppy.emit('upload-started', file) return this.uploadRemote.bind(this, file, current, total) } else { this.uppy.emit('upload-started', file) return this.upload.bind(this, file, current, total) } }) const promises = actions.map((action) => { const limitedAction = this.limitUploads(action) return limitedAction() }) return settle(promises) } handleUpload (fileIDs) { if (fileIDs.length === 0) { this.uppy.log('[XHRUpload] No files to upload!') return Promise.resolve() } this.uppy.log('[XHRUpload] Uploading...') const files = fileIDs.map((fileID) => this.uppy.getFile(fileID)) if (this.opts.bundle) { return this.uploadBundle(files) } return this.uploadFiles(files).then(() => null) } install () { if (this.opts.bundle) { this.uppy.setState({ capabilities: Object.assign({}, this.uppy.getState().capabilities, { bundled: true }) }) } this.uppy.addUploader(this.handleUpload) } uninstall () { if (this.opts.bundle) { this.uppy.setState({ capabilities: Object.assign({}, this.uppy.getState().capabilities, { bundled: true }) }) } this.uppy.removeUploader(this.handleUpload) } }
1
11,307
The response data was added intentionally in #612; I think we could keep the `setFileState` stuff here as a special case, at least for now.
transloadit-uppy
js
@@ -134,7 +134,7 @@ func (s *stream) Read(p []byte) (int, error) { } else { select { case <-s.readChan: - case <-time.After(deadline.Sub(time.Now())): + case <-time.After(time.Until(deadline)): } } s.mutex.Lock()
1
package quic import ( "context" "fmt" "io" "net" "sync" "time" "github.com/lucas-clemente/quic-go/flowcontrol" "github.com/lucas-clemente/quic-go/frames" "github.com/lucas-clemente/quic-go/internal/utils" "github.com/lucas-clemente/quic-go/protocol" ) // A Stream assembles the data from StreamFrames and provides a super-convenient Read-Interface // // Read() and Write() may be called concurrently, but multiple calls to Read() or Write() individually must be synchronized manually. type stream struct { mutex sync.Mutex ctx context.Context ctxCancel context.CancelFunc streamID protocol.StreamID onData func() // onReset is a callback that should send a RST_STREAM onReset func(protocol.StreamID, protocol.ByteCount) readPosInFrame int writeOffset protocol.ByteCount readOffset protocol.ByteCount // Once set, the errors must not be changed! err error // cancelled is set when Cancel() is called cancelled utils.AtomicBool // finishedReading is set once we read a frame with a FinBit finishedReading utils.AtomicBool // finisedWriting is set once Close() is called finishedWriting utils.AtomicBool // resetLocally is set if Reset() is called resetLocally utils.AtomicBool // resetRemotely is set if RegisterRemoteError() is called resetRemotely utils.AtomicBool frameQueue *streamFrameSorter readChan chan struct{} readDeadline time.Time dataForWriting []byte finSent utils.AtomicBool rstSent utils.AtomicBool writeChan chan struct{} writeDeadline time.Time flowControlManager flowcontrol.FlowControlManager } var _ Stream = &stream{} type deadlineError struct{} func (deadlineError) Error() string { return "deadline exceeded" } func (deadlineError) Temporary() bool { return true } func (deadlineError) Timeout() bool { return true } var errDeadline net.Error = &deadlineError{} // newStream creates a new Stream func newStream(StreamID protocol.StreamID, onData func(), onReset func(protocol.StreamID, protocol.ByteCount), flowControlManager flowcontrol.FlowControlManager) *stream { s := &stream{ onData: onData, onReset: onReset, streamID: StreamID, flowControlManager: flowControlManager, frameQueue: newStreamFrameSorter(), readChan: make(chan struct{}, 1), writeChan: make(chan struct{}, 1), } s.ctx, s.ctxCancel = context.WithCancel(context.Background()) return s } // Read implements io.Reader. It is not thread safe! 
func (s *stream) Read(p []byte) (int, error) { s.mutex.Lock() err := s.err s.mutex.Unlock() if s.cancelled.Get() || s.resetLocally.Get() { return 0, err } if s.finishedReading.Get() { return 0, io.EOF } bytesRead := 0 for bytesRead < len(p) { s.mutex.Lock() frame := s.frameQueue.Head() if frame == nil && bytesRead > 0 { err = s.err s.mutex.Unlock() return bytesRead, err } var err error for { // Stop waiting on errors if s.resetLocally.Get() || s.cancelled.Get() { err = s.err break } deadline := s.readDeadline if !deadline.IsZero() && !time.Now().Before(deadline) { err = errDeadline break } if frame != nil { s.readPosInFrame = int(s.readOffset - frame.Offset) break } s.mutex.Unlock() if deadline.IsZero() { <-s.readChan } else { select { case <-s.readChan: case <-time.After(deadline.Sub(time.Now())): } } s.mutex.Lock() frame = s.frameQueue.Head() } s.mutex.Unlock() if err != nil { return bytesRead, err } m := utils.Min(len(p)-bytesRead, int(frame.DataLen())-s.readPosInFrame) if bytesRead > len(p) { return bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) } if s.readPosInFrame > int(frame.DataLen()) { return bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, frame.DataLen()) } copy(p[bytesRead:], frame.Data[s.readPosInFrame:]) s.readPosInFrame += m bytesRead += m s.readOffset += protocol.ByteCount(m) // when a RST_STREAM was received, the was already informed about the final byteOffset for this stream if !s.resetRemotely.Get() { s.flowControlManager.AddBytesRead(s.streamID, protocol.ByteCount(m)) } s.onData() // so that a possible WINDOW_UPDATE is sent if s.readPosInFrame >= int(frame.DataLen()) { fin := frame.FinBit s.mutex.Lock() s.frameQueue.Pop() s.mutex.Unlock() if fin { s.finishedReading.Set(true) return bytesRead, io.EOF } } } return bytesRead, nil } func (s *stream) Write(p []byte) (int, error) { s.mutex.Lock() defer s.mutex.Unlock() if s.resetLocally.Get() || s.err != nil { return 0, s.err } if s.finishedWriting.Get() { return 0, fmt.Errorf("write on closed stream %d", s.streamID) } if len(p) == 0 { return 0, nil } s.dataForWriting = make([]byte, len(p)) copy(s.dataForWriting, p) s.onData() var err error for { deadline := s.writeDeadline if !deadline.IsZero() && !time.Now().Before(deadline) { err = errDeadline break } if s.dataForWriting == nil || s.err != nil { break } s.mutex.Unlock() if deadline.IsZero() { <-s.writeChan } else { select { case <-s.writeChan: case <-time.After(deadline.Sub(time.Now())): } } s.mutex.Lock() } if err != nil { return 0, err } if s.err != nil { return len(p) - len(s.dataForWriting), s.err } return len(p), nil } func (s *stream) lenOfDataForWriting() protocol.ByteCount { s.mutex.Lock() var l protocol.ByteCount if s.err == nil { l = protocol.ByteCount(len(s.dataForWriting)) } s.mutex.Unlock() return l } func (s *stream) getDataForWriting(maxBytes protocol.ByteCount) []byte { s.mutex.Lock() defer s.mutex.Unlock() if s.err != nil || s.dataForWriting == nil { return nil } var ret []byte if protocol.ByteCount(len(s.dataForWriting)) > maxBytes { ret = s.dataForWriting[:maxBytes] s.dataForWriting = s.dataForWriting[maxBytes:] } else { ret = s.dataForWriting s.dataForWriting = nil s.signalWrite() } s.writeOffset += protocol.ByteCount(len(ret)) return ret } // Close implements io.Closer func (s *stream) Close() error { s.finishedWriting.Set(true) s.ctxCancel() s.onData() return nil } func (s *stream) shouldSendReset() bool { if s.rstSent.Get() { return false } return 
(s.resetLocally.Get() || s.resetRemotely.Get()) && !s.finishedWriteAndSentFin() } func (s *stream) shouldSendFin() bool { s.mutex.Lock() res := s.finishedWriting.Get() && !s.finSent.Get() && s.err == nil && s.dataForWriting == nil s.mutex.Unlock() return res } func (s *stream) sentFin() { s.finSent.Set(true) } // AddStreamFrame adds a new stream frame func (s *stream) AddStreamFrame(frame *frames.StreamFrame) error { maxOffset := frame.Offset + frame.DataLen() err := s.flowControlManager.UpdateHighestReceived(s.streamID, maxOffset) if err != nil { return err } s.mutex.Lock() defer s.mutex.Unlock() err = s.frameQueue.Push(frame) if err != nil && err != errDuplicateStreamData { return err } s.signalRead() return nil } // signalRead performs a non-blocking send on the readChan func (s *stream) signalRead() { select { case s.readChan <- struct{}{}: default: } } // signalRead performs a non-blocking send on the writeChan func (s *stream) signalWrite() { select { case s.writeChan <- struct{}{}: default: } } func (s *stream) SetReadDeadline(t time.Time) error { s.mutex.Lock() oldDeadline := s.readDeadline s.readDeadline = t s.mutex.Unlock() // if the new deadline is before the currently set deadline, wake up Read() if t.Before(oldDeadline) { s.signalRead() } return nil } func (s *stream) SetWriteDeadline(t time.Time) error { s.mutex.Lock() oldDeadline := s.writeDeadline s.writeDeadline = t s.mutex.Unlock() if t.Before(oldDeadline) { s.signalWrite() } return nil } func (s *stream) SetDeadline(t time.Time) error { _ = s.SetReadDeadline(t) // SetReadDeadline never errors _ = s.SetWriteDeadline(t) // SetWriteDeadline never errors return nil } // CloseRemote makes the stream receive a "virtual" FIN stream frame at a given offset func (s *stream) CloseRemote(offset protocol.ByteCount) { s.AddStreamFrame(&frames.StreamFrame{FinBit: true, Offset: offset}) } // Cancel is called by session to indicate that an error occurred // The stream should will be closed immediately func (s *stream) Cancel(err error) { s.mutex.Lock() s.cancelled.Set(true) s.ctxCancel() // errors must not be changed! if s.err == nil { s.err = err s.signalRead() s.signalWrite() } s.mutex.Unlock() } // resets the stream locally func (s *stream) Reset(err error) { if s.resetLocally.Get() { return } s.mutex.Lock() s.resetLocally.Set(true) s.ctxCancel() // errors must not be changed! if s.err == nil { s.err = err s.signalRead() s.signalWrite() } if s.shouldSendReset() { s.onReset(s.streamID, s.writeOffset) s.rstSent.Set(true) } s.mutex.Unlock() } // resets the stream remotely func (s *stream) RegisterRemoteError(err error) { if s.resetRemotely.Get() { return } s.mutex.Lock() s.resetRemotely.Set(true) s.ctxCancel() // errors must not be changed! if s.err == nil { s.err = err s.signalWrite() } if s.shouldSendReset() { s.onReset(s.streamID, s.writeOffset) s.rstSent.Set(true) } s.mutex.Unlock() } func (s *stream) finishedWriteAndSentFin() bool { return s.finishedWriting.Get() && s.finSent.Get() } func (s *stream) finished() bool { return s.cancelled.Get() || (s.finishedReading.Get() && s.finishedWriteAndSentFin()) || (s.resetRemotely.Get() && s.rstSent.Get()) || (s.finishedReading.Get() && s.rstSent.Get()) || (s.finishedWriteAndSentFin() && s.resetRemotely.Get()) } func (s *stream) Context() context.Context { return s.ctx } func (s *stream) StreamID() protocol.StreamID { return s.streamID }
1
6,646
This isn't really easy to read.
lucas-clemente-quic-go
go
@@ -123,7 +123,7 @@ func (c *CStorVolumeReplicaController) cVREventHandler(operation common.QueueOpe err := volumereplica.DeleteVolume(fullVolName) if err != nil { - glog.Errorf("Error in deleting volume %q: %s", cVR.ObjectMeta.Name,err) + glog.Errorf("Error in deleting volume %q: %s", cVR.ObjectMeta.Name, err) c.recorder.Event(cVR, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy)) return string(apis.CVRStatusDeletionFailed), err }
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package replicacontroller import ( "encoding/json" "fmt" "os" "reflect" "github.com/golang/glog" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" "github.com/openebs/maya/cmd/cstor-pool-mgmt/pool" "github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" ) // CVRPatch struct represent the struct used to patch // the cvr object type CVRPatch struct { // Op defines the operation Op string `json:"op"` // Path defines the key path // eg. for // { // "Name": "openebs" // Category: { // "Inclusive": "v1", // "Rank": "A" // } // } // The path of 'Inclusive' would be // "/Name/Category/Inclusive" Path string `json:"path"` Value string `json:"value"` } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the CStorReplicaUpdated resource // with the current status of the resource. func (c *CStorVolumeReplicaController) syncHandler(key string, operation common.QueueOperation) error { cVRGot, err := c.getVolumeReplicaResource(key) if err != nil { return err } if cVRGot == nil { return fmt.Errorf("cannot retrieve cStorVolumeReplica %q", key) } status, err := c.cVREventHandler(operation, cVRGot) if status == "" { return nil } cVRGot.Status.Phase = apis.CStorVolumeReplicaPhase(status) if err != nil { glog.Errorf(err.Error()) glog.Infof("cVR:%v, %v; Status: %v", cVRGot.Name, string(cVRGot.GetUID()), cVRGot.Status.Phase) _, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cVRGot.Namespace).Update(cVRGot) if err != nil { return err } return err } _, err = c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cVRGot.Namespace).Update(cVRGot) if err != nil { return err } glog.Infof("cVR:%v, %v; Status: %v", cVRGot.Name, string(cVRGot.GetUID()), cVRGot.Status.Phase) return nil } func (c *CStorVolumeReplicaController) cVREventHandler(operation common.QueueOperation, cVR *apis.CStorVolumeReplica) (string, error) { err := volumereplica.CheckValidVolumeReplica(cVR) if err != nil { c.recorder.Event(cVR, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate)) return string(apis.CVRStatusOffline), err } // PoolNameHandler tries to get pool name and blocks for // particular number of attempts. var noOfAttempts = 2 if !common.PoolNameHandler(cVR, noOfAttempts) { return string(apis.CVRStatusOffline), fmt.Errorf("Pool not present") } // cStorVolumeReplica is created with command which requires fullVolName which is in // the form of poolname/volname. 
fullVolName := string(pool.PoolPrefix) + cVR.Labels["cstorpool.openebs.io/uid"] + "/" + cVR.Labels["cstorvolume.openebs.io/name"] glog.Infof("fullVolName: %v", fullVolName) switch operation { case common.QOpAdd: glog.Infof("Processing cvr added event: %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID())) status, err := c.cVRAddEventHandler(cVR, fullVolName) return status, err case common.QOpDestroy: glog.Infof("Processing cvr deleted event %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID())) err := volumereplica.DeleteVolume(fullVolName) if err != nil { glog.Errorf("Error in deleting volume %q: %s", cVR.ObjectMeta.Name,err) c.recorder.Event(cVR, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy)) return string(apis.CVRStatusDeletionFailed), err } // removeFinalizer is to remove finalizer of cVR resource. err = c.removeFinalizer(cVR) if err != nil { return string(apis.CVRStatusOffline), err } return "", nil } return string(apis.CVRStatusInvalid), nil } func (c *CStorVolumeReplicaController) cVRAddEventHandler(cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { // lock is to synchronize pool and volumereplica. Until certain pool related // operations are over, the volumereplica threads will be held. common.SyncResources.Mux.Lock() if common.SyncResources.IsImported { common.SyncResources.Mux.Unlock() // To check if volume is already imported with pool. importedFlag := common.CheckForInitialImportedPoolVol(common.InitialImportedPoolVol, fullVolName) if importedFlag && !IsEmptyStatus(cVR) { glog.Infof("CStorVolumeReplica %v is already imported", string(cVR.ObjectMeta.UID)) c.recorder.Event(cVR, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported)) return string(apis.CVRStatusOnline), nil } } else { common.SyncResources.Mux.Unlock() } // If volumereplica is already present. existingvol, _ := volumereplica.GetVolumes() if common.CheckIfPresent(existingvol, fullVolName) { glog.Warningf("CStorVolumeReplica %v is already present", string(cVR.GetUID())) c.recorder.Event(cVR, corev1.EventTypeWarning, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent)) return string(apis.CVRStatusErrorDuplicate), nil } // IsEmptyStatus is to check if initial status of cVR object is empty. 
if IsEmptyStatus(cVR) || IsPendingStatus(cVR) { err := volumereplica.CreateVolume(cVR, fullVolName) if err != nil { glog.Errorf("cVR creation failure: %v", err.Error()) return string(apis.CVRStatusOffline), err } c.recorder.Event(cVR, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated)) glog.Infof("cVR creation successful: %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID())) return string(apis.CVRStatusOnline), nil } return string(apis.CVRStatusOffline), fmt.Errorf("VolumeReplica offline: %v, %v", cVR.Name, cVR.Labels["cstorvolume.openebs.io/name"]) } // getVolumeReplicaResource returns object corresponding to the resource key func (c *CStorVolumeReplicaController) getVolumeReplicaResource(key string) (*apis.CStorVolumeReplica, error) { // Convert the key(namespace/name) string into a distinct name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil, nil } cStorVolumeReplicaUpdated, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(namespace).Get(name, metav1.GetOptions{}) if err != nil { // The cStorPool resource may no longer exist, in which case we stop // processing. if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("cStorVolumeReplicaUpdated '%s' in work queue no longer exists", key)) return nil, nil } return nil, err } return cStorVolumeReplicaUpdated, nil } // removeFinalizer is to remove finalizer of CStorVolumeReplica resource. func (c *CStorVolumeReplicaController) removeFinalizer(cVR *apis.CStorVolumeReplica) error { glog.Errorf("Removing finalizers for %s", cVR.Name) // The Patch method requires an array of elements // therefore creating array of one element cvrPatch := make([]CVRPatch, 1) // setting operation as remove cvrPatch[0].Op = "remove" // object to be removed is finalizers cvrPatch[0].Path = "/metadata/finalizers" cvrPatchJSON, err := json.Marshal(cvrPatch) if err != nil { glog.Errorf("Error marshaling cvrPatch object: %s", err) return err } _, err = c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cVR.Namespace).Patch(cVR.Name, types.JSONPatchType, cvrPatchJSON) if err != nil { glog.Errorf("Finalizer patch failed for %s: %s", cVR.Name, err) return err } glog.Infof("Removed Finalizer: %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID())) return nil } // IsRightCStorVolumeReplica is to check if the cvr request is for particular pod/application. func IsRightCStorVolumeReplica(cVR *apis.CStorVolumeReplica) bool { if os.Getenv(string(common.OpenEBSIOCStorID)) == string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"]) { return true } return false } // IsDestroyEvent is to check if the call is for CStorVolumeReplica destroy. func IsDestroyEvent(cVR *apis.CStorVolumeReplica) bool { if cVR.ObjectMeta.DeletionTimestamp != nil { return true } return false } // IsOnlyStatusChange is to check only status change of cStorVolumeReplica object. func IsOnlyStatusChange(oldCVR, newCVR *apis.CStorVolumeReplica) bool { if reflect.DeepEqual(oldCVR.Spec, newCVR.Spec) && !reflect.DeepEqual(oldCVR.Status, newCVR.Status) { return true } return false } // IsDeletionFailedBefore is to make sure no other operation should happen if the // status of CStorVolumeReplica is deletion-failed. func IsDeletionFailedBefore(cVR *apis.CStorVolumeReplica) bool { if cVR.Status.Phase == apis.CVRStatusDeletionFailed { return true } return false } // IsEmptyStatus is to check if the status of cStorVolumeReplica object is empty. 
func IsEmptyStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusEmpty) { glog.Infof("cVR empty status: %v", string(cVR.ObjectMeta.UID)) return true } glog.Infof("Not empty status: %v", string(cVR.ObjectMeta.UID)) return false } // IsPendingStatus is to check if the status of cStorVolumeReplica object is pending. func IsPendingStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusPending) { glog.Infof("cVR pending: %v", string(cVR.ObjectMeta.UID)) return true } glog.V(4).Infof("Not pending status: %v", string(cVR.ObjectMeta.UID)) return false } // IsErrorDuplicate is to check if the status of cStorVolumeReplica object is error-duplicate. func IsErrorDuplicate(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusErrorDuplicate) { glog.Infof("cVR duplication error: %v", string(cVR.ObjectMeta.UID)) return true } glog.V(4).Infof("Not error duplicate status: %v", string(cVR.ObjectMeta.UID)) return false }
1
9,287
This line changed due to Go formatting (gofmt); the file hadn't been formatted that way earlier.
openebs-maya
go
@@ -28,13 +28,13 @@ type staticUpstream struct { Path string Interval time.Duration } + Without string } // NewStaticUpstreams parses the configuration input and sets up // static upstreams for the proxy middleware. func NewStaticUpstreams(c parse.Dispenser) ([]Upstream, error) { var upstreams []Upstream - for c.Next() { upstream := &staticUpstream{ from: "",
1
package proxy import ( "io" "io/ioutil" "net/http" "net/url" "strconv" "strings" "time" "github.com/mholt/caddy/config/parse" ) var ( supportedPolicies map[string]func() Policy = make(map[string]func() Policy) proxyHeaders http.Header = make(http.Header) ) type staticUpstream struct { from string Hosts HostPool Policy Policy FailTimeout time.Duration MaxFails int32 HealthCheck struct { Path string Interval time.Duration } } // NewStaticUpstreams parses the configuration input and sets up // static upstreams for the proxy middleware. func NewStaticUpstreams(c parse.Dispenser) ([]Upstream, error) { var upstreams []Upstream for c.Next() { upstream := &staticUpstream{ from: "", Hosts: nil, Policy: &Random{}, FailTimeout: 10 * time.Second, MaxFails: 1, } if !c.Args(&upstream.from) { return upstreams, c.ArgErr() } to := c.RemainingArgs() if len(to) == 0 { return upstreams, c.ArgErr() } for c.NextBlock() { switch c.Val() { case "policy": if !c.NextArg() { return upstreams, c.ArgErr() } if policyCreateFunc, ok := supportedPolicies[c.Val()]; ok { upstream.Policy = policyCreateFunc() } else { return upstreams, c.ArgErr() } case "fail_timeout": if !c.NextArg() { return upstreams, c.ArgErr() } if dur, err := time.ParseDuration(c.Val()); err == nil { upstream.FailTimeout = dur } else { return upstreams, err } case "max_fails": if !c.NextArg() { return upstreams, c.ArgErr() } if n, err := strconv.Atoi(c.Val()); err == nil { upstream.MaxFails = int32(n) } else { return upstreams, err } case "health_check": if !c.NextArg() { return upstreams, c.ArgErr() } upstream.HealthCheck.Path = c.Val() upstream.HealthCheck.Interval = 30 * time.Second if c.NextArg() { if dur, err := time.ParseDuration(c.Val()); err == nil { upstream.HealthCheck.Interval = dur } else { return upstreams, err } } case "proxy_header": var header, value string if !c.Args(&header, &value) { return upstreams, c.ArgErr() } proxyHeaders.Add(header, value) case "websocket": proxyHeaders.Add("Connection", "{>Connection}") proxyHeaders.Add("Upgrade", "{>Upgrade}") } } upstream.Hosts = make([]*UpstreamHost, len(to)) for i, host := range to { if !strings.HasPrefix(host, "http") { host = "http://" + host } uh := &UpstreamHost{ Name: host, Conns: 0, Fails: 0, FailTimeout: upstream.FailTimeout, Unhealthy: false, ExtraHeaders: proxyHeaders, CheckDown: func(upstream *staticUpstream) UpstreamHostDownFunc { return func(uh *UpstreamHost) bool { if uh.Unhealthy { return true } if uh.Fails >= upstream.MaxFails && upstream.MaxFails != 0 { return true } return false } }(upstream), } if baseURL, err := url.Parse(uh.Name); err == nil { uh.ReverseProxy = NewSingleHostReverseProxy(baseURL) } else { return upstreams, err } upstream.Hosts[i] = uh } if upstream.HealthCheck.Path != "" { go upstream.HealthCheckWorker(nil) } upstreams = append(upstreams, upstream) } return upstreams, nil } // RegisterPolicy adds a custom policy to the proxy. 
func RegisterPolicy(name string, policy func() Policy) { supportedPolicies[name] = policy } func (u *staticUpstream) From() string { return u.from } func (u *staticUpstream) healthCheck() { for _, host := range u.Hosts { hostURL := host.Name + u.HealthCheck.Path if r, err := http.Get(hostURL); err == nil { io.Copy(ioutil.Discard, r.Body) r.Body.Close() host.Unhealthy = r.StatusCode < 200 || r.StatusCode >= 400 } else { host.Unhealthy = true } } } func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) { ticker := time.NewTicker(u.HealthCheck.Interval) u.healthCheck() for { select { case <-ticker.C: u.healthCheck() case <-stop: // TODO: the library should provide a stop channel and global // waitgroup to allow goroutines started by plugins a chance // to clean themselves up. } } } func (u *staticUpstream) Select() *UpstreamHost { pool := u.Hosts if len(pool) == 1 { if pool[0].Down() { return nil } return pool[0] } allDown := true for _, host := range pool { if !host.Down() { allDown = false break } } if allDown { return nil } if u.Policy == nil { return (&Random{}).Select(pool) } return u.Policy.Select(pool) }
1
6,935
The name "Without" in code is a little nebulous. Maybe something more descriptive like TrimPrefix or StripPrefix or WithoutPathPrefix or something like that. (Thoughts?)
caddyserver-caddy
go
@@ -254,6 +254,12 @@ public class MoveIT { linkDataset.then().assertThat() .statusCode(OK.getStatusCode()); + // A dataset cannot be linked to the same dataverse again. + Response tryToLinkAgain = UtilIT.linkDataset(datasetPid, dataverse2Alias, superuserApiToken); + tryToLinkAgain.prettyPrint(); + tryToLinkAgain.then().assertThat() + .statusCode(FORBIDDEN.getStatusCode()); + Response getLinksBefore = UtilIT.getDatasetLinks(datasetPid, superuserApiToken); getLinksBefore.prettyPrint(); getLinksBefore.then().assertThat()
1
package edu.harvard.iq.dataverse.api; import com.jayway.restassured.RestAssured; import com.jayway.restassured.path.json.JsonPath; import com.jayway.restassured.response.Response; import edu.harvard.iq.dataverse.authorization.DataverseRole; import java.io.StringReader; import java.util.logging.Logger; import javax.json.Json; import javax.json.JsonObject; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.CREATED; import static javax.ws.rs.core.Response.Status.FORBIDDEN; import static javax.ws.rs.core.Response.Status.OK; import static javax.ws.rs.core.Response.Status.UNAUTHORIZED; import org.hamcrest.CoreMatchers; import static org.hamcrest.CoreMatchers.equalTo; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; public class MoveIT { private static final Logger logger = Logger.getLogger(MoveIT.class.getCanonicalName()); @BeforeClass public static void setUpClass() { RestAssured.baseURI = UtilIT.getRestAssuredBaseUri(); } @Test public void testMoveDataset() { Response createCurator = UtilIT.createRandomUser(); createCurator.prettyPrint(); createCurator.then().assertThat() .statusCode(OK.getStatusCode()); String curatorUsername = UtilIT.getUsernameFromResponse(createCurator); String curatorApiToken = UtilIT.getApiTokenFromResponse(createCurator); Response createCuratorDataverse1 = UtilIT.createRandomDataverse(curatorApiToken); createCuratorDataverse1.prettyPrint(); createCuratorDataverse1.then().assertThat() .statusCode(CREATED.getStatusCode()); String curatorDataverseAlias1 = UtilIT.getAliasFromResponse(createCuratorDataverse1); Response createAuthor = UtilIT.createRandomUser(); createAuthor.prettyPrint(); createAuthor.then().assertThat() .statusCode(OK.getStatusCode()); String authorUsername = UtilIT.getUsernameFromResponse(createAuthor); String authorApiToken = UtilIT.getApiTokenFromResponse(createAuthor); // Whoops, the curator forgot to give the author permission to create a dataset. 
Response noPermToCreateDataset = UtilIT.createRandomDatasetViaNativeApi(curatorDataverseAlias1, authorApiToken); noPermToCreateDataset.prettyPrint(); noPermToCreateDataset.then().assertThat() .statusCode(UNAUTHORIZED.getStatusCode()) .body("message", equalTo("User @" + authorUsername + " is not permitted to perform requested action.")); Response grantAuthorAddDataset = UtilIT.grantRoleOnDataverse(curatorDataverseAlias1, DataverseRole.DS_CONTRIBUTOR.toString(), "@" + authorUsername, curatorApiToken); grantAuthorAddDataset.prettyPrint(); grantAuthorAddDataset.then().assertThat() .statusCode(OK.getStatusCode()) .body("data.assignee", equalTo("@" + authorUsername)) .body("data._roleAlias", equalTo("dsContributor")); Response createDataset = UtilIT.createRandomDatasetViaNativeApi(curatorDataverseAlias1, authorApiToken); createDataset.prettyPrint(); createDataset.then().assertThat() .statusCode(CREATED.getStatusCode()); Integer datasetId = UtilIT.getDatasetIdFromResponse(createDataset); String nullApiToken = null; String nonExistentDataverse = UtilIT.getRandomDvAlias(); Response moveDatasetFailNoTargetDataverse = UtilIT.moveDataset(datasetId.toString(), nonExistentDataverse, nullApiToken); moveDatasetFailNoTargetDataverse.prettyPrint(); moveDatasetFailNoTargetDataverse.then().assertThat() .statusCode(BAD_REQUEST.getStatusCode()) .body("message", equalTo("Target dataverse not found.")); Response moveDatasetFailGuest = UtilIT.moveDataset(datasetId.toString(), curatorDataverseAlias1, nullApiToken); moveDatasetFailGuest.prettyPrint(); moveDatasetFailGuest.then().assertThat() .statusCode(UNAUTHORIZED.getStatusCode()) .body("message", equalTo("User :guest is not permitted to perform requested action.")); Response moveDatasetFailAlreadyThere = UtilIT.moveDataset(datasetId.toString(), curatorDataverseAlias1, curatorApiToken); moveDatasetFailAlreadyThere.prettyPrint(); moveDatasetFailAlreadyThere.then().assertThat() .statusCode(FORBIDDEN.getStatusCode()) .body("message", equalTo("This dataset is already in this dataverse.")); Response createAuthorDataverse1 = UtilIT.createRandomDataverse(curatorApiToken); createAuthorDataverse1.prettyPrint(); createAuthorDataverse1.then().assertThat() .statusCode(CREATED.getStatusCode()); String authorDataverseAlias1 = UtilIT.getAliasFromResponse(createAuthorDataverse1); Response moveDatasetFail = UtilIT.moveDataset(datasetId.toString(), authorDataverseAlias1, authorApiToken); moveDatasetFail.prettyPrint(); moveDatasetFail.then().assertThat() .statusCode(UNAUTHORIZED.getStatusCode()) .body("message", equalTo("User @" + authorUsername + " is not permitted to perform requested action.")); Response createSuperuser = UtilIT.createRandomUser(); createSuperuser.then().assertThat() .statusCode(OK.getStatusCode()); String superusername = UtilIT.getUsernameFromResponse(createSuperuser); String superuserApiToken = UtilIT.getApiTokenFromResponse(createSuperuser); Response makeSuperuser = UtilIT.makeSuperUser(superusername); makeSuperuser.then().assertThat() .statusCode(OK.getStatusCode()); Response moveDataset1 = UtilIT.moveDataset(datasetId.toString(), authorDataverseAlias1, superuserApiToken); moveDataset1.prettyPrint(); moveDataset1.then().assertThat() .statusCode(OK.getStatusCode()) .body("data.message", equalTo("Dataset moved successfully.")); Response moveDataset2 = UtilIT.moveDataset(datasetId.toString(), curatorDataverseAlias1, superuserApiToken); moveDataset2.prettyPrint(); moveDataset2.then().assertThat() .statusCode(OK.getStatusCode()) .body("data.message", 
equalTo("Dataset moved successfully.")); Response createCuratorDataverse2 = UtilIT.createRandomDataverse(curatorApiToken); createCuratorDataverse2.prettyPrint(); createCuratorDataverse2.then().assertThat() .statusCode(CREATED.getStatusCode()); String curatorDataverseAlias2 = UtilIT.getAliasFromResponse(createCuratorDataverse2); Response moveDatasetFailNoPermToPublishDv = UtilIT.moveDataset(datasetId.toString(), curatorDataverseAlias2, authorApiToken); moveDatasetFailNoPermToPublishDv.prettyPrint(); moveDatasetFailNoPermToPublishDv.then().assertThat() .statusCode(UNAUTHORIZED.getStatusCode()) .body("message", equalTo("User @" + authorUsername + " is not permitted to perform requested action.")); Response moveDataset3 = UtilIT.moveDataset(datasetId.toString(), curatorDataverseAlias2, curatorApiToken); moveDataset3.prettyPrint(); moveDataset3.then().assertThat() .statusCode(OK.getStatusCode()) .body("data.message", equalTo("Dataset moved successfully.")); } @Test public void testMoveDatasetThief() { Response createAuthor = UtilIT.createRandomUser(); createAuthor.prettyPrint(); createAuthor.then().assertThat() .statusCode(OK.getStatusCode()); String authorUsername = UtilIT.getUsernameFromResponse(createAuthor); String authorApiToken = UtilIT.getApiTokenFromResponse(createAuthor); Response createThief = UtilIT.createRandomUser(); createThief.prettyPrint(); createThief.then().assertThat() .statusCode(OK.getStatusCode()); String thiefUsername = UtilIT.getUsernameFromResponse(createThief); String thiefApiToken = UtilIT.getApiTokenFromResponse(createThief); Response createAuthorDataverse = UtilIT.createRandomDataverse(authorApiToken); createAuthorDataverse.prettyPrint(); createAuthorDataverse.then().assertThat() .statusCode(CREATED.getStatusCode()); String authorDataverseAlias = UtilIT.getAliasFromResponse(createAuthorDataverse); Response createDataset = UtilIT.createRandomDatasetViaNativeApi(authorDataverseAlias, authorApiToken); createDataset.prettyPrint(); createDataset.then().assertThat() .statusCode(CREATED.getStatusCode()); Integer datasetId = UtilIT.getDatasetIdFromResponse(createDataset); // Can the thief steal the dataset? 
Response createThiefDataverse = UtilIT.createRandomDataverse(thiefApiToken); createThiefDataverse.prettyPrint(); createThiefDataverse.then().assertThat() .statusCode(CREATED.getStatusCode()); String thiefDataverseAlias = UtilIT.getAliasFromResponse(createThiefDataverse); Response thiefAttemptToStealDataset = UtilIT.moveDataset(datasetId.toString(), thiefDataverseAlias, thiefApiToken); thiefAttemptToStealDataset.prettyPrint(); thiefAttemptToStealDataset.then().assertThat() .statusCode(UNAUTHORIZED.getStatusCode()) .body("message", equalTo("User @" + thiefUsername + " is not permitted to perform requested action.")); } @Test public void testMoveLinkedDataset() { Response createUser = UtilIT.createRandomUser(); createUser.prettyPrint(); createUser.then().assertThat() .statusCode(OK.getStatusCode()); String username = UtilIT.getUsernameFromResponse(createUser); String apiToken = UtilIT.getApiTokenFromResponse(createUser); Response createSuperUser = UtilIT.createRandomUser(); createSuperUser.prettyPrint(); createSuperUser.then().assertThat() .statusCode(OK.getStatusCode()); String superuserUsername = UtilIT.getUsernameFromResponse(createSuperUser); String superuserApiToken = UtilIT.getApiTokenFromResponse(createSuperUser); Response makeSuperuser = UtilIT.makeSuperUser(superuserUsername); makeSuperuser.prettyPrint(); makeSuperuser.then().assertThat() .statusCode(OK.getStatusCode()); Response createDataverse1 = UtilIT.createRandomDataverse(apiToken); createDataverse1.prettyPrint(); createDataverse1.then().assertThat() .statusCode(CREATED.getStatusCode()); String dataverse1Alias = UtilIT.getAliasFromResponse(createDataverse1); Response createDataset = UtilIT.createRandomDatasetViaNativeApi(dataverse1Alias, apiToken); createDataset.prettyPrint(); createDataset.then().assertThat() .statusCode(CREATED.getStatusCode()); Integer datasetId = UtilIT.getDatasetIdFromResponse(createDataset); String datasetPid = JsonPath.from(createDataset.asString()).getString("data.persistentId"); Response createDataverse2 = UtilIT.createRandomDataverse(apiToken); createDataverse2.prettyPrint(); createDataverse2.then().assertThat() .statusCode(CREATED.getStatusCode()); String dataverse2Alias = UtilIT.getAliasFromResponse(createDataverse2); Integer dataverse2Id = UtilIT.getDatasetIdFromResponse(createDataverse2); String dataverse2Name = JsonPath.from(createDataverse2.asString()).getString("data.name"); UtilIT.publishDataverseViaNativeApi(dataverse1Alias, apiToken).then().assertThat() .statusCode(OK.getStatusCode()); UtilIT.publishDatasetViaNativeApi(datasetPid, "major", apiToken).then().assertThat() .statusCode(OK.getStatusCode()); Response moveDatasetFailTargetDataverseNotPublished = UtilIT.moveDataset(datasetId.toString(), dataverse2Alias, apiToken); moveDatasetFailTargetDataverseNotPublished.prettyPrint(); moveDatasetFailTargetDataverseNotPublished.then().assertThat() .statusCode(FORBIDDEN.getStatusCode()) .body("message", equalTo("A published dataset may not be moved to an unpublished dataverse. You can retry the move after publishing " + dataverse2Name + ".")); UtilIT.publishDataverseViaNativeApi(dataverse2Alias, apiToken).then().assertThat() .statusCode(OK.getStatusCode()); // Link dataset to second dataverse. 
Response linkDataset = UtilIT.linkDataset(datasetPid, dataverse2Alias, superuserApiToken); linkDataset.prettyPrint(); linkDataset.then().assertThat() .statusCode(OK.getStatusCode()); Response getLinksBefore = UtilIT.getDatasetLinks(datasetPid, superuserApiToken); getLinksBefore.prettyPrint(); getLinksBefore.then().assertThat() .statusCode(OK.getStatusCode()); Response listDatasetsBeforeSource = UtilIT.listDatasetsViaSword(dataverse1Alias, apiToken); listDatasetsBeforeSource.prettyPrint(); listDatasetsBeforeSource.then().assertThat() .statusCode(OK.getStatusCode()) .body("feed.entry[0].id", CoreMatchers.endsWith(datasetPid)); Response listDatasetsBeforeDestination = UtilIT.listDatasetsViaSword(dataverse2Alias, apiToken); listDatasetsBeforeDestination.prettyPrint(); listDatasetsBeforeDestination.then().assertThat() // TODO: Add assertion that no dataset exists. .statusCode(OK.getStatusCode()); Response attemptToMoveLinkedDataset = UtilIT.moveDataset(datasetId.toString(), dataverse2Alias, superuserApiToken); attemptToMoveLinkedDataset.prettyPrint(); attemptToMoveLinkedDataset.then().assertThat() .statusCode(FORBIDDEN.getStatusCode()) .body("message", equalTo("Use the query parameter forceMove=true to complete the move. This dataset is linked to the new host dataverse or one of its parents. This move would remove the link to this dataset. ")); JsonObject linksBeforeData = Json.createReader(new StringReader(getLinksBefore.asString())).readObject(); Assert.assertEquals("OK", linksBeforeData.getString("status")); Assert.assertEquals(dataverse2Alias + " (id " + dataverse2Id + ")", linksBeforeData.getJsonObject("data").getJsonArray("dataverses that link to dataset id " + datasetId).getString(0)); boolean forceMove = true; Response forceMoveLinkedDataset = UtilIT.moveDataset(datasetId.toString(), dataverse2Alias, forceMove, superuserApiToken); forceMoveLinkedDataset.prettyPrint(); forceMoveLinkedDataset.then().assertThat() .statusCode(OK.getStatusCode()) .body("data.message", equalTo("Dataset moved successfully.")); Response listDatasetsAfterSource = UtilIT.listDatasetsViaSword(dataverse1Alias, apiToken); listDatasetsAfterSource.prettyPrint(); listDatasetsAfterSource.then().assertThat() // TODO: Add assertion that no dataset exists. .statusCode(OK.getStatusCode()); Response listDatasetsAfterDestination = UtilIT.listDatasetsViaSword(dataverse2Alias, apiToken); listDatasetsAfterDestination.prettyPrint(); listDatasetsAfterDestination.then().assertThat() .statusCode(OK.getStatusCode()) .body("feed.entry[0].id", CoreMatchers.endsWith(datasetPid)); Response getLinksAfter = UtilIT.getDatasetLinks(datasetPid, superuserApiToken); getLinksAfter.prettyPrint(); getLinksAfter.then().assertThat() .statusCode(OK.getStatusCode()); JsonObject linksAfterData = Json.createReader(new StringReader(getLinksAfter.asString())).readObject(); Assert.assertEquals("OK", linksAfterData.getString("status")); Assert.assertEquals(0, linksAfterData.getJsonObject("data").getJsonArray("dataverses that link to dataset id " + datasetId).size()); } }
1
43,422
Is this test in the move tests? I see what you mean, then: it works, but I wonder whether we'll lose track of the fact that it's being tested here.
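If the concern is losing track of the linking assertion inside the move tests, one option is a dedicated test that exercises only linking. This is just a sketch: the method name and its placement (in this class or a separate linking test class) are assumptions, and it reuses only the UtilIT helpers and status-code assertions already present in this file and in the diff above.

```java
@Test
public void testLinkDatasetToSameDataverseTwice() {
    Response createSuperUser = UtilIT.createRandomUser();
    createSuperUser.then().assertThat().statusCode(OK.getStatusCode());
    String superuserUsername = UtilIT.getUsernameFromResponse(createSuperUser);
    String superuserApiToken = UtilIT.getApiTokenFromResponse(createSuperUser);
    UtilIT.makeSuperUser(superuserUsername).then().assertThat().statusCode(OK.getStatusCode());

    Response createDataverse1 = UtilIT.createRandomDataverse(superuserApiToken);
    createDataverse1.then().assertThat().statusCode(CREATED.getStatusCode());
    String dataverse1Alias = UtilIT.getAliasFromResponse(createDataverse1);

    Response createDataverse2 = UtilIT.createRandomDataverse(superuserApiToken);
    createDataverse2.then().assertThat().statusCode(CREATED.getStatusCode());
    String dataverse2Alias = UtilIT.getAliasFromResponse(createDataverse2);

    Response createDataset = UtilIT.createRandomDatasetViaNativeApi(dataverse1Alias, superuserApiToken);
    createDataset.then().assertThat().statusCode(CREATED.getStatusCode());
    String datasetPid = JsonPath.from(createDataset.asString()).getString("data.persistentId");

    UtilIT.publishDataverseViaNativeApi(dataverse1Alias, superuserApiToken).then().assertThat().statusCode(OK.getStatusCode());
    UtilIT.publishDataverseViaNativeApi(dataverse2Alias, superuserApiToken).then().assertThat().statusCode(OK.getStatusCode());
    UtilIT.publishDatasetViaNativeApi(datasetPid, "major", superuserApiToken).then().assertThat().statusCode(OK.getStatusCode());

    // First link succeeds.
    Response linkDataset = UtilIT.linkDataset(datasetPid, dataverse2Alias, superuserApiToken);
    linkDataset.then().assertThat().statusCode(OK.getStatusCode());

    // Linking the same dataset to the same dataverse again is rejected.
    Response tryToLinkAgain = UtilIT.linkDataset(datasetPid, dataverse2Alias, superuserApiToken);
    tryToLinkAgain.prettyPrint();
    tryToLinkAgain.then().assertThat().statusCode(FORBIDDEN.getStatusCode());
}
```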
IQSS-dataverse
java
@@ -368,5 +368,8 @@ type Instance struct { EBSOptimized *bool `json:"ebsOptimized"` // The tags associated with the instance. - Tags map[string]string `json:"tag"` + Tags map[string]string `json:"tags"` + + // The security groups associated with the instance. + SecurityGroups map[string]string `json:"securityGroups"` }
1
// Copyright © 2018 The Kubernetes Authors. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( "fmt" "reflect" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // AWSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderConfig field // for an AWS instance. It is used by the AWS machine actuator to create a single machine instance, // using the RunInstances call (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) // Required parameters such as region that are not specified by this configuration, will be defaulted // by the actuator. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSMachineProviderConfig struct { metav1.TypeMeta `json:",inline"` // AMI is the reference to the AMI from which to create the machine instance. AMI AWSResourceReference `json:"ami"` // InstanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType"` // AdditionalTags is the set of tags to add to an instance, in addition to the ones // added by default by the actuator. These tags are additive. The actuator will ensure // these tags are present, but will not remove any other tags that may exist on the // instance. // +optional AdditionalTags map[string]string `json:"additionalTags,omitempty"` // IAMInstanceProfile is a reference to an IAM role to assign to the instance // +optional IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` // PublicIP specifies whether the instance should get a public IP. // Precedence for this setting is as follows: // 1. This field if set // 2. Cluster/flavor setting // 3. Subnet default // +optional PublicIP *bool `json:"publicIP,omitempty"` // AdditionalSecurityGroups is an array of references to security groups that should be applied to the // instance. These security groups would be set in addition to any security groups defined // at the cluster level or in the actuator. // +optional AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"` // Subnet is a reference to the subnet to use for this instance. If not specified, // the cluster subnet will be used. // +optional Subnet *AWSResourceReference `json:"subnet,omitempty"` // KeyName is the name of the SSH key to install on the instance. // +optional KeyName string `json:"keyName"` } // AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. // Only one of ID, ARN or Filters may be specified. Specifying more than one will result in // a validation error. 
type AWSResourceReference struct { // ID of resource // +optional ID *string `json:"id,omitempty"` // ARN of resource // +optional ARN *string `json:"arn,omitempty"` // Filters is a set of key/value pairs used to identify a resource // They are applied according to the rules defined by the AWS API: // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html // +optional Filters []Filter `json:"filters"` } // Filter is a filter used to identify an AWS resource type Filter struct { // Name of the filter. Filter names are case-sensitive. Name string `json:"name"` // Values includes one or more filter values. Filter values are case-sensitive. Values []string `json:"values"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSClusterProviderConfig struct { metav1.TypeMeta `json:",inline"` // The AWS Region the cluster lives in. Region string `json:"region"` // SSHKeyName is the name of the ssh key to attach to the bastion host. SSHKeyName string `json:"sshKeyName,omitempty"` } // AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. // It containsk AWS-specific status information. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` // InstanceID is the instance ID of the machine created in AWS // +optional InstanceID *string `json:"instanceID,omitempty"` // InstanceState is the state of the AWS instance for this machine // +optional InstanceState *string `json:"instanceState,omitempty"` // Conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional Conditions []AWSMachineProviderCondition `json:"conditions,omitempty"` } // AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type type AWSMachineProviderConditionType string // Valid conditions for an AWS machine instance const ( // MachineCreated indicates whether the machine has been created or not. If not, // it should include a reason and message for the failure. MachineCreated AWSMachineProviderConditionType = "MachineCreated" ) // AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus type AWSMachineProviderCondition struct { // Type is the type of the condition. Type AWSMachineProviderConditionType `json:"type"` // Status is the status of the condition. Status corev1.ConditionStatus `json:"status"` // LastProbeTime is the last time we probed the condition. // +optional LastProbeTime metav1.Time `json:"lastProbeTime"` // LastTransitionTime is the last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime"` // Reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason"` // Message is a human-readable message indicating details about last transition. // +optional Message string `json:"message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSClusterProviderStatus struct { metav1.TypeMeta `json:",inline"` Region string `json:"region"` Network Network `json:"network"` Bastion Instance `json:"bastion"` } // Network encapsulates AWS networking resources. type Network struct { // VPC defines the cluster vpc. VPC VPC `json:"vpc"` // InternetGatewayID is the id of the internet gateway associated with the VPC. 
InternetGatewayID *string `json:"internetGatewayId"` // SecurityGroups is a map from the role/kind of the security group to its unique name, if any. SecurityGroups map[SecurityGroupRole]*SecurityGroup `json:"securityGroups"` // Subnets includes all the subnets defined inside the VPC. Subnets Subnets `json:"subnets"` } // VPC defines an AWS vpc. type VPC struct { ID string `json:"id"` CidrBlock string `json:"cidrBlock"` } // String returns a string representation of the VPC. func (v *VPC) String() string { return fmt.Sprintf("id=%s", v.ID) } // Subnet defines an AWS subnet attached to a VPC. type Subnet struct { ID string `json:"id"` VpcID string `json:"vpcId"` AvailabilityZone string `json:"availabilityZone"` CidrBlock string `json:"cidrBlock"` IsPublic bool `json:"public"` RouteTableID *string `json:"routeTableId"` NatGatewayID *string `json:"natGatewayId"` } // String returns a string representation of the subnet. func (s *Subnet) String() string { return fmt.Sprintf("id=%s/az=%s/public=%v", s.ID, s.AvailabilityZone, s.IsPublic) } // Subnets is a slice of Subnet. type Subnets []*Subnet // ToMap returns a map from id to subnet. func (s Subnets) ToMap() map[string]*Subnet { res := make(map[string]*Subnet) for _, x := range s { res[x.ID] = x } return res } // FilterPrivate returns a slice containing all subnets marked as private. func (s Subnets) FilterPrivate() (res Subnets) { for _, x := range s { if !x.IsPublic { res = append(res, x) } } return } // FilterPublic returns a slice containing all subnets marked as public. func (s Subnets) FilterPublic() (res Subnets) { for _, x := range s { if x.IsPublic { res = append(res, x) } } return } // RouteTable defines an AWS routing table. type RouteTable struct { ID string `json:"id"` } // SecurityGroupRole defines the unique role of a security group. type SecurityGroupRole string var ( SecurityGroupBastion = SecurityGroupRole("bastion") SecurityGroupNode = SecurityGroupRole("node") SecurityGroupControlPlane = SecurityGroupRole("controlplane") ) // SecurityGroup defines an AWS security group. type SecurityGroup struct { ID string `json:"id"` Name string `json:"name"` IngressRules IngressRules `json:"ingressRule"` } // String returns a string representation of the security group. func (s *SecurityGroup) String() string { return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name) } // IngressRule defines an AWS ingress rule for security groups. type IngressRule struct { Description string `json:"description"` Protocol string `json:"protocol"` FromPort int64 `json:"fromPort"` ToPort int64 `json:"toPort"` // List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID. CidrBlocks []string `json:"cidrBlocks"` // The security group id to allow access from. Cannot be specified with CidrBlocks. SourceSecurityGroupID *string `json:"sourceSecurityGroupId"` } // String returns a string representation of the ingress rule. func (i *IngressRule) String() string { return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description) } // IngressRules is a slice of AWS ingress rules for security groups. type IngressRules []*IngressRule // Returns the difference between this slice and the other slice. func (i IngressRules) Difference(o IngressRules) (out IngressRules) { for _, x := range i { found := false for _, y := range o { if reflect.DeepEqual(x, y) { found = true break } } if !found { out = append(out, x) } } return } // InstanceState describes the state of an AWS instance. 
type InstanceState string var ( InstanceStatePending = InstanceState("pending") InstanceStateRunning = InstanceState("running") InstanceStateShuttingDown = InstanceState("shutting-down") InstanceStateTerminated = InstanceState("terminated") InstanceStateStopping = InstanceState("stopping") InstanceStateStopped = InstanceState("stopped") ) // Instance describes an AWS instance. type Instance struct { ID string `json:"id"` // The current state of the instance. State InstanceState `json:"instanceState"` // The instance type. Type string `json:"type"` // The ID of the subnet of the instance. SubnetID string `json:"subnetId"` // The ID of the AMI used to launch the instance. ImageID string `json:"imageId"` // The name of the SSH key pair. KeyName *string `json:"keyName"` // SecurityGroupIDs are one or more security group IDs this instance belongs to. SecurityGroupIDs []string `json:"securityGroupIds"` // UserData is the raw data script passed to the instance which is run upon bootstrap. // This field must not be base64 encoded and should only be used when running a new instance. UserData *string `json:"userData"` // The ARN of the IAM instance profile associated with the instance, if applicable. IAMProfile *AWSResourceReference `json:"iamProfile"` // The private IPv4 address assigned to the instance. PrivateIP *string `json:"privateIp"` // The public IPv4 address assigned to the instance, if applicable. PublicIP *string `json:"publicIp"` // Specifies whether enhanced networking with ENA is enabled. ENASupport *bool `json:"enaSupport"` // Indicates whether the instance is optimized for Amazon EBS I/O. EBSOptimized *bool `json:"ebsOptimized"` // The tags associated with the instance. Tags map[string]string `json:"tag"` }
1
6,181
I'm not sure whether this was meant to be `json:"tags"` (which I'd set in my PR) or `json:"tag"`, which someone else's PR had set. Given that the rest of the fields have their JSON field name set to the same as the struct field name, I opted for `tags`.
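For what it's worth, a minimal illustration of the convention described here: the JSON key mirrors the Go field name, lower-camel-cased, so the plural map serializes as `tags`. The lowercase `instance` type, the sample values, and the `main` wrapper are illustrative stand-ins, not the real `Instance` struct.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// The JSON key mirrors the Go field name, lower-camel-cased, so the plural
// field serializes as "tags" and the security-group map as "securityGroups".
type instance struct {
	Tags           map[string]string `json:"tags"`
	SecurityGroups map[string]string `json:"securityGroups"`
}

func main() {
	b, err := json.Marshal(instance{
		Tags:           map[string]string{"Name": "bastion"},
		SecurityGroups: map[string]string{"bastion": "sg-0123456789"},
	})
	if err != nil {
		panic(err)
	}
	// {"tags":{"Name":"bastion"},"securityGroups":{"bastion":"sg-0123456789"}}
	fmt.Println(string(b))
}
```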
kubernetes-sigs-cluster-api-provider-aws
go
@@ -47,6 +47,11 @@ module Ncr message: "must be three letters or numbers" }, allow_blank: true + scope :for_fiscal_year, lambda { |year| + range = self.class.range_for_fiscal_year(year) + where(created_at: range[:start_time]...range[:end_time]) + } + def self.all_system_approver_emails [ self.ba61_tier1_budget_mailbox,
1
require 'csv' module Ncr # Make sure all table names use 'ncr_XXX' def self.table_name_prefix 'ncr_' end EXPENSE_TYPES = %w(BA60 BA61 BA80) BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml") class WorkOrder < ActiveRecord::Base # must define before include PurchaseCardMixin def self.purchase_amount_column_name :amount end include ClientDataMixin include PurchaseCardMixin attr_accessor :approving_official_email # This is a hack to be able to attribute changes to the correct user. This attribute needs to be set explicitly, then the update comment will use them as the "commenter". Defaults to the requester. attr_accessor :modifier validates :approving_official_email, presence: true validates_email_format_of :approving_official_email validates :amount, presence: true validates :cl_number, format: { with: /\ACL\d{7}\z/, message: "must start with 'CL', followed by seven numbers" }, allow_blank: true validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true validates :function_code, format: { with: /\APG[A-Z0-9]{3}\z/, message: "must start with 'PG', followed by three letters or numbers" }, allow_blank: true validates :project_title, presence: true validates :vendor, presence: true validates :building_number, presence: true, if: :not_ba60? validates :rwa_number, presence: true, if: :ba80? validates :rwa_number, format: { with: /\A[a-zA-Z][0-9]{7}\z/, message: "must be one letter followed by 7 numbers" }, allow_blank: true validates :soc_code, format: { with: /\A[A-Z0-9]{3}\z/, message: "must be three letters or numbers" }, allow_blank: true def self.all_system_approver_emails [ self.ba61_tier1_budget_mailbox, self.ba61_tier2_budget_mailbox, self.ba80_budget_mailbox, self.ool_ba80_budget_mailbox, ] end def self.ba61_tier1_budget_mailbox self.approver_with_role("BA61_tier1_budget_approver") end def self.ba61_tier2_budget_mailbox self.approver_with_role("BA61_tier2_budget_approver") end def self.ba80_budget_mailbox self.approver_with_role("BA80_budget_approver") end def self.ool_ba80_budget_mailbox self.approver_with_role("OOL_BA80_budget_approver") end def self.approver_with_role(role_name) users = User.with_role(role_name).where(client_slug: "ncr") if users.empty? fail "Missing User with role #{role_name} -- did you run rake db:migrate and rake db:seed?" end users.first.email_address end def self.relevant_fields(expense_type) fields = self.default_fields if expense_type == "BA61" fields << :emergency elsif expense_type == "BA80" fields += [:rwa_number, :code] end fields end def self.default_fields fields = self.column_names.map(&:to_sym) + [:approving_official_email] fields - [:emergency, :rwa_number, :code, :created_at, :updated_at, :id] end def approver_email_frozen? approval = self.individual_steps.first approval && !approval.actionable? end def approver_changed? self.approving_official && self.approving_official.email_address != approving_official_email end def requires_approval? !self.emergency end def for_whsc_organization? if org_code.present? ncr_org.try(:whsc?) end end def for_ool_organization? if org_code.present? ncr_org.try(:ool?) end end def setup_approvals_and_observers manager = ApprovalManager.new(self) manager.setup_approvals_and_observers end def approving_official approvers.first end def current_approver if pending? currently_awaiting_approvers.first elsif approving_official approving_official elsif emergency and approvers.empty? 
nil else User.for_email(system_approver_emails.first) end end def final_approver if !emergency and approvers.any? approvers.last end end def budget_approvals self.individual_steps.offset(1) end def budget_approvers budget_approvals.map(&:completed_by) end def editable? true end # Methods for Client Data interface def fields_for_display attributes = self.class.relevant_fields(expense_type) attributes.map { |attribute| [WorkOrder.human_attribute_name(attribute), self.send(attribute)] } end def ba80? self.expense_type == 'BA80' end def not_ba60? expense_type != "BA60" end def total_price self.amount || 0.0 end # may be replaced with paper-trail or similar at some point def version self.updated_at.to_i end def system_approver_emails manager = ApprovalManager.new(self) manager.system_approver_emails end def building_id regex = /\A(\w{8}) .*\z/ if building_number && regex.match(building_number) regex.match(building_number)[1] else building_number end end def name project_title end def public_identifier "FY" + fiscal_year.to_s.rjust(2, "0") + "-#{proposal.id}" end def fiscal_year year = self.created_at.nil? ? Time.zone.now.year : self.created_at.year month = self.created_at.nil? ? Time.zone.now.month : self.created_at.month if month >= 10 year += 1 end year % 100 # convert to two-digit end def restart_budget_approvals self.budget_approvals.each(&:restart!) self.proposal.reset_status self.proposal.root_step.initialize! end private def ncr_org ncr_org_code = org_code.match(/^(\w+)/)[1] Ncr::Organization.find_by(code: ncr_org_code) end end end
1
16,110
Since the logic here and in `Proposal` is exactly the same, do you think it makes sense for us to move it somewhere shared? I'm not opposed to duplicated code when it makes sense, but the reason I first noticed this was that I was looking for code in NCR::WorkOrder that wasn't specific to work orders. Fiscal-year logic seems like it could be useful for reporting for other clients, in which case we'd want this scope on those client data classes as well. It might be wrong to include this in the FiscalYear mixin, but it does seem like it should live outside of Proposal and WorkOrder. Does that make sense?
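A minimal sketch of what the shared version could look like, assuming a Rails/ActiveSupport concern and that each including model (e.g. Proposal, Ncr::WorkOrder) defines a class-level `range_for_fiscal_year(year)` helper returning `:start_time`/`:end_time`, as the scope in the diff above expects. The module name is illustrative.

```ruby
# Illustrative module name; assumes ActiveSupport::Concern is available and
# that the including model provides range_for_fiscal_year(year) as a class
# method returning a hash with :start_time and :end_time.
module FiscalYearScopes
  extend ActiveSupport::Concern

  included do
    scope :for_fiscal_year, lambda { |year|
      range = range_for_fiscal_year(year)
      where(created_at: range[:start_time]...range[:end_time])
    }
  end
end

# Hypothetical usage:
#   class Ncr::WorkOrder < ActiveRecord::Base
#     include FiscalYearScopes
#   end
```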
18F-C2
rb
@@ -42,6 +42,11 @@ class InfluxWriterSubscriber(object): self.time = 0 + def on_connection_closed(self, connection, reply_code, reply_text): + self.log.info('RabbitMQ connection got closed!') + self.connection.add_timeout(5, self.connect_to_rabbitmq) + + @staticmethod def static_callback(ch, method, properties, body, obj): return obj.callback(ch, method, properties, body)
1
#!/usr/bin/env python3 import sys import os import pika from influxdb import InfluxDBClient from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError import ujson import logging from listenbrainz.listen import Listen from time import time, sleep import listenbrainz.config as config from listenbrainz.listenstore import InfluxListenStore from listenbrainz.utils import escape, get_measurement_name, get_escaped_measurement_name, \ get_influx_query_timestamp, convert_to_unix_timestamp, \ convert_timestamp_to_influx_row_format from requests.exceptions import ConnectionError from redis import Redis from collections import defaultdict REPORT_FREQUENCY = 5000 DUMP_JSON_WITH_ERRORS = False ERROR_RETRY_DELAY = 3 # number of seconds to wait until retrying an operation class InfluxWriterSubscriber(object): def __init__(self): self.log = logging.getLogger(__name__) logging.basicConfig() self.log.setLevel(logging.INFO) self.ls = None self.influx = None self.redis = None self.incoming_ch = None self.unique_ch = None self.connection = None self.total_inserts = 0 self.inserts = 0 self.time = 0 @staticmethod def static_callback(ch, method, properties, body, obj): return obj.callback(ch, method, properties, body) def connect_to_rabbitmq(self): while True: try: credentials = pika.PlainCredentials(config.RABBITMQ_USERNAME, config.RABBITMQ_PASSWORD) connection_parameters = pika.ConnectionParameters( host=config.RABBITMQ_HOST, port=config.RABBITMQ_PORT, virtual_host=config.RABBITMQ_VHOST, credentials=credentials ) self.connection = pika.BlockingConnection(connection_parameters) break except Exception as e: self.log.error("Cannot connect to rabbitmq: %s, retrying in 2 seconds" % str(e)) sleep(ERROR_RETRY_DELAY) def callback(self, ch, method, properties, body): listens = ujson.loads(body) ret = self.write(listens) if not ret: return ret while True: try: self.incoming_ch.basic_ack(delivery_tag = method.delivery_tag) break except pika.exceptions.ConnectionClosed: self.connect_to_rabbitmq() count = len(listens) # collect and occasionally print some stats self.inserts += count if self.inserts >= REPORT_FREQUENCY: self.total_inserts += self.inserts if self.time > 0: self.log.info("Inserted %d rows in %.1fs (%.2f listens/sec). Total %d rows." % \ (self.inserts, self.time, self.inserts / self.time, self.total_inserts)) self.inserts = 0 self.time = 0 # now update listen counts in influx self.ls.update_listen_counts() return ret def insert_to_listenstore(self, data, retries=5): """ Inserts a batch of listens to the ListenStore. If this fails, then breaks the data into two parts and recursively tries to insert them, until we find the culprit listen Args: data: the data to be inserted into the ListenStore retries: the number of retries to make before deciding that we've failed Returns: number of listens successfully sent """ if not data: return 0 failure_count = 0 while True: try: self.ls.insert(data) return len(data) except (InfluxDBServerError, InfluxDBClientError, ValueError) as e: failure_count += 1 if failure_count >= retries: break sleep(ERROR_RETRY_DELAY) except ConnectionError as e: self.log.error("Cannot write data to listenstore: %s. Sleep." 
% str(e)) sleep(ERROR_RETRY_DELAY) # if we get here, we failed on trying to write the data if len(data) == 1: # try to send the bad listen one more time and if it doesn't work # log the error try: self.ls.insert(data) return 1 except (InfluxDBServerError, InfluxDBClientError, ValueError, ConnectionError) as e: self.log.error("Unable to insert bad listen to listenstore: %s" % str(e)) if DUMP_JSON_WITH_ERRORS: self.log.error("Was writing the following data:") influx_dict = data[0].to_influx(get_measurement_name(data[0].user_name)) self.log.error(ujson.dumps(influx_dict)) return 0 else: slice_index = len(data) // 2 # send first half sent = self.insert_to_listenstore(data[:slice_index], retries) # send second half sent += self.insert_to_listenstore(data[slice_index:], retries) return sent def write(self, listen_dicts): submit = [] unique = [] duplicate_count = 0 unique_count = 0 # Partition the listens on the basis of user names # and then store the time range for each user users = {} for listen in listen_dicts: t = int(listen['listened_at']) user_name = listen['user_name'] if user_name not in users: users[user_name] = { 'min_time': t, 'max_time': t, 'listens': [listen], } continue if t > users[user_name]['max_time']: users[user_name]['max_time'] = t if t < users[user_name]['min_time']: users[user_name]['min_time'] = t users[user_name]['listens'].append(listen) # get listens in the time range for each user and # remove duplicates on the basis of timestamps for user_name in users: # get the range of time that we need to get from influx for # deduplication of listens min_time = users[user_name]['min_time'] max_time = users[user_name]['max_time'] query = """SELECT time, recording_msid FROM %s WHERE time >= %s AND time <= %s """ % (get_escaped_measurement_name(user_name), get_influx_query_timestamp(min_time), get_influx_query_timestamp(max_time)) while True: try: results = self.influx.query(query) break except Exception as e: self.log.error("Cannot query influx: %s" % str(e)) sleep(3) # collect all the timestamps for this given time range. timestamps = defaultdict(list) # dict of list of listens indexed by timestamp for result in results.get_points(measurement=get_measurement_name(user_name)): timestamps[convert_to_unix_timestamp(result['time'])].append(result) for listen in users[user_name]['listens']: # Check if a listen with the same timestamp and recording msid is already present in # Influx DB and if it is, mark current listen as duplicate t = int(listen['listened_at']) recording_msid = listen['recording_msid'] dup = False if t in timestamps: for row in timestamps[t]: if row['recording_msid'] == recording_msid: duplicate_count += 1 dup = True break else: # if there are listens with the same timestamp but different # metadata, we add a tag specifically for making sure that # influxdb doesn't drop one of the listens. 
This value # is monotonically increasing and defaults to 0 listen['dedup_tag'] = len(timestamps[t]) if not dup: unique_count += 1 submit.append(Listen.from_json(listen)) unique.append(listen) timestamps[t].append({ 'time': convert_timestamp_to_influx_row_format(t), 'recording_msid': recording_msid }) t0 = time() submitted_count = self.insert_to_listenstore(submit) self.time += time() - t0 self.log.error("dups: %d, unique: %d, submitted: %d" % (duplicate_count, unique_count, submitted_count)) if not unique_count: return True while True: try: self.unique_ch.basic_publish(exchange='unique', routing_key='', body=ujson.dumps(unique), properties=pika.BasicProperties(delivery_mode = 2,)) break except pika.exceptions.ConnectionClosed: self.connect_to_rabbitmq() return True def start(self): self.log.info("influx-writer init") if not hasattr(config, "REDIS_HOST"): self.log.error("Redis service not defined. Sleeping 2 seconds and exiting.") sleep(ERROR_RETRY_DELAY) sys.exit(-1) if not hasattr(config, "INFLUX_HOST"): self.log.error("Influx service not defined. Sleeping 2 seconds and exiting.") sleep(ERROR_RETRY_DELAY) sys.exit(-1) if not hasattr(config, "RABBITMQ_HOST"): self.log.error("RabbitMQ service not defined. Sleeping 2 seconds and exiting.") sleep(ERROR_RETRY_DELAY) sys.exit(-1) while True: try: self.ls = InfluxListenStore({ 'REDIS_HOST' : config.REDIS_HOST, 'REDIS_PORT' : config.REDIS_PORT, 'INFLUX_HOST': config.INFLUX_HOST, 'INFLUX_PORT': config.INFLUX_PORT, 'INFLUX_DB_NAME': config.INFLUX_DB_NAME}) self.influx = InfluxDBClient(host=config.INFLUX_HOST, port=config.INFLUX_PORT, database=config.INFLUX_DB_NAME) break except Exception as err: self.log.error("Cannot connect to influx: %s. Retrying in 2 seconds and trying again." % str(err)) sleep(ERROR_RETRY_DELAY) while True: try: self.redis = Redis(host=config.REDIS_HOST, port=config.REDIS_PORT, decode_responses=True) self.redis.ping() break except Exception as err: self.log.error("Cannot connect to redis: %s. Retrying in 2 seconds and trying again." % str(err)) sleep(ERROR_RETRY_DELAY) while True: self.connect_to_rabbitmq() self.incoming_ch = self.connection.channel() self.incoming_ch.exchange_declare(exchange='incoming', type='fanout') self.incoming_ch.queue_declare('incoming', durable=True) self.incoming_ch.queue_bind(exchange='incoming', queue='incoming') self.incoming_ch.basic_consume(lambda ch, method, properties, body: self.static_callback(ch, method, properties, body, obj=self), queue='incoming') self.unique_ch = self.connection.channel() self.unique_ch.exchange_declare(exchange='unique', type='fanout') self.unique_ch.queue_declare('unique', durable=True) self.log.info("influx-writer started") try: self.incoming_ch.start_consuming() except pika.exceptions.ConnectionClosed: self.log.info("Connection to rabbitmq closed. Re-opening.") self.connection = None continue self.connection.close() def print_and_log_error(self, msg): self.log.error(msg) print(msg, file = sys.stderr) if __name__ == "__main__": rc = InfluxWriterSubscriber() rc.start()
1
14,617
there is no static method acting as a go-between -- how does this work?
metabrainz-listenbrainz-server
py
@@ -0,0 +1,8 @@ +# frozen_string_literal: true +# encoding: utf-8 + +class StringifiedSymbol + include Mongoid::Document + store_in collection: "stringified_symbols", client: :other + field :stringified_symbol, type: StringifiedSymbol +end
1
1
12,881
Can you please change the name of this class to be something else?
mongodb-mongoid
rb
@@ -403,7 +403,7 @@ size_t h2o_strstr(const char *haysack, size_t haysack_len, const char *needle, s } /* note: returns a zero-width match as well */ -const char *h2o_next_token(h2o_iovec_t *iter, int separator, size_t *element_len, h2o_iovec_t *value) +const char *h2o_next_token(h2o_iovec_t *iter, int separator, size_t *element_len, h2o_iovec_t *value, int coma_separator) { const char *cur = iter->base, *end = iter->base + iter->len, *token_start, *token_end;
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Justin Zhu, Fastly, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "h2o/string_.h" h2o_iovec_t h2o_strdup(h2o_mem_pool_t *pool, const char *s, size_t slen) { /* We do not need this check to be here, but it needs to be somewhere, see the definition of H2O_SIZE_T_LONGEST_STR */ H2O_BUILD_ASSERT(sizeof(size_t) <= sizeof(uint64_t)); h2o_iovec_t ret; if (slen == SIZE_MAX) slen = strlen(s); if (pool != NULL) { ret.base = h2o_mem_alloc_pool(pool, char, slen + 1); } else { ret.base = h2o_mem_alloc(slen + 1); } h2o_memcpy(ret.base, s, slen); ret.base[slen] = '\0'; ret.len = slen; return ret; } h2o_iovec_t h2o_strdup_shared(h2o_mem_pool_t *pool, const char *s, size_t slen) { h2o_iovec_t ret; if (slen == SIZE_MAX) slen = strlen(s); ret.base = h2o_mem_alloc_shared(pool, slen + 1, NULL); memcpy(ret.base, s, slen); ret.base[slen] = '\0'; ret.len = slen; return ret; } h2o_iovec_t h2o_strdup_slashed(h2o_mem_pool_t *pool, const char *src, size_t len) { h2o_iovec_t ret; ret.len = len != SIZE_MAX ? len : strlen(src); ret.base = pool != NULL ? 
h2o_mem_alloc_pool(pool, char, ret.len + 2) : h2o_mem_alloc(ret.len + 2); memcpy(ret.base, src, ret.len); if (ret.len != 0 && ret.base[ret.len - 1] != '/') ret.base[ret.len++] = '/'; ret.base[ret.len] = '\0'; return ret; } int h2o__lcstris_core(const char *target, const char *test, size_t test_len) { for (; test_len != 0; --test_len) if (h2o_tolower(*target++) != *test++) return 0; return 1; } size_t h2o_strtosize(const char *s, size_t len) { uint64_t v = 0, m = 1; const char *p = s + len; if (len == 0) goto Error; while (1) { int ch = *--p; if (!('0' <= ch && ch <= '9')) goto Error; v += (ch - '0') * m; if (p == s) break; m *= 10; /* do not even try to overflow */ if (m == 10000000000000000000ULL) goto Error; } if (v >= SIZE_MAX) goto Error; return v; Error: return SIZE_MAX; } size_t h2o_strtosizefwd(char **s, size_t len) { uint64_t v, c; char *p = *s, *p_end = *s + len; if (len == 0) goto Error; int ch = *p++; if (!('0' <= ch && ch <= '9')) goto Error; v = ch - '0'; c = 1; while (1) { ch = *p; if (!('0' <= ch && ch <= '9')) break; v *= 10; v += ch - '0'; p++; c++; if (p == p_end) break; /* similar as above, do not even try to overflow */ if (c == 20) goto Error; } if (v >= SIZE_MAX) goto Error; *s = p; return v; Error: return SIZE_MAX; } static uint32_t decode_base64url_quad(const char *src) { const char *src_end = src + 4; uint32_t decoded = 0; while (1) { if ('A' <= *src && *src <= 'Z') { decoded |= *src - 'A'; } else if ('a' <= *src && *src <= 'z') { decoded |= *src - 'a' + 26; } else if ('0' <= *src && *src <= '9') { decoded |= *src - '0' + 52; } else if (*src == '-') { decoded |= 62; } else if (*src == '_') { decoded |= 63; #if 1 /* curl uses normal base64 */ } else if (*src == '+') { decoded |= 62; } else if (*src == '/') { decoded |= 63; #endif } else { return UINT32_MAX; } if (++src == src_end) break; decoded <<= 6; } return decoded; } h2o_iovec_t h2o_decode_base64url(h2o_mem_pool_t *pool, const char *src, size_t len) { h2o_iovec_t decoded; uint32_t t; uint8_t *dst; char remaining_input[4]; decoded.len = len * 3 / 4; decoded.base = pool != NULL ? h2o_mem_alloc_pool(pool, char, decoded.len + 1) : h2o_mem_alloc(decoded.len + 1); dst = (uint8_t *)decoded.base; while (len >= 4) { if ((t = decode_base64url_quad(src)) == UINT32_MAX) goto Error; *dst++ = t >> 16; *dst++ = t >> 8; *dst++ = t; src += 4; len -= 4; } switch (len) { case 0: break; case 1: goto Error; case 2: remaining_input[0] = *src++; remaining_input[1] = *src++; remaining_input[2] = 'A'; remaining_input[3] = 'A'; if ((t = decode_base64url_quad(remaining_input)) == UINT32_MAX) goto Error; *dst++ = t >> 16; break; case 3: remaining_input[0] = *src++; remaining_input[1] = *src++; remaining_input[2] = *src++; remaining_input[3] = 'A'; if ((t = decode_base64url_quad(remaining_input)) == UINT32_MAX) goto Error; *dst++ = t >> 16; *dst++ = t >> 8; break; } assert((char *)dst - decoded.base == decoded.len); decoded.base[decoded.len] = '\0'; return decoded; Error: if (pool == NULL) free(decoded.base); return h2o_iovec_init(NULL, 0); } size_t h2o_base64_encode(char *_dst, const void *_src, size_t len, int url_encoded) { static const char *MAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; static const char *MAP_URL_ENCODED = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789-_"; char *dst = _dst; const uint8_t *src = _src; const char *map = url_encoded ? 
MAP_URL_ENCODED : MAP; uint32_t quad; for (; len >= 3; src += 3, len -= 3) { quad = ((uint32_t)src[0] << 16) | ((uint32_t)src[1] << 8) | src[2]; *dst++ = map[quad >> 18]; *dst++ = map[(quad >> 12) & 63]; *dst++ = map[(quad >> 6) & 63]; *dst++ = map[quad & 63]; } if (len != 0) { quad = (uint32_t)src[0] << 16; *dst++ = map[quad >> 18]; if (len == 2) { quad |= (uint32_t)src[1] << 8; *dst++ = map[(quad >> 12) & 63]; *dst++ = map[(quad >> 6) & 63]; if (!url_encoded) *dst++ = '='; } else { *dst++ = map[(quad >> 12) & 63]; if (!url_encoded) { *dst++ = '='; *dst++ = '='; } } } *dst = '\0'; return dst - _dst; } static int decode_hex(int ch) { if ('0' <= ch && ch <= '9') return ch - '0'; if ('A' <= ch && ch <= 'F') return ch - 'A' + 0xa; if ('a' <= ch && ch <= 'f') return ch - 'a' + 0xa; return -1; } int h2o_hex_decode(void *_dst, const char *src, size_t src_len) { unsigned char *dst = _dst; if (src_len % 2 != 0) return -1; for (; src_len != 0; src_len -= 2) { int hi, lo; if ((hi = decode_hex(*src++)) == -1 || (lo = decode_hex(*src++)) == -1) return -1; *dst++ = (hi << 4) | lo; } return 0; } void h2o_hex_encode(char *dst, const void *_src, size_t src_len) { const unsigned char *src = _src, *src_end = src + src_len; for (; src != src_end; ++src) { *dst++ = "0123456789abcdef"[*src >> 4]; *dst++ = "0123456789abcdef"[*src & 0xf]; } *dst = '\0'; } h2o_iovec_t h2o_uri_escape(h2o_mem_pool_t *pool, const char *s, size_t l, const char *preserve_chars) { h2o_iovec_t encoded; size_t i, capacity = l * 3 + 1; encoded.base = pool != NULL ? h2o_mem_alloc_pool(pool, char, capacity) : h2o_mem_alloc(capacity); encoded.len = 0; /* RFC 3986: path-noscheme = segment-nz-nc *( "/" segment ) segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" ) unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" */ for (i = 0; i != l; ++i) { int ch = s[i]; if (('A' <= ch && ch <= 'Z') || ('a' <= ch && ch <= 'z') || ('0' <= ch && ch <= '9') || ch == '-' || ch == '.' || ch == '_' || ch == '~' || ch == '!' 
|| ch == '$' || ch == '&' || ch == '\'' || ch == '(' || ch == ')' || ch == '*' || ch == '+' || ch == ',' || ch == ';' || ch == '=' || (ch != '\0' && preserve_chars != NULL && strchr(preserve_chars, ch) != NULL)) { encoded.base[encoded.len++] = ch; } else { encoded.base[encoded.len++] = '%'; encoded.base[encoded.len++] = "0123456789ABCDEF"[(ch >> 4) & 0xf]; encoded.base[encoded.len++] = "0123456789ABCDEF"[ch & 0xf]; } } encoded.base[encoded.len] = '\0'; return encoded; } h2o_iovec_t h2o_get_filext(const char *path, size_t len) { const char *end = path + len, *p = end; while (--p != path) { if (*p == '.') { return h2o_iovec_init(p + 1, end - (p + 1)); } else if (*p == '/') { break; } } return h2o_iovec_init(NULL, 0); } static int is_ws(int ch) { return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'; } h2o_iovec_t h2o_str_stripws(const char *s, size_t len) { const char *end = s + len; while (s != end) { if (!is_ws(*s)) break; ++s; } while (s != end) { if (!is_ws(end[-1])) break; --end; } return h2o_iovec_init(s, end - s); } size_t h2o_strstr(const char *haysack, size_t haysack_len, const char *needle, size_t needle_len) { /* TODO optimize */ if (haysack_len >= needle_len) { size_t off, max = haysack_len - needle_len + 1; if (needle_len == 0) return 0; for (off = 0; off != max; ++off) if (haysack[off] == needle[0] && memcmp(haysack + off + 1, needle + 1, needle_len - 1) == 0) return off; } return SIZE_MAX; } /* note: returns a zero-width match as well */ const char *h2o_next_token(h2o_iovec_t *iter, int separator, size_t *element_len, h2o_iovec_t *value) { const char *cur = iter->base, *end = iter->base + iter->len, *token_start, *token_end; /* find start */ for (;; ++cur) { if (cur == end) return NULL; if (!(*cur == ' ' || *cur == '\t')) break; } token_start = cur; token_end = cur; /* find last */ for (;; ++cur) { if (cur == end) break; if (*cur == separator) { ++cur; break; } if (*cur == ',') { if (token_start == cur) { ++cur; token_end = cur; } break; } if (value != NULL && *cur == '=') { ++cur; goto FindValue; } if (!(*cur == ' ' || *cur == '\t')) token_end = cur + 1; } /* found */ *iter = h2o_iovec_init(cur, end - cur); *element_len = token_end - token_start; if (value != NULL) *value = (h2o_iovec_t){NULL}; return token_start; FindValue: *iter = h2o_iovec_init(cur, end - cur); *element_len = token_end - token_start; if ((value->base = (char *)h2o_next_token(iter, separator, &value->len, NULL)) == NULL) { *value = (h2o_iovec_t){"", 0}; } else if (h2o_memis(value->base, value->len, H2O_STRLIT(","))) { *value = (h2o_iovec_t){"", 0}; iter->base -= 1; iter->len += 1; } return token_start; } int h2o_contains_token(const char *haysack, size_t haysack_len, const char *needle, size_t needle_len, int separator) { h2o_iovec_t iter = h2o_iovec_init(haysack, haysack_len); const char *token = NULL; size_t token_len = 0; while ((token = h2o_next_token(&iter, separator, &token_len, NULL)) != NULL) { if (h2o_lcstris(token, token_len, needle, needle_len)) { return 1; } } return 0; } h2o_iovec_t h2o_htmlescape(h2o_mem_pool_t *pool, const char *src, size_t len) { const char *s, *end = src + len; size_t add_size = 0; #define ENTITY_MAP() \ ENTITY('"', "&quot;"); \ ENTITY('&', "&amp;"); \ ENTITY('\'', "&#39;"); \ ENTITY('<', "&lt;"); \ ENTITY('>', "&gt;"); for (s = src; s != end; ++s) { if ((unsigned)(unsigned char)*s - '"' <= '>' - '"') { switch (*s) { #define ENTITY(code, quoted) \ case code: \ add_size += sizeof(quoted) - 2; \ break ENTITY_MAP() #undef ENTITY } } } /* escape and return the result if 
necessary */ if (add_size != 0) { /* allocate buffer and fill in the chars that are known not to require escaping */ h2o_iovec_t escaped = {h2o_mem_alloc_pool(pool, char, len + add_size + 1), 0}; /* fill-in the rest */ for (s = src; s != end; ++s) { switch (*s) { #define ENTITY(code, quoted) \ case code: \ memcpy(escaped.base + escaped.len, quoted, sizeof(quoted) - 1); \ escaped.len += sizeof(quoted) - 1; \ break ENTITY_MAP() #undef ENTITY default: escaped.base[escaped.len++] = *s; break; } } assert(escaped.len == len + add_size); escaped.base[escaped.len] = '\0'; return escaped; } #undef ENTITY_MAP /* no need not escape; return the original */ return h2o_iovec_init(src, len); } h2o_iovec_t h2o_concat_list(h2o_mem_pool_t *pool, h2o_iovec_t *list, size_t count) { h2o_iovec_t ret = {NULL, 0}; size_t i; /* calc the length */ for (i = 0; i != count; ++i) { ret.len += list[i].len; } /* allocate memory */ if (pool != NULL) ret.base = h2o_mem_alloc_pool(pool, char, ret.len + 1); else ret.base = h2o_mem_alloc(ret.len + 1); /* concatenate */ ret.len = 0; for (i = 0; i != count; ++i) { h2o_memcpy(ret.base + ret.len, list[i].base, list[i].len); ret.len += list[i].len; } ret.base[ret.len] = '\0'; return ret; } h2o_iovec_t h2o_join_list(h2o_mem_pool_t *pool, h2o_iovec_t *list, size_t count, h2o_iovec_t delimiter) { if (count == 0) { return h2o_iovec_init(NULL, 0); } size_t joined_len = 0; h2o_iovec_t *joined = alloca(sizeof(*joined) * (count * 2 - 1)); size_t i; for (i = 0; i != count; ++i) { if (i != 0) { joined[joined_len++] = delimiter; } joined[joined_len++] = list[i]; } return h2o_concat_list(pool, joined, joined_len); } void h2o_split(h2o_mem_pool_t *pool, h2o_iovec_vector_t *list, h2o_iovec_t str, const char needle) { const char *p = str.base, *end = str.base + str.len, *found; while (p < end && (found = memchr(p, needle, end - p)) != NULL) { h2o_vector_reserve(pool, list, list->size + 1); list->entries[list->size++] = h2o_strdup(pool, p, found - p); p = found + 1; } h2o_vector_reserve(pool, list, list->size + 1); list->entries[list->size++] = h2o_strdup(pool, p, end - p); } int h2o_str_at_position(char *buf, const char *src, size_t src_len, int lineno, int column) { const char *src_end = src + src_len; int i; /* find the line */ if (lineno <= 0 || column <= 0) return -1; for (--lineno; lineno != 0; --lineno) { do { if (src == src_end) return -1; } while (*src++ != '\n'); } /* adjust the starting column */ while (column > 40) { if (src != src_end) ++src; --column; } /* emit */ for (i = 1; i <= 76; ++i) { if (src == src_end || *src == '\n') break; *buf++ = *src++; } if (i < column) column = i; *buf++ = '\n'; for (i = 1; i < column; ++i) *buf++ = ' '; *buf++ = '^'; *buf++ = '\n'; *buf = '\0'; return 0; }
1
14,168
I think we might prefer generalizing the interface rather than creating an exception. The current design of `h2o_next_token` assumes the input to be a comma-separated list, and allows the caller to specify a different separator when parsing a nested list. As I understand it, what we are trying to attain in this PR is a way of parsing a flat semicolon-separated list. Assuming that is the case, I think it would be better to change the API of the function to accept two separators (i.e. inner and outer), where for our existing use-cases `outer` would be `,`.
h2o-h2o
c
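The review comment above proposes generalizing the tokenizer to take both an inner and an outer separator instead of hard-coding `,` as the list delimiter. Below is a minimal standalone sketch of that idea under assumed, hypothetical names (none of these functions or signatures exist in h2o, and the real function also trims trailing whitespace and parses `=` values, which this sketch omits); it only illustrates the two-separator shape the reviewer describes.

#include <stdio.h>
#include <string.h>

/* Hypothetical sketch only: a tiny tokenizer that stops at either an inner
 * (element) separator or an outer (list) separator, loosely mirroring the
 * reviewer's suggestion of passing two separators to h2o_next_token. */
static const char *next_token(const char **iter, const char *end,
                              int inner_sep, int outer_sep, size_t *len)
{
    const char *p = *iter, *start;
    while (p != end && (*p == ' ' || *p == '\t'))   /* skip leading whitespace */
        ++p;
    if (p == end)
        return NULL;
    start = p;
    while (p != end && *p != inner_sep && *p != outer_sep)
        ++p;
    *len = (size_t)(p - start);
    if (p != end)
        ++p;                                        /* consume the separator we stopped on */
    *iter = p;
    return start;
}

int main(void)
{
    const char *s = "gzip; q=1.0, br; q=0.5";       /* ';' as inner, ',' as outer separator */
    const char *iter = s, *end = s + strlen(s), *tok;
    size_t len;
    while ((tok = next_token(&iter, end, ';', ',', &len)) != NULL)
        printf("token: '%.*s'\n", (int)len, tok);
    return 0;
}

Under this shape, the existing comma-separated call sites would keep passing ',' as the outer separator, while the new flat semicolon-separated use-case would simply pass ';' for both.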
@@ -43,8 +43,8 @@ namespace Nethermind.Blockchain { private const long LowestInsertedBodyNumberDbEntryAddress = 0; private const int CacheSize = 64; - private readonly ICache<Keccak, Block> _blockCache = new LruCacheWithRecycling<Keccak, Block>(CacheSize, CacheSize, "blocks"); - private readonly ICache<Keccak, BlockHeader> _headerCache = new LruCacheWithRecycling<Keccak, BlockHeader>(CacheSize, CacheSize, "headers"); + private readonly ICache<Keccak, Block> _blockCache = new LruCache<Keccak, Block>(CacheSize, CacheSize, "blocks"); + private readonly ICache<Keccak, BlockHeader> _headerCache = new LruCache<Keccak, BlockHeader>(CacheSize, CacheSize, "headers"); private const int BestKnownSearchLimit = 256_000_000;
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections; using System.Collections.Generic; using System.IO; using System.Linq; using System.Threading; using Nethermind.Blockchain.Find; using Nethermind.Blockchain.Synchronization; using Nethermind.Core; using Nethermind.Core.Attributes; using Nethermind.Core.Caching; using Nethermind.Core.Crypto; using Nethermind.Core.Extensions; using Nethermind.Core.Specs; using Nethermind.Db; using Nethermind.Dirichlet.Numerics; using Nethermind.Logging; using Nethermind.Serialization.Rlp; using Nethermind.State.Repositories; using Nethermind.Db.Blooms; using Nethermind.TxPool; namespace Nethermind.Blockchain { [Todo(Improve.Refactor, "After the fast sync work there are some duplicated code parts for the 'by header' and 'by block' approaches.")] public partial class BlockTree : IBlockTree { private const long LowestInsertedBodyNumberDbEntryAddress = 0; private const int CacheSize = 64; private readonly ICache<Keccak, Block> _blockCache = new LruCacheWithRecycling<Keccak, Block>(CacheSize, CacheSize, "blocks"); private readonly ICache<Keccak, BlockHeader> _headerCache = new LruCacheWithRecycling<Keccak, BlockHeader>(CacheSize, CacheSize, "headers"); private const int BestKnownSearchLimit = 256_000_000; private readonly object _batchInsertLock = new object(); private readonly IDb _blockDb; private readonly IDb _headerDb; private readonly IDb _blockInfoDb; private ICache<long, HashSet<Keccak>> _invalidBlocks = new LruCacheWithRecycling<long, HashSet<Keccak>>(128, 128, "invalid blocks"); private readonly BlockDecoder _blockDecoder = new BlockDecoder(); private readonly HeaderDecoder _headerDecoder = new HeaderDecoder(); private readonly ILogger _logger; private readonly ISpecProvider _specProvider; private readonly ITxPool _txPool; private readonly IBloomStorage _bloomStorage; private readonly ISyncConfig _syncConfig; private readonly IChainLevelInfoRepository _chainLevelInfoRepository; internal static Keccak DeletePointerAddressInDb = new Keccak(new BitArray(32 * 8, true).ToBytes()); internal static Keccak HeadAddressInDb = Keccak.Zero; public BlockHeader Genesis { get; private set; } public Block Head { get; private set; } public BlockHeader BestSuggestedHeader { get; private set; } public Block BestSuggestedBody { get; private set; } public BlockHeader LowestInsertedHeader { get; private set; } private long? _lowestInsertedReceiptBlock; public long? 
LowestInsertedBodyNumber { get => _lowestInsertedReceiptBlock; set { _lowestInsertedReceiptBlock = value; if (value.HasValue) { _blockDb.Set(LowestInsertedBodyNumberDbEntryAddress, Rlp.Encode(value.Value).Bytes); } } } public long BestKnownNumber { get; private set; } public int ChainId => _specProvider.ChainId; private int _canAcceptNewBlocksCounter = 0; public bool CanAcceptNewBlocks => _canAcceptNewBlocksCounter == 0; public BlockTree( IDb blockDb, IDb headerDb, IDb blockInfoDb, IChainLevelInfoRepository chainLevelInfoRepository, ISpecProvider specProvider, ITxPool txPool, IBloomStorage bloomStorage, ILogManager logManager) : this(blockDb, headerDb, blockInfoDb, chainLevelInfoRepository, specProvider, txPool, bloomStorage, new SyncConfig(), logManager) { } public BlockTree( IDb blockDb, IDb headerDb, IDb blockInfoDb, IChainLevelInfoRepository chainLevelInfoRepository, ISpecProvider specProvider, ITxPool txPool, IBloomStorage bloomStorage, ISyncConfig syncConfig, ILogManager logManager) { _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _blockDb = blockDb ?? throw new ArgumentNullException(nameof(blockDb)); _headerDb = headerDb ?? throw new ArgumentNullException(nameof(headerDb)); _blockInfoDb = blockInfoDb ?? throw new ArgumentNullException(nameof(blockInfoDb)); _specProvider = specProvider; _txPool = txPool ?? throw new ArgumentNullException(nameof(txPool)); _bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(txPool)); _syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig)); _chainLevelInfoRepository = chainLevelInfoRepository ?? throw new ArgumentNullException(nameof(chainLevelInfoRepository)); var deletePointer = _blockInfoDb.Get(DeletePointerAddressInDb); if (deletePointer != null) { DeleteBlocks(new Keccak(deletePointer)); } ChainLevelInfo genesisLevel = LoadLevel(0, true); if (genesisLevel != null) { if (genesisLevel.BlockInfos.Length != 1) { // just for corrupted test bases genesisLevel.BlockInfos = new[] {genesisLevel.BlockInfos[0]}; _chainLevelInfoRepository.PersistLevel(0, genesisLevel); //throw new InvalidOperationException($"Genesis level in DB has {genesisLevel.BlockInfos.Length} blocks"); } if (genesisLevel.BlockInfos[0].WasProcessed) { BlockHeader genesisHeader = FindHeader(genesisLevel.BlockInfos[0].BlockHash, BlockTreeLookupOptions.None); Genesis = genesisHeader; LoadHeadBlockAtStart(); } RecalculateTreeLevels(); } if (_logger.IsInfo) _logger.Info($"Block tree initialized, " + $"last processed is {Head?.Header?.ToString(BlockHeader.Format.Short) ?? "0"}, " + $"best queued is {BestSuggestedHeader?.Number.ToString() ?? "0"}, " + $"best known is {BestKnownNumber}, " + $"lowest inserted header {LowestInsertedHeader?.Number}, " + $"body {LowestInsertedBodyNumber}"); ThisNodeInfo.AddInfo("Chain ID :", $"{Nethermind.Core.ChainId.GetChainName(ChainId)}"); ThisNodeInfo.AddInfo("Chain head :", $"{Head?.Header?.ToString(BlockHeader.Format.Short) ?? "0"}"); } private void RecalculateTreeLevels() { if (_syncConfig.BeamSyncFixMode) { BestKnownNumber = Head.Number; BestSuggestedBody = Head; BestSuggestedHeader = Head.Header; return; } LoadLowestInsertedBodyNumber(); LoadLowestInsertedHeader(); LoadBestKnown(); } private void LoadLowestInsertedBodyNumber() { LowestInsertedBodyNumber = _blockDb.Get(LowestInsertedBodyNumberDbEntryAddress)? 
.AsRlpValueContext().DecodeLong(); } private void LoadLowestInsertedHeader() { long left = 1L; long right = _syncConfig.PivotNumberParsed; bool HasLevel(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); return level != null; } long? lowestInsertedHeader = BinarySearchBlockNumber(left, right, HasLevel, BinarySearchDirection.Down); if (lowestInsertedHeader != null) { ChainLevelInfo level = LoadLevel(lowestInsertedHeader.Value); BlockInfo blockInfo = level.BlockInfos[0]; LowestInsertedHeader = FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None); } } private void LoadBestKnown() { long left = (Head?.Number ?? 0) == 0 ? Math.Max(_syncConfig.PivotNumberParsed, LowestInsertedHeader?.Number ?? 0) - 1 : Head.Number; long right = Math.Max(0, left) + BestKnownSearchLimit; bool LevelExists(long blockNumber) { return LoadLevel(blockNumber) != null; } bool HeaderExists(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { return false; } foreach (BlockInfo blockInfo in level.BlockInfos) { if (FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None) != null) { return true; } } return false; } bool BodyExists(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { return false; } foreach (BlockInfo blockInfo in level.BlockInfos) { if (FindBlock(blockInfo.BlockHash, BlockTreeLookupOptions.None) != null) { return true; } } return false; } long bestKnownNumberFound = BinarySearchBlockNumber(1, left, LevelExists) ?? 0; long bestKnownNumberAlternative = BinarySearchBlockNumber(left, right, LevelExists) ?? 0; long bestSuggestedHeaderNumber = BinarySearchBlockNumber(1, left, HeaderExists) ?? 0; long bestSuggestedHeaderNumberAlternative = BinarySearchBlockNumber(left, right, HeaderExists) ?? 0; long bestSuggestedBodyNumber = BinarySearchBlockNumber(1, left, BodyExists) ?? 0; long bestSuggestedBodyNumberAlternative = BinarySearchBlockNumber(left, right, BodyExists) ?? 0; if(_logger.IsInfo) _logger.Info("Numbers resolved, " + $"level = Max({bestKnownNumberFound}, {bestKnownNumberAlternative}), " + $"header = Max({bestSuggestedHeaderNumber}, {bestSuggestedHeaderNumberAlternative}), " + $"body = Max({bestSuggestedBodyNumber}, {bestSuggestedBodyNumberAlternative})"); BestKnownNumber = Math.Max(bestKnownNumberFound, bestKnownNumberAlternative); bestSuggestedHeaderNumber = Math.Max(bestSuggestedHeaderNumber, bestSuggestedHeaderNumberAlternative); bestSuggestedBodyNumber = Math.Max(bestSuggestedBodyNumber, bestSuggestedBodyNumberAlternative); if (BestKnownNumber < 0 || bestSuggestedHeaderNumber < 0 || bestSuggestedBodyNumber < 0 || bestSuggestedHeaderNumber < bestSuggestedBodyNumber) { throw new InvalidDataException($"Invalid initial block tree state loaded - " + $"best known: {BestKnownNumber}|" + $"best header: {bestSuggestedHeaderNumber}|" + $"best body: {bestSuggestedBodyNumber}|"); } BestSuggestedHeader = FindHeader(bestSuggestedHeaderNumber, BlockTreeLookupOptions.None); var bestSuggestedBodyHeader = FindHeader(bestSuggestedBodyNumber, BlockTreeLookupOptions.None); BestSuggestedBody = bestSuggestedBodyHeader == null ? null : FindBlock(bestSuggestedBodyHeader.Hash, BlockTreeLookupOptions.None); } private enum BinarySearchDirection { Up, Down } private static long? BinarySearchBlockNumber(long left, long right, Func<long, bool> isBlockFound, BinarySearchDirection direction = BinarySearchDirection.Up) { if (left > right) { return null; } long? 
result = null; while (left != right) { long index = direction == BinarySearchDirection.Up ? left + (right - left) / 2 : right - (right - left) / 2; if (isBlockFound(index)) { result = index; if (direction == BinarySearchDirection.Up) { left = index + 1; } else { right = index - 1; } } else { if (direction == BinarySearchDirection.Up) { right = index; } else { left = index; } } } if (isBlockFound(left)) { result = direction == BinarySearchDirection.Up ? left : right; } return result; } public AddBlockResult Insert(BlockHeader header) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (header.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } if (header.TotalDifficulty == null) { SetTotalDifficulty(header); } // validate hash here // using previously received header RLPs would allows us to save 2GB allocations on a sample // 3M Goerli blocks fast sync Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0); ChainLevelInfo chainLevel = new ChainLevelInfo(true, blockInfo); _chainLevelInfoRepository.PersistLevel(header.Number, chainLevel); _bloomStorage.Store(header.Number, header.Bloom); if (header.Number < (LowestInsertedHeader?.Number ?? long.MaxValue)) { LowestInsertedHeader = header; } if (header.Number > BestKnownNumber) { BestKnownNumber = header.Number; } if (header.Number > BestSuggestedHeader.Number) { BestSuggestedHeader = header; } return AddBlockResult.Added; } public AddBlockResult Insert(Block block) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (block.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } // if we carry Rlp from the network message all the way here then we could solve 4GB of allocations and some processing // by avoiding encoding back to RLP here (allocations measured on a sample 3M blocks Goerli fast sync Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); return AddBlockResult.Added; } public void Insert(IEnumerable<Block> blocks) { lock (_batchInsertLock) { try { // _blockDb.StartBatch(); foreach (Block block in blocks) { Insert(block); } } finally { // _blockDb.CommitBatch(); } } } private AddBlockResult Suggest(Block block, BlockHeader header, bool shouldProcess = true) { #if DEBUG /* this is just to make sure that we do not fall into this trap when creating tests */ if (header.StateRoot == null && !header.IsGenesis) { throw new InvalidDataException($"State root is null in {header.ToString(BlockHeader.Format.Short)}"); } #endif if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } HashSet<Keccak> invalidBlocksWithThisNumber = _invalidBlocks.Get(header.Number); if (invalidBlocksWithThisNumber?.Contains(header.Hash) ?? false) { return AddBlockResult.InvalidBlock; } bool isKnown = IsKnownBlock(header.Number, header.Hash); if (isKnown && (BestSuggestedHeader?.Number ?? 
0) >= header.Number) { if (_logger.IsTrace) { _logger.Trace($"Block {header.Hash} already known."); } return AddBlockResult.AlreadyKnown; } if (!header.IsGenesis && !IsKnownBlock(header.Number - 1, header.ParentHash)) { if (_logger.IsTrace) { _logger.Trace($"Could not find parent ({header.ParentHash}) of block {header.Hash}"); } return AddBlockResult.UnknownParent; } SetTotalDifficulty(header); if (block != null && !isKnown) { Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); } if (!isKnown) { Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0); UpdateOrCreateLevel(header.Number, blockInfo, !shouldProcess); } if (header.IsGenesis || header.TotalDifficulty > (BestSuggestedHeader?.TotalDifficulty ?? 0)) { if (header.IsGenesis) { Genesis = header; } BestSuggestedHeader = header; if (block != null && shouldProcess) { BestSuggestedBody = block; NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block)); } } return AddBlockResult.Added; } public AddBlockResult SuggestHeader(BlockHeader header) { return Suggest(null, header); } public AddBlockResult SuggestBlock(Block block, bool shouldProcess = true) { if (Genesis == null && !block.IsGenesis) { throw new InvalidOperationException("Block tree should be initialized with genesis before suggesting other blocks."); } return Suggest(block, block.Header, shouldProcess); } public BlockHeader FindHeader(long number, BlockTreeLookupOptions options) { Keccak blockHash = GetBlockHashOnMainOrBestDifficultyHash(number); return blockHash == null ? null : FindHeader(blockHash, options); } public Keccak FindBlockHash(long blockNumber) => GetBlockHashOnMainOrBestDifficultyHash(blockNumber); public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options) { if (blockHash == null || blockHash == Keccak.Zero) { // TODO: would be great to check why this is still needed (maybe it is something archaic) return null; } BlockHeader header = _headerDb.Get(blockHash, _headerDecoder, _headerCache, false); if (header == null) { return null; } bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && header.TotalDifficulty == null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(header.Number, header.Hash); if (level == null || blockInfo == null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? SetTotalDifficulty(header); blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty.Value); UpdateOrCreateLevel(header.Number, blockInfo); (_, level) = LoadInfo(header.Number, header.Hash); } else { header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true; header = isMain ? 
header : null; } } if (header != null && ShouldCache(header.Number)) { _headerCache.Set(blockHash, header); } return header; } /// <returns> /// If level has a block on the main chain then returns the block info,otherwise <value>null</value> /// </returns> public BlockInfo FindCanonicalBlockInfo(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { return null; } if (level.HasBlockOnMainChain) { BlockInfo blockInfo = level.BlockInfos[0]; blockInfo.BlockNumber = blockNumber; return blockInfo; } return null; } public Keccak FindHash(long number) { return GetBlockHashOnMainOrBestDifficultyHash(number); } public BlockHeader[] FindHeaders(Keccak blockHash, int numberOfBlocks, int skip, bool reverse) { if (numberOfBlocks == 0) { return Array.Empty<BlockHeader>(); } if (blockHash == null) { return new BlockHeader[numberOfBlocks]; } BlockHeader startHeader = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (startHeader == null) { return new BlockHeader[numberOfBlocks]; } if (numberOfBlocks == 1) { return new[] {startHeader}; } if (skip == 0) { /* if we do not skip and we have the last block then we can assume that all the blocks are there and we can use the fact that we can use parent hash and that searching by hash is much faster as it does not require the step of resolving number -> hash */ BlockHeader endHeader = FindHeader(startHeader.Number + numberOfBlocks - 1, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (endHeader != null) { return FindHeadersReversedFull(endHeader, numberOfBlocks); } } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int directionMultiplier = reverse ? -1 : 1; int responseIndex = 0; do { result[responseIndex] = current; responseIndex++; long nextNumber = startHeader.Number + directionMultiplier * (responseIndex * skip + responseIndex); if (nextNumber < 0) { break; } current = FindHeader(nextNumber, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current != null && responseIndex < numberOfBlocks); return result; } private BlockHeader[] FindHeadersReversedFull(BlockHeader startHeader, int numberOfBlocks) { if (startHeader == null) throw new ArgumentNullException(nameof(startHeader)); if (numberOfBlocks == 1) { return new[] {startHeader}; } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int responseIndex = numberOfBlocks - 1; do { result[responseIndex] = current; responseIndex--; if (responseIndex < 0) { break; } current = this.FindParentHeader(current, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current != null && responseIndex < numberOfBlocks); return result; } public BlockHeader FindLowestCommonAncestor(BlockHeader firstDescendant, BlockHeader secondDescendant, long maxSearchDepth) { if (firstDescendant.Number > secondDescendant.Number) { firstDescendant = GetAncestorAtNumber(firstDescendant, secondDescendant.Number); } else if (secondDescendant.Number > firstDescendant.Number) { secondDescendant = GetAncestorAtNumber(secondDescendant, firstDescendant.Number); } long currentSearchDepth = 0; while (firstDescendant.Hash != secondDescendant.Hash) { if (currentSearchDepth >= maxSearchDepth) return null; firstDescendant = this.FindParentHeader(firstDescendant, BlockTreeLookupOptions.TotalDifficultyNotNeeded); secondDescendant = this.FindParentHeader(secondDescendant, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } return firstDescendant; } private BlockHeader GetAncestorAtNumber(BlockHeader 
header, long number) { if (header.Number >= number) return header; while (header.Number < number) { header = this.FindParentHeader(header, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } return header; } private Keccak GetBlockHashOnMainOrBestDifficultyHash(long blockNumber) { if (blockNumber < 0) { throw new ArgumentException($"{nameof(blockNumber)} must be greater or equal zero and is {blockNumber}", nameof(blockNumber)); } ChainLevelInfo level = LoadLevel(blockNumber); if (level == null) { return null; } if (level.HasBlockOnMainChain) { return level.BlockInfos[0].BlockHash; } UInt256 bestDifficultySoFar = UInt256.Zero; Keccak bestHash = null; for (int i = 0; i < level.BlockInfos.Length; i++) { BlockInfo current = level.BlockInfos[i]; if (level.BlockInfos[i].TotalDifficulty > bestDifficultySoFar) { bestDifficultySoFar = current.TotalDifficulty; bestHash = current.BlockHash; } } return bestHash; } public Block FindBlock(long blockNumber, BlockTreeLookupOptions options) { Keccak hash = GetBlockHashOnMainOrBestDifficultyHash(blockNumber); return FindBlock(hash, options); } public void DeleteInvalidBlock(Block invalidBlock) { if (_logger.IsDebug) _logger.Debug($"Deleting invalid block {invalidBlock.ToString(Block.Format.FullHashAndNumber)}"); var invalidBlocksWithThisNumber = _invalidBlocks.Get(invalidBlock.Number) ?? new HashSet<Keccak>(); invalidBlocksWithThisNumber.Add(invalidBlock.Hash); _invalidBlocks.Set(invalidBlock.Number, invalidBlocksWithThisNumber); BestSuggestedHeader = Head?.Header; BestSuggestedBody = Head; BlockAcceptingNewBlocks(); try { DeleteBlocks(invalidBlock.Hash); } finally { ReleaseAcceptingNewBlocks(); } } private void DeleteBlocks(Keccak deletePointer) { BlockHeader deleteHeader = FindHeader(deletePointer, BlockTreeLookupOptions.TotalDifficultyNotNeeded); long currentNumber = deleteHeader.Number; Keccak currentHash = deleteHeader.Hash; Keccak nextHash = null; ChainLevelInfo nextLevel = null; using var batch = _chainLevelInfoRepository.StartBatch(); while (true) { ChainLevelInfo currentLevel = nextLevel ?? 
LoadLevel(currentNumber); nextLevel = LoadLevel(currentNumber + 1); bool shouldRemoveLevel = false; if (currentLevel != null) // preparing update of the level (removal of the invalid branch block) { if (currentLevel.BlockInfos.Length == 1) { shouldRemoveLevel = true; } else { currentLevel.BlockInfos = currentLevel.BlockInfos.Where(bi => bi.BlockHash != currentHash).ToArray(); } } // just finding what the next descendant will be if (nextLevel != null) { nextHash = FindChild(nextLevel, currentHash); } UpdateDeletePointer(nextHash); if (shouldRemoveLevel) { BestKnownNumber = Math.Min(BestKnownNumber, currentNumber - 1); _chainLevelInfoRepository.Delete(currentNumber, batch); } else { _chainLevelInfoRepository.PersistLevel(currentNumber, currentLevel, batch); } if (_logger.IsInfo) _logger.Info($"Deleting invalid block {currentHash} at level {currentNumber}"); _blockCache.Delete(currentHash); _blockDb.Delete(currentHash); _headerCache.Delete(currentHash); _headerDb.Delete(currentHash); if (nextHash == null) { break; } currentNumber++; currentHash = nextHash; nextHash = null; } } private Keccak FindChild(ChainLevelInfo level, Keccak parentHash) { Keccak childHash = null; for (int i = 0; i < level.BlockInfos.Length; i++) { BlockHeader potentialChild = FindHeader(level.BlockInfos[i].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (potentialChild.ParentHash == parentHash) { childHash = potentialChild.Hash; break; } } return childHash; } public bool IsMainChain(BlockHeader blockHeader) { ChainLevelInfo chainLevelInfo = LoadLevel(blockHeader.Number); bool isMain = chainLevelInfo.MainChainBlock?.BlockHash.Equals(blockHeader.Hash) == true; return isMain; } public bool IsMainChain(Keccak blockHash) { BlockHeader header = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (header == null) { throw new InvalidOperationException($"Not able to retrieve block number for an unknown block {blockHash}"); } return IsMainChain(header); } public bool WasProcessed(long number, Keccak blockHash) { ChainLevelInfo levelInfo = LoadLevel(number); int? index = FindIndex(blockHash, levelInfo); if (index == null) { throw new InvalidOperationException($"Not able to find block {blockHash} index on the chain level"); } return levelInfo.BlockInfos[index.Value].WasProcessed; } public void UpdateMainChain(Block[] processedBlocks, bool wereProcessed) { if (processedBlocks.Length == 0) { return; } bool ascendingOrder = true; if (processedBlocks.Length > 1) { if (processedBlocks[^1].Number < processedBlocks[0].Number) { ascendingOrder = false; } } #if DEBUG for (int i = 0; i < processedBlocks.Length; i++) { if (i != 0) { if (ascendingOrder && processedBlocks[i].Number != processedBlocks[i - 1].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } if (!ascendingOrder && processedBlocks[i - 1].Number != processedBlocks[i].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } } } #endif long lastNumber = ascendingOrder ? processedBlocks[^1].Number : processedBlocks[0].Number; long previousHeadNumber = Head?.Number ?? 
0L; using BatchWrite batch = _chainLevelInfoRepository.StartBatch(); if (previousHeadNumber > lastNumber) { for (long i = 0; i < previousHeadNumber - lastNumber; i++) { long levelNumber = previousHeadNumber - i; ChainLevelInfo level = LoadLevel(levelNumber); level.HasBlockOnMainChain = false; _chainLevelInfoRepository.PersistLevel(levelNumber, level, batch); } } for (int i = 0; i < processedBlocks.Length; i++) { Block block = processedBlocks[i]; if (ShouldCache(block.Number)) { _blockCache.Set(block.Hash, processedBlocks[i]); _headerCache.Set(block.Hash, block.Header); } MoveToMain(processedBlocks[i], batch, wereProcessed); } } [Todo(Improve.MissingFunctionality, "Recalculate bloom storage on reorg.")] private void MoveToMain(Block block, BatchWrite batch, bool wasProcessed) { ChainLevelInfo level = LoadLevel(block.Number); int? index = FindIndex(block.Hash, level); if (index == null) { throw new InvalidOperationException($"Cannot move unknown block {block.ToString(Block.Format.FullHashAndNumber)} to main"); } Keccak hashOfThePreviousMainBlock = level.MainChainBlock?.BlockHash; BlockInfo info = level.BlockInfos[index.Value]; info.WasProcessed = wasProcessed; if (index.Value != 0) { (level.BlockInfos[index.Value], level.BlockInfos[0]) = (level.BlockInfos[0], level.BlockInfos[index.Value]); } level.HasBlockOnMainChain = true; _chainLevelInfoRepository.PersistLevel(block.Number, level, batch); _bloomStorage.Store(block.Number, block.Bloom); BlockAddedToMain?.Invoke(this, new BlockEventArgs(block)); if (block.IsGenesis || block.TotalDifficulty > (Head?.TotalDifficulty ?? 0)) { if (block.Number == 0) { Genesis = block.Header; } if (block.TotalDifficulty == null) { throw new InvalidOperationException("Head block with null total difficulty"); } if (wasProcessed) { UpdateHeadBlock(block); } } for (int i = 0; i < block.Transactions.Length; i++) { _txPool.RemoveTransaction(block.Transactions[i].Hash, block.Number); } // the hash will only be the same during perf test runs / modified DB states if (hashOfThePreviousMainBlock != null && hashOfThePreviousMainBlock != block.Hash) { Block previous = FindBlock(hashOfThePreviousMainBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded); bool isEip155Enabled = _specProvider.GetSpec(previous.Number).IsEip155Enabled; for (int i = 0; i < previous?.Transactions.Length; i++) { Transaction tx = previous.Transactions[i]; _txPool.AddTransaction(tx, isEip155Enabled ? TxHandlingOptions.None : TxHandlingOptions.PreEip155Signing); } } if (_logger.IsTrace) _logger.Trace($"Block {block.ToString(Block.Format.Short)} added to main chain"); } private void LoadHeadBlockAtStart() { byte[] data = _blockInfoDb.Get(HeadAddressInDb); if (data != null) { Block headBlock = FindBlock(new Keccak(data), BlockTreeLookupOptions.None); ChainLevelInfo level = LoadLevel(headBlock.Number); int? 
index = FindIndex(headBlock.Hash, level); if (!index.HasValue) { throw new InvalidDataException("Head block data missing from chain info"); } headBlock.Header.TotalDifficulty = level.BlockInfos[index.Value].TotalDifficulty; Head = headBlock; } } public bool IsKnownBlock(long number, Keccak blockHash) { if (number > BestKnownNumber) { return false; } // IsKnownBlock will be mainly called when new blocks are incoming // and these are very likely to be all at the head of the chain if (blockHash == Head?.Hash) { return true; } if (_headerCache.Get(blockHash) != null) { return true; } ChainLevelInfo level = LoadLevel(number); return level != null && FindIndex(blockHash, level).HasValue; } private void UpdateDeletePointer(Keccak hash) { if (hash == null) { _blockInfoDb.Delete(DeletePointerAddressInDb); } else { if (_logger.IsInfo) _logger.Info($"Deleting an invalid block or its descendant {hash}"); _blockInfoDb.Set(DeletePointerAddressInDb, hash.Bytes); } } public void UpdateHeadBlock(Keccak blockHash) { _blockInfoDb.Set(HeadAddressInDb, Head.Hash.Bytes); } private void UpdateHeadBlock(Block block) { if (block.IsGenesis) { Genesis = block.Header; } Head = block; _blockInfoDb.Set(HeadAddressInDb, Head.Hash.Bytes); NewHeadBlock?.Invoke(this, new BlockEventArgs(block)); } private void UpdateOrCreateLevel(long number, BlockInfo blockInfo, bool setAsMain = false) { using (var batch = _chainLevelInfoRepository.StartBatch()) { ChainLevelInfo level = LoadLevel(number, false); if (level != null) { BlockInfo[] blockInfos = level.BlockInfos; Array.Resize(ref blockInfos, blockInfos.Length + 1); if (setAsMain) { blockInfos[^1] = blockInfos[0]; blockInfos[0] = blockInfo; } else { blockInfos[^1] = blockInfo; } level.BlockInfos = blockInfos; } else { if (number > BestKnownNumber) { BestKnownNumber = number; } level = new ChainLevelInfo(false, new[] {blockInfo}); } if (setAsMain) { level.HasBlockOnMainChain = true; } _chainLevelInfoRepository.PersistLevel(number, level, batch); } } private (BlockInfo Info, ChainLevelInfo Level) LoadInfo(long number, Keccak blockHash) { ChainLevelInfo chainLevelInfo = LoadLevel(number); if (chainLevelInfo == null) { return (null, null); } int? index = FindIndex(blockHash, chainLevelInfo); return index.HasValue ? (chainLevelInfo.BlockInfos[index.Value], chainLevelInfo) : (null, chainLevelInfo); } private int? 
FindIndex(Keccak blockHash, ChainLevelInfo level) { for (int i = 0; i < level.BlockInfos.Length; i++) { if (level.BlockInfos[i].BlockHash.Equals(blockHash)) { return i; } } return null; } private ChainLevelInfo LoadLevel(long number, bool forceLoad = true) { if (number > BestKnownNumber && !forceLoad) { return null; } return _chainLevelInfoRepository.LoadLevel(number); } /// <summary> /// To make cache useful even when we handle sync requests /// </summary> /// <param name="number"></param> /// <returns></returns> private bool ShouldCache(long number) { return number == 0L || Head == null || number > Head.Number - CacheSize && number <= Head.Number + 1; } public ChainLevelInfo FindLevel(long number) { return _chainLevelInfoRepository.LoadLevel(number); } public Keccak HeadHash => Head?.Hash; public Keccak GenesisHash => Genesis?.Hash; public Keccak PendingHash => Head?.Hash; public Block FindBlock(Keccak blockHash, BlockTreeLookupOptions options) { if (blockHash == null || blockHash == Keccak.Zero) { return null; } Block block = _blockDb.Get(blockHash, _blockDecoder, _blockCache, false); if (block == null) { return null; } bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && block.TotalDifficulty == null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(block.Number, block.Hash); if (level == null || blockInfo == null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? SetTotalDifficulty(block.Header); blockInfo = new BlockInfo(block.Hash, block.TotalDifficulty.Value); UpdateOrCreateLevel(block.Number, blockInfo); (_, level) = LoadInfo(block.Number, block.Hash); } else { block.Header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true; block = isMain ? block : null; } } if (block != null && ShouldCache(block.Number)) { _blockCache.Set(blockHash, block); _headerCache.Set(blockHash, block.Header); } return block; } private void SetTotalDifficulty(BlockHeader header) { if (header.TotalDifficulty != null) { return; } if (_logger.IsTrace) { _logger.Trace($"Calculating total difficulty for {header}"); } if (header.Number == 0) { header.TotalDifficulty = header.Difficulty; } else { BlockHeader parentHeader = this.FindParentHeader(header, BlockTreeLookupOptions.None); if (parentHeader == null) { throw new InvalidOperationException($"An orphaned block on the chain {header}"); } if (parentHeader.TotalDifficulty == null) { throw new InvalidOperationException( $"Parent's {nameof(parentHeader.TotalDifficulty)} unknown when calculating for {header}"); } header.TotalDifficulty = parentHeader.TotalDifficulty + header.Difficulty; } if (_logger.IsTrace) { _logger.Trace($"Calculated total difficulty for {header} is {header.TotalDifficulty}"); } } public event EventHandler<BlockEventArgs> BlockAddedToMain; public event EventHandler<BlockEventArgs> NewBestSuggestedBlock; public event EventHandler<BlockEventArgs> NewHeadBlock; /// <summary> /// Can delete a slice of the chain (usually invoked when the chain is corrupted in the DB). /// This will only allow to delete a slice starting somewhere before the head of the chain /// and ending somewhere after the head (in case there are some hanging levels later). 
/// </summary> /// <param name="startNumber">Start level of the slice to delete</param> /// <param name="endNumber">End level of the slice to delete</param> /// <exception cref="ArgumentException">Thrown when <paramref name="startNumber"/> ot <paramref name="endNumber"/> do not satisfy the slice position rules</exception> public int DeleteChainSlice(in long startNumber, long? endNumber) { int deleted = 0; endNumber ??= BestKnownNumber; if (endNumber - startNumber < 0) { throw new ArgumentException("Start number must be equal or greater end number.", nameof(startNumber)); } if (endNumber - startNumber > 50000) { throw new ArgumentException($"Cannot delete that many blocks at once (start: {startNumber}, end {endNumber}).", nameof(startNumber)); } if (startNumber < 1) { throw new ArgumentException("Start number must be strictly greater than 0", nameof(startNumber)); } Block newHeadBlock = null; // we are running these checks before all the deletes if (Head.Number >= startNumber) { // greater than zero so will not fail ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(startNumber - 1); // there may be no canonical block marked on this level - then we just hack to genesis Keccak newHeadHash = chainLevelInfo.HasBlockOnMainChain ? chainLevelInfo.BlockInfos[0].BlockHash : Genesis.Hash; newHeadBlock = FindBlock(newHeadHash, BlockTreeLookupOptions.None); } using (_chainLevelInfoRepository.StartBatch()) { for (long i = endNumber.Value; i >= startNumber; i--) { ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(i); if (chainLevelInfo == null) { continue; } _chainLevelInfoRepository.Delete(i); deleted++; foreach (BlockInfo blockInfo in chainLevelInfo.BlockInfos) { Keccak blockHash = blockInfo.BlockHash; _blockInfoDb.Delete(blockHash); _blockDb.Delete(blockHash); _headerDb.Delete(blockHash); } } } if (newHeadBlock != null) { UpdateHeadBlock(newHeadBlock); } return deleted; } internal void BlockAcceptingNewBlocks() { Interlocked.Increment(ref _canAcceptNewBlocksCounter); } internal void ReleaseAcceptingNewBlocks() { Interlocked.Decrement(ref _canAcceptNewBlocksCounter); } } }
1
24,306
why not recycling? Is LruCache now recycling?
NethermindEth-nethermind
.cs
@@ -69,7 +69,7 @@ func (consumer *createConsumer) Consume(requestPtr interface{}) (response interf issuerID := consumer.peerID if request.ConsumerInfo != nil { issuerID = request.ConsumerInfo.IssuerID - if request.ConsumerInfo.PaymentVersion == PaymentVersionV2 { + if request.ConsumerInfo.PaymentVersion == PaymentVersionV3 { indicateNewVersion = true } } else {
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package session import ( "encoding/json" "github.com/mysteriumnetwork/node/communication" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/nat/traversal" "github.com/mysteriumnetwork/node/session/promise" ) // PromiseLoader loads the last known promise info for the given consumer type PromiseLoader interface { LoadPaymentInfo(consumerID, receiverID, issuerID identity.Identity) *promise.PaymentInfo } // createConsumer processes session create requests from communication channel. type createConsumer struct { sessionCreator Creator receiverID identity.Identity peerID identity.Identity configProvider ConfigProvider promiseLoader PromiseLoader } // Creator defines method for session creation type Creator interface { Create(consumerID identity.Identity, consumerInfo ConsumerInfo, proposalID int, config ServiceConfiguration, pingerPrams *traversal.Params) (Session, error) } // GetMessageEndpoint returns endpoint there to receive messages func (consumer *createConsumer) GetRequestEndpoint() communication.RequestEndpoint { return endpointSessionCreate } // NewRequest creates struct where request from endpoint will be serialized func (consumer *createConsumer) NewRequest() (requestPtr interface{}) { var request CreateRequest return &request } // Consume handles requests from endpoint and replies with response func (consumer *createConsumer) Consume(requestPtr interface{}) (response interface{}, err error) { request := requestPtr.(*CreateRequest) sessionConfigParams, err := consumer.configProvider(request.Config, &traversal.Params{}) if err != nil { return responseInternalError, err } var indicateNewVersion bool issuerID := consumer.peerID if request.ConsumerInfo != nil { issuerID = request.ConsumerInfo.IssuerID if request.ConsumerInfo.PaymentVersion == PaymentVersionV2 { indicateNewVersion = true } } else { request.ConsumerInfo = &ConsumerInfo{ IssuerID: issuerID, } } sessionConfigParams.TraversalParams.RequestConfig = request.Config sessionConfigParams.TraversalParams.Cancel = make(chan struct{}) sessionInstance, err := consumer.sessionCreator.Create(consumer.peerID, *request.ConsumerInfo, request.ProposalID, sessionConfigParams.SessionServiceConfig, sessionConfigParams.TraversalParams) switch err { case nil: if sessionConfigParams.SessionDestroyCallback != nil { go func() { <-sessionInstance.done sessionConfigParams.SessionDestroyCallback() }() } return responseWithSession(sessionInstance, sessionConfigParams.SessionServiceConfig, consumer.promiseLoader.LoadPaymentInfo(consumer.peerID, consumer.receiverID, issuerID), indicateNewVersion), nil case ErrorInvalidProposal: return responseInvalidProposal, nil default: return responseInternalError, nil } } func responseWithSession(sessionInstance Session, config ServiceConfiguration, pi *promise.PaymentInfo, indicateNewVersion bool) 
CreateResponse { serializedConfig, err := json.Marshal(config) if err != nil { // Failed to serialize session // TODO Cant expose error to response, some logging should be here return responseInternalError } // let the consumer know we'll support the new payments if indicateNewVersion { pi.Supports = string(PaymentVersionV2) } return CreateResponse{ Success: true, Session: SessionDto{ ID: sessionInstance.ID, Config: serializedConfig, }, PaymentInfo: pi, } }
1
15,050
Why is it now called `PaymentVersionV3`?
mysteriumnetwork-node
go
@@ -165,7 +165,7 @@ func (dc *DownstreamController) syncSecret() { } nodes := dc.lc.SecretNodes(secret.Namespace, secret.Name) - klog.Infof("there are %d nodes need to sync secret, operation: %s", len(nodes), e.Type) + klog.V(4).Infof("there are %d nodes need to sync secret, operation: %s", len(nodes), e.Type) for _, n := range nodes { msg := model.NewMessage("") msg.SetResourceVersion(secret.ResourceVersion)
1
package controller import ( "fmt" "strings" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/klog" beehiveContext "github.com/kubeedge/beehive/pkg/core/context" "github.com/kubeedge/beehive/pkg/core/model" "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/config" "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants" "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/manager" "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/messagelayer" "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/utils" common "github.com/kubeedge/kubeedge/common/constants" ) // DownstreamController watch kubernetes api server and send change to edge type DownstreamController struct { kubeClient *kubernetes.Clientset messageLayer messagelayer.MessageLayer podManager *manager.PodManager configmapManager *manager.ConfigMapManager secretManager *manager.SecretManager nodeManager *manager.NodesManager serviceManager *manager.ServiceManager endpointsManager *manager.EndpointsManager lc *manager.LocationCache } func (dc *DownstreamController) syncPod() { for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncPod loop") return case e := <-dc.podManager.Events(): pod, ok := e.Object.(*v1.Pod) if !ok { klog.Warningf("object type: %T unsupported", pod) continue } if !dc.lc.IsEdgeNode(pod.Spec.NodeName) { continue } msg := model.NewMessage("") msg.SetResourceVersion(pod.ResourceVersion) resource, err := messagelayer.BuildResource(pod.Spec.NodeName, pod.Namespace, model.ResourceTypePod, pod.Name) if err != nil { klog.Warningf("built message resource failed with error: %s", err) continue } msg.Content = pod switch e.Type { case watch.Added: msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.InsertOperation) dc.lc.AddOrUpdatePod(*pod) case watch.Deleted: msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.DeleteOperation) case watch.Modified: msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.UpdateOperation) dc.lc.AddOrUpdatePod(*pod) default: klog.Warningf("pod event type: %s unsupported", e.Type) } if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } } } } func (dc *DownstreamController) syncConfigMap() { for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncConfigMap loop") return case e := <-dc.configmapManager.Events(): configMap, ok := e.Object.(*v1.ConfigMap) if !ok { klog.Warningf("object type: %T unsupported", configMap) continue } var operation string switch e.Type { case watch.Added: operation = model.InsertOperation case watch.Modified: operation = model.UpdateOperation case watch.Deleted: operation = model.DeleteOperation dc.lc.DeleteConfigMap(configMap.Namespace, configMap.Name) default: // unsupported operation, no need to send to any node klog.Warningf("config map event type: %s unsupported", e.Type) continue // continue to next select } nodes := dc.lc.ConfigMapNodes(configMap.Namespace, configMap.Name) klog.V(4).Infof("there are %d nodes need to sync config map, operation: %s", 
len(nodes), e.Type) for _, n := range nodes { msg := model.NewMessage("") msg.SetResourceVersion(configMap.ResourceVersion) resource, err := messagelayer.BuildResource(n, configMap.Namespace, model.ResourceTypeConfigmap, configMap.Name) if err != nil { klog.Warningf("build message resource failed with error: %s", err) continue } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, operation) msg.Content = configMap err = dc.messageLayer.Send(*msg) if err != nil { klog.Warningf("send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } } } } } func (dc *DownstreamController) syncSecret() { for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncSecret loop") return case e := <-dc.secretManager.Events(): secret, ok := e.Object.(*v1.Secret) if !ok { klog.Warningf("object type: %T unsupported", secret) continue } var operation string switch e.Type { case watch.Added: // TODO: rollback when all edge upgrade to 2.1.6 or upper fallthrough case watch.Modified: operation = model.UpdateOperation case watch.Deleted: operation = model.DeleteOperation dc.lc.DeleteSecret(secret.Namespace, secret.Name) default: // unsupported operation, no need to send to any node klog.Warningf("secret event type: %s unsupported", e.Type) continue // continue to next select } nodes := dc.lc.SecretNodes(secret.Namespace, secret.Name) klog.Infof("there are %d nodes need to sync secret, operation: %s", len(nodes), e.Type) for _, n := range nodes { msg := model.NewMessage("") msg.SetResourceVersion(secret.ResourceVersion) resource, err := messagelayer.BuildResource(n, secret.Namespace, model.ResourceTypeSecret, secret.Name) if err != nil { klog.Warningf("build message resource failed with error: %s", err) continue } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, operation) msg.Content = secret err = dc.messageLayer.Send(*msg) if err != nil { klog.Warningf("send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } } } } } func (dc *DownstreamController) syncEdgeNodes() { for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncEdgeNodes loop") return case e := <-dc.nodeManager.Events(): node, ok := e.Object.(*v1.Node) if !ok { klog.Warningf("Object type: %T unsupported", node) continue } switch e.Type { case watch.Added: fallthrough case watch.Modified: // When node comes to running, send all the service/endpoints/pods information to edge for _, nsc := range node.Status.Conditions { if nsc.Type == "Ready" { status, ok := dc.lc.GetNodeStatus(node.ObjectMeta.Name) dc.lc.UpdateEdgeNode(node.ObjectMeta.Name, string(nsc.Status)) if nsc.Status == "True" && (!ok || status != "True") { // send all services to edge msg := model.NewMessage("") // TODO: what should in place of namespace and service when we are sending service list ? 
resource, err := messagelayer.BuildResource(node.Name, "namespace", common.ResourceTypeServiceList, "service") if err != nil { klog.Warningf("Built message resource failed with error: %s", err) break } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.UpdateOperation) svcs := dc.lc.GetAllServices() msg.Content = svcs if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } for _, svc := range svcs { pods, ok := dc.lc.GetServicePods(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) if ok { msg := model.NewMessage("") resource, err := messagelayer.BuildResource(node.Name, svc.Namespace, model.ResourceTypePodlist, svc.Name) if err != nil { klog.Warningf("Built message resource failed with error: %v", err) continue } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.UpdateOperation) msg.Content = pods if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } } } // send all endpoints to edge msg = model.NewMessage("") // TODO: what should in place of namespace and endpoints when we are sending endpoints list ? resource, err = messagelayer.BuildResource(node.Name, "namespace", common.ResourceTypeEndpointsList, "endpoints") if err != nil { klog.Warningf("Built message resource failed with error: %s", err) break } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.UpdateOperation) msg.Content = dc.lc.GetAllEndpoints() if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } } break } } case watch.Deleted: dc.lc.DeleteNode(node.ObjectMeta.Name) default: // unsupported operation, no need to send to any node klog.Warningf("Node event type: %s unsupported", e.Type) } } } } func (dc *DownstreamController) syncService() { var operation string for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncService loop") return case e := <-dc.serviceManager.Events(): svc, ok := e.Object.(*v1.Service) if !ok { klog.Warningf("Object type: %T unsupported", svc) continue } switch e.Type { case watch.Added: dc.lc.AddOrUpdateService(*svc) operation = model.InsertOperation case watch.Modified: dc.lc.AddOrUpdateService(*svc) operation = model.UpdateOperation case watch.Deleted: dc.lc.DeleteService(*svc) operation = model.DeleteOperation default: // unsupported operation, no need to send to any node klog.Warningf("Service event type: %s unsupported", e.Type) continue } // send to all nodes dc.lc.EdgeNodes.Range(func(key interface{}, value interface{}) bool { nodeName, ok := key.(string) if !ok { klog.Warning("Failed to assert key to sting") return true } msg := model.NewMessage("") msg.SetResourceVersion(svc.ResourceVersion) resource, err := messagelayer.BuildResource(nodeName, svc.Namespace, common.ResourceTypeService, svc.Name) if err != nil { 
klog.Warningf("Built message resource failed with error: %v", err) return true } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, operation) msg.Content = svc if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } return true }) } } } func (dc *DownstreamController) syncEndpoints() { var operation string for { select { case <-beehiveContext.Done(): klog.Warning("Stop edgecontroller downstream syncEndpoints loop") return case e := <-dc.endpointsManager.Events(): eps, ok := e.Object.(*v1.Endpoints) if !ok { klog.Warningf("Object type: %T unsupported", eps) continue } ok = true switch e.Type { case watch.Added: dc.lc.AddOrUpdateEndpoints(*eps) operation = model.InsertOperation case watch.Modified: ok = dc.lc.IsEndpointsUpdated(*eps) dc.lc.AddOrUpdateEndpoints(*eps) operation = model.UpdateOperation case watch.Deleted: dc.lc.DeleteEndpoints(*eps) dc.lc.DeleteServicePods(*eps) operation = model.DeleteOperation default: // unsupported operation, no need to send to any node klog.Warningf("endpoints event type: %s unsupported", e.Type) continue } // send to all nodes if ok { var listOptions metav1.ListOptions var pods *v1.PodList var err error svc, ok := dc.lc.GetService(fmt.Sprintf("%s/%s", eps.Namespace, eps.Name)) if ok { labelSelectorString := "" for key, value := range svc.Spec.Selector { labelSelectorString = labelSelectorString + key + "=" + value + "," } labelSelectorString = strings.TrimSuffix(labelSelectorString, ",") listOptions = metav1.ListOptions{ LabelSelector: labelSelectorString, Limit: 100, } pods, err = dc.kubeClient.CoreV1().Pods(svc.Namespace).List(listOptions) if err == nil { dc.lc.AddOrUpdateServicePods(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name), pods.Items) } } dc.lc.EdgeNodes.Range(func(key interface{}, value interface{}) bool { nodeName, check := key.(string) if !check { klog.Warning("Failed to assert key to sting") return true } msg := model.NewMessage("") msg.SetResourceVersion(eps.ResourceVersion) resource, err := messagelayer.BuildResource(nodeName, eps.Namespace, common.ResourceTypeEndpoints, eps.Name) if err != nil { klog.Warningf("Built message resource failed with error: %s", err) return true } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, operation) msg.Content = eps if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), msg.GetResource()) } if operation != model.DeleteOperation && ok { msg := model.NewMessage("") resource, err := messagelayer.BuildResource(nodeName, svc.Namespace, model.ResourceTypePodlist, svc.Name) if err != nil { klog.Warningf("Built message resource failed with error: %v", err) return true } msg.BuildRouter(constants.EdgeControllerModuleName, constants.GroupResource, resource, model.UpdateOperation) msg.Content = pods.Items if err := dc.messageLayer.Send(*msg); err != nil { klog.Warningf("Send message failed with error: %s, operation: %s, resource: %s", err, msg.GetOperation(), msg.GetResource()) } else { klog.V(4).Infof("Send message successfully, operation: %s, resource: %s", msg.GetOperation(), 
msg.GetResource()) } } return true }) } } } } // Start DownstreamController func (dc *DownstreamController) Start() error { klog.Info("start downstream controller") // pod go dc.syncPod() // configmap go dc.syncConfigMap() // secret go dc.syncSecret() // nodes go dc.syncEdgeNodes() // service go dc.syncService() // endpoints go dc.syncEndpoints() return nil } // initLocating to know configmap and secret should send to which nodes func (dc *DownstreamController) initLocating() error { var ( pods *v1.PodList err error ) set := labels.Set{manager.NodeRoleKey: manager.NodeRoleValue} selector := labels.SelectorFromSet(set) nodes, err := dc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return err } var status string for _, node := range nodes.Items { for _, nsc := range node.Status.Conditions { if nsc.Type == "Ready" { status = string(nsc.Status) break } } dc.lc.UpdateEdgeNode(node.ObjectMeta.Name, status) } if !config.Config.EdgeSiteEnable { pods, err = dc.kubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) } else { selector := fields.OneTermEqualSelector("spec.nodeName", config.Config.NodeName).String() pods, err = dc.kubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{FieldSelector: selector}) } if err != nil { return err } for _, p := range pods.Items { if dc.lc.IsEdgeNode(p.Spec.NodeName) { dc.lc.AddOrUpdatePod(p) } } return nil } // NewDownstreamController create a DownstreamController from config func NewDownstreamController() (*DownstreamController, error) { lc := &manager.LocationCache{} cli, err := utils.KubeClient() if err != nil { klog.Warningf("create kube client failed with error: %s", err) return nil, err } var nodeName = "" if config.Config.EdgeSiteEnable { if config.Config.NodeName == "" { return nil, fmt.Errorf("kubeEdge node name is not provided in edgesite controller configuration") } nodeName = config.Config.NodeName } podManager, err := manager.NewPodManager(cli, v1.NamespaceAll, nodeName) if err != nil { klog.Warningf("create pod manager failed with error: %s", err) return nil, err } configMapManager, err := manager.NewConfigMapManager(cli, v1.NamespaceAll) if err != nil { klog.Warningf("create configmap manager failed with error: %s", err) return nil, err } secretManager, err := manager.NewSecretManager(cli, v1.NamespaceAll) if err != nil { klog.Warningf("create secret manager failed with error: %s", err) return nil, err } nodesManager, err := manager.NewNodesManager(cli, v1.NamespaceAll) if err != nil { klog.Warningf("Create nodes manager failed with error: %s", err) return nil, err } serviceManager, err := manager.NewServiceManager(cli, v1.NamespaceAll) if err != nil { klog.Warningf("Create service manager failed with error: %s", err) return nil, err } endpointsManager, err := manager.NewEndpointsManager(cli, v1.NamespaceAll) if err != nil { klog.Warningf("Create endpoints manager failed with error: %s", err) return nil, err } dc := &DownstreamController{ kubeClient: cli, podManager: podManager, configmapManager: configMapManager, secretManager: secretManager, nodeManager: nodesManager, serviceManager: serviceManager, endpointsManager: endpointsManager, messageLayer: messagelayer.NewContextMessageLayer(), lc: lc, } if err := dc.initLocating(); err != nil { return nil, err } return dc, nil }
1
16,172
Changed for debugging purposes?
kubeedge-kubeedge
go
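The patch above replaces a plain `klog.Infof` with `klog.V(4).Infof`, which only emits the message when the process runs at verbosity 4 or higher — hence the question whether the change was made for debugging. A minimal standalone sketch of that gating (not taken from the kubeedge sources; node names and the `-v` setting are illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	// Pretend the controller was started with -v=4; without this the
	// V(4) line below is silently dropped.
	flag.Set("v", "4")
	flag.Parse()

	nodes := []string{"edge-node-1", "edge-node-2"}

	// Always printed, regardless of verbosity.
	klog.Infof("starting secret sync for %d nodes", len(nodes))

	// Only printed when verbosity >= 4, matching the reviewed change.
	klog.V(4).Infof("there are %d nodes need to sync secret, operation: %s", len(nodes), "MODIFIED")

	klog.Flush()
}
```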
@@ -46,6 +46,13 @@ namespace OpenTelemetry /// <inheritdoc /> public abstract override void OnEnd(T data); + internal override void SetParentProvider(BaseProvider parentProvider) + { + base.SetParentProvider(parentProvider); + + this.exporter.ParentProvider = parentProvider; + } + /// <inheritdoc /> protected override bool OnShutdown(int timeoutMilliseconds) {
1
// <copyright file="BaseExportProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using OpenTelemetry.Internal; namespace OpenTelemetry { /// <summary> /// Implements processor that exports telemetry objects. /// </summary> /// <typeparam name="T">The type of telemetry object to be exported.</typeparam> public abstract class BaseExportProcessor<T> : BaseProcessor<T> where T : class { protected readonly BaseExporter<T> exporter; private bool disposed; /// <summary> /// Initializes a new instance of the <see cref="BaseExportProcessor{T}"/> class. /// </summary> /// <param name="exporter">Exporter instance.</param> public BaseExportProcessor(BaseExporter<T> exporter) { this.exporter = exporter ?? throw new ArgumentNullException(nameof(exporter)); } /// <inheritdoc /> public sealed override void OnStart(T data) { } /// <inheritdoc /> public abstract override void OnEnd(T data); /// <inheritdoc /> protected override bool OnShutdown(int timeoutMilliseconds) { return this.exporter.Shutdown(timeoutMilliseconds); } /// <inheritdoc/> protected override void Dispose(bool disposing) { base.Dispose(disposing); if (disposing && !this.disposed) { try { this.exporter.Dispose(); } catch (Exception ex) { OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.Dispose), ex); } this.disposed = true; } } } }
1
17,887
@cijothomas I needed a way to set the Exporter.ParentProvider when the Processor's parent provider gets set. It's internal to the SDK though.
open-telemetry-opentelemetry-dotnet
.cs
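The comment above explains why the processor has to forward its parent provider to the exporter it owns, which is exactly what the patched `SetParentProvider` override does. A simplified sketch of that propagation pattern — the classes below are hypothetical minimal stand-ins, not the real OpenTelemetry SDK types:

```csharp
// Hypothetical stand-ins for the SDK types, only to show the pattern.
public class Provider { }

public abstract class Processor
{
    public Provider ParentProvider { get; private set; }

    internal virtual void SetParentProvider(Provider parentProvider)
    {
        this.ParentProvider = parentProvider;
    }
}

public class Exporter
{
    public Provider ParentProvider { get; internal set; }
}

public class ExportProcessor : Processor
{
    private readonly Exporter exporter;

    public ExportProcessor(Exporter exporter)
    {
        this.exporter = exporter;
    }

    internal override void SetParentProvider(Provider parentProvider)
    {
        base.SetParentProvider(parentProvider);

        // Without this forwarding step the exporter never learns which
        // provider it belongs to, which is the gap the comment describes.
        this.exporter.ParentProvider = parentProvider;
    }
}
```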
@@ -31,9 +31,13 @@ inline extractor::RoadClassification roadClass(const ConnectedRoad &road, return graph.GetEdgeData(road.eid).flags.road_classification; } -inline bool isRampClass(EdgeID eid, const util::NodeBasedDynamicGraph &node_based_graph) +inline bool isRampClass(EdgeID eid, + const util::NodeBasedDynamicGraph &node_based_graph, + bool from_motorway = true) { - return node_based_graph.GetEdgeData(eid).flags.road_classification.IsRampClass(); + return node_based_graph.GetEdgeData(eid).flags.road_classification.IsRampClass() || + (from_motorway && + node_based_graph.GetEdgeData(eid).flags.road_classification.IsLinkClass()); } } // namespace
1
#include "guidance/motorway_handler.hpp" #include "extractor/road_classification.hpp" #include "guidance/constants.hpp" #include "util/assert.hpp" #include "util/bearing.hpp" #include "util/guidance/name_announcements.hpp" #include <limits> #include <utility> #include <boost/assert.hpp> using osrm::guidance::getTurnDirection; using osrm::util::angularDeviation; namespace osrm { namespace guidance { namespace { inline bool isMotorwayClass(EdgeID eid, const util::NodeBasedDynamicGraph &node_based_graph) { return node_based_graph.GetEdgeData(eid).flags.road_classification.IsMotorwayClass(); } inline extractor::RoadClassification roadClass(const ConnectedRoad &road, const util::NodeBasedDynamicGraph &graph) { return graph.GetEdgeData(road.eid).flags.road_classification; } inline bool isRampClass(EdgeID eid, const util::NodeBasedDynamicGraph &node_based_graph) { return node_based_graph.GetEdgeData(eid).flags.road_classification.IsRampClass(); } } // namespace MotorwayHandler::MotorwayHandler(const util::NodeBasedDynamicGraph &node_based_graph, const extractor::EdgeBasedNodeDataContainer &node_data_container, const std::vector<util::Coordinate> &coordinates, const extractor::CompressedEdgeContainer &compressed_geometries, const extractor::RestrictionMap &node_restriction_map, const std::unordered_set<NodeID> &barrier_nodes, const extractor::TurnLanesIndexedArray &turn_lanes_data, const util::NameTable &name_table, const extractor::SuffixTable &street_name_suffix_table) : IntersectionHandler(node_based_graph, node_data_container, coordinates, compressed_geometries, node_restriction_map, barrier_nodes, turn_lanes_data, name_table, street_name_suffix_table) { } bool MotorwayHandler::canProcess(const NodeID, const EdgeID via_eid, const Intersection &intersection) const { bool has_motorway = false; bool has_normal_roads = false; for (const auto &road : intersection) { // not merging or forking? 
if (road.entry_allowed && angularDeviation(road.angle, STRAIGHT_ANGLE) > 60) return false; else if (isMotorwayClass(road.eid, node_based_graph)) { if (road.entry_allowed) has_motorway = true; } else if (!isRampClass(road.eid, node_based_graph)) has_normal_roads = true; } if (has_normal_roads) return false; return has_motorway || isMotorwayClass(via_eid, node_based_graph); } Intersection MotorwayHandler:: operator()(const NodeID, const EdgeID via_eid, Intersection intersection) const { // coming from motorway if (isMotorwayClass(via_eid, node_based_graph)) { intersection = fromMotorway(via_eid, std::move(intersection)); std::for_each(intersection.begin(), intersection.end(), [](ConnectedRoad &road) { if (road.instruction.type == TurnType::OnRamp) road.instruction.type = TurnType::OffRamp; }); return intersection; } else // coming from a ramp { return fromRamp(via_eid, std::move(intersection)); // ramp merging straight onto motorway } } Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection intersection) const { const auto &in_data = node_data_container.GetAnnotation(node_based_graph.GetEdgeData(via_eid).annotation_data); BOOST_ASSERT(isMotorwayClass(via_eid, node_based_graph)); const auto countExitingMotorways = [this](const Intersection &intersection) { unsigned count = 0; for (const auto &road : intersection) { if (road.entry_allowed && isMotorwayClass(road.eid, node_based_graph)) ++count; } return count; }; // find the angle that continues on our current highway const auto getContinueAngle = [this, in_data](const Intersection &intersection) { for (const auto &road : intersection) { if (!road.entry_allowed) continue; const auto &out_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(road.eid).annotation_data); const auto same_name = !util::guidance::requiresNameAnnounced( in_data.name_id, out_data.name_id, name_table, street_name_suffix_table); if (road.angle != 0 && in_data.name_id != EMPTY_NAMEID && out_data.name_id != EMPTY_NAMEID && same_name && isMotorwayClass(road.eid, node_based_graph)) return road.angle; } return intersection[0].angle; }; const auto getMostLikelyContinue = [this](const Intersection &intersection) { double angle = intersection[0].angle; double best = 180; for (const auto &road : intersection) { if (isMotorwayClass(road.eid, node_based_graph) && angularDeviation(road.angle, STRAIGHT_ANGLE) < best) { best = angularDeviation(road.angle, STRAIGHT_ANGLE); angle = road.angle; } } return angle; }; const auto findBestContinue = [&]() { const double continue_angle = getContinueAngle(intersection); if (continue_angle != intersection[0].angle) return continue_angle; else return getMostLikelyContinue(intersection); }; // find continue angle const double continue_angle = findBestContinue(); // highway does not continue and has no obvious choice if (continue_angle == intersection[0].angle) { if (intersection.size() == 2) { // do not announce ramps at the end of a highway intersection[1].instruction = {TurnType::NoTurn, getTurnDirection(intersection[1].angle)}; } else if (intersection.size() == 3) { // splitting ramp at the end of a highway if (intersection[1].entry_allowed && intersection[2].entry_allowed) { assignFork(via_eid, intersection[2], intersection[1]); } else { // ending in a passing ramp if (intersection[1].entry_allowed) intersection[1].instruction = {TurnType::NoTurn, getTurnDirection(intersection[1].angle)}; else intersection[2].instruction = {TurnType::NoTurn, getTurnDirection(intersection[2].angle)}; } } else if 
(intersection.size() == 4 && roadClass(intersection[1], node_based_graph) == roadClass(intersection[2], node_based_graph) && roadClass(intersection[2], node_based_graph) == roadClass(intersection[3], node_based_graph)) { // tripple fork at the end assignFork(via_eid, intersection[3], intersection[2], intersection[1]); } else if (intersection.countEnterable() > 0) // check whether turns exist at all { // FALLBACK, this should hopefully never be reached return fallback(std::move(intersection)); } } else { const unsigned exiting_motorways = countExitingMotorways(intersection); if (exiting_motorways == 0) { // Ending in Ramp for (auto &road : intersection) { if (road.entry_allowed) { BOOST_ASSERT(isRampClass(road.eid, node_based_graph)); road.instruction = TurnInstruction::SUPPRESSED(getTurnDirection(road.angle)); } } } else if (exiting_motorways == 1) { // normal motorway passing some ramps or mering onto another motorway if (intersection.size() == 2) { BOOST_ASSERT(!isRampClass(intersection[1].eid, node_based_graph)); intersection[1].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection, node_based_graph, node_data_container, name_table, street_name_suffix_table), intersection[1]); } else { // Normal Highway exit or merge for (auto &road : intersection) { // ignore invalid uturns/other if (!road.entry_allowed) continue; if (road.angle == continue_angle) { road.instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection, node_based_graph, node_data_container, name_table, street_name_suffix_table), road); } else if (road.angle < continue_angle) { road.instruction = {isRampClass(road.eid, node_based_graph) ? TurnType::OffRamp : TurnType::Turn, (road.angle < 145) ? DirectionModifier::Right : DirectionModifier::SlightRight}; } else if (road.angle > continue_angle) { road.instruction = {isRampClass(road.eid, node_based_graph) ? TurnType::OffRamp : TurnType::Turn, (road.angle > 215) ? 
DirectionModifier::Left : DirectionModifier::SlightLeft}; } } } } // handle motorway forks else if (exiting_motorways > 1) { if (exiting_motorways == 2) { OSRM_ASSERT(intersection.size() != 2, node_coordinates[node_based_graph.GetTarget(via_eid)]); // standard fork std::size_t first_valid = std::numeric_limits<std::size_t>::max(), second_valid = std::numeric_limits<std::size_t>::max(); for (std::size_t i = 0; i < intersection.size(); ++i) { if (intersection[i].entry_allowed && isMotorwayClass(intersection[i].eid, node_based_graph)) { if (first_valid < intersection.size()) { second_valid = i; break; } else { first_valid = i; } } } assignFork(via_eid, intersection[second_valid], intersection[first_valid]); } else if (exiting_motorways == 3) { // triple fork std::size_t first_valid = std::numeric_limits<std::size_t>::max(), second_valid = std::numeric_limits<std::size_t>::max(), third_valid = std::numeric_limits<std::size_t>::max(); for (std::size_t i = 0; i < intersection.size(); ++i) { if (intersection[i].entry_allowed && isMotorwayClass(intersection[i].eid, node_based_graph)) { if (second_valid < intersection.size()) { third_valid = i; break; } else if (first_valid < intersection.size()) { second_valid = i; } else { first_valid = i; } } } assignFork(via_eid, intersection[third_valid], intersection[second_valid], intersection[first_valid]); } else { return fallback(std::move(intersection)); } } // done for more than one highway exit } return intersection; } Intersection MotorwayHandler::fromRamp(const EdgeID via_eid, Intersection intersection) const { auto num_valid_turns = intersection.countEnterable(); // ramp straight into a motorway/ramp if (intersection.size() == 2 && num_valid_turns == 1) { BOOST_ASSERT(!intersection[0].entry_allowed); BOOST_ASSERT(isMotorwayClass(intersection[1].eid, node_based_graph)); intersection[1].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection, node_based_graph, node_data_container, name_table, street_name_suffix_table), intersection[1]); } else if (intersection.size() == 3) { const auto &second_intersection_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(intersection[2].eid).annotation_data); const auto &first_intersection_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(intersection[1].eid).annotation_data); const auto first_second_same_name = !util::guidance::requiresNameAnnounced(second_intersection_data.name_id, first_intersection_data.name_id, name_table, street_name_suffix_table); // merging onto a passing highway / or two ramps merging onto the same highway if (num_valid_turns == 1) { BOOST_ASSERT(!intersection[0].entry_allowed); // check order of highways // 4 // 5 3 // // 6 2 // // 7 1 // 0 const auto &first_intersection_name_empty = name_table.GetNameForID(first_intersection_data.name_id).empty(); const auto &second_intersection_name_empty = name_table.GetNameForID(second_intersection_data.name_id).empty(); if (intersection[1].entry_allowed) { if (isMotorwayClass(intersection[1].eid, node_based_graph) && !second_intersection_name_empty && !first_intersection_name_empty && first_second_same_name) { // circular order indicates a merge to the left (0-3 onto 4 if (angularDeviation(intersection[1].angle, STRAIGHT_ANGLE) < 2 * NARROW_TURN_ANGLE) intersection[1].instruction = {TurnType::Merge, DirectionModifier::SlightLeft}; else // fallback intersection[1].instruction = {TurnType::Merge, getTurnDirection(intersection[1].angle)}; } else // passing by the end of 
a motorway { intersection[1].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection, node_based_graph, node_data_container, name_table, street_name_suffix_table), intersection[1]); } } else { BOOST_ASSERT(intersection[2].entry_allowed); if (isMotorwayClass(intersection[2].eid, node_based_graph) && !second_intersection_name_empty && !first_intersection_name_empty && first_second_same_name) { // circular order (5-0) onto 4 if (angularDeviation(intersection[2].angle, STRAIGHT_ANGLE) < 2 * NARROW_TURN_ANGLE) intersection[2].instruction = {TurnType::Merge, DirectionModifier::SlightRight}; else // fallback intersection[2].instruction = {TurnType::Merge, getTurnDirection(intersection[2].angle)}; } else // passing the end of a highway { intersection[2].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(2, intersection, node_based_graph, node_data_container, name_table, street_name_suffix_table), intersection[2]); } } } else { BOOST_ASSERT(num_valid_turns == 2); // UTurn on ramps is not possible BOOST_ASSERT(!intersection[0].entry_allowed); BOOST_ASSERT(intersection[1].entry_allowed); BOOST_ASSERT(intersection[2].entry_allowed); // two motorways starting at end of ramp (fork) // M M // \ / // | // R if (isMotorwayClass(intersection[1].eid, node_based_graph) && isMotorwayClass(intersection[2].eid, node_based_graph)) { assignFork(via_eid, intersection[2], intersection[1]); } else { // continued ramp passing motorway entry // M R // M R // | / // R if (isMotorwayClass(intersection[1].eid, node_based_graph)) { intersection[1].instruction = {TurnType::Turn, DirectionModifier::SlightRight}; intersection[2].instruction = {TurnType::Continue, DirectionModifier::SlightLeft}; } else { assignFork(via_eid, intersection[2], intersection[1]); } } } } // On - Off Ramp on passing Motorway, Ramp onto Fork(?) else if (intersection.size() == 4) { bool passed_highway_entry = false; for (auto &road : intersection) { if (!road.entry_allowed && isMotorwayClass(road.eid, node_based_graph)) { passed_highway_entry = true; } else if (isMotorwayClass(road.eid, node_based_graph)) { road.instruction = {TurnType::Merge, passed_highway_entry ? DirectionModifier::SlightRight : DirectionModifier::SlightLeft}; } else { BOOST_ASSERT(isRampClass(road.eid, node_based_graph)); road.instruction = {TurnType::OffRamp, getTurnDirection(road.angle)}; } } } else { return fallback(std::move(intersection)); } return intersection; } Intersection MotorwayHandler::fallback(Intersection intersection) const { for (auto &road : intersection) { if (!road.entry_allowed) continue; const auto type = isMotorwayClass(road.eid, node_based_graph) ? TurnType::Merge : TurnType::Turn; if (type == TurnType::Turn) { if (angularDeviation(road.angle, STRAIGHT_ANGLE) < FUZZY_ANGLE_DIFFERENCE) road.instruction = {type, DirectionModifier::Straight}; else { road.instruction = {type, road.angle > STRAIGHT_ANGLE ? DirectionModifier::SlightLeft : DirectionModifier::SlightRight}; } } else { road.instruction = {type, road.angle < STRAIGHT_ANGLE ? DirectionModifier::SlightLeft : DirectionModifier::SlightRight}; } } return intersection; } } // namespace guidance } // namespace osrm
1
23,438
I talked this scenario through with @srividyacb, and she's wondering whether there should also be a check of `(from_trunk && node_based_graph.GetEdgeData(eid).flags.road_classification.IsLinkClass())`, since highways with trunk classifications can also run into this scenario.
Project-OSRM-osrm-backend
cpp
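The review floats extending the patched check so that link edges also count as ramp-like when the approach is a trunk road, not only a motorway. One way that suggestion could look, sketched against the helper already in the file — the extra `from_trunk` parameter and its default are illustrative, not something in the OSRM sources:

```cpp
// Hypothetical sketch of the reviewer's suggestion: treat link edges as
// ramp-like when coming from either a motorway or a trunk road.
inline bool isRampClass(EdgeID eid,
                        const util::NodeBasedDynamicGraph &node_based_graph,
                        bool from_motorway = true,
                        bool from_trunk = false)
{
    const auto &classification =
        node_based_graph.GetEdgeData(eid).flags.road_classification;
    return classification.IsRampClass() ||
           ((from_motorway || from_trunk) && classification.IsLinkClass());
}
```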
@@ -13,7 +13,7 @@ module Beaker :q_verify_packages => ENV['q_verify_packages'] || 'y', :q_puppet_symlinks_install => 'y', :q_puppetagent_certname => host, - :q_puppetagent_server => master, + :q_puppetagent_server => master_certname, # Disable database, console, and master by default # This will be overridden by other blocks being merged in.
1
module Beaker module Answers module Version30 def self.host_answers(host, master_certname, master, database, dashboard, options) # Windows hosts don't have normal answers... return nil if host['platform'] =~ /windows/ # Everything's an agent agent_a = { :q_puppetagent_install => 'y', :q_puppet_cloud_install => 'y', :q_verify_packages => ENV['q_verify_packages'] || 'y', :q_puppet_symlinks_install => 'y', :q_puppetagent_certname => host, :q_puppetagent_server => master, # Disable database, console, and master by default # This will be overridden by other blocks being merged in. :q_puppetmaster_install => 'n', :q_all_in_one_install => 'n', :q_puppet_enterpriseconsole_install => 'n', :q_puppetdb_install => 'n', :q_database_install => 'n', } # These base answers are needed by all common_a = { :q_install => 'y', :q_vendor_packages_install => 'y', } # master/database answers master_database_a = { :q_puppetmaster_certname => master_certname } # Master/dashboard answers master_console_a = { :q_puppetdb_hostname => database, :q_puppetdb_port => 8081 } # Master only answers master_a = { :q_puppetmaster_install => 'y', :q_puppetmaster_dnsaltnames => master_certname+",puppet", :q_puppetmaster_enterpriseconsole_hostname => dashboard, :q_puppetmaster_enterpriseconsole_port => 443, } if master['ip'] master_a[:q_puppetmaster_dnsaltnames]+=","+master['ip'] end # Common answers for console and database dashboard_password = "'#{ENV['q_puppet_enterpriseconsole_auth_password'] || '~!@#$%^*-/ aZ'}'" puppetdb_password = "'#{ENV['q_puppetdb_password'] || '~!@#$%^*-/ aZ'}'" console_database_a = { :q_puppetdb_database_name => 'pe-puppetdb', :q_puppetdb_database_user => 'mYpdBu3r', :q_puppetdb_database_password => puppetdb_password, :q_puppet_enterpriseconsole_auth_database_name => 'console_auth', :q_puppet_enterpriseconsole_auth_database_user => 'mYu7hu3r', :q_puppet_enterpriseconsole_auth_database_password => dashboard_password, :q_puppet_enterpriseconsole_database_name => 'console', :q_puppet_enterpriseconsole_database_user => 'mYc0nS03u3r', :q_puppet_enterpriseconsole_database_password => dashboard_password, :q_database_host => database, :q_database_port => 5432 } # Console only answers dashboard_user = "'#{ENV['q_puppet_enterpriseconsole_auth_user_email'] || '[email protected]'}'" smtp_host = "'#{ENV['q_puppet_enterpriseconsole_smtp_host'] || dashboard}'" smtp_port = "'#{ENV['q_puppet_enterpriseconsole_smtp_port'] || 25}'" smtp_username = ENV['q_puppet_enterpriseconsole_smtp_username'] smtp_password = ENV['q_puppet_enterpriseconsole_smtp_password'] smtp_use_tls = "'#{ENV['q_puppet_enterpriseconsole_smtp_use_tls'] || 'n'}'" console_a = { :q_puppet_enterpriseconsole_install => 'y', :q_puppet_enterpriseconsole_inventory_hostname => host, :q_puppet_enterpriseconsole_inventory_certname => host, :q_puppet_enterpriseconsole_inventory_dnsaltnames => dashboard, :q_puppet_enterpriseconsole_inventory_port => 8140, :q_puppet_enterpriseconsole_master_hostname => master, :q_puppet_enterpriseconsole_auth_user_email => dashboard_user, :q_puppet_enterpriseconsole_auth_password => dashboard_password, :q_puppet_enterpriseconsole_httpd_port => 443, :q_puppet_enterpriseconsole_smtp_host => smtp_host, :q_puppet_enterpriseconsole_smtp_use_tls => smtp_use_tls, :q_puppet_enterpriseconsole_smtp_port => smtp_port, } if smtp_password and smtp_username console_a.merge!({ :q_puppet_enterpriseconsole_smtp_password => "'#{smtp_password}'", :q_puppet_enterpriseconsole_smtp_username => "'#{smtp_username}'", 
:q_puppet_enterpriseconsole_smtp_user_auth => 'y' }) end # Database only answers database_a = { :q_puppetdb_install => 'y', :q_database_install => 'y', :q_database_root_password => "'=ZYdjiP3jCwV5eo9s1MBd'", :q_database_root_user => 'pe-postgres', } # Special answers for special hosts aix_a = { :q_run_updtvpkg => 'y', } answers = common_a.dup unless options[:type] == :upgrade answers.merge! agent_a end if host == master answers.merge! master_console_a unless options[:type] == :upgrade answers.merge! master_a answers.merge! master_database_a end end if host == dashboard answers.merge! master_console_a answers.merge! console_database_a answers[:q_pe_database] = 'y' unless options[:type] == :upgrade answers.merge! console_a else answers[:q_database_export_dir] = '/tmp' end end if host == database if database != master if options[:type] == :upgrade # This is kinda annoying - if we're upgrading to 3.0 and are # puppetdb, we're actually doing a clean install. We thus # need the core agent answers. answers.merge! agent_a end answers.merge! master_database_a end answers.merge! database_a answers.merge! console_database_a end if host == master and host == database and host == dashboard answers[:q_all_in_one_install] = 'y' end if host['platform'].include? 'aix' answers.merge! aix_a end return answers end def self.answers(hosts, master_certname, options) the_answers = {} database = only_host_with_role(hosts, 'database') dashboard = only_host_with_role(hosts, 'dashboard') master = only_host_with_role(hosts, 'master') hosts.each do |h| if options[:type] == :upgrade and h[:pe_ver] =~ /\A3.0/ # 3.0.x to 3.0.x should require no answers the_answers[h.name] = { :q_install => 'y', :q_install_vendor_packages => 'y', } else the_answers[h.name] = host_answers(h, master_certname, master, database, dashboard, options) end end return the_answers end end end end
1
4,624
Where does `master_certname` come from?
voxpupuli-beaker
rb
@@ -1,5 +1,7 @@ from .resnet import ResNet, make_res_layer from .resnext import ResNeXt from .ssd_vgg import SSDVGG +from .hrnet import HRNet -__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG'] +__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', + 'HRNet']
1
from .resnet import ResNet, make_res_layer from .resnext import ResNeXt from .ssd_vgg import SSDVGG __all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG']
1
17,413
The line breaking is unnecessary.
open-mmlab-mmdetection
py
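The comment says the line break added in the patch is unnecessary; what the reviewer appears to be asking for is simply the same imports and `__all__` as in the patch, kept on one line since it still fits a typical line-length limit:

```python
from .resnet import ResNet, make_res_layer
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .hrnet import HRNet

__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet']
```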
@@ -28,7 +28,7 @@ void DatasetLoader::SetHeader(const char* filename) { // get column names if (io_config_.has_header) { std::string first_line = text_reader.first_line(); - feature_names_ = Common::Split(first_line.c_str(), "\t ,"); + feature_names_ = Common::Split(first_line.c_str(), "\t,"); } // load label idx first
1
#include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/utils/log.h> #include <LightGBM/dataset_loader.h> #include <LightGBM/network.h> namespace LightGBM { DatasetLoader::DatasetLoader(const IOConfig& io_config, const PredictFunction& predict_fun, int num_class, const char* filename) :io_config_(io_config), random_(io_config_.data_random_seed), predict_fun_(predict_fun), num_class_(num_class) { label_idx_ = 0; weight_idx_ = NO_SPECIFIC; group_idx_ = NO_SPECIFIC; SetHeader(filename); } DatasetLoader::~DatasetLoader() { } void DatasetLoader::SetHeader(const char* filename) { std::unordered_map<std::string, int> name2idx; std::string name_prefix("name:"); if (filename != nullptr) { TextReader<data_size_t> text_reader(filename, io_config_.has_header); // get column names if (io_config_.has_header) { std::string first_line = text_reader.first_line(); feature_names_ = Common::Split(first_line.c_str(), "\t ,"); } // load label idx first if (io_config_.label_column.size() > 0) { if (Common::StartsWith(io_config_.label_column, name_prefix)) { std::string name = io_config_.label_column.substr(name_prefix.size()); label_idx_ = -1; for (int i = 0; i < static_cast<int>(feature_names_.size()); ++i) { if (name == feature_names_[i]) { label_idx_ = i; break; } } if (label_idx_ >= 0) { Log::Info("Using column %s as label", name.c_str()); } else { Log::Fatal("Could not find label column %s in data file \ or data file doesn't contain header", name.c_str()); } } else { if (!Common::AtoiAndCheck(io_config_.label_column.c_str(), &label_idx_)) { Log::Fatal("label_column is not a number, \ if you want to use a column name, \ please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as label", label_idx_); } } if (!feature_names_.empty()) { // erase label column name feature_names_.erase(feature_names_.begin() + label_idx_); for (size_t i = 0; i < feature_names_.size(); ++i) { name2idx[feature_names_[i]] = static_cast<int>(i); } } // load ignore columns if (io_config_.ignore_column.size() > 0) { if (Common::StartsWith(io_config_.ignore_column, name_prefix)) { std::string names = io_config_.ignore_column.substr(name_prefix.size()); for (auto name : Common::Split(names.c_str(), ',')) { if (name2idx.count(name) > 0) { int tmp = name2idx[name]; ignore_features_.emplace(tmp); } else { Log::Fatal("Could not find ignore column %s in data file", name.c_str()); } } } else { for (auto token : Common::Split(io_config_.ignore_column.c_str(), ',')) { int tmp = 0; if (!Common::AtoiAndCheck(token.c_str(), &tmp)) { Log::Fatal("ignore_column is not a number, \ if you want to use a column name, \ please add the prefix \"name:\" to the column name"); } ignore_features_.emplace(tmp); } } } // load weight idx if (io_config_.weight_column.size() > 0) { if (Common::StartsWith(io_config_.weight_column, name_prefix)) { std::string name = io_config_.weight_column.substr(name_prefix.size()); if (name2idx.count(name) > 0) { weight_idx_ = name2idx[name]; Log::Info("Using column %s as weight", name.c_str()); } else { Log::Fatal("Could not find weight column %s in data file", name.c_str()); } } else { if (!Common::AtoiAndCheck(io_config_.weight_column.c_str(), &weight_idx_)) { Log::Fatal("weight_column is not a number, \ if you want to use a column name, \ please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as weight", weight_idx_); } ignore_features_.emplace(weight_idx_); } // load group idx if (io_config_.group_column.size() > 0) { if 
(Common::StartsWith(io_config_.group_column, name_prefix)) { std::string name = io_config_.group_column.substr(name_prefix.size()); if (name2idx.count(name) > 0) { group_idx_ = name2idx[name]; Log::Info("Using column %s as group/query id", name.c_str()); } else { Log::Fatal("Could not find group/query column %s in data file", name.c_str()); } } else { if (!Common::AtoiAndCheck(io_config_.group_column.c_str(), &group_idx_)) { Log::Fatal("group_column is not a number, \ if you want to use a column name, \ please add the prefix \"name:\" to the column name"); } Log::Info("Using column number %d as group/query id", group_idx_); } ignore_features_.emplace(group_idx_); } } if (io_config_.categorical_column.size() > 0) { if (Common::StartsWith(io_config_.categorical_column, name_prefix)) { std::string names = io_config_.categorical_column.substr(name_prefix.size()); for (auto name : Common::Split(names.c_str(), ',')) { if (name2idx.count(name) > 0) { int tmp = name2idx[name]; categorical_features_.emplace(tmp); } else { Log::Fatal("Could not find categorical_column %s in data file", name.c_str()); } } } else { for (auto token : Common::Split(io_config_.categorical_column.c_str(), ',')) { int tmp = 0; if (!Common::AtoiAndCheck(token.c_str(), &tmp)) { Log::Fatal("categorical_column is not a number, \ if you want to use a column name, \ please add the prefix \"name:\" to the column name"); } categorical_features_.emplace(tmp); } } } } Dataset* DatasetLoader::LoadFromFile(const char* filename, int rank, int num_machines) { // don't support query id in data file when training in parallel if (num_machines > 1 && !io_config_.is_pre_partition) { if (group_idx_ > 0) { Log::Fatal("Using a query id without pre-partitioning the data file is not supported for parallel training. 
\ Please use an additional query file or pre-partition the data"); } } auto dataset = std::unique_ptr<Dataset>(new Dataset()); data_size_t num_global_data = 0; std::vector<data_size_t> used_data_indices; auto bin_filename = CheckCanLoadFromBin(filename); if (bin_filename.size() == 0) { auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, io_config_.has_header, 0, label_idx_)); if (parser == nullptr) { Log::Fatal("Could not recognize data format of %s", filename); } dataset->data_filename_ = filename; dataset->metadata_.Init(filename); if (!io_config_.use_two_round_loading) { // read data to memory auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices); dataset->num_data_ = static_cast<data_size_t>(text_data.size()); // sample data auto sample_data = SampleTextDataFromMemory(text_data); // construct feature bin mappers ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); // extract features ExtractFeaturesFromMemory(text_data, parser.get(), dataset.get()); text_data.clear(); } else { // sample data from file auto sample_data = SampleTextDataFromFile(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices); if (used_data_indices.size() > 0) { dataset->num_data_ = static_cast<data_size_t>(used_data_indices.size()); } else { dataset->num_data_ = num_global_data; } // construct feature bin mappers ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); // extract features ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get()); } } else { // load data from binary file dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), rank, num_machines, &num_global_data, &used_data_indices)); } // check meta data dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices); // need to check training data CheckDataset(dataset.get()); return dataset.release(); } Dataset* DatasetLoader::LoadFromFileAlignWithOtherDataset(const char* filename, const Dataset* train_data) { data_size_t num_global_data = 0; std::vector<data_size_t> used_data_indices; auto dataset = std::unique_ptr<Dataset>(new Dataset()); auto bin_filename = CheckCanLoadFromBin(filename); if (bin_filename.size() == 0) { auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, io_config_.has_header, 0, label_idx_)); if (parser == nullptr) { Log::Fatal("Could not recognize data format of %s", filename); } dataset->data_filename_ = filename; dataset->metadata_.Init(filename); if (!io_config_.use_two_round_loading) { // read data in memory auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, 0, 1, &num_global_data, &used_data_indices); dataset->num_data_ = static_cast<data_size_t>(text_data.size()); // initialize label dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_); dataset->CreateValid(train_data); // extract features ExtractFeaturesFromMemory(text_data, parser.get(), dataset.get()); text_data.clear(); } else { TextReader<data_size_t> text_reader(filename, io_config_.has_header); // Get number of lines of data file dataset->num_data_ = static_cast<data_size_t>(text_reader.CountLine()); num_global_data = dataset->num_data_; // initialize label dataset->metadata_.Init(dataset->num_data_, 
weight_idx_, group_idx_); dataset->CreateValid(train_data); // extract features ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get()); } } else { // load data from binary file dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), 0, 1, &num_global_data, &used_data_indices)); } // not need to check validation data // check meta data dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices); return dataset.release(); } Dataset* DatasetLoader::LoadFromBinFile(const char* data_filename, const char* bin_filename, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { auto dataset = std::unique_ptr<Dataset>(new Dataset()); FILE* file; #ifdef _MSC_VER fopen_s(&file, bin_filename, "rb"); #else file = fopen(bin_filename, "rb"); #endif dataset->data_filename_ = data_filename; if (file == NULL) { Log::Fatal("Could not read binary data from %s", bin_filename); } // buffer to read binary file size_t buffer_size = 16 * 1024 * 1024; auto buffer = std::vector<char>(buffer_size); // check token size_t size_of_token = std::strlen(Dataset::binary_file_token); size_t read_cnt = fread(buffer.data(), sizeof(char), size_of_token, file); if (read_cnt != size_of_token) { Log::Fatal("Binary file error: token has the wrong size"); } if (std::string(buffer.data()) != std::string(Dataset::binary_file_token)) { Log::Fatal("input file is not LightGBM binary file"); } // read size of header read_cnt = fread(buffer.data(), sizeof(size_t), 1, file); if (read_cnt != 1) { Log::Fatal("Binary file error: header has the wrong size"); } size_t size_of_head = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocmate space if not enough if (size_of_head > buffer_size) { buffer_size = size_of_head; buffer.resize(buffer_size); } // read header read_cnt = fread(buffer.data(), 1, size_of_head, file); if (read_cnt != size_of_head) { Log::Fatal("Binary file error: header is incorrect"); } // get header const char* mem_ptr = buffer.data(); dataset->num_data_ = *(reinterpret_cast<const data_size_t*>(mem_ptr)); mem_ptr += sizeof(dataset->num_data_); dataset->num_features_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_features_); dataset->num_total_features_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_total_features_); const int* tmp_feature_map = reinterpret_cast<const int*>(mem_ptr); dataset->used_feature_map_.clear(); for (int i = 0; i < dataset->num_total_features_; ++i) { dataset->used_feature_map_.push_back(tmp_feature_map[i]); } mem_ptr += sizeof(int) * dataset->num_total_features_; // num_groups dataset->num_groups_ = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(dataset->num_groups_); // real_feature_idx_ const int* tmp_ptr_real_feature_idx_ = reinterpret_cast<const int*>(mem_ptr); dataset->real_feature_idx_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->real_feature_idx_.push_back(tmp_ptr_real_feature_idx_[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // feature2group const int* tmp_ptr_feature2group = reinterpret_cast<const int*>(mem_ptr); dataset->feature2group_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { dataset->feature2group_.push_back(tmp_ptr_feature2group[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // feature2subfeature const int* tmp_ptr_feature2subfeature = reinterpret_cast<const int*>(mem_ptr); dataset->feature2subfeature_.clear(); for (int i = 0; i < dataset->num_features_; ++i) { 
dataset->feature2subfeature_.push_back(tmp_ptr_feature2subfeature[i]); } mem_ptr += sizeof(int) * dataset->num_features_; // group_bin_boundaries const uint64_t* tmp_ptr_group_bin_boundaries = reinterpret_cast<const uint64_t*>(mem_ptr); dataset->group_bin_boundaries_.clear(); for (int i = 0; i < dataset->num_groups_ + 1; ++i) { dataset->group_bin_boundaries_.push_back(tmp_ptr_group_bin_boundaries[i]); } mem_ptr += sizeof(uint64_t) * (dataset->num_groups_ + 1); // group_feature_start_ const int* tmp_ptr_group_feature_start = reinterpret_cast<const int*>(mem_ptr); dataset->group_feature_start_.clear(); for (int i = 0; i < dataset->num_groups_; ++i) { dataset->group_feature_start_.push_back(tmp_ptr_group_feature_start[i]); } mem_ptr += sizeof(int) * (dataset->num_groups_); // group_feature_cnt_ const int* tmp_ptr_group_feature_cnt = reinterpret_cast<const int*>(mem_ptr); dataset->group_feature_cnt_.clear(); for (int i = 0; i < dataset->num_groups_; ++i) { dataset->group_feature_cnt_.push_back(tmp_ptr_group_feature_cnt[i]); } mem_ptr += sizeof(int) * (dataset->num_groups_); // get feature names dataset->feature_names_.clear(); // write feature names for (int i = 0; i < dataset->num_total_features_; ++i) { int str_len = *(reinterpret_cast<const int*>(mem_ptr)); mem_ptr += sizeof(int); std::stringstream str_buf; for (int j = 0; j < str_len; ++j) { char tmp_char = *(reinterpret_cast<const char*>(mem_ptr)); mem_ptr += sizeof(char); str_buf << tmp_char; } dataset->feature_names_.emplace_back(str_buf.str()); } // read size of meta data read_cnt = fread(buffer.data(), sizeof(size_t), 1, file); if (read_cnt != 1) { Log::Fatal("Binary file error: meta data has the wrong size"); } size_t size_of_metadata = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocate space if not enough if (size_of_metadata > buffer_size) { buffer_size = size_of_metadata; buffer.resize(buffer_size); } // read meta data read_cnt = fread(buffer.data(), 1, size_of_metadata, file); if (read_cnt != size_of_metadata) { Log::Fatal("Binary file error: meta data is incorrect"); } // load meta data dataset->metadata_.LoadFromMemory(buffer.data()); *num_global_data = dataset->num_data_; used_data_indices->clear(); // sample local used data if need to partition if (num_machines > 1 && !io_config_.is_pre_partition) { const data_size_t* query_boundaries = dataset->metadata_.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query file, minimal sample unit is one record for (data_size_t i = 0; i < dataset->num_data_; ++i) { if (random_.NextShort(0, num_machines) == rank) { used_data_indices->push_back(i); } } } else { // if contain query file, minimal sample unit is one query data_size_t num_queries = dataset->metadata_.num_queries(); data_size_t qid = -1; bool is_query_used = false; for (data_size_t i = 0; i < dataset->num_data_; ++i) { if (qid >= num_queries) { Log::Fatal("Current query exceeds the range of the query file, please ensure the query file is correct"); } if (i >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } if (is_query_used) { used_data_indices->push_back(i); } } } dataset->num_data_ = static_cast<data_size_t>((*used_data_indices).size()); } dataset->metadata_.PartitionLabel(*used_data_indices); // read feature data for (int i = 0; i < dataset->num_groups_; ++i) { // read feature size read_cnt = fread(buffer.data(), sizeof(size_t), 1, file); if (read_cnt != 1) { Log::Fatal("Binary file error: 
feature %d has the wrong size", i); } size_t size_of_feature = *(reinterpret_cast<size_t*>(buffer.data())); // re-allocate space if not enough if (size_of_feature > buffer_size) { buffer_size = size_of_feature; buffer.resize(buffer_size); } read_cnt = fread(buffer.data(), 1, size_of_feature, file); if (read_cnt != size_of_feature) { Log::Fatal("Binary file error: feature %d is incorrect, read count: %d", i, read_cnt); } dataset->feature_groups_.emplace_back(std::unique_ptr<FeatureGroup>( new FeatureGroup(buffer.data(), *num_global_data, *used_data_indices) )); } dataset->feature_groups_.shrink_to_fit(); fclose(file); dataset->is_finish_load_ = true; return dataset.release(); } Dataset* DatasetLoader::CostructFromSampleData(double** sample_values, int** sample_indices, int num_col, const int* num_per_col, size_t total_sample_size, data_size_t num_data) { std::vector<std::unique_ptr<BinMapper>> bin_mappers(num_col); // fill feature_names_ if not header if (feature_names_.empty()) { for (int i = 0; i < num_col; ++i) { std::stringstream str_buf; str_buf << "Column_" << i; feature_names_.push_back(str_buf.str()); } } const data_size_t filter_cnt = static_cast<data_size_t>( static_cast<double>(io_config_.min_data_in_leaf * total_sample_size) / num_data); OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_col; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->FindBin(sample_values[i], num_per_col[i], total_sample_size, io_config_.max_bin, io_config_.min_data_in_bin, filter_cnt, bin_type); OMP_LOOP_EX_END(); } OMP_THROW_EX(); auto dataset = std::unique_ptr<Dataset>(new Dataset(num_data)); dataset->feature_names_ = feature_names_; dataset->Construct(bin_mappers, sample_indices, num_per_col, total_sample_size, io_config_); return dataset.release(); } // ---- private functions ---- void DatasetLoader::CheckDataset(const Dataset* dataset) { if (dataset->num_data_ <= 0) { Log::Fatal("Data file %s is empty", dataset->data_filename_); } if (dataset->feature_groups_.empty()) { Log::Fatal("No usable features in data file %s", dataset->data_filename_); } if (dataset->feature_names_.size() != static_cast<size_t>(dataset->num_total_features_)) { Log::Fatal("Size of feature name error, should be %d, got %d", dataset->num_total_features_, static_cast<int>(dataset->feature_names_.size())); } bool is_feature_order_by_group = true; int last_group = -1; int last_sub_feature = -1; // if features are ordered, not need to use hist_buf for (int i = 0; i < dataset->num_features_; ++i) { int group = dataset->feature2group_[i]; int sub_feature = dataset->feature2subfeature_[i]; if (group < last_group) { is_feature_order_by_group = false; } else if (group == last_group) { if (sub_feature <= last_sub_feature) { is_feature_order_by_group = false; break; } } last_group = group; last_sub_feature = sub_feature; } if (!is_feature_order_by_group) { Log::Fatal("feature in dataset should order by group"); } } std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filename, const Metadata& metadata, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { TextReader<data_size_t> text_reader(filename, io_config_.has_header); used_data_indices->clear(); if (num_machines == 1 || io_config_.is_pre_partition) { // read all lines 
*num_global_data = text_reader.ReadAllLines(); } else { // need partition data // get query data const data_size_t* query_boundaries = metadata.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query data, minimal sample unit is one record *num_global_data = text_reader.ReadAndFilterLines([this, rank, num_machines](data_size_t) { if (random_.NextShort(0, num_machines) == rank) { return true; } else { return false; } }, used_data_indices); } else { // if contain query data, minimal sample unit is one query data_size_t num_queries = metadata.num_queries(); data_size_t qid = -1; bool is_query_used = false; *num_global_data = text_reader.ReadAndFilterLines( [this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries] (data_size_t line_idx) { if (qid >= num_queries) { Log::Fatal("Current query exceeds the range of the query file, please ensure the query file is correct"); } if (line_idx >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } return is_query_used; }, used_data_indices); } } return std::move(text_reader.Lines()); } std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) { int sample_cnt = io_config_.bin_construct_sample_cnt; if (static_cast<size_t>(sample_cnt) > data.size()) { sample_cnt = static_cast<int>(data.size()); } auto sample_indices = random_.Sample(static_cast<int>(data.size()), sample_cnt); std::vector<std::string> out(sample_indices.size()); for (size_t i = 0; i < sample_indices.size(); ++i) { const size_t idx = sample_indices[i]; out[i] = data[idx]; } return out; } std::vector<std::string> DatasetLoader::SampleTextDataFromFile(const char* filename, const Metadata& metadata, int rank, int num_machines, int* num_global_data, std::vector<data_size_t>* used_data_indices) { const data_size_t sample_cnt = static_cast<data_size_t>(io_config_.bin_construct_sample_cnt); TextReader<data_size_t> text_reader(filename, io_config_.has_header); std::vector<std::string> out_data; if (num_machines == 1 || io_config_.is_pre_partition) { *num_global_data = static_cast<data_size_t>(text_reader.SampleFromFile(random_, sample_cnt, &out_data)); } else { // need partition data // get query data const data_size_t* query_boundaries = metadata.query_boundaries(); if (query_boundaries == nullptr) { // if not contain query file, minimal sample unit is one record *num_global_data = text_reader.SampleAndFilterFromFile([this, rank, num_machines] (data_size_t) { if (random_.NextShort(0, num_machines) == rank) { return true; } else { return false; } }, used_data_indices, random_, sample_cnt, &out_data); } else { // if contain query file, minimal sample unit is one query data_size_t num_queries = metadata.num_queries(); data_size_t qid = -1; bool is_query_used = false; *num_global_data = text_reader.SampleAndFilterFromFile( [this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries] (data_size_t line_idx) { if (qid >= num_queries) { Log::Fatal("Query id exceeds the range of the query file, \ please ensure the query file is correct"); } if (line_idx >= query_boundaries[qid + 1]) { // if is new query is_query_used = false; if (random_.NextShort(0, num_machines) == rank) { is_query_used = true; } ++qid; } return is_query_used; }, used_data_indices, random_, sample_cnt, &out_data); } } return out_data; } void DatasetLoader::ConstructBinMappersFromTextData(int rank, int num_machines, const 
std::vector<std::string>& sample_data, const Parser* parser, Dataset* dataset) { std::vector<std::vector<double>> sample_values; std::vector<std::vector<int>> sample_indices; std::vector<std::pair<int, double>> oneline_features; double label; for (int i = 0; i < static_cast<int>(sample_data.size()); ++i) { oneline_features.clear(); // parse features parser->ParseOneLine(sample_data[i].c_str(), &oneline_features, &label); for (std::pair<int, double>& inner_data : oneline_features) { if (static_cast<size_t>(inner_data.first) >= sample_values.size()) { sample_values.resize(inner_data.first + 1); sample_indices.resize(inner_data.first + 1); } if (std::fabs(inner_data.second) > kEpsilon) { sample_values[inner_data.first].emplace_back(inner_data.second); sample_indices[inner_data.first].emplace_back(i); } } } dataset->feature_groups_.clear(); if (feature_names_.empty()) { // -1 means doesn't use this feature dataset->used_feature_map_ = std::vector<int>(sample_values.size(), -1); dataset->num_total_features_ = static_cast<int>(sample_values.size()); } else { dataset->used_feature_map_ = std::vector<int>(feature_names_.size(), -1); dataset->num_total_features_ = static_cast<int>(feature_names_.size()); } // check the range of label_idx, weight_idx and group_idx CHECK(label_idx_ >= 0 && label_idx_ <= dataset->num_total_features_); CHECK(weight_idx_ < 0 || weight_idx_ < dataset->num_total_features_); CHECK(group_idx_ < 0 || group_idx_ < dataset->num_total_features_); // fill feature_names_ if not header if (feature_names_.empty()) { for (int i = 0; i < dataset->num_total_features_; ++i) { std::stringstream str_buf; str_buf << "Column_" << i; feature_names_.push_back(str_buf.str()); } } dataset->feature_names_ = feature_names_; std::vector<std::unique_ptr<BinMapper>> bin_mappers(sample_values.size()); const data_size_t filter_cnt = static_cast<data_size_t>( static_cast<double>(io_config_.min_data_in_leaf* sample_data.size()) / dataset->num_data_); // start find bins if (num_machines == 1) { OMP_INIT_EX(); // if only one machine, find bin locally #pragma omp parallel for schedule(guided) for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->FindBin(sample_values[i].data(), static_cast<int>(sample_values[i].size()), sample_data.size(), io_config_.max_bin, io_config_.min_data_in_bin, filter_cnt, bin_type); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { // if have multi-machines, need to find bin distributed // different machines will find bin for different features // start and len will store the process feature indices for different machines // machine i will find bins for features in [ start[i], start[i] + len[i] ) std::vector<int> start(num_machines); std::vector<int> len(num_machines); int total_num_feature = static_cast<int>(sample_values.size()); int step = (total_num_feature + num_machines - 1) / num_machines; if (step < 1) { step = 1; } start[0] = 0; for (int i = 0; i < num_machines - 1; ++i) { len[i] = std::min(step, total_num_feature - start[i]); start[i + 1] = start[i] + len[i]; } len[num_machines - 1] = total_num_feature - start[num_machines - 1]; OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { 
continue; } BinType bin_type = BinType::NumericalBin; if (categorical_features_.count(start[rank] + i)) { bin_type = BinType::CategoricalBin; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->FindBin(sample_values[start[rank] + i].data(), static_cast<int>(sample_values[start[rank] + i].size()), sample_data.size(), io_config_.max_bin, io_config_.min_data_in_bin, filter_cnt, bin_type); OMP_LOOP_EX_END(); } OMP_THROW_EX(); // get max_bin int local_max_bin = 0; for (int i = 0; i < len[rank]; ++i) { if (ignore_features_.count(start[rank] + i) > 0) { continue; } local_max_bin = std::max(local_max_bin, bin_mappers[i]->num_bin()); } int max_bin = local_max_bin; // sync global max_bin Network::Allreduce(reinterpret_cast<char*>(&local_max_bin), sizeof(local_max_bin), sizeof(local_max_bin), reinterpret_cast<char*>(&max_bin), [](const char* src, char* dst, int len) { int used_size = 0; const int type_size = sizeof(int); const int *p1; int *p2; while (used_size < len) { p1 = reinterpret_cast<const int *>(src); p2 = reinterpret_cast<int *>(dst); if (*p1 > *p2) { std::memcpy(dst, src, type_size); } src += type_size; dst += type_size; used_size += type_size; } }); // get size of bin mapper with max_bin size int type_size = BinMapper::SizeForSpecificBin(max_bin); // since sizes of different feature may not be same, we expand all bin mapper to type_size int buffer_size = type_size * total_num_feature; auto input_buffer = std::vector<char>(buffer_size); auto output_buffer = std::vector<char>(buffer_size); // find local feature bins and copy to buffer #pragma omp parallel for schedule(guided) for (int i = 0; i < len[rank]; ++i) { OMP_LOOP_EX_BEGIN(); if (ignore_features_.count(start[rank] + i) > 0) { continue; } bin_mappers[i]->CopyTo(input_buffer.data() + i * type_size); // free bin_mappers[i].reset(nullptr); OMP_LOOP_EX_END(); } OMP_THROW_EX(); // convert to binary size for (int i = 0; i < num_machines; ++i) { start[i] *= type_size; len[i] *= type_size; } // gather global feature bin mappers Network::Allgather(input_buffer.data(), buffer_size, start.data(), len.data(), output_buffer.data()); // restore features bins from buffer for (int i = 0; i < total_num_feature; ++i) { if (ignore_features_.count(i) > 0) { bin_mappers[i] = nullptr; continue; } bin_mappers[i].reset(new BinMapper()); bin_mappers[i]->CopyFrom(output_buffer.data() + i * type_size); } } sample_values.clear(); dataset->Construct(bin_mappers, Common::Vector2Ptr<int>(sample_indices).data(), Common::VectorSize<int>(sample_indices).data(), sample_data.size(), io_config_); } /*! 
\brief Extract local features from memory */ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>& text_data, const Parser* parser, Dataset* dataset) { std::vector<std::pair<int, double>> oneline_features; double tmp_label = 0.0f; if (predict_fun_ == nullptr) { OMP_INIT_EX(); // if doesn't need to prediction with initial model #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < dataset->num_data_; ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(text_data[i].c_str(), &oneline_features, &tmp_label); // set label dataset->metadata_.SetLabelAt(i, static_cast<float>(tmp_label)); // free processed line: text_data[i].clear(); // shrink_to_fit will be very slow in linux, and seems not free memory, disable for now // text_reader_->Lines()[i].shrink_to_fit(); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(i, static_cast<float>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { OMP_INIT_EX(); // if need to prediction with initial model std::vector<double> init_score(dataset->num_data_ * num_class_); #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < dataset->num_data_; ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(text_data[i].c_str(), &oneline_features, &tmp_label); // set initial score std::vector<double> oneline_init_score(num_class_); predict_fun_(oneline_features, oneline_init_score.data()); for (int k = 0; k < num_class_; ++k) { init_score[k * dataset->num_data_ + i] = static_cast<double>(oneline_init_score[k]); } // set label dataset->metadata_.SetLabelAt(i, static_cast<float>(tmp_label)); // free processed line: text_data[i].clear(); // shrink_to_fit will be very slow in linux, and seems not free memory, disable for now // text_reader_->Lines()[i].shrink_to_fit(); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(i, static_cast<float>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); // metadata_ will manage space of init_score dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_); } dataset->FinishLoad(); // text data can be free after loaded feature values text_data.clear(); } /*! 
\brief Extract local features from file */ void DatasetLoader::ExtractFeaturesFromFile(const char* filename, const Parser* parser, const std::vector<data_size_t>& used_data_indices, Dataset* dataset) { std::vector<double> init_score; if (predict_fun_ != nullptr) { init_score = std::vector<double>(dataset->num_data_ * num_class_); } std::function<void(data_size_t, const std::vector<std::string>&)> process_fun = [this, &init_score, &parser, &dataset] (data_size_t start_idx, const std::vector<std::string>& lines) { std::vector<std::pair<int, double>> oneline_features; double tmp_label = 0.0f; OMP_INIT_EX(); #pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label) for (data_size_t i = 0; i < static_cast<data_size_t>(lines.size()); ++i) { OMP_LOOP_EX_BEGIN(); const int tid = omp_get_thread_num(); oneline_features.clear(); // parser parser->ParseOneLine(lines[i].c_str(), &oneline_features, &tmp_label); // set initial score if (!init_score.empty()) { std::vector<double> oneline_init_score(num_class_); predict_fun_(oneline_features, oneline_init_score.data()); for (int k = 0; k < num_class_; ++k) { init_score[k * dataset->num_data_ + start_idx + i] = static_cast<double>(oneline_init_score[k]); } } // set label dataset->metadata_.SetLabelAt(start_idx + i, static_cast<float>(tmp_label)); // push data for (auto& inner_data : oneline_features) { if (inner_data.first >= dataset->num_total_features_) { continue; } int feature_idx = dataset->used_feature_map_[inner_data.first]; if (feature_idx >= 0) { // if is used feature int group = dataset->feature2group_[feature_idx]; int sub_feature = dataset->feature2subfeature_[feature_idx]; dataset->feature_groups_[group]->PushData(tid, sub_feature, start_idx + i, inner_data.second); } else { if (inner_data.first == weight_idx_) { dataset->metadata_.SetWeightAt(start_idx + i, static_cast<float>(inner_data.second)); } else if (inner_data.first == group_idx_) { dataset->metadata_.SetQueryAt(start_idx + i, static_cast<data_size_t>(inner_data.second)); } } } OMP_LOOP_EX_END(); } OMP_THROW_EX(); }; TextReader<data_size_t> text_reader(filename, io_config_.has_header); if (!used_data_indices.empty()) { // only need part of data text_reader.ReadPartAndProcessParallel(used_data_indices, process_fun); } else { // need full data text_reader.ReadAllAndProcessParallel(process_fun); } // metadata_ will manage space of init_score if (!init_score.empty()) { dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_); } dataset->FinishLoad(); } /*! \brief Check can load from binary file */ std::string DatasetLoader::CheckCanLoadFromBin(const char* filename) { std::string bin_filename(filename); bin_filename.append(".bin"); FILE* file; #ifdef _MSC_VER fopen_s(&file, bin_filename.c_str(), "rb"); #else file = fopen(bin_filename.c_str(), "rb"); #endif if (file == NULL) { bin_filename = std::string(filename); #ifdef _MSC_VER fopen_s(&file, bin_filename.c_str(), "rb"); #else file = fopen(bin_filename.c_str(), "rb"); #endif if (file == NULL) { Log::Fatal("cannot open data file %s", bin_filename.c_str()); } } size_t buffer_size = 256; auto buffer = std::vector<char>(buffer_size); // read size of token size_t size_of_token = std::strlen(Dataset::binary_file_token); size_t read_cnt = fread(buffer.data(), sizeof(char), size_of_token, file); fclose(file); if (read_cnt == size_of_token && std::string(buffer.data()) == std::string(Dataset::binary_file_token)) { return bin_filename; } else { return std::string(); } } }
1
16,505
Why not enable splitting by space here?
microsoft-LightGBM
cpp
@@ -1076,7 +1076,8 @@ fpga_result mmio_error(struct RASCommandLine *rasCmdLine) return result; } - if(value != FPGA_INTEGRATED_DEVICEID) { + if( (value != FPGA_INTEGRATED_DEVICEID) || + (value != FPGA_DISCRETE_DEVICEID) ) { FPGA_ERR("Failed to read Device id"); return FPGA_NOT_SUPPORTED; }
1
// Copyright(c) 2017, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <errno.h> #include <stdbool.h> #include <malloc.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <time.h> #include <unistd.h> #include <fcntl.h> #include <sys/mman.h> #include <unistd.h> #include <uuid/uuid.h> #include "safe_string/safe_string.h" #include "opae/fpga.h" #include "types_int.h" #include "common_int.h" // SYSFS FME Errors #define FME_SYSFS_FME_ERRORS "errors/fme-errors/errors" #define FME_SYSFS_PCIE0_ERRORS "errors/pcie0_errors" #define FME_SYSFS_PCIE1_ERRORS "errors/pcie1_errors" #define FME_SYSFS_BBS_ERRORS "errors/bbs_errors" #define FME_SYSFS_GBS_ERRORS "errors/gbs_errors" #define FME_SYSFS_WARNING_ERRORS "errors/warning_errors" #define FME_SYSFS_NONFATAL_ERRORS "errors/nonfatal_errors" #define FME_SYSFS_CATFATAL_ERRORS "errors/catfatal_errors" #define FME_SYSFS_INJECT_ERROR "errors/inject_error" #define FME_SYSFS_ERR_REVISION "errors/revision" #define PORT_SYSFS_ERR "errors/errors" #define PORT_SYSFS_ERR_CLEAR "errors/clear" // SYFS Thermal #define FME_SYSFS_THERMAL_MGMT_TEMP "thermal_mgmt/temperature" #define FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP "thermal_mgmt/threshold_trip" // SYSFS Power #define FME_SYSFS_POWER_MGMT_CONSUMED "power_mgmt/consumed" // MMIO scratchpad #define PORT_SCRATCHPAD0 0x0028 #define NLB_CSR_SCRATCHPAD (0x40000 + 0x0104 ) #define PORT_MMIO_LEN (0x40000 + 0x0512 ) #define MMO_WRITE64_VALUE 0xF1F1F1F1F1F1F1F1 #define MMO_WRITE32_VALUE 0xF1F1F1 #define FPGA_CSR_LEN 64 #define DEVICEID_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%d/device" #define FPGA_PORT_RES_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%d/resource2" #define FPGA_SET_BIT(val, index) val |= (1 << index) #define FPGA_CLEAR_BIT(val, index) val &= ~(1 << index) #define FPGA_TOGGLE_BIT(val, index) val ^= (1 << index) #define FPGA_BIT_IS_SET(val, index) (((val) >> (index)) & 1) /* Type definitions */ typedef struct { uint32_t uint[16]; } cache_line; int usleep(unsigned); #ifndef CL # define CL(x) ((x) * 64) #endif // CL #ifndef LOG2_CL # define 
LOG2_CL 6 #endif // LOG2_CL #ifndef MB # define MB(x) ((x) * 1024 * 1024) #endif // MB #define CACHELINE_ALIGNED_ADDR(p) ((p) >> LOG2_CL) #define LPBK1_BUFFER_SIZE MB(1) #define LPBK1_BUFFER_ALLOCATION_SIZE MB(2) #define LPBK1_DSM_SIZE MB(2) #define CSR_SRC_ADDR 0x0120 #define CSR_DST_ADDR 0x0128 #define CSR_CTL 0x0138 #define CSR_CFG 0x0140 #define CSR_NUM_LINES 0x0130 #define DSM_STATUS_TEST_COMPLETE 0x40 #define CSR_AFU_DSM_BASEL 0x0110 #define CSR_AFU_DSM_BASEH 0x0114 /* SKX-P NLB0 AFU_ID */ #define SKX_P_NLB0_AFUID "D8424DC4-A4A3-C413-F89E-433683F9040B" static const char * const FME_ERROR[] = { "Fabric error detected", \ "Fabric fifo under / overflow error detected", \ "KTI CDC Parity Error detected", \ "KTI CDC Parity Error detected", \ "IOMMU Parity error detected", \ "AFU PF/VF access mismatch detected", \ "Indicates an MBP event error detected", \ }; static const char * const PCIE0_ERROR[] = { "TLP format/type error detected", \ "TTLP MW address error detected", \ "TLP MW length error detected", \ "TLP MR address error detected", \ "TLP MR length error detected", \ "TLP CPL tag error detected", \ "TLP CPL status error detected", \ "TLP CPL timeout error detected", \ "CCI bridge parity error detected", \ "TLP with EP error detected", \ }; static const char * const PCIE1_ERROR[] = { "TLP format/type error detected", \ "TTLP MW address error detected", \ "TLP MW length error detected", \ "TLP MR address error detected", \ "TLP MR length error detected", \ "TLP CPL tag error detected", \ "TLP CPL status error detected", \ "TLP CPL timeout error detected", \ "CCI bridge parity error detected", \ "TLP with EP error detected", \ }; static const char * const RAS_NONFATAL_ERROR [] = { "Temperature threshold triggered AP1 detected", \ "Temperature threshold triggered AP2 detected", \ "PCIe error detected", \ "AFU port Fatal error detected", \ "ProcHot event error detected", \ "AFU PF/VF access mismatch error detected", \ "Injected Warning Error detected", \ "Reserved", \ "Reserved", \ "Temperature threshold triggered AP6 detected", \ "Power threshold triggered AP1 error detected", \ "Power threshold triggered AP2 error detected", \ "MBP event error detected", \ }; static const char * const RAS_CATFATAL_ERROR[] = { "KTI link layer error detected.", \ "tag-n-cache error detected.", \ "CCI error detected.", \ "KTI protocol error detected.", \ "Fatal DRAM error detected", \ "IOMMU fatal parity error detected.", \ "Fabric fatal error detected", \ "Poison error from any of PCIe ports detected", \ "Injected Fatal Error detected", \ "Catastrophic CRC error detected", \ "Catastrophic thermal runaway event detected", \ "Injected Catastrophic Error detected", \ }; static const char * const RAS_INJECT_ERROR[] = { "Set Catastrophic error .", \ "Set Fatal error.", \ "Ser Non-fatal error .", \ }; static const char * const RAS_GBS_ERROR [] = { "Temperature threshold triggered AP1 detected", \ "Temperature threshold triggered AP2 detected", \ "PCIe error detected", \ "AFU port Fatal error detected", \ "ProcHot event error detected", \ "AFU PF/VF access mismatch error detected", \ "Injected Warning Error detected", \ "Poison error from any of PCIe ports detected", \ "GBS CRC errordetected ", \ "Temperature threshold triggered AP6 detected", \ "Power threshold triggered AP1 error detected", \ "Power threshold triggered AP2 error detected", \ "MBP event error detected", \ }; static const char * const RAS_BBS_ERROR[] = { "KTI link layer error detected.", \ "tag-n-cache error detected.", \ "CCI error detected.", 
\ "KTI protocol error detected.", \ "Fatal DRAM error detected", \ "IOMMU fatal parity error detected.", \ "Fabric fatal error detected", \ "Poison error from any of PCIe ports detected", \ "Injected Fatal Error detected", \ "Catastrophic CRC error detected", \ "Catastrophic thermal runaway event detected", \ "Injected Catastrophic Error detected", \ }; static const char * const RAS_WARNING_ERROR[] = { "Green bitstream fatal event error detected.", \ }; static const char * const PORT_ERROR[] = { "Tx Channel 0 overflow error detected.", \ "Tx Channel 0 invalid request encodingr error detected.", \ "Tx Channel 0 cl_len=3 not supported error detected.", \ "Tx Channel 0 request with cl_len=2 does NOT have a 2CL aligned address error detected.", \ "Tx Channel 0 request with cl_len=4 does NOT have a 4CL aligned address error detected.", \ "RSVD.", "RSVD.", "RSVD.","RSVD.",\ "AFU MMIO RD received while PORT is in reset error detected", \ "AFU MMIO WR received while PORT is in reset error detected", \ "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\ "Tx Channel 1 invalid request encoding error detected", \ "Tx Channel 1 cl_len=3 not supported error detected.", \ "Tx Channel 1 request with cl_len=2 does NOT have a 2CL aligned address error detected", \ "Tx Channel 1 request with cl_len=4 does NOT have a 4CL aligned address error detected", \ "Tx Channel 1 insufficient data payload Error detected", \ "Tx Channel 1 data payload overrun error detected", \ "Tx Channel 1 incorrect address on subsequent payloads error detected", \ "Tx Channel 1 Non-zero SOP detected for requests!=WrLine_* error detected", \ "Tx Channel 1 Illegal VC_SEL. Atomic request is only supported on VL0 error detected", \ "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\ "MMIO TimedOut error detected", \ "Tx Channel 2 fifo overflo error detected", \ "MMIO Read response received, with no matching request pending error detected", \ "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", \ "Number of pending requests: counter overflow error detected", \ "Request with Address violating SMM range error detected", \ "Request with Address violating second SMM range error detected", \ "Request with Address violating ME stolen range", \ "Request with Address violating Generic protected range error detected ", \ "Request with Address violating Legacy Range Low error detected", \ "Request with Address violating Legacy Range High error detected", \ "Request with Address violating VGA memory range error detected", \ "Page Fault error detected", \ "PMR Erro error detected", \ "AP6 event detected ", \ "VF FLR detected on port when PORT configured in PF access mode error detected ", \ }; // RAS Error Inject CSR struct ras_inject_error { union { uint64_t csr; struct { /* Catastrophic error */ uint64_t catastrophicr_error : 1; /* Fatal error */ uint64_t fatal_error : 1; /* Non-fatal error */ uint64_t nonfatal_error : 1; /* Reserved */ uint64_t rsvd : 61; }; }; }; #define GETOPT_STRING ":hB:D:F:S:PQRNTCEGHIO" struct option longopts[] = { {"help", no_argument, NULL, 'h'}, {"bus-number", required_argument, NULL, 'B'}, {"device-number", required_argument, NULL, 'D'}, {"function-number", required_argument, NULL, 'F'}, {"socket-number", required_argument, NULL, 'S'}, {"print-error", no_argument, NULL, 'P'}, {"catast-error", no_argument, NULL, 'Q'}, {"fatal-error", no_argument, NULL, 'R'}, {"nofatal-error", no_argument, NULL, 'N'}, {"thermal-trip", no_argument, NULL, 'T'}, {"clearinj-error", no_argument, NULL, 'C'}, {"mwaddress-error", no_argument, NULL, 
'E'}, {"mraddress-error", no_argument, NULL, 'G'}, {"mwlength-error", no_argument, NULL, 'H'}, {"mrlength-error", no_argument, NULL, 'I'}, {"pagefault-error", no_argument, NULL, 'O'}, {0,0,0,0} }; // RAS Command line struct struct RASCommandLine { uint32_t flags; #define RASAPP_CMD_FLAG_HELP 0x00000001 #define RASAPP_CMD_FLAG_VERSION 0x00000002 #define RASAPP_CMD_PARSE_ERROR 0x00000003 #define RASAPP_CMD_FLAG_BUS 0x00000008 #define RASAPP_CMD_FLAG_DEV 0x00000010 #define RASAPP_CMD_FLAG_FUNC 0x00000020 #define RASAPP_CMD_FLAG_SOCKET 0x00000040 int bus; int device; int function; int socket; bool print_error; bool catast_error; bool fatal_error; bool nonfatal_error; bool clear_injerror; bool mwaddress_error; bool mraddress_error; bool mwlength_error; bool mrlength_error; bool pagefault_error; }; struct RASCommandLine rasCmdLine = { 0, -1, -1, -1, -1, false, false, false, false,false, false, false, false, false, false}; // RAS Command line input help void RASAppShowHelp() { printf("Usage:\n"); printf("./ras \n"); printf("<Bus> --bus=<BUS NUMBER> " "OR -B=<BUS NUMBER>\n"); printf("<Device> --device=<DEVICE NUMBER> " "OR -D=<DEVICE NUMBER>\n"); printf("<Function> --function=<FUNCTION NUMBER> " "OR -F=<FUNCTION NUMBER>\n"); printf("<Socket> --socket=<socket NUMBER> " " OR -S=<SOCKET NUMBER>\n"); printf("<Print Error> --print-error OR -P \n"); printf("<Catast Error> --catast-error OR -Q \n"); printf("<Fatal Error> --fatal-error OR -R \n"); printf("<NoFatal Error> --nofatal-error OR -N \n"); printf("<Clear Inj Error> --clearinj-error OR -C \n"); printf("<MW Address error> --mwaddress-error OR -E \n"); printf("<MR Address error> --mwaddress-error OR -G \n"); printf("<MW Length error> --mwlength-error OR -H \n"); printf("<MR Length error> --mrlength-error OR -I \n"); printf("<Page Fault Error> --pagefault-error OR -O \n"); printf("\n"); } /* * macro to check return codes, print error message, and goto cleanup label * NOTE: this changes the program flow (uses goto)! 
*/ #define ON_ERR_GOTO(res, label, desc) \ do { \ if ((res) != FPGA_OK) { \ print_err((desc), (res)); \ goto label; \ } \ } while (0) void print_err(const char *s, fpga_result res) { fprintf(stderr, "Error %s: %s\n", s, fpgaErrStr(res)); } fpga_result print_ras_errors(fpga_token token); fpga_result print_pwr_temp(fpga_token token); fpga_result clear_inject_ras_errors(fpga_token token, struct RASCommandLine *rasCmdLine); fpga_result inject_ras_errors(fpga_token token, struct RASCommandLine *rasCmdLine); fpga_result mmio_error(struct RASCommandLine *rasCmdLine); fpga_result print_port_errors(fpga_token token); fpga_result clear_port_errors(fpga_token token); fpga_result page_fault_errors(); int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[]); int main( int argc, char** argv ) { fpga_result result = 0; fpga_properties filter = NULL; fpga_token fme_token ; uint32_t num_matches = 1; // Parse command line if ( argc < 2 ) { RASAppShowHelp(); return 1; } else if ( 0!= ParseCmds(&rasCmdLine, argc, argv) ) { FPGA_ERR( "Error scanning command line \n."); return 2; } printf(" ------- Command line Input Start ---- \n \n"); printf(" Bus : %d\n", rasCmdLine.bus); printf(" Device : %d \n", rasCmdLine.device); printf(" Function : %d \n", rasCmdLine.function); printf(" Socket : %d \n", rasCmdLine.socket); printf(" Print Error : %d \n", rasCmdLine.print_error); printf(" Catas Error : %d \n", rasCmdLine.catast_error); printf(" Fatal Error : %d \n", rasCmdLine.fatal_error); printf(" NonFatal Error : %d \n", rasCmdLine.nonfatal_error); printf(" Clear Error : %d \n", rasCmdLine.clear_injerror); printf(" MW Address Error : %d \n", rasCmdLine.mwaddress_error); printf(" MR Address Error : %d \n", rasCmdLine.mraddress_error); printf(" MW Length Error : %d \n", rasCmdLine.mwlength_error); printf(" MR Length Error : %d \n", rasCmdLine.mrlength_error); printf(" Page Fault Error : %d \n", rasCmdLine.pagefault_error); printf(" ------- Command line Input END ---- \n\n"); // Enum FPGA device result = fpgaGetProperties(NULL, &filter); ON_ERR_GOTO(result, out_exit, "creating properties object"); result = fpgaPropertiesSetObjectType(filter, FPGA_DEVICE); ON_ERR_GOTO(result, out_destroy_prop, "setting object type"); if (rasCmdLine.bus >0){ result = fpgaPropertiesSetBus(filter, rasCmdLine.bus); ON_ERR_GOTO(result, out_destroy_prop, "setting bus"); } if (rasCmdLine.device >0) { result = fpgaPropertiesSetDevice(filter, rasCmdLine.device); ON_ERR_GOTO(result, out_destroy_prop, "setting device"); } if (rasCmdLine.function >0){ result = fpgaPropertiesSetFunction(filter, rasCmdLine.function); ON_ERR_GOTO(result, out_destroy_prop, "setting function"); } if (rasCmdLine.socket >0){ result = fpgaPropertiesSetSocketID(filter, rasCmdLine.socket); ON_ERR_GOTO(result, out_destroy_prop, "setting socket"); } result = fpgaEnumerate(&filter, 1, &fme_token,1, &num_matches); ON_ERR_GOTO(result, out_destroy_prop, "enumerating FPGAs"); if (num_matches < 1) { fprintf(stderr, "FPGA Resource not found.\n"); result = fpgaDestroyProperties(&filter); return FPGA_INVALID_PARAM; } fprintf(stderr, "FME Resource found.\n"); // Inject error if (rasCmdLine.catast_error || rasCmdLine.fatal_error || rasCmdLine.nonfatal_error) { // Inject RAS ERROR result = inject_ras_errors(fme_token,&rasCmdLine); if (result != FPGA_OK) { FPGA_ERR("Failed to print fme errors"); goto out_destroy_prop; } } // inject MMIO error if ( (rasCmdLine.mwaddress_error == true) || (rasCmdLine.mraddress_error == true) || (rasCmdLine.mwlength_error == true) || 
(rasCmdLine.mrlength_error == true) ) { result = mmio_error(&rasCmdLine); if (result != FPGA_OK) { FPGA_ERR("Failed set MMIO errors"); goto out_destroy_prop; } } // Clear Inject Error if (rasCmdLine.clear_injerror ) { // clear RAS ERROR result = clear_inject_ras_errors(fme_token,&rasCmdLine); if (result != FPGA_OK) { FPGA_ERR("Failed to clear inject errors"); goto out_destroy_prop; } // clear Port ERROR result = clear_port_errors(fme_token); if (result != FPGA_OK) { FPGA_ERR("Failed to clear port errors"); goto out_destroy_prop; } } if (rasCmdLine.pagefault_error) { // Page fault error result = page_fault_errors(); if (result != FPGA_OK) { FPGA_ERR("Failed to trigger page fault errors"); goto out_destroy_prop; } } sleep(1); if (rasCmdLine.print_error) { // Print RAS Error result = print_ras_errors(fme_token); if (result != FPGA_OK) { FPGA_ERR("Failed to print fme errors"); goto out_destroy_prop; } // Print port Error result = print_port_errors(fme_token); if (result != FPGA_OK) { FPGA_ERR("Failed to print port errors"); goto out_destroy_prop; } // Print power and temp result = print_pwr_temp(fme_token); if (result != FPGA_OK) { FPGA_ERR("Failed to get power and temp"); goto out_destroy_prop; } } /* Destroy properties object */ out_destroy_prop: result = fpgaDestroyProperties(&filter); ON_ERR_GOTO(result, out_exit, "destroying properties object"); out_exit: return result; } // Print Error fpga_result print_errors(fpga_token token, const char * err_path, const char * const* err_strings, int size) { struct _fpga_token *_token = 0; int i = 0; uint64_t value = 0; char syfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } if(err_path == NULL || err_strings == NULL) { FPGA_ERR("Invalid input sting"); return FPGA_INVALID_PARAM; } snprintf_s_ss(syfs_path, sizeof(syfs_path), "%s/%s", _token->sysfspath, err_path ); // Read error. result = sysfs_read_u64(syfs_path, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get errors"); return result; } printf(" CSR : 0x%lx \n", value); for (i = 0; i < FPGA_CSR_LEN; i++) { if ((i < size) && FPGA_BIT_IS_SET(value, i)) { printf("\t %s \n", err_strings[i]); } } return result; } // prints RAS errors fpga_result print_ras_errors(fpga_token token) { struct _fpga_token *_token = 0; uint64_t revision = 0; char syfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } printf("\n ==========================================\n"); printf(" ----------- PRINT FME ERROR START-------- \n \n"); // get revision snprintf_s_ss(syfs_path, sizeof(syfs_path), "%s/%s", _token->sysfspath, FME_SYSFS_ERR_REVISION ); // Read revision. 
result = sysfs_read_u64(syfs_path, &revision); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme revison"); return result; } printf(" fme error revison : %ld \n", revision); // Revision 0 if( revision == 1 ) { // Non Fatal Error printf("\n ------- Non Fatal error ------------ \n"); result = print_errors(token, FME_SYSFS_NONFATAL_ERRORS, RAS_NONFATAL_ERROR, sizeof(RAS_NONFATAL_ERROR) /sizeof(RAS_NONFATAL_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme non fatal errors"); return result; } // Fatal Error printf("\n ------- Fatal error ------------ \n"); result = print_errors(token, FME_SYSFS_CATFATAL_ERRORS, RAS_CATFATAL_ERROR, sizeof(RAS_CATFATAL_ERROR) /sizeof(RAS_CATFATAL_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme fatal errors"); return result; } // Injected error printf("\n ------- Injected error ------------ \n"); result = print_errors(token, FME_SYSFS_INJECT_ERROR, RAS_INJECT_ERROR, sizeof(RAS_INJECT_ERROR) /sizeof(RAS_INJECT_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme Injected errors"); return result; } // FME error printf("\n ------- FME error ------------ \n"); result = print_errors(token, FME_SYSFS_FME_ERRORS, FME_ERROR, sizeof(FME_ERROR) /sizeof(FME_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme errors"); return result; } // PCIe0 error printf("\n ------- PCIe0 error ------------ \n"); result = print_errors(token, FME_SYSFS_PCIE0_ERRORS, PCIE0_ERROR, sizeof(PCIE0_ERROR) /sizeof(PCIE0_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get pcie0 errors"); return result; } // PCIe1 error printf("\n ------- PCIe1 error ------------ \n"); result = print_errors(token, FME_SYSFS_PCIE1_ERRORS, PCIE1_ERROR, sizeof(PCIE1_ERROR) /sizeof(PCIE1_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get pcie1 errors"); return result; } // Revision 0 } else if( revision == 0){ // GBS Error printf("\n ------- GBS error ------------ \n"); result = print_errors(token, FME_SYSFS_GBS_ERRORS, RAS_GBS_ERROR, sizeof(RAS_GBS_ERROR) /sizeof(RAS_GBS_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme gbs errors"); return result; } // BBS Error printf("\n ------- BBS error ------------ \n"); result = print_errors(token, FME_SYSFS_BBS_ERRORS, RAS_BBS_ERROR, sizeof(RAS_BBS_ERROR) /sizeof(RAS_BBS_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme bbs errors"); return result; } // Injected error printf("\n ------- Injected error ------------ \n"); result = print_errors(token, FME_SYSFS_INJECT_ERROR, RAS_INJECT_ERROR, sizeof(RAS_INJECT_ERROR) /sizeof(RAS_INJECT_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme Injected errors"); return result; } // FME error printf("\n ------- FME error ------------ \n"); result = print_errors(token, FME_SYSFS_FME_ERRORS, FME_ERROR, sizeof(FME_ERROR) /sizeof(FME_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme errors"); return result; } // PCIe0 error printf("\n ------- PCIe0 error ------------ \n"); result = print_errors(token, FME_SYSFS_PCIE0_ERRORS, PCIE0_ERROR, sizeof(PCIE0_ERROR) /sizeof(PCIE0_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get pcie0 errors"); return result; } // PCIe1 error printf("\n ------- PCIe1 error ------------ \n"); result = print_errors(token, FME_SYSFS_PCIE1_ERRORS, PCIE1_ERROR, sizeof(PCIE1_ERROR) /sizeof(PCIE1_ERROR[0])); if (result != FPGA_OK) { FPGA_ERR("Failed to get pcie1 errors"); return result; } } else { printf("\n Invalid FME Error Revision \n"); } printf("\n ----------- PRINT FME 
ERROR END----------\n"); printf(" ========================================== \n \n"); return result; } // prints PORT errors fpga_result print_port_errors(fpga_token token) { struct _fpga_token *_token = 0; int i = 0; uint64_t value = 0; int size = 0; char sysfs_port[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; char *p = 0; int device_id = 0; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } printf("\n ==========================================\n"); printf(" ----------- PRINT PORT ERROR START-------- \n \n"); p = strstr(_token->sysfspath, FPGA_SYSFS_FME); if (NULL == p) return FPGA_INVALID_PARAM; p = strrchr(_token->sysfspath, '.'); if (NULL == p) return FPGA_INVALID_PARAM; device_id = atoi(p + 1); snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s", device_id, device_id,PORT_SYSFS_ERR); // Read port error. result = sysfs_read_u64(sysfs_port, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme errors"); return result; } printf("\n \n Port error CSR : 0x%lx \n", value); size = sizeof(PORT_ERROR) /sizeof(PORT_ERROR[0]); for (i = 0; i < 64; i++) { if ( FPGA_BIT_IS_SET(value, i) && (i < size)) { printf("\t %s \n", PORT_ERROR[i]); } } printf("\n ----------- PRINT PORT ERROR END----------\n"); printf(" ========================================== \n \n"); return result; } // clear PORT errors fpga_result clear_port_errors(fpga_token token) { struct _fpga_token *_token = 0; uint64_t value = 0; char sysfs_port[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; char *p = 0; int device_id = 0; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } printf(" ----------- Clear port error-------- \n \n"); p = strstr(_token->sysfspath, FPGA_SYSFS_FME); if (NULL == p) return FPGA_INVALID_PARAM; p = strrchr(_token->sysfspath, '.'); if (NULL == p) return FPGA_INVALID_PARAM; device_id = atoi(p + 1); snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s", device_id, device_id,PORT_SYSFS_ERR); // Read port error. 
result = sysfs_read_u64(sysfs_port, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get port errors"); return result; } printf("\n \n Port error CSR : 0x%lx \n", value); snprintf_s_iis(sysfs_port, SYSFS_PATH_MAX, SYSFS_FPGA_CLASS_PATH SYSFS_AFU_PATH_FMT"/%s", device_id, device_id,PORT_SYSFS_ERR_CLEAR); result = sysfs_write_u64(sysfs_port, value); if (result != FPGA_OK) { FPGA_ERR("Failed to write errors"); } return result; } // Inject RAS errors fpga_result inject_ras_errors(fpga_token token, struct RASCommandLine *rasCmdLine) { struct _fpga_token *_token = NULL; struct ras_inject_error inj_error = {{0}}; char sysfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } printf("----------- INJECT ERROR START -------- \n \n"); snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, FME_SYSFS_INJECT_ERROR); result = sysfs_read_u64(sysfs_path, &inj_error.csr); if (result != FPGA_OK) { FPGA_ERR("Failed to get fme errors"); return result; } printf("inj_error.csr: %ld \n", inj_error.csr); if (rasCmdLine->catast_error ) { inj_error.catastrophicr_error = 1; } if (rasCmdLine->fatal_error ) { inj_error.fatal_error = 1; } if (rasCmdLine->nonfatal_error ) { inj_error.nonfatal_error = 1; } printf("inj_error.csr: %ld \n", inj_error.csr); result = sysfs_write_u64(sysfs_path ,inj_error.csr); if (result != FPGA_OK) { FPGA_ERR("Failed to write RAS inject errors"); return result; } printf("----------- INJECT ERROR END-------- \n \n"); return result; } // Clear Inject RAS errors fpga_result clear_inject_ras_errors(fpga_token token, struct RASCommandLine *rasCmdLine) { struct _fpga_token *_token = NULL; struct ras_inject_error inj_error = {{0}}; char sysfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; UNUSED_PARAM(rasCmdLine); _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, FME_SYSFS_INJECT_ERROR); result = sysfs_read_u64(sysfs_path, &inj_error.csr); if (result != FPGA_OK) { FPGA_ERR("Failed to read inject error"); return result; } printf(" Clear inj_error.csr: 0x%lx \n", inj_error.csr); result = sysfs_write_u64(sysfs_path ,0x0); if (result != FPGA_OK) { FPGA_ERR("Failed to clear inject errors"); return result; } return result; } // Print FPGA power and temperature fpga_result print_pwr_temp(fpga_token token) { struct _fpga_token *_token = 0; uint64_t value = 0; char sysfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; _token = (struct _fpga_token*)token; if (_token == NULL) { FPGA_ERR("Token not found"); return FPGA_INVALID_PARAM; } printf("\n ----------- POWER & THERMAL -------------\n"); printf(" ========================================== \n \n"); snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, FME_SYSFS_POWER_MGMT_CONSUMED); result = sysfs_read_u64(sysfs_path, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get power consumed"); return result; } printf(" Power consumed : %lu watts \n",value); snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, FME_SYSFS_THERMAL_MGMT_TEMP); result = sysfs_read_u64(sysfs_path, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get temperature"); return result; } printf(" Temperature : %lu Centigrade \n",value ); snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, 
FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP); result = sysfs_read_u64(sysfs_path, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to get temperature"); return result; } printf(" Thermal Trip : %lu Centigrade \n",value ); printf("\n ----------- POWER & THERMAL -------------\n"); printf(" ========================================== \n \n"); return result; } // MMIO erros fpga_result mmio_error(struct RASCommandLine *rasCmdLine) { char sysfs_path[SYSFS_PATH_MAX] = {0}; fpga_result result = FPGA_OK; int bus = 0; int device = 0; int function = 0; uint64_t value = 0; int fd = 0; uint8_t *ptr = 0; if (rasCmdLine == NULL ) { FPGA_ERR("Invalid input "); return FPGA_INVALID_PARAM; } if ( rasCmdLine->bus >0 ) bus = rasCmdLine->bus; if ( rasCmdLine->device >0 ) device = rasCmdLine->bus; if ( rasCmdLine->function >0 ) function = rasCmdLine->bus; snprintf(sysfs_path, sizeof(sysfs_path), DEVICEID_PATH,0,bus,device,function); result = sysfs_read_u64(sysfs_path, &value); if (result != FPGA_OK) { FPGA_ERR("Failed to read Device id"); return result; } if(value != FPGA_INTEGRATED_DEVICEID) { FPGA_ERR("Failed to read Device id"); return FPGA_NOT_SUPPORTED; } snprintf(sysfs_path, sizeof(sysfs_path), FPGA_PORT_RES_PATH,0,bus,device,function); fd = open(sysfs_path, O_RDWR); if (fd < 0) { FPGA_ERR("Failed to open FPGA PCIE BAR2"); return FPGA_EXCEPTION; } ptr = mmap(NULL, PORT_MMIO_LEN, PROT_READ|PROT_WRITE,MAP_SHARED, fd, 0); if (ptr == MAP_FAILED ) { FPGA_ERR("Failed to map FPGA PCIE BAR2"); result = FPGA_EXCEPTION; goto out_close ; } // Memory Write length error if(rasCmdLine->mwlength_error) { FPGA_DBG("Memory Write length error \n"); *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3)) = (uint16_t)MMO_WRITE64_VALUE; } // Memory Read length error if(rasCmdLine->mrlength_error) { FPGA_DBG(" Memory Read length error \n"); value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3)); FPGA_DBG(" Memory Read length value %lx\n",value); } // Memory Read addresss error if(rasCmdLine->mraddress_error) { FPGA_DBG("Memory Read addresss error \n"); value = *((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3)); FPGA_DBG("Memory Read addresss value %lx\n",value); value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3)); FPGA_DBG("Memory Read addresss value %lx\n",value); } // Memory Write addresss error if(rasCmdLine->mwaddress_error) { FPGA_DBG("Memory Write addresss error \n"); *((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3)) = (uint16_t)MMO_WRITE32_VALUE; } if(ptr) munmap(ptr, PORT_MMIO_LEN); out_close: if(fd >=0) close(fd); return result; } // page fault errors fpga_result page_fault_errors() { fpga_properties filter = NULL; fpga_token accelerator_token; fpga_handle accelerator_handle; fpga_guid guid; uint32_t num_matches; volatile uint64_t *dsm_ptr = NULL; volatile uint64_t *input_ptr = NULL; volatile uint64_t *output_ptr = NULL; uint64_t dsm_wsid; uint64_t input_wsid; uint64_t output_wsid; fpga_result res = FPGA_OK; if (uuid_parse(SKX_P_NLB0_AFUID, guid) < 0) { fprintf(stderr, "Error parsing guid '%s'\n", SKX_P_NLB0_AFUID); goto out_exit; } /* Look for accelerator with MY_ACCELERATOR_ID */ res = fpgaGetProperties(NULL, &filter); ON_ERR_GOTO(res, out_exit, "creating properties object"); res = fpgaPropertiesSetObjectType(filter, FPGA_ACCELERATOR); ON_ERR_GOTO(res, out_destroy_prop, "setting object type"); res = fpgaPropertiesSetGUID(filter, guid); ON_ERR_GOTO(res, out_destroy_prop, "setting GUID"); if (rasCmdLine.bus >0){ res = fpgaPropertiesSetBus(filter, rasCmdLine.bus); ON_ERR_GOTO(res, out_destroy_prop, 
"setting bus"); } if (rasCmdLine.device >0) { res = fpgaPropertiesSetDevice(filter, rasCmdLine.device); ON_ERR_GOTO(res, out_destroy_prop, "setting device"); } if (rasCmdLine.function >0){ res = fpgaPropertiesSetFunction(filter, rasCmdLine.function); ON_ERR_GOTO(res, out_destroy_prop, "setting function"); } res = fpgaEnumerate(&filter, 1, &accelerator_token, 1, &num_matches); ON_ERR_GOTO(res, out_destroy_prop, "enumerating accelerators"); if (num_matches < 1) { fprintf(stderr, "accelerator not found.\n"); res = fpgaDestroyProperties(&filter); return FPGA_INVALID_PARAM; } /* Open accelerator and map MMIO */ res = fpgaOpen(accelerator_token, &accelerator_handle, FPGA_OPEN_SHARED); ON_ERR_GOTO(res, out_destroy_tok, "opening accelerator"); res = fpgaMapMMIO(accelerator_handle, 0, NULL); ON_ERR_GOTO(res, out_close, "mapping MMIO space"); /* Allocate buffers */ res = fpgaPrepareBuffer(accelerator_handle, LPBK1_DSM_SIZE, (void **)&dsm_ptr, &dsm_wsid, 0); ON_ERR_GOTO(res, out_close, "allocating DSM buffer"); res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE, (void **)&input_ptr, &input_wsid, 0); ON_ERR_GOTO(res, out_free_dsm, "allocating input buffer"); res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE, (void **)&output_ptr, &output_wsid, 0); ON_ERR_GOTO(res, out_free_input, "allocating output buffer"); printf("Running Test\n"); /* Initialize buffers */ memset((void *)dsm_ptr, 0, LPBK1_DSM_SIZE); memset((void *)input_ptr, 0xAF, LPBK1_BUFFER_SIZE); memset((void *)output_ptr, 0xBE, LPBK1_BUFFER_SIZE); cache_line *cl_ptr = (cache_line *)input_ptr; for (uint32_t i = 0; i < LPBK1_BUFFER_SIZE / CL(1); ++i) { cl_ptr[i].uint[15] = i+1; /* set the last uint in every cacheline */ } /* Reset accelerator */ res = fpgaReset(accelerator_handle); ON_ERR_GOTO(res, out_free_output, "resetting accelerator"); /* Program DMA addresses */ uint64_t iova; res = fpgaGetIOAddress(accelerator_handle, dsm_wsid, &iova); ON_ERR_GOTO(res, out_free_output, "getting DSM IOVA"); res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_AFU_DSM_BASEL, iova); ON_ERR_GOTO(res, out_free_output, "writing CSR_AFU_DSM_BASEL"); res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 0); ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG"); res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 1); ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG"); res = fpgaGetIOAddress(accelerator_handle, input_wsid, &iova); ON_ERR_GOTO(res, out_free_output, "getting input IOVA"); // Free Input buffer res = fpgaReleaseBuffer(accelerator_handle, input_wsid); res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_SRC_ADDR, CACHELINE_ALIGNED_ADDR(iova)); ON_ERR_GOTO(res, out_free_output, "writing CSR_SRC_ADDR"); res = fpgaGetIOAddress(accelerator_handle, output_wsid, &iova); ON_ERR_GOTO(res, out_free_output, "getting output IOVA"); res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_DST_ADDR, CACHELINE_ALIGNED_ADDR(iova)); ON_ERR_GOTO(res, out_free_output, "writing CSR_DST_ADDR"); res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_NUM_LINES, LPBK1_BUFFER_SIZE / CL(1)); ON_ERR_GOTO(res, out_free_output, "writing CSR_NUM_LINES"); res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CFG, 0x42000); ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG"); /* Start the test */ res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 3); ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG"); /* Wait for test completion */ usleep(10000); /* Stop the device */ res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 7); ON_ERR_GOTO(res, out_free_output, 
"writing CSR_CFG"); printf("Done Running Test\n"); /* Release buffers */ out_free_output: res = fpgaReleaseBuffer(accelerator_handle, output_wsid); ON_ERR_GOTO(res, out_free_input, "releasing output buffer"); out_free_input: // res = fpgaReleaseBuffer(accelerator_handle, input_wsid); // ON_ERR_GOTO(res, out_free_dsm, "releasing input buffer"); out_free_dsm: res = fpgaReleaseBuffer(accelerator_handle, dsm_wsid); ON_ERR_GOTO(res, out_unmap, "releasing DSM buffer"); /* Unmap MMIO space */ out_unmap: res = fpgaUnmapMMIO(accelerator_handle, 0); ON_ERR_GOTO(res, out_close, "unmapping MMIO space"); /* Release accelerator */ out_close: res = fpgaClose(accelerator_handle); ON_ERR_GOTO(res, out_destroy_tok, "closing accelerator"); /* Destroy token */ out_destroy_tok: res = fpgaDestroyToken(&accelerator_token); ON_ERR_GOTO(res, out_destroy_prop, "destroying token"); /* Destroy properties object */ out_destroy_prop: res = fpgaDestroyProperties(&filter); ON_ERR_GOTO(res, out_exit, "destroying properties object"); out_exit: return res; } // parse Input command line int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[]) { int getopt_ret = 0; int option_index = 0; char *endptr = NULL; while( -1 != ( getopt_ret = getopt_long(argc, argv, GETOPT_STRING, longopts, &option_index))){ const char *tmp_optarg = optarg; if ((optarg) && ('=' == *tmp_optarg)){ ++tmp_optarg; } switch(getopt_ret){ case 'h': // Command line help RASAppShowHelp(); return -2; break; case 'B': // bus number if (tmp_optarg == NULL ) break; endptr = NULL; rasCmdLine->bus = strtol(tmp_optarg, &endptr, 0); break; case 'D': // Device number if (tmp_optarg == NULL ) break; endptr = NULL; rasCmdLine->device = strtol(tmp_optarg, &endptr, 0); break; case 'F': // Function number if (tmp_optarg == NULL ) break; endptr = NULL; rasCmdLine->function = strtol(tmp_optarg, &endptr, 0); break; case 'S': // Socket number if (tmp_optarg == NULL ) break; endptr = NULL; rasCmdLine->socket = strtol(tmp_optarg, &endptr, 0); break; case 'P': // Print Errors rasCmdLine->print_error = true; break; case 'Q': // Set Cast error rasCmdLine->catast_error = true; break; case 'R': // Set Fatal error rasCmdLine->fatal_error = true; break; case 'O': // Set page fault error rasCmdLine->pagefault_error = true; break; case 'N': // Set Non Fatal error rasCmdLine->nonfatal_error = true; break; case 'C': // Clear Injected Error rasCmdLine->clear_injerror = true; break; case 'E': // Set MW Address error rasCmdLine->mwaddress_error = true; break; case 'G': // Set MR Address error rasCmdLine->mraddress_error = true; break; case 'H': // Set MW Length error rasCmdLine->mwlength_error = true; break; case 'I': // Set MR Length error rasCmdLine->mrlength_error = true; break; case ':': /* missing option argument */ printf("Missing option argument.\n"); return -1; case '?': default: /* invalid option */ printf("Invalid cmdline options.\n"); return -1; } } return 0; }
1
15,152
Shouldn't the operator be `&&`?
OPAE-opae-sdk
c
@@ -161,6 +161,17 @@ module Beaker FileUtils.rm_rf(@vagrant_path) end + #snapshotting depends on https://github.com/scalefactory/vagrant-multiprovider-snap + def take_snapshot(host,snapshot_name) + @logger.debug "Creating snapshot of #{host}" + vagrant_cmd("snap take #{host} --name=#{snapshot_name}") + end + + def restore_snapshot(host,snapshot_name) + @logger.debug "Restoring snapshot of #{host}" + vagrant_cmd("snap rollback #{host} --name=#{snapshot_name}") + end + def vagrant_cmd(args) Dir.chdir(@vagrant_path) do exit_status = 1
1
require 'open3' module Beaker class Vagrant < Beaker::Hypervisor # Return a random mac address # # @return [String] a random mac address def randmac "080027" + (1..3).map{"%0.2X"%rand(256)}.join end def rand_chunk (2 + rand(252)).to_s #don't want a 0, 1, or a 255 end def randip "10.255.#{rand_chunk}.#{rand_chunk}" end def make_vfile hosts, options = {} #HACK HACK HACK - add checks here to ensure that we have box + box_url #generate the VagrantFile v_file = "Vagrant.configure(\"2\") do |c|\n" v_file << " c.ssh.forward_agent = true\n" if options[:forward_ssh_agent] == true hosts.each do |host| host['ip'] ||= randip #use the existing ip, otherwise default to a random ip v_file << " c.vm.define '#{host.name}' do |v|\n" v_file << " v.vm.hostname = '#{host.name}'\n" v_file << " v.vm.box = '#{host['box']}'\n" v_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil? v_file << " v.vm.box_version = '#{host['box_version']}'\n" unless host['box_version'].nil? v_file << " v.vm.box_check_update = '#{host['box_check_update'] ||= 'true'}'\n" v_file << " v.vm.synced_folder '.', '/vagrant', disabled: true\n" if host['synced_folder'] == 'disabled' v_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\", :mac => \"#{randmac}\"\n" if /windows/i.match(host['platform']) v_file << " v.vm.network :forwarded_port, guest: 3389, host: 3389\n" v_file << " v.vm.network :forwarded_port, guest: 5985, host: 5985, id: 'winrm', auto_correct: true\n" v_file << " v.vm.guest = :windows" end if /osx/i.match(host['platform']) v_file << " v.vm.network 'private_network', ip: '10.0.1.10'\n" v_file << " v.vm.synced_folder '.', '/vagrant', :nfs => true\n" end v_file << self.class.provider_vfile_section(host, options) v_file << " end\n" @logger.debug "created Vagrantfile for VagrantHost #{host.name}" end v_file << "end\n" File.open(@vagrant_file, 'w') do |f| f.write(v_file) end end def self.provider_vfile_section host, options # Backwards compatibility; default to virtualbox Beaker::VagrantVirtualbox.provider_vfile_section(host, options) end def set_ssh_config host, user f = Tempfile.new("#{host.name}") ssh_config = Dir.chdir(@vagrant_path) do stdin, stdout, stderr, wait_thr = Open3.popen3('vagrant', 'ssh-config', host.name) if not wait_thr.value.success? 
raise "Failed to 'vagrant ssh-config' for #{host.name}" end stdout.read end #replace hostname with ip ssh_config = ssh_config.gsub(/Host #{host.name}/, "Host #{host['ip']}") unless not host['ip'] if host['platform'] =~ /windows/ ssh_config = ssh_config.gsub(/127\.0\.0\.1/, host['ip']) unless not host['ip'] end #set the user ssh_config = ssh_config.gsub(/User vagrant/, "User #{user}") f.write(ssh_config) f.rewind host['ssh'] = {:config => f.path()} host['user'] = user @temp_files << f end def get_ip_from_vagrant_file(hostname) ip = '' if File.file?(@vagrant_file) #we should have a vagrant file available to us for reading f = File.read(@vagrant_file) m = /#{hostname}.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f) if m ip = m[2] @logger.debug("Determined existing vagrant box #{hostname} ip to be: #{ip} ") else raise("Unable to determine ip for vagrant box #{hostname}") end else raise("No vagrant file found (should be located at #{@vagrant_file})") end ip end def initialize(vagrant_hosts, options) require 'tempfile' @options = options @logger = options[:logger] @temp_files = [] @hosts = vagrant_hosts @vagrant_path = File.expand_path(File.join(File.basename(__FILE__), '..', '.vagrant', 'beaker_vagrant_files', File.basename(options[:hosts_file]))) FileUtils.mkdir_p(@vagrant_path) @vagrant_file = File.expand_path(File.join(@vagrant_path, "Vagrantfile")) end def provision(provider = nil) if !@options[:provision] and !File.file?(@vagrant_file) raise "Beaker is configured with provision = false but no vagrant file was found at #{@vagrant_file}. You need to enable provision" end if @options[:provision] #setting up new vagrant hosts #make sure that any old boxes are dead dead dead vagrant_cmd("destroy --force") if File.file?(@vagrant_file) make_vfile @hosts, @options vagrant_cmd("up#{" --provider #{provider}" if provider}") else #set host ip of already up boxes @hosts.each do |host| host[:ip] = get_ip_from_vagrant_file(host.name) end end @logger.debug "configure vagrant boxes (set ssh-config, switch to root user, hack etc/hosts)" @hosts.each do |host| default_user = host['user'] set_ssh_config host, 'vagrant' #copy vagrant's keys to roots home dir, to allow for login as root copy_ssh_to_root host, @options #ensure that root login is enabled for this host enable_root_login host, @options #shut down connection, will reconnect on next exec host.close set_ssh_config host, default_user end hack_etc_hosts @hosts, @options end def cleanup @logger.debug "removing temporory ssh-config files per-vagrant box" @temp_files.each do |f| f.close() end @logger.notify "Destroying vagrant boxes" vagrant_cmd("destroy --force") FileUtils.rm_rf(@vagrant_path) end def vagrant_cmd(args) Dir.chdir(@vagrant_path) do exit_status = 1 Open3.popen3("vagrant #{args}") {|stdin, stdout, stderr, wait_thr| while line = stdout.gets @logger.info(line) end if not wait_thr.value.success? raise "Failed to exec 'vagrant #{args}'. Error was #{stderr.read}" end exit_status = wait_thr.value } if exit_status != 0 raise "Failed to execute vagrant_cmd ( #{args} ). Error was #{stderr.read}" end end end end end
1
8,529
My best guess is that you want to use `host.name` in these parts to get the name of the host?
voxpupuli-beaker
rb
@@ -270,13 +270,6 @@ func (n *Node) UnmarshalBinary(data []byte) error { n.entry = append([]byte{}, data[nodeHeaderSize:nodeHeaderSize+refBytesSize]...) offset := nodeHeaderSize + refBytesSize // skip entry - // Currently we don't persist the root nodeType when we marshal the manifest, as a result - // the root nodeType information is lost on Unmarshal. This causes issues when we want to - // perform a path 'Walk' on the root. If there is more than 1 fork, the root node type - // is an edge, so we will deduce this information from index byte array - if !bytes.Equal(data[offset:offset+32], make([]byte, 32)) { - n.nodeType = nodeTypeEdge - } n.forks = make(map[byte]*fork) bb := &bitsForBytes{} bb.fromBytes(data[offset:])
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mantaray import ( "bytes" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "fmt" ) const ( maxUint16 = ^uint16(0) ) // Version constants. const ( versionNameString = "mantaray" versionCode01String = "0.1" versionCode02String = "0.2" versionSeparatorString = ":" version01String = versionNameString + versionSeparatorString + versionCode01String // "mantaray:0.1" version01HashString = "025184789d63635766d78c41900196b57d7400875ebe4d9b5d1e76bd9652a9b7" // pre-calculated version string, Keccak-256 version02String = versionNameString + versionSeparatorString + versionCode02String // "mantaray:0.2" version02HashString = "5768b3b6a7db56d21d1abff40d41cebfc83448fed8d7e9b06ec0d3b073f28f7b" // pre-calculated version string, Keccak-256 ) // Node header fields constants. const ( nodeObfuscationKeySize = 32 versionHashSize = 31 nodeRefBytesSize = 1 // nodeHeaderSize defines the total size of the header part nodeHeaderSize = nodeObfuscationKeySize + versionHashSize + nodeRefBytesSize ) // Node fork constats. const ( nodeForkTypeBytesSize = 1 nodeForkPrefixBytesSize = 1 nodeForkHeaderSize = nodeForkTypeBytesSize + nodeForkPrefixBytesSize // 2 nodeForkPreReferenceSize = 32 nodePrefixMaxSize = nodeForkPreReferenceSize - nodeForkHeaderSize // 30 // "mantaray:0.2" nodeForkMetadataBytesSize = 2 ) var ( version01HashBytes []byte version02HashBytes []byte ) func init() { initVersion(version01HashString, &version01HashBytes) initVersion(version02HashString, &version02HashBytes) } func initVersion(hash string, bytes *[]byte) { b, err := hex.DecodeString(hash) if err != nil { panic(err) } *bytes = make([]byte, versionHashSize) copy(*bytes, b) } var ( // ErrTooShort signals too short input. ErrTooShort = errors.New("serialised input too short") // ErrInvalidInput signals invalid input to serialise. ErrInvalidInput = errors.New("input invalid") // ErrInvalidVersionHash signals unknown version of hash. ErrInvalidVersionHash = errors.New("invalid version hash") ) var obfuscationKeyFn = rand.Read // SetObfuscationKeyFn allows configuring custom function for generating // obfuscation key. // // NOTE: This should only be used in tests. func SetObfuscationKeyFn(fn func([]byte) (int, error)) { obfuscationKeyFn = fn } // MarshalBinary serialises the node func (n *Node) MarshalBinary() (bytes []byte, err error) { if n.forks == nil { return nil, ErrInvalidInput } // header headerBytes := make([]byte, nodeHeaderSize) if len(n.obfuscationKey) == 0 { // generate obfuscation key obfuscationKey := make([]byte, nodeObfuscationKeySize) for i := 0; i < nodeObfuscationKeySize; { read, _ := obfuscationKeyFn(obfuscationKey[i:]) i += read } n.obfuscationKey = obfuscationKey } copy(headerBytes[0:nodeObfuscationKeySize], n.obfuscationKey) copy(headerBytes[nodeObfuscationKeySize:nodeObfuscationKeySize+versionHashSize], version02HashBytes) headerBytes[nodeObfuscationKeySize+versionHashSize] = uint8(n.refBytesSize) bytes = append(bytes, headerBytes...) // entry entryBytes := make([]byte, n.refBytesSize) copy(entryBytes, n.entry) bytes = append(bytes, entryBytes...) // index indexBytes := make([]byte, 32) var index = &bitsForBytes{} for k := range n.forks { index.set(k) } copy(indexBytes, index.bytes()) bytes = append(bytes, indexBytes...) 
err = index.iter(func(b byte) error { f := n.forks[b] ref, err := f.bytes() if err != nil { return fmt.Errorf("%w on byte '%x'", err, []byte{b}) } bytes = append(bytes, ref...) return nil }) if err != nil { return nil, err } // perform XOR encryption on bytes after obfuscation key xorEncryptedBytes := make([]byte, len(bytes)) copy(xorEncryptedBytes, bytes[0:nodeObfuscationKeySize]) for i := nodeObfuscationKeySize; i < len(bytes); i += nodeObfuscationKeySize { end := i + nodeObfuscationKeySize if end > len(bytes) { end = len(bytes) } encrypted := encryptDecrypt(bytes[i:end], n.obfuscationKey) copy(xorEncryptedBytes[i:end], encrypted) } return xorEncryptedBytes, nil } // bitsForBytes is a set of bytes represented as a 256-length bitvector type bitsForBytes struct { bits [32]byte } func (bb *bitsForBytes) bytes() (b []byte) { b = append(b, bb.bits[:]...) return b } func (bb *bitsForBytes) fromBytes(b []byte) { copy(bb.bits[:], b) } func (bb *bitsForBytes) set(b byte) { bb.bits[b/8] |= 1 << (b % 8) } //nolint,unused func (bb *bitsForBytes) get(b byte) bool { // skipcq: SCC-U1000 return bb.getUint8(b) } func (bb *bitsForBytes) getUint8(i uint8) bool { return (bb.bits[i/8]>>(i%8))&1 > 0 } func (bb *bitsForBytes) iter(f func(byte) error) error { for i := uint8(0); ; i++ { if bb.getUint8(i) { if err := f(i); err != nil { return err } } if i == 255 { return nil } } } // UnmarshalBinary deserialises a node func (n *Node) UnmarshalBinary(data []byte) error { if len(data) < nodeHeaderSize { return ErrTooShort } n.obfuscationKey = append([]byte{}, data[0:nodeObfuscationKeySize]...) // perform XOR decryption on bytes after obfuscation key xorDecryptedBytes := make([]byte, len(data)) copy(xorDecryptedBytes, data[0:nodeObfuscationKeySize]) for i := nodeObfuscationKeySize; i < len(data); i += nodeObfuscationKeySize { end := i + nodeObfuscationKeySize if end > len(data) { end = len(data) } decrypted := encryptDecrypt(data[i:end], n.obfuscationKey) copy(xorDecryptedBytes[i:end], decrypted) } data = xorDecryptedBytes // Verify version hash. versionHash := data[nodeObfuscationKeySize : nodeObfuscationKeySize+versionHashSize] if bytes.Equal(versionHash, version01HashBytes) { refBytesSize := int(data[nodeHeaderSize-1]) n.entry = append([]byte{}, data[nodeHeaderSize:nodeHeaderSize+refBytesSize]...) offset := nodeHeaderSize + refBytesSize // skip entry n.forks = make(map[byte]*fork) bb := &bitsForBytes{} bb.fromBytes(data[offset:]) offset += 32 // skip forks return bb.iter(func(b byte) error { f := &fork{} if len(data) < offset+nodeForkPreReferenceSize+refBytesSize { err := fmt.Errorf("not enough bytes for node fork: %d (%d)", (len(data) - offset), (nodeForkPreReferenceSize + refBytesSize)) return fmt.Errorf("%w on byte '%x'", err, []byte{b}) } err := f.fromBytes(data[offset : offset+nodeForkPreReferenceSize+refBytesSize]) if err != nil { return fmt.Errorf("%w on byte '%x'", err, []byte{b}) } n.forks[b] = f offset += nodeForkPreReferenceSize + refBytesSize return nil }) } else if bytes.Equal(versionHash, version02HashBytes) { refBytesSize := int(data[nodeHeaderSize-1]) n.entry = append([]byte{}, data[nodeHeaderSize:nodeHeaderSize+refBytesSize]...) offset := nodeHeaderSize + refBytesSize // skip entry // Currently we don't persist the root nodeType when we marshal the manifest, as a result // the root nodeType information is lost on Unmarshal. This causes issues when we want to // perform a path 'Walk' on the root. 
If there is more than 1 fork, the root node type // is an edge, so we will deduce this information from index byte array if !bytes.Equal(data[offset:offset+32], make([]byte, 32)) { n.nodeType = nodeTypeEdge } n.forks = make(map[byte]*fork) bb := &bitsForBytes{} bb.fromBytes(data[offset:]) offset += 32 // skip forks return bb.iter(func(b byte) error { f := &fork{} if len(data) < offset+nodeForkTypeBytesSize { return fmt.Errorf("not enough bytes for node fork: %d (%d) on byte '%x'", (len(data) - offset), (nodeForkTypeBytesSize), []byte{b}) } nodeType := data[offset] nodeForkSize := nodeForkPreReferenceSize + refBytesSize if nodeTypeIsWithMetadataType(nodeType) { if len(data) < offset+nodeForkPreReferenceSize+refBytesSize+nodeForkMetadataBytesSize { return fmt.Errorf("not enough bytes for node fork: %d (%d) on byte '%x'", (len(data) - offset), (nodeForkPreReferenceSize + refBytesSize + nodeForkMetadataBytesSize), []byte{b}) } metadataBytesSize := binary.BigEndian.Uint16(data[offset+nodeForkSize : offset+nodeForkSize+nodeForkMetadataBytesSize]) nodeForkSize += nodeForkMetadataBytesSize nodeForkSize += int(metadataBytesSize) err := f.fromBytes02(data[offset:offset+nodeForkSize], refBytesSize, int(metadataBytesSize)) if err != nil { return fmt.Errorf("%w on byte '%x'", err, []byte{b}) } } else { if len(data) < offset+nodeForkPreReferenceSize+refBytesSize { return fmt.Errorf("not enough bytes for node fork: %d (%d) on byte '%x'", (len(data) - offset), (nodeForkPreReferenceSize + refBytesSize), []byte{b}) } err := f.fromBytes(data[offset : offset+nodeForkSize]) if err != nil { return fmt.Errorf("%w on byte '%x'", err, []byte{b}) } } n.forks[b] = f offset += nodeForkSize return nil }) } return fmt.Errorf("%x: %w", versionHash, ErrInvalidVersionHash) } func (f *fork) fromBytes(b []byte) error { nodeType := b[0] prefixLen := int(b[1]) if prefixLen == 0 || prefixLen > nodePrefixMaxSize { return fmt.Errorf("invalid prefix length: %d", prefixLen) } f.prefix = b[nodeForkHeaderSize : nodeForkHeaderSize+prefixLen] f.Node = NewNodeRef(b[nodeForkPreReferenceSize:]) f.Node.nodeType = nodeType return nil } func (f *fork) fromBytes02(b []byte, refBytesSize, metadataBytesSize int) error { nodeType := b[0] prefixLen := int(b[1]) if prefixLen == 0 || prefixLen > nodePrefixMaxSize { return fmt.Errorf("invalid prefix length: %d", prefixLen) } f.prefix = b[nodeForkHeaderSize : nodeForkHeaderSize+prefixLen] f.Node = NewNodeRef(b[nodeForkPreReferenceSize : nodeForkPreReferenceSize+refBytesSize]) f.Node.nodeType = nodeType if metadataBytesSize > 0 { metadataBytes := b[nodeForkPreReferenceSize+refBytesSize+nodeForkMetadataBytesSize:] metadata := make(map[string]string) // using JSON encoding for metadata err := json.Unmarshal(metadataBytes, &metadata) if err != nil { return err } f.Node.metadata = metadata } return nil } func (f *fork) bytes() (b []byte, err error) { r := refBytes(f) // using 1 byte ('f.Node.refBytesSize') for size if len(r) > 256 { err = fmt.Errorf("node reference size > 256: %d", len(r)) return } b = append(b, f.Node.nodeType, uint8(len(f.prefix))) prefixBytes := make([]byte, nodePrefixMaxSize) copy(prefixBytes, f.prefix) b = append(b, prefixBytes...) refBytes := make([]byte, len(r)) copy(refBytes, r) b = append(b, refBytes...) 
if f.Node.IsWithMetadataType() { // using JSON encoding for metadata metadataJSONBytes, err1 := json.Marshal(f.Node.metadata) if err1 != nil { return b, err1 } metadataJSONBytesSizeWithSize := len(metadataJSONBytes) + nodeForkMetadataBytesSize // pad JSON bytes if necessary if metadataJSONBytesSizeWithSize < nodeObfuscationKeySize { paddingLength := nodeObfuscationKeySize - metadataJSONBytesSizeWithSize padding := make([]byte, paddingLength) for i := range padding { padding[i] = '\n' } metadataJSONBytes = append(metadataJSONBytes, padding...) } else if metadataJSONBytesSizeWithSize > nodeObfuscationKeySize { paddingLength := nodeObfuscationKeySize - metadataJSONBytesSizeWithSize%nodeObfuscationKeySize padding := make([]byte, paddingLength) for i := range padding { padding[i] = '\n' } metadataJSONBytes = append(metadataJSONBytes, padding...) } metadataJSONBytesSize := len(metadataJSONBytes) if metadataJSONBytesSize > int(maxUint16) { return b, ErrMetadataTooLarge } mBytesSize := make([]byte, nodeForkMetadataBytesSize) binary.BigEndian.PutUint16(mBytesSize, uint16(metadataJSONBytesSize)) b = append(b, mBytesSize...) b = append(b, metadataJSONBytes...) } return b, nil } var refBytes = nodeRefBytes func nodeRefBytes(f *fork) []byte { return f.Node.ref } // encryptDecrypt runs a XOR encryption on the input bytes, encrypting it if it // hasn't already been, and decrypting it if it has, using the key provided. func encryptDecrypt(input, key []byte) []byte { output := make([]byte, len(input)) for i := 0; i < len(input); i++ { output[i] = input[i] ^ key[i%len(key)] } return output }
1
15,226
IMO this edge-case handling could remain here; instead of overwriting `n.nodeType` directly, the `makeEdge` method of `n` should be called, i.e. `n.nodeType = nodeTypeEdge` -> `n.makeEdge()`.
ethersphere-bee
go
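To illustrate the review comment above, here is a minimal Go sketch of how the removed version-0.2 branch of `UnmarshalBinary` could keep the edge-case handling while delegating to a `makeEdge` helper instead of assigning `n.nodeType` directly. The `makeEdge` method and the concrete value of `nodeTypeEdge` are assumptions taken from the reviewer's wording; they are stand-ins for definitions that live elsewhere in the mantaray package and are not shown in the file excerpt.

```go
package main

import (
	"bytes"
	"fmt"
)

// Stand-in for the real mantaray constant; the actual value is defined
// elsewhere in the package and is only illustrative here.
const nodeTypeEdge = uint8(4)

// Minimal stand-in for mantaray's Node, reduced to the field this sketch needs.
type Node struct {
	nodeType uint8
}

// makeEdge mirrors the helper the reviewer refers to: it marks the node as an
// edge without the caller touching nodeType directly.
func (n *Node) makeEdge() {
	n.nodeType |= nodeTypeEdge
}

// restoreRootType shows the suggested shape of the removed branch: if the fork
// index bytes are non-zero, the root has forks and must therefore be an edge.
func restoreRootType(n *Node, index []byte) {
	if !bytes.Equal(index, make([]byte, len(index))) {
		n.makeEdge() // instead of: n.nodeType = nodeTypeEdge
	}
}

func main() {
	n := &Node{}
	index := make([]byte, 32)
	index[3] = 0x10 // pretend one fork bit is set in the index byte array
	restoreRootType(n, index)
	fmt.Printf("nodeType after restore: %d\n", n.nodeType)
}
```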
@@ -205,9 +205,10 @@ public class DownloadService extends Service { Log.d(TAG, "Service shutting down"); isRunning = false; + boolean showAutoDownloadReport = UserPreferences.showAutoDownloadReport(); if (ClientConfig.downloadServiceCallbacks.shouldCreateReport() - && UserPreferences.showDownloadReport()) { - notificationManager.updateReport(reportQueue); + && (UserPreferences.showDownloadReport() || showAutoDownloadReport)) { + notificationManager.updateReport(reportQueue, showAutoDownloadReport); reportQueue.clear(); }
1
package de.danoeh.antennapod.core.service.download; import android.app.Notification; import android.app.NotificationManager; import android.app.Service; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.os.Binder; import android.os.Handler; import android.os.IBinder; import android.text.TextUtils; import android.util.Log; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.greenrobot.eventbus.EventBus; import java.io.File; import java.io.IOException; import java.net.HttpURLConnection; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import de.danoeh.antennapod.core.ClientConfig; import de.danoeh.antennapod.core.event.DownloadEvent; import de.danoeh.antennapod.core.event.FeedItemEvent; import de.danoeh.antennapod.core.feed.Feed; import de.danoeh.antennapod.core.feed.FeedItem; import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.preferences.GpodnetPreferences; import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.core.service.GpodnetSyncService; import de.danoeh.antennapod.core.service.download.handler.FailedDownloadHandler; import de.danoeh.antennapod.core.service.download.handler.FeedSyncTask; import de.danoeh.antennapod.core.service.download.handler.MediaDownloadedHandler; import de.danoeh.antennapod.core.service.download.handler.PostDownloaderTask; import de.danoeh.antennapod.core.storage.DBReader; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.DownloadRequester; import de.danoeh.antennapod.core.util.DownloadError; /** * Manages the download of feedfiles in the app. Downloads can be enqueued via the startService intent. * The argument of the intent is an instance of DownloadRequest in the EXTRA_REQUESTS field of * the intent. * After the downloads have finished, the downloaded object will be passed on to a specific handler, depending on the * type of the feedfile. */ public class DownloadService extends Service { private static final String TAG = "DownloadService"; /** * Cancels one download. The intent MUST have an EXTRA_DOWNLOAD_URL extra that contains the download URL of the * object whose download should be cancelled. */ public static final String ACTION_CANCEL_DOWNLOAD = "action.de.danoeh.antennapod.core.service.cancelDownload"; /** * Cancels all running downloads. */ public static final String ACTION_CANCEL_ALL_DOWNLOADS = "action.de.danoeh.antennapod.core.service.cancelAllDownloads"; /** * Extra for ACTION_CANCEL_DOWNLOAD */ public static final String EXTRA_DOWNLOAD_URL = "downloadUrl"; /** * Extra for ACTION_ENQUEUE_DOWNLOAD intent. 
*/ public static final String EXTRA_REQUESTS = "downloadRequests"; public static final String EXTRA_CLEANUP_MEDIA = "cleanupMedia"; public static final int NOTIFICATION_ID = 2; /** * Contains all completed downloads that have not been included in the report yet. */ private final List<DownloadStatus> reportQueue; private final ExecutorService syncExecutor; private final CompletionService<Downloader> downloadExecutor; private final DownloadRequester requester; private DownloadServiceNotification notificationManager; /** * Currently running downloads. */ private final List<Downloader> downloads; /** * Number of running downloads. */ private AtomicInteger numberOfDownloads; /** * True if service is running. */ public static boolean isRunning = false; private Handler handler; private NotificationUpdater notificationUpdater; private ScheduledFuture<?> notificationUpdaterFuture; private ScheduledFuture<?> downloadPostFuture; private static final int SCHED_EX_POOL_SIZE = 1; private ScheduledThreadPoolExecutor schedExecutor; private static DownloaderFactory downloaderFactory = new DefaultDownloaderFactory(); private final IBinder mBinder = new LocalBinder(); private class LocalBinder extends Binder { public DownloadService getService() { return DownloadService.this; } } public DownloadService() { reportQueue = Collections.synchronizedList(new ArrayList<>()); downloads = Collections.synchronizedList(new ArrayList<>()); numberOfDownloads = new AtomicInteger(0); requester = DownloadRequester.getInstance(); syncExecutor = Executors.newSingleThreadExecutor(r -> { Thread t = new Thread(r, "SyncThread"); t.setPriority(Thread.MIN_PRIORITY); return t; }); Log.d(TAG, "parallel downloads: " + UserPreferences.getParallelDownloads()); downloadExecutor = new ExecutorCompletionService<>( Executors.newFixedThreadPool(UserPreferences.getParallelDownloads(), r -> { Thread t = new Thread(r, "DownloadThread"); t.setPriority(Thread.MIN_PRIORITY); return t; } ) ); schedExecutor = new ScheduledThreadPoolExecutor(SCHED_EX_POOL_SIZE, r -> { Thread t = new Thread(r, "DownloadSchedExecutorThread"); t.setPriority(Thread.MIN_PRIORITY); return t; }, (r, executor) -> Log.w(TAG, "SchedEx rejected submission of new task") ); } @Override public int onStartCommand(Intent intent, int flags, int startId) { if (intent != null && intent.getParcelableArrayListExtra(EXTRA_REQUESTS) != null) { Notification notification = notificationManager.updateNotifications( requester.getNumberOfDownloads(), downloads); startForeground(NOTIFICATION_ID, notification); syncExecutor.execute(() -> onDownloadQueued(intent)); } else if (numberOfDownloads.get() == 0) { stopSelf(); } else { Log.d(TAG, "onStartCommand: Unknown intent"); } return Service.START_NOT_STICKY; } @Override public void onCreate() { Log.d(TAG, "Service started"); isRunning = true; handler = new Handler(); notificationManager = new DownloadServiceNotification(this); IntentFilter cancelDownloadReceiverFilter = new IntentFilter(); cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_ALL_DOWNLOADS); cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_DOWNLOAD); registerReceiver(cancelDownloadReceiver, cancelDownloadReceiverFilter); downloadCompletionThread.start(); Notification notification = notificationManager.updateNotifications( requester.getNumberOfDownloads(), downloads); startForeground(NOTIFICATION_ID, notification); } @Override public IBinder onBind(Intent intent) { return mBinder; } @Override public void onDestroy() { Log.d(TAG, "Service shutting down"); isRunning = false; if 
(ClientConfig.downloadServiceCallbacks.shouldCreateReport() && UserPreferences.showDownloadReport()) { notificationManager.updateReport(reportQueue); reportQueue.clear(); } EventBus.getDefault().postSticky(DownloadEvent.refresh(Collections.emptyList())); downloadCompletionThread.interrupt(); try { downloadCompletionThread.join(1000); } catch (InterruptedException e) { e.printStackTrace(); } syncExecutor.shutdown(); schedExecutor.shutdown(); cancelNotificationUpdater(); if (downloadPostFuture != null) { downloadPostFuture.cancel(true); } unregisterReceiver(cancelDownloadReceiver); stopForeground(true); NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE); nm.cancel(NOTIFICATION_ID); // if this was the initial gpodder sync, i.e. we just synced the feeds successfully, // it is now time to sync the episode actions if (GpodnetPreferences.loggedIn() && GpodnetPreferences.getLastSubscriptionSyncTimestamp() > 0 && GpodnetPreferences.getLastEpisodeActionsSyncTimestamp() == 0) { GpodnetSyncService.sendSyncActionsIntent(this); } // start auto download in case anything new has shown up DBTasks.autodownloadUndownloadedItems(getApplicationContext()); } private final Thread downloadCompletionThread = new Thread("DownloadCompletionThread") { private static final String TAG = "downloadCompletionThd"; @Override public void run() { Log.d(TAG, "downloadCompletionThread was started"); while (!isInterrupted()) { try { Downloader downloader = downloadExecutor.take().get(); Log.d(TAG, "Received 'Download Complete' - message."); if (downloader.getResult().isSuccessful()) { syncExecutor.execute(() -> { handleSuccessfulDownload(downloader); removeDownload(downloader); numberOfDownloads.decrementAndGet(); queryDownloadsAsync(); }); } else { handleFailedDownload(downloader); removeDownload(downloader); numberOfDownloads.decrementAndGet(); queryDownloadsAsync(); } } catch (InterruptedException e) { Log.e(TAG, "DownloadCompletionThread was interrupted"); return; } catch (ExecutionException e) { Log.e(TAG, "ExecutionException in DownloadCompletionThread: " + e.getMessage()); return; } } Log.d(TAG, "End of downloadCompletionThread"); } }; private void handleSuccessfulDownload(Downloader downloader) { DownloadRequest request = downloader.getDownloadRequest(); DownloadStatus status = downloader.getResult(); final int type = status.getFeedfileType(); if (type == Feed.FEEDFILETYPE_FEED) { Log.d(TAG, "Handling completed Feed Download"); FeedSyncTask task = new FeedSyncTask(DownloadService.this, request); boolean success = task.run(); if (success) { // we create a 'successful' download log if the feed's last refresh failed List<DownloadStatus> log = DBReader.getFeedDownloadLog(request.getFeedfileId()); if (log.size() > 0 && !log.get(0).isSuccessful()) { saveDownloadStatus(task.getDownloadStatus()); } } else { DBWriter.setFeedLastUpdateFailed(request.getFeedfileId(), true); saveDownloadStatus(task.getDownloadStatus()); } } else if (type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) { Log.d(TAG, "Handling completed FeedMedia Download"); MediaDownloadedHandler handler = new MediaDownloadedHandler(DownloadService.this, status, request); handler.run(); saveDownloadStatus(handler.getUpdatedStatus()); } } private void handleFailedDownload(Downloader downloader) { DownloadStatus status = downloader.getResult(); final int type = status.getFeedfileType(); if (!status.isCancelled()) { if (status.getReason() == DownloadError.ERROR_UNAUTHORIZED) { 
notificationManager.postAuthenticationNotification(downloader.getDownloadRequest()); } else if (status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR && Integer.parseInt(status.getReasonDetailed()) == 416) { Log.d(TAG, "Requested invalid range, restarting download from the beginning"); FileUtils.deleteQuietly(new File(downloader.getDownloadRequest().getDestination())); DownloadRequester.getInstance().download(DownloadService.this, downloader.getDownloadRequest()); } else { Log.e(TAG, "Download failed"); saveDownloadStatus(status); syncExecutor.execute(new FailedDownloadHandler(downloader.getDownloadRequest())); if (type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) { FeedItem item = getFeedItemFromId(status.getFeedfileId()); if (item == null) { return; } boolean httpNotFound = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR && String.valueOf(HttpURLConnection.HTTP_NOT_FOUND).equals(status.getReasonDetailed()); boolean forbidden = status.getReason() == DownloadError.ERROR_FORBIDDEN && String.valueOf(HttpURLConnection.HTTP_FORBIDDEN).equals(status.getReasonDetailed()); boolean notEnoughSpace = status.getReason() == DownloadError.ERROR_NOT_ENOUGH_SPACE; boolean wrongFileType = status.getReason() == DownloadError.ERROR_FILE_TYPE; boolean httpGone = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR && String.valueOf(HttpURLConnection.HTTP_GONE).equals(status.getReasonDetailed()); boolean httpBadReq = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR && String.valueOf(HttpURLConnection.HTTP_BAD_REQUEST).equals(status.getReasonDetailed()); if (httpNotFound || forbidden || notEnoughSpace || wrongFileType || httpGone || httpBadReq ) { try { DBWriter.saveFeedItemAutoDownloadFailed(item).get(); } catch (ExecutionException | InterruptedException e) { Log.d(TAG, "Ignoring exception while setting item download status"); e.printStackTrace(); } } // to make lists reload the failed item, we fake an item update EventBus.getDefault().post(FeedItemEvent.updated(item)); } } } else { // if FeedMedia download has been canceled, fake FeedItem update // so that lists reload that it if (status.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) { FeedItem item = getFeedItemFromId(status.getFeedfileId()); if (item == null) { return; } EventBus.getDefault().post(FeedItemEvent.updated(item)); } } } private Downloader getDownloader(String downloadUrl) { for (Downloader downloader : downloads) { if (downloader.getDownloadRequest().getSource().equals(downloadUrl)) { return downloader; } } return null; } private final BroadcastReceiver cancelDownloadReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_DOWNLOAD)) { String url = intent.getStringExtra(EXTRA_DOWNLOAD_URL); if (url == null) { throw new IllegalArgumentException("ACTION_CANCEL_DOWNLOAD intent needs download url extra"); } Log.d(TAG, "Cancelling download with url " + url); Downloader d = getDownloader(url); if (d != null) { d.cancel(); DownloadRequest request = d.getDownloadRequest(); DownloadRequester.getInstance().removeDownload(request); FeedItem item = getFeedItemFromId(request.getFeedfileId()); if (item != null) { // undo enqueue upon cancel if (request.isMediaEnqueued()) { Log.v(TAG, "Undoing enqueue upon cancelling download"); try { DBWriter.removeQueueItem(getApplicationContext(), false, item).get(); } catch (Throwable t) { Log.e(TAG, "Unexpected exception during undoing enqueue upon cancel", t); } } 
EventBus.getDefault().post(FeedItemEvent.updated(item)); } } else { Log.e(TAG, "Could not cancel download with url " + url); } postDownloaders(); } else if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_ALL_DOWNLOADS)) { for (Downloader d : downloads) { d.cancel(); Log.d(TAG, "Cancelled all downloads"); } postDownloaders(); } queryDownloads(); } }; private void onDownloadQueued(Intent intent) { List<DownloadRequest> requests = intent.getParcelableArrayListExtra(EXTRA_REQUESTS); if (requests == null) { throw new IllegalArgumentException( "ACTION_ENQUEUE_DOWNLOAD intent needs request extra"); } boolean cleanupMedia = intent.getBooleanExtra(EXTRA_CLEANUP_MEDIA, false); Log.d(TAG, "Received enqueue request. #requests=" + requests.size() + ", cleanupMedia=" + cleanupMedia); if (cleanupMedia) { ClientConfig.dbTasksCallbacks.getEpisodeCacheCleanupAlgorithm() .makeRoomForEpisodes(getApplicationContext(), requests.size()); } // #2448: First, add to-download items to the queue before actual download // so that the resulting queue order is the same as when download is clicked List<? extends FeedItem> itemsEnqueued; try { itemsEnqueued = enqueueFeedItems(requests); } catch (Exception e) { Log.e(TAG, "Unexpected exception during enqueue before downloads. Abort download", e); return; } for (DownloadRequest request : requests) { onDownloadQueued(request, itemsEnqueued); } } private List<? extends FeedItem> enqueueFeedItems(@NonNull List<? extends DownloadRequest> requests) throws Exception { List<FeedItem> feedItems = new ArrayList<>(); for (DownloadRequest request : requests) { if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) { long mediaId = request.getFeedfileId(); FeedMedia media = DBReader.getFeedMedia(mediaId); if (media == null) { Log.w(TAG, "enqueueFeedItems() : FeedFile Id " + mediaId + " is not found. ignore it."); continue; } feedItems.add(media.getItem()); } } return DBTasks.enqueueFeedItemsToDownload(getApplicationContext(), feedItems); } private void onDownloadQueued(@NonNull DownloadRequest request, @NonNull List<? extends FeedItem> itemsEnqueued) { writeFileUrl(request); Downloader downloader = downloaderFactory.create(request); if (downloader != null) { numberOfDownloads.incrementAndGet(); if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA && isEnqueued(request, itemsEnqueued)) { request.setMediaEnqueued(true); } handler.post(() -> { downloads.add(downloader); postDownloaders(); }); // Needs to be done after postDownloaders() because otherwise, // it might take long before the progress bar circle starts spinning ClientConfig.installSslProvider(this); handler.post(() -> downloadExecutor.submit(downloader)); } handler.post(this::queryDownloads); } private static boolean isEnqueued(@NonNull DownloadRequest request, @NonNull List<? 
extends FeedItem> itemsEnqueued) { if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) { final long mediaId = request.getFeedfileId(); for (FeedItem item : itemsEnqueued) { if (item.getMedia() != null && item.getMedia().getId() == mediaId) { return true; } } } return false; } @VisibleForTesting public static DownloaderFactory getDownloaderFactory() { return downloaderFactory; } // public scope rather than package private, // because androidTest put classes in the non-standard de.test.antennapod hierarchy @VisibleForTesting public static void setDownloaderFactory(DownloaderFactory downloaderFactory) { DownloadService.downloaderFactory = downloaderFactory; } /** * Remove download from the DownloadRequester list and from the * DownloadService list. */ private void removeDownload(final Downloader d) { handler.post(() -> { Log.d(TAG, "Removing downloader: " + d.getDownloadRequest().getSource()); boolean rc = downloads.remove(d); Log.d(TAG, "Result of downloads.remove: " + rc); DownloadRequester.getInstance().removeDownload(d.getDownloadRequest()); postDownloaders(); }); } /** * Adds a new DownloadStatus object to the list of completed downloads and * saves it in the database * * @param status the download that is going to be saved */ private void saveDownloadStatus(DownloadStatus status) { reportQueue.add(status); DBWriter.addDownloadStatus(status); } /** * Calls query downloads on the services main thread. This method should be used instead of queryDownloads if it is * used from a thread other than the main thread. */ private void queryDownloadsAsync() { handler.post(DownloadService.this::queryDownloads); } /** * Check if there's something else to download, otherwise stop */ private void queryDownloads() { Log.d(TAG, numberOfDownloads.get() + " downloads left"); if (numberOfDownloads.get() <= 0 && DownloadRequester.getInstance().hasNoDownloads()) { Log.d(TAG, "Number of downloads is " + numberOfDownloads.get() + ", attempting shutdown"); stopSelf(); notificationUpdater.run(); } else { setupNotificationUpdater(); Notification notification = notificationManager.updateNotifications( requester.getNumberOfDownloads(), downloads); startForeground(NOTIFICATION_ID, notification); } } @Nullable private FeedItem getFeedItemFromId(long id) { FeedMedia media = DBReader.getFeedMedia(id); if (media != null) { return media.getItem(); } else { return null; } } /** * Creates the destination file and writes FeedMedia File_url directly after starting download * to make it possible to resume download after the service was killed by the system. */ private void writeFileUrl(DownloadRequest request) { if (request.getFeedfileType() != FeedMedia.FEEDFILETYPE_FEEDMEDIA) { return; } File dest = new File(request.getDestination()); if (!dest.exists()) { try { dest.createNewFile(); } catch (IOException e) { Log.e(TAG, "Unable to create file"); } } if (dest.exists()) { Log.d(TAG, "Writing file url"); FeedMedia media = DBReader.getFeedMedia(request.getFeedfileId()); if (media == null) { Log.d(TAG, "No media"); return; } media.setFile_url(request.getDestination()); try { DBWriter.setFeedMedia(media).get(); } catch (InterruptedException e) { Log.e(TAG, "writeFileUrl was interrupted"); } catch (ExecutionException e) { Log.e(TAG, "ExecutionException in writeFileUrl: " + e.getMessage()); } } } /** * Schedules the notification updater task if it hasn't been scheduled yet. 
*/ private void setupNotificationUpdater() { if (notificationUpdater == null) { Log.d(TAG, "Setting up notification updater"); notificationUpdater = new NotificationUpdater(); notificationUpdaterFuture = schedExecutor.scheduleAtFixedRate(notificationUpdater, 1, 1, TimeUnit.SECONDS); } } private void cancelNotificationUpdater() { boolean result = false; if (notificationUpdaterFuture != null) { result = notificationUpdaterFuture.cancel(true); } notificationUpdater = null; notificationUpdaterFuture = null; Log.d(TAG, "NotificationUpdater cancelled. Result: " + result); } private class NotificationUpdater implements Runnable { public void run() { Notification n = notificationManager.updateNotifications(requester.getNumberOfDownloads(), downloads); if (n != null) { NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE); nm.notify(NOTIFICATION_ID, n); } } } private void postDownloaders() { new PostDownloaderTask(downloads).run(); if (downloadPostFuture == null) { downloadPostFuture = schedExecutor.scheduleAtFixedRate( new PostDownloaderTask(downloads), 1, 1, TimeUnit.SECONDS); } } }
1
15,779
Just wondering... The two kinds of notifications are now quite different (Channel, text, maybe even icon). Would it make sense to extract the auto download notification to a new class instead of handling everything in the existing `notificationManager`? I have not checked if this will lead to a lot of code duplication, though.
AntennaPod-AntennaPod
java
@@ -140,3 +140,11 @@ func (c *Call) RoutingDelegate() string { } return c.ic.req.RoutingDelegate } + +// Features returns the RequestFeatures for this request. +func (c *Call) Features() transport.RequestFeatures { + if c == nil { + return transport.RequestFeatures{} + } + return c.ic.req.Features +}
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package encoding import ( "context" "errors" "sort" "go.uber.org/yarpc/api/transport" ) type keyValuePair struct{ k, v string } // Call provides information about the current request inside handlers. type Call struct{ ic *InboundCall } // CallFromContext retrieves information about the current incoming request // from the given context. Returns nil if the context is not a valid request // context. // // The object is valid only as long as the request is ongoing. func CallFromContext(ctx context.Context) *Call { if ic, ok := getInboundCall(ctx); ok { return &Call{ic} } return nil } // WriteResponseHeader writes headers to the response of this call. func (c *Call) WriteResponseHeader(k, v string) error { if c == nil { return errors.New( "failed to write response header: " + "Call was nil, make sure CallFromContext was called with a request context") } c.ic.resHeaders = append(c.ic.resHeaders, keyValuePair{k: k, v: v}) return nil } // Caller returns the name of the service making this request. func (c *Call) Caller() string { if c == nil { return "" } return c.ic.req.Caller } // Service returns the name of the service being called. func (c *Call) Service() string { if c == nil { return "" } return c.ic.req.Service } // Procedure returns the name of the procedure being called. func (c *Call) Procedure() string { if c == nil { return "" } return c.ic.req.Procedure } // Encoding returns the encoding for this request. func (c *Call) Encoding() transport.Encoding { if c == nil { return "" } return c.ic.req.Encoding } // Header returns the value of the given request header provided with the // request. func (c *Call) Header(k string) string { if c == nil { return "" } if v, ok := c.ic.req.Headers.Get(k); ok { return v } return "" } // HeaderNames returns a sorted list of the names of user defined headers // provided with this request. func (c *Call) HeaderNames() []string { if c == nil { return nil } var names []string for k := range c.ic.req.Headers.Items() { names = append(names, k) } sort.Strings(names) return names } // ShardKey returns the shard key for this request. func (c *Call) ShardKey() string { if c == nil { return "" } return c.ic.req.ShardKey } // RoutingKey returns the routing key for this request. func (c *Call) RoutingKey() string { if c == nil { return "" } return c.ic.req.RoutingKey } // RoutingDelegate returns the routing delegate for this request. 
func (c *Call) RoutingDelegate() string { if c == nil { return "" } return c.ic.req.RoutingDelegate }
1
15,422
Do we need to put this on the Call? We only need the API for communication between the Encoding and the Transports, which is currently done through the Transport.Request. The Call is used by users, right?
yarpc-yarpc-go
go
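For context on the pattern the patch follows, here is a minimal, self-contained Go sketch of the nil-guarded accessor style used throughout `encoding.Call`: a nil receiver (i.e. a context that is not a request context) yields a zero value instead of panicking. The types below are simplified stand-ins for yarpc's `transport.Request` and `encoding.Call`, and the feature flag field is purely hypothetical for illustration.

```go
package main

import "fmt"

// RequestFeatures is a stand-in for transport.RequestFeatures; the flag name
// below is hypothetical and only exists to make the example runnable.
type RequestFeatures struct {
	AcceptsTrailers bool
}

// Request is a reduced stand-in for transport.Request.
type Request struct {
	Caller   string
	Features RequestFeatures
}

// Call is a reduced stand-in for encoding.Call.
type Call struct{ req *Request }

// Features mirrors the accessor added in the patch: nil-safe, returning the
// zero value when there is no inbound call attached to the context.
func (c *Call) Features() RequestFeatures {
	if c == nil {
		return RequestFeatures{}
	}
	return c.req.Features
}

func main() {
	var missing *Call // e.g. CallFromContext returned nil
	fmt.Println(missing.Features())

	call := &Call{req: &Request{Caller: "keyvalue-client", Features: RequestFeatures{AcceptsTrailers: true}}}
	fmt.Println(call.Features())
}
```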
@@ -1202,3 +1202,15 @@ L: } return filtered } + +func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +}
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcapi import ( "context" "errors" "fmt" "strings" "time" "github.com/google/uuid" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore" "github.com/pipe-cd/pipe/pkg/app/api/commandstore" "github.com/pipe-cd/pipe/pkg/app/api/service/webservice" "github.com/pipe-cd/pipe/pkg/app/api/stagelogstore" "github.com/pipe-cd/pipe/pkg/cache" "github.com/pipe-cd/pipe/pkg/cache/memorycache" "github.com/pipe-cd/pipe/pkg/config" "github.com/pipe-cd/pipe/pkg/crypto" "github.com/pipe-cd/pipe/pkg/datastore" "github.com/pipe-cd/pipe/pkg/git" "github.com/pipe-cd/pipe/pkg/model" "github.com/pipe-cd/pipe/pkg/rpc/rpcauth" ) type encrypter interface { Encrypt(text string) (string, error) } // WebAPI implements the behaviors for the gRPC definitions of WebAPI. type WebAPI struct { applicationStore datastore.ApplicationStore environmentStore datastore.EnvironmentStore deploymentStore datastore.DeploymentStore pipedStore datastore.PipedStore projectStore datastore.ProjectStore stageLogStore stagelogstore.Store applicationLiveStateStore applicationlivestatestore.Store commandStore commandstore.Store encrypter encrypter appProjectCache cache.Cache deploymentProjectCache cache.Cache pipedProjectCache cache.Cache projectsInConfig map[string]config.ControlPlaneProject logger *zap.Logger } // NewWebAPI creates a new WebAPI instance. func NewWebAPI( ctx context.Context, ds datastore.DataStore, sls stagelogstore.Store, alss applicationlivestatestore.Store, cmds commandstore.Store, projs map[string]config.ControlPlaneProject, encrypter encrypter, logger *zap.Logger) *WebAPI { a := &WebAPI{ applicationStore: datastore.NewApplicationStore(ds), environmentStore: datastore.NewEnvironmentStore(ds), deploymentStore: datastore.NewDeploymentStore(ds), pipedStore: datastore.NewPipedStore(ds), projectStore: datastore.NewProjectStore(ds), stageLogStore: sls, applicationLiveStateStore: alss, commandStore: cmds, projectsInConfig: projs, encrypter: encrypter, appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), logger: logger.Named("web-api"), } return a } // Register registers all handling of this service into the specified gRPC server. 
func (a *WebAPI) Register(server *grpc.Server) { webservice.RegisterWebServiceServer(server, a) } func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } env := model.Environment{ Id: uuid.New().String(), Name: req.Name, Desc: req.Desc, ProjectId: claims.Role.ProjectId, } err = a.environmentStore.AddEnvironment(ctx, &env) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The environment already exists") } if err != nil { a.logger.Error("failed to create environment", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create environment") } return &webservice.AddEnvironmentResponse{}, nil } func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) { return nil, status.Error(codes.Unimplemented, "") } func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: "==", Value: claims.Role.ProjectId, }, }, } envs, err := a.environmentStore.ListEnvironments(ctx, opts) if err != nil { a.logger.Error("failed to get environments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get environments") } return &webservice.ListEnvironmentsResponse{ Environments: envs, }, nil } func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } id := uuid.New().String() piped := model.Piped{ Id: id, Name: req.Name, Desc: req.Desc, ProjectId: claims.Role.ProjectId, EnvIds: req.EnvIds, Status: model.Piped_OFFLINE, } piped.AddKey(keyHash, claims.Subject, time.Now()) err = a.pipedStore.AddPiped(ctx, &piped) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The piped already exists") } if err != nil { a.logger.Error("failed to register piped", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to register piped") } return &webservice.RegisterPipedResponse{ Id: id, Key: key, }, nil } func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now()) } if 
err := a.updatePiped(ctx, req.Id, updater); err != nil { return nil, err } return &webservice.RecreatePipedKeyResponse{ Key: key, }, nil } func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil { return nil, err } return &webservice.EnablePipedResponse{}, nil } func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil { return nil, err } return &webservice.DisablePipedResponse{}, nil } func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil { return err } if err := updater(ctx, pipedID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.InvalidArgument, "The piped is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the piped", zap.String("piped-id", pipedID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the piped ") } } return nil } // TODO: Consider using piped-stats to decide piped connection status. func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: "==", Value: claims.Role.ProjectId, }, }, } if req.Options != nil { if req.Options.Enabled != nil { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "Disabled", Operator: "==", Value: !req.Options.Enabled.GetValue(), }) } } pipeds, err := a.pipedStore.ListPipeds(ctx, opts) if err != nil { a.logger.Error("failed to get pipeds", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get pipeds") } // Redact all sensitive data inside piped message before sending to the client. for i := range pipeds { pipeds[i].RedactSensitiveData() } return &webservice.ListPipedsResponse{ Pipeds: pipeds, }, nil } func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := a.getPiped(ctx, req.PipedId) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } // Redact all sensitive data inside piped message before sending to the client. 
piped.RedactSensitiveData() return &webservice.GetPipedResponse{ Piped: piped, }, nil } func (a *WebAPI) getPiped(ctx context.Context, pipedID string) (*model.Piped, error) { piped, err := a.pipedStore.GetPiped(ctx, pipedID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "Piped is not found") } if err != nil { a.logger.Error("failed to get piped", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get piped") } return piped, nil } // validatePipedBelongsToProject checks if the given piped belongs to the given project. // It gives back error unless the piped belongs to the project. func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error { pid, err := a.pipedProjectCache.Get(pipedID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } piped, err := a.getPiped(ctx, pipedID) if err != nil { return err } a.pipedProjectCache.Put(pipedID, piped.ProjectId) if piped.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } // TODO: Validate the specified piped to ensure that it belongs to the specified environment. func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } // The path to the application directory must be relative. if strings.HasPrefix(req.GitPath.Path, "/") { return nil, status.Error(codes.InvalidArgument, "The path must be a relative path") } gitpath, err := a.makeGitPath(ctx, req.GitPath.Repo.Id, req.GitPath.Path, req.GitPath.ConfigFilename, req.PipedId, claims.Role.ProjectId) if err != nil { return nil, err } app := model.Application{ Id: uuid.New().String(), Name: req.Name, EnvId: req.EnvId, PipedId: req.PipedId, ProjectId: claims.Role.ProjectId, GitPath: gitpath, Kind: req.Kind, CloudProvider: req.CloudProvider, } err = a.applicationStore.AddApplication(ctx, &app) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The application already exists") } if err != nil { a.logger.Error("failed to create application", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create application") } return &webservice.AddApplicationResponse{ ApplicationId: app.Id, }, nil } // makeGitPath returns an ApplicationGitPath by adding Repository info and GitPath URL to given args. 
func (a *WebAPI) makeGitPath(ctx context.Context, repoID, path, cfgFilename, pipedID, projectID string) (*model.ApplicationGitPath, error) { piped, err := a.getPiped(ctx, pipedID) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, pipedID, projectID); err != nil { return nil, err } var repo *model.ApplicationGitRepository for _, r := range piped.Repositories { if r.Id == repoID { repo = r break } } if repo == nil { a.logger.Error("repository not found", zap.String("repo-id", repoID), zap.String("piped-id", pipedID), zap.Error(err), ) return nil, status.Error(codes.Internal, "The repository is not found") } u, err := git.MakeDirURL(repo.Remote, path, repo.Branch) if err != nil { a.logger.Error("failed to make GitPath URL", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to make GitPath URL") } return &model.ApplicationGitPath{ Repo: repo, Path: path, ConfigFilename: cfgFilename, Url: u, }, nil } func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil { return nil, err } return &webservice.EnableApplicationResponse{}, nil } func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil { return nil, err } return &webservice.DisableApplicationResponse{}, nil } func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil { return err } var updater func(context.Context, string) error if enable { updater = a.applicationStore.EnableApplication } else { updater = a.applicationStore.DisableApplication } if err := updater(ctx, appID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.InvalidArgument, "The application is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the application", zap.String("application-id", appID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the application") } } return nil } func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: "==", Value: claims.Role.ProjectId, }, } if o := req.Options; o != nil { if o.Enabled != nil { filters = append(filters, datastore.ListFilter{ Field: "Disabled", Operator: "==", Value: !o.Enabled.GetValue(), }) } // Allowing multiple so that it can do In Query later. // Currently only the first value is used. 
if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: "==", Value: o.Kinds[0], }) } if len(o.SyncStatuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "SyncState.Status", Operator: "==", Value: o.SyncStatuses[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: "==", Value: o.EnvIds[0], }) } } apps, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{ Filters: filters, Orders: orders, }) if err != nil { a.logger.Error("failed to get applications", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get applications") } return &webservice.ListApplicationsResponse{ Applications: apps, }, nil } func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := a.getApplication(ctx, req.ApplicationId) if err != nil { return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } commandID := uuid.New().String() cmd := model.Command{ Id: commandID, PipedId: app.PipedId, ApplicationId: app.Id, Type: model.Command_SYNC_APPLICATION, Commander: claims.Subject, SyncApplication: &model.Command_SyncApplication{ ApplicationId: req.ApplicationId, }, } if err := a.addCommand(ctx, &cmd); err != nil { return nil, err } return &webservice.SyncApplicationResponse{ CommandId: commandID, }, nil } func (a *WebAPI) addCommand(ctx context.Context, cmd *model.Command) error { if err := a.commandStore.AddCommand(ctx, cmd); err != nil { a.logger.Error("failed to create command", zap.Error(err)) return status.Error(codes.Internal, "Failed to create command") } return nil } func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := a.getApplication(ctx, req.ApplicationId) if err != nil { return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } return &webservice.GetApplicationResponse{ Application: app, }, nil } func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := a.getPiped(ctx, req.PipedId) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } sse := piped.SealedSecretEncryption if sse == nil { return nil, status.Error(codes.FailedPrecondition, "The piped does not contain the encryption configuration") } var enc encrypter switch model.SealedSecretManagementType(sse.Type) { case model.SealedSecretManagementSealingKey: if sse.PublicKey == "" { return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a public key") } enc, err = crypto.NewHybridEncrypter(sse.PublicKey) if err != nil { a.logger.Error("failed to initialize the crypter", zap.Error(err)) 
return nil, status.Error(codes.FailedPrecondition, "Failed to initialize the encrypter") } default: return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a valid encryption type") } encryptedText, err := enc.Encrypt(req.Data) if err != nil { a.logger.Error("failed to encrypt the secret", zap.Error(err)) return nil, status.Error(codes.FailedPrecondition, "Failed to encrypt the secret") } return &webservice.GenerateApplicationSealedSecretResponse{ Data: encryptedText, }, nil } func (a *WebAPI) getApplication(ctx context.Context, appID string) (*model.Application, error) { app, err := a.applicationStore.GetApplication(ctx, appID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The application is not found") } if err != nil { a.logger.Error("failed to get application", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get application") } return app, nil } // validateAppBelongsToProject checks if the given application belongs to the given project. // It gives back error unless the application belongs to the project. func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error { pid, err := a.appProjectCache.Get(appID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } app, err := a.getApplication(ctx, appID) if err != nil { return err } a.appProjectCache.Put(appID, app.ProjectId) if app.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: "==", Value: claims.Role.ProjectId, }, } if o := req.Options; o != nil { // Allowing multiple so that it can do In Query later. // Currently only the first value is used. 
if len(o.Statuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Status", Operator: "==", Value: o.Statuses[0], }) } if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: "==", Value: o.Kinds[0], }) } if len(o.ApplicationIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "ApplicationId", Operator: "==", Value: o.ApplicationIds[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: "==", Value: o.EnvIds[0], }) } if o.MaxUpdatedAt != 0 { filters = append(filters, datastore.ListFilter{ Field: "UpdatedAt", Operator: "<=", Value: o.MaxUpdatedAt, }) } } deployments, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{ Filters: filters, Orders: orders, PageSize: int(req.PageSize), }) if err != nil { a.logger.Error("failed to get deployments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployments") } return &webservice.ListDeploymentsResponse{ Deployments: deployments, }, nil } func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := a.getDeployment(ctx, req.DeploymentId) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } return &webservice.GetDeploymentResponse{ Deployment: deployment, }, nil } func (a *WebAPI) getDeployment(ctx context.Context, deploymentID string) (*model.Deployment, error) { deployment, err := a.deploymentStore.GetDeployment(ctx, deploymentID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The deployment is not found") } if err != nil { a.logger.Error("failed to get deployment", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployment") } return deployment, nil } // validateDeploymentBelongsToProject checks if the given deployment belongs to the given project. // It gives back error unless the deployment belongs to the project. 
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error { pid, err := a.deploymentProjectCache.Get(deploymentID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } deployment, err := a.getDeployment(ctx, deploymentID) if err != nil { return err } a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId) if deployment.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex) if errors.Is(err, stagelogstore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The stage log not found") } if err != nil { a.logger.Error("failed to get stage logs", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get stage logs") } return &webservice.GetStageLogResponse{ Blocks: blocks, Completed: completed, }, nil } func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := a.getDeployment(ctx, req.DeploymentId) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } if model.IsCompletedDeployment(deployment.Status) { return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed") } commandID := uuid.New().String() cmd := model.Command{ Id: commandID, PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, DeploymentId: req.DeploymentId, Type: model.Command_CANCEL_DEPLOYMENT, Commander: claims.Subject, CancelDeployment: &model.Command_CancelDeployment{ DeploymentId: req.DeploymentId, ForceRollback: req.ForceRollback, ForceNoRollback: req.ForceNoRollback, }, } if err := a.addCommand(ctx, &cmd); err != nil { return nil, err } return &webservice.CancelDeploymentResponse{ CommandId: commandID, }, nil } func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := a.getDeployment(ctx, req.DeploymentId) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } stage, ok := deployment.StageStatusMap()[req.StageId] if !ok { return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment") } if model.IsCompletedStage(stage) { return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already 
completed") } commandID := uuid.New().String() cmd := model.Command{ Id: commandID, PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, DeploymentId: req.DeploymentId, StageId: req.StageId, Type: model.Command_APPROVE_STAGE, Commander: claims.Subject, ApproveStage: &model.Command_ApproveStage{ DeploymentId: req.DeploymentId, StageId: req.StageId, }, } if err := a.addCommand(ctx, &cmd); err != nil { return nil, err } return &webservice.ApproveStageResponse{ CommandId: commandID, }, nil } func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId) if err != nil { a.logger.Error("failed to get application live state", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get application live state") } return &webservice.GetApplicationLiveStateResponse{ Snapshot: snapshot, }, nil } // GetProject gets the specified porject without sensitive data. func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } project, err := a.getProject(ctx, claims.Role.ProjectId) if err != nil { return nil, err } // Redact all sensitive data inside project message before sending to the client. project.RedactSensitiveData() return &webservice.GetProjectResponse{ Project: project, }, nil } func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) { if p, ok := a.projectsInConfig[projectID]; ok { return &model.Project{ Id: p.Id, Desc: p.Desc, StaticAdmin: &model.ProjectStaticUser{ Username: p.StaticAdmin.Username, PasswordHash: p.StaticAdmin.PasswordHash, }, }, nil } project, err := a.projectStore.GetProject(ctx, projectID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The project is not found") } if err != nil { a.logger.Error("failed to get project", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get project") } return project, nil } // UpdateProjectStaticAdmin updates the static admin user settings. func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil { a.logger.Error("failed to update static admin", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update static admin") } return &webservice.UpdateProjectStaticAdminResponse{}, nil } // EnableStaticAdmin enables static admin login. 
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to enable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to enable static admin login") } return &webservice.EnableStaticAdminResponse{}, nil } // DisableStaticAdmin disables static admin login. func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to disenable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to disenable static admin login") } return &webservice.DisableStaticAdminResponse{}, nil } // UpdateProjectSSOConfig updates the sso settings. func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := req.Sso.Encrypt(a.encrypter); err != nil { a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations") } if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectSSOConfigResponse{}, nil } // UpdateProjectRBACConfig updates the sso settings. 
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectRBACConfigResponse{}, nil } // GetMe gets information about the current user. func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } return &webservice.GetMeResponse{ Subject: claims.Subject, AvatarUrl: claims.AvatarURL, ProjectId: claims.Role.ProjectId, ProjectRole: claims.Role.ProjectRole, }, nil } func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) { cmd, err := a.commandStore.GetCommand(ctx, req.CommandId) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The command is not found") } if err != nil { return nil, status.Error(codes.Internal, "Failed to get command") } // TODO: Add check if requested command belongs to logged-in project, after adding project id field to model.Command. 
return &webservice.GetCommandResponse{ Command: cmd, }, nil } func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := a.getApplication(ctx, req.ApplicationId) if err != nil { return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } var templates []*webservice.DeploymentConfigTemplate switch app.Kind { case model.ApplicationKind_KUBERNETES: templates = k8sDeploymentConfigTemplates case model.ApplicationKind_TERRAFORM: templates = terraformDeploymentConfigTemplates case model.ApplicationKind_CROSSPLANE: templates = crossplaneDeploymentConfigTemplates case model.ApplicationKind_LAMBDA: templates = lambdaDeploymentConfigTemplates case model.ApplicationKind_CLOUDRUN: templates = cloudrunDeploymentConfigTemplates default: return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind)) } for _, t := range templates { g := app.GetGitPath() filename := g.ConfigFilename if filename == "" { filename = ".pipe.yaml" } t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content) if err != nil { a.logger.Error("failed to make a link to creat a file", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to make a link to creat a file") } } if len(req.Labels) == 0 { return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil } filtered := filterDeploymentConfigTemplates(templates, req.Labels) return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil } // Returns the one from the given templates with all the specified labels. func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate { filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates)) L: for _, template := range templates { for _, l := range labels { if !template.HasLabel(l) { continue L } } filtered = append(filtered, template) } return filtered }
1
11,638
`ctx` is unused in GenerateAPIKey
pipe-cd-pipe
go
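The review note above flags an unused `ctx` parameter in GenerateAPIKey. That function body is not included in this record, so what follows is only a generic sketch with assumed names (`apiKeyStore`, `AddAPIKey`, `server`) of the two usual ways to resolve the nit: thread the context into the downstream call, or rename the parameter to make the intent of ignoring it explicit.

package webapi

import "context"

// apiKeyStore is a hypothetical interface standing in for whatever store
// GenerateAPIKey talks to; the real signature is not part of this record.
type apiKeyStore interface {
	AddAPIKey(ctx context.Context, key string) error
}

type server struct {
	keys apiKeyStore
}

// GenerateAPIKey threads ctx into the store call so cancellation and
// deadlines propagate; if the context really is irrelevant, renaming the
// parameter to _ makes that explicit instead of leaving it silently unused.
func (s *server) GenerateAPIKey(ctx context.Context, key string) (string, error) {
	if err := s.keys.AddAPIKey(ctx, key); err != nil {
		return "", err
	}
	return key, nil
}

Passing the context through is presumably what the reviewer had in mind, since it keeps request cancellation working end to end.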
@@ -75,6 +75,7 @@ type ( func NewBlockDAO(indexers []BlockIndexer, cfg config.DB) BlockDAO { blkStore, err := filedao.NewFileDAO(cfg) if err != nil { + log.L().Fatal(err.Error(), zap.Any("cfg", cfg)) return nil } return createBlockDAO(blkStore, indexers, cfg)
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockdao import ( "context" "sync/atomic" "time" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/iotexproject/go-pkgs/cache" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blockchain/filedao" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/pkg/lifecycle" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/pkg/prometheustimer" ) // vars var ( cacheMtc = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "iotex_blockdao_cache", Help: "IoTeX blockdao cache counter.", }, []string{"result"}, ) ) type ( // BlockDAO represents the block data access object BlockDAO interface { filedao.FileDAO GetActionByActionHash(hash.Hash256, uint64) (action.SealedEnvelope, error) GetReceiptByActionHash(hash.Hash256, uint64) (*action.Receipt, error) DeleteBlockToTarget(uint64) error } // BlockIndexer defines an interface to accept block to build index BlockIndexer interface { Start(ctx context.Context) error Stop(ctx context.Context) error Height() (uint64, error) PutBlock(context.Context, *block.Block) error DeleteTipBlock(blk *block.Block) error } blockDAO struct { blockStore filedao.FileDAO indexers []BlockIndexer timerFactory *prometheustimer.TimerFactory lifecycle lifecycle.Lifecycle headerCache *cache.ThreadSafeLruCache bodyCache *cache.ThreadSafeLruCache footerCache *cache.ThreadSafeLruCache tipHeight uint64 } ) // NewBlockDAO instantiates a block DAO func NewBlockDAO(indexers []BlockIndexer, cfg config.DB) BlockDAO { blkStore, err := filedao.NewFileDAO(cfg) if err != nil { return nil } return createBlockDAO(blkStore, indexers, cfg) } // NewBlockDAOInMemForTest creates a in-memory block DAO for testing func NewBlockDAOInMemForTest(indexers []BlockIndexer) BlockDAO { blkStore, err := filedao.NewFileDAOInMemForTest() if err != nil { return nil } return createBlockDAO(blkStore, indexers, config.DB{MaxCacheSize: 16}) } // Start starts block DAO and initiates the top height if it doesn't exist func (dao *blockDAO) Start(ctx context.Context) error { err := dao.lifecycle.OnStart(ctx) if err != nil { return errors.Wrap(err, "failed to start child services") } tipHeight, err := dao.blockStore.Height() if err != nil { return err } atomic.StoreUint64(&dao.tipHeight, tipHeight) return dao.checkIndexers(ctx) } func (dao *blockDAO) fillWithBlockInfoAsTip(ctx context.Context, height uint64) (context.Context, error) { bcCtx, ok := protocol.GetBlockchainCtx(ctx) if !ok { return nil, errors.New("failed to find blockchain ctx") } if height == 0 { bcCtx.Tip = protocol.TipInfo{ Height: 0, Hash: bcCtx.Genesis.Hash(), Timestamp: time.Unix(bcCtx.Genesis.Timestamp, 0), } } else { header, err := dao.HeaderByHeight(height) if err != nil { return nil, err } bcCtx.Tip = protocol.TipInfo{ Height: 
height, Hash: header.HashHeader(), Timestamp: header.Timestamp(), } } return protocol.WithBlockchainCtx(ctx, bcCtx), nil } func (dao *blockDAO) checkIndexers(ctx context.Context) error { bcCtx, ok := protocol.GetBlockchainCtx(ctx) if !ok { return errors.New("failed to find blockchain ctx") } for ii, indexer := range dao.indexers { tipHeight, err := indexer.Height() if err != nil { return err } if tipHeight > dao.tipHeight { // TODO: delete block return errors.New("indexer tip height cannot by higher than dao tip height") } for i := tipHeight + 1; i <= dao.tipHeight; i++ { blk, err := dao.GetBlockByHeight(i) if err != nil { return err } if blk.Receipts == nil { blk.Receipts, err = dao.GetReceipts(i) if err != nil { return err } } producer, err := address.FromBytes(blk.PublicKey().Hash()) if err != nil { return err } ctx, err = dao.fillWithBlockInfoAsTip(ctx, i-1) if err != nil { return err } if err := indexer.PutBlock(protocol.WithBlockCtx( ctx, protocol.BlockCtx{ BlockHeight: i, BlockTimeStamp: blk.Timestamp(), Producer: producer, GasLimit: bcCtx.Genesis.BlockGasLimit, }, ), blk); err != nil { return err } if i%5000 == 0 { log.L().Info( "indexer is catching up.", zap.Int("indexer", ii), zap.Uint64("height", i), ) } } log.L().Info( "indexer is up to date.", zap.Int("indexer", ii), zap.Uint64("height", tipHeight), ) } return nil } func (dao *blockDAO) Stop(ctx context.Context) error { return dao.lifecycle.OnStop(ctx) } func (dao *blockDAO) GetBlockHash(height uint64) (hash.Hash256, error) { timer := dao.timerFactory.NewTimer("get_block_hash") defer timer.End() return dao.blockStore.GetBlockHash(height) } func (dao *blockDAO) GetBlockHeight(hash hash.Hash256) (uint64, error) { timer := dao.timerFactory.NewTimer("get_block_height") defer timer.End() return dao.blockStore.GetBlockHeight(hash) } func (dao *blockDAO) GetBlock(hash hash.Hash256) (*block.Block, error) { timer := dao.timerFactory.NewTimer("get_block") defer timer.End() return dao.blockStore.GetBlock(hash) } func (dao *blockDAO) GetBlockByHeight(height uint64) (*block.Block, error) { timer := dao.timerFactory.NewTimer("get_block_byheight") defer timer.End() return dao.blockStore.GetBlockByHeight(height) } func (dao *blockDAO) HeaderByHeight(height uint64) (*block.Header, error) { if v, ok := lruCacheGet(dao.headerCache, height); ok { cacheMtc.WithLabelValues("hit_header").Inc() return v.(*block.Header), nil } cacheMtc.WithLabelValues("miss_header").Inc() header, err := dao.blockStore.HeaderByHeight(height) if err != nil { return nil, err } lruCachePut(dao.headerCache, height, header) return header, nil } func (dao *blockDAO) FooterByHeight(height uint64) (*block.Footer, error) { if v, ok := lruCacheGet(dao.footerCache, height); ok { cacheMtc.WithLabelValues("hit_footer").Inc() return v.(*block.Footer), nil } cacheMtc.WithLabelValues("miss_footer").Inc() footer, err := dao.blockStore.FooterByHeight(height) if err != nil { return nil, err } lruCachePut(dao.footerCache, height, footer) return footer, nil } func (dao *blockDAO) Height() (uint64, error) { return dao.blockStore.Height() } func (dao *blockDAO) Header(h hash.Hash256) (*block.Header, error) { if header, ok := lruCacheGet(dao.headerCache, h); ok { cacheMtc.WithLabelValues("hit_header").Inc() return header.(*block.Header), nil } cacheMtc.WithLabelValues("miss_header").Inc() header, err := dao.blockStore.Header(h) if err != nil { return nil, err } lruCachePut(dao.headerCache, h, header) return header, nil } func (dao *blockDAO) GetActionByActionHash(h hash.Hash256, height 
uint64) (action.SealedEnvelope, error) { blk, err := dao.blockStore.GetBlockByHeight(height) if err != nil { return action.SealedEnvelope{}, err } for _, act := range blk.Actions { if act.Hash() == h { return act, nil } } return action.SealedEnvelope{}, errors.Errorf("block %d does not have action %x", height, h) } func (dao *blockDAO) GetReceiptByActionHash(h hash.Hash256, height uint64) (*action.Receipt, error) { receipts, err := dao.blockStore.GetReceipts(height) if err != nil { return nil, err } for _, r := range receipts { if r.ActionHash == h { return r, nil } } return nil, errors.Errorf("receipt of action %x isn't found", h) } func (dao *blockDAO) GetReceipts(height uint64) ([]*action.Receipt, error) { timer := dao.timerFactory.NewTimer("get_receipt") defer timer.End() return dao.blockStore.GetReceipts(height) } func (dao *blockDAO) ContainsTransactionLog() bool { return dao.blockStore.ContainsTransactionLog() } func (dao *blockDAO) TransactionLogs(height uint64) (*iotextypes.TransactionLogs, error) { timer := dao.timerFactory.NewTimer("get_transactionlog") defer timer.End() return dao.blockStore.TransactionLogs(height) } func (dao *blockDAO) PutBlock(ctx context.Context, blk *block.Block) error { timer := dao.timerFactory.NewTimer("put_block") defer timer.End() var err error ctx, err = dao.fillWithBlockInfoAsTip(ctx, dao.tipHeight) if err != nil { return err } if err := dao.blockStore.PutBlock(ctx, blk); err != nil { return err } atomic.StoreUint64(&dao.tipHeight, blk.Height()) header := blk.Header lruCachePut(dao.headerCache, blk.Height(), &header) lruCachePut(dao.headerCache, header.HashHeader(), &header) // index the block if there's indexer for _, indexer := range dao.indexers { if err := indexer.PutBlock(ctx, blk); err != nil { return err } } return nil } func (dao *blockDAO) DeleteTipBlock() error { timer := dao.timerFactory.NewTimer("del_block") defer timer.End() return dao.blockStore.DeleteTipBlock() } func (dao *blockDAO) DeleteBlockToTarget(targetHeight uint64) error { tipHeight, err := dao.blockStore.Height() if err != nil { return err } for tipHeight > targetHeight { blk, err := dao.blockStore.GetBlockByHeight(tipHeight) if err != nil { return errors.Wrap(err, "failed to get tip block") } // delete block index if there's indexer for _, indexer := range dao.indexers { if err := indexer.DeleteTipBlock(blk); err != nil { return err } } if err := dao.blockStore.DeleteTipBlock(); err != nil { return err } // purge from cache h := blk.HashBlock() lruCacheDel(dao.headerCache, tipHeight) lruCacheDel(dao.headerCache, h) lruCacheDel(dao.bodyCache, tipHeight) lruCacheDel(dao.bodyCache, h) lruCacheDel(dao.footerCache, tipHeight) lruCacheDel(dao.footerCache, h) tipHeight-- atomic.StoreUint64(&dao.tipHeight, tipHeight) } return nil } func createBlockDAO(blkStore filedao.FileDAO, indexers []BlockIndexer, cfg config.DB) BlockDAO { if blkStore == nil { return nil } blockDAO := &blockDAO{ blockStore: blkStore, indexers: indexers, } blockDAO.lifecycle.Add(blkStore) for _, indexer := range indexers { blockDAO.lifecycle.Add(indexer) } if cfg.MaxCacheSize > 0 { blockDAO.headerCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize) blockDAO.bodyCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize) blockDAO.footerCache = cache.NewThreadSafeLruCache(cfg.MaxCacheSize) } timerFactory, err := prometheustimer.New( "iotex_block_dao_perf", "Performance of block DAO", []string{"type"}, []string{"default"}, ) if err != nil { return nil } blockDAO.timerFactory = timerFactory return blockDAO } func 
lruCacheGet(c *cache.ThreadSafeLruCache, key interface{}) (interface{}, bool) { if c != nil { return c.Get(key) } return nil, false } func lruCachePut(c *cache.ThreadSafeLruCache, k, v interface{}) { if c != nil { c.Add(k, v) } } func lruCacheDel(c *cache.ThreadSafeLruCache, k interface{}) { if c != nil { c.Remove(k) } }
1
22,978
Can you determine the type of the error here?
iotexproject-iotex-core
go
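The reviewer asks whether the error returned by filedao.NewFileDAO can be classified before it is logged fatally. Below is a minimal sketch of the standard errors.Is / errors.As pattern for that kind of branching; ErrBadConfig and the use of fs.PathError are stand-ins, since the concrete error values filedao returns are not shown in this record.

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// ErrBadConfig is a hypothetical sentinel error used only for this sketch.
var ErrBadConfig = errors.New("bad config")

// classify inspects a returned error and names its category, so the caller
// could log a configuration mistake differently from an I/O failure.
func classify(err error) string {
	var pathErr *fs.PathError
	switch {
	case errors.Is(err, ErrBadConfig):
		return "configuration error"
	case errors.As(err, &pathErr):
		return "filesystem error on " + pathErr.Path
	default:
		return "unknown error"
	}
}

func main() {
	fmt.Println(classify(fmt.Errorf("open store: %w", ErrBadConfig)))
	fmt.Println(classify(&fs.PathError{Op: "open", Path: "/tmp/chain.db", Err: errors.New("denied")}))
}

With a distinction like this, the added Fatal log could report why construction failed rather than emitting the same message for every error.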
@@ -204,7 +204,7 @@ class GridInterface(DictInterface): if edges and not isedges: data = cls._infer_interval_breaks(data) elif not edges and isedges: - data = np.convolve(data, [0.5, 0.5], 'valid') + data = data[:-1] + np.diff(data)/2. return data
1
from __future__ import absolute_import from collections import OrderedDict, defaultdict, Iterable try: import itertools.izip as zip except ImportError: pass import numpy as np array_types = (np.ndarray,) try: import dask.array as da array_types += (da.Array,) except ImportError: da = None from .dictionary import DictInterface from .interface import Interface, DataError from ..dimension import Dimension from ..element import Element from ..dimension import OrderedDict as cyODict from ..ndmapping import NdMapping, item_check from .. import util class GridInterface(DictInterface): """ Interface for simple dictionary-based dataset format using a compressed representation that uses the cartesian product between key dimensions. As with DictInterface, the dictionary keys correspond to the column (i.e dimension) names and the values are NumPy arrays representing the values in that column. To use this compressed format, the key dimensions must be orthogonal to one another with each key dimension specifying an axis of the multidimensional space occupied by the value dimension data. For instance, given an temperature recordings sampled regularly across the earth surface, a list of N unique latitudes and M unique longitudes can specify the position of NxM temperature samples. """ types = (dict, OrderedDict, cyODict) datatype = 'grid' gridded = True @classmethod def init(cls, eltype, data, kdims, vdims): if kdims is None: kdims = eltype.kdims if vdims is None: vdims = eltype.vdims if not vdims: raise ValueError('GridInterface interface requires at least ' 'one value dimension.') ndims = len(kdims) dimensions = [d.name if isinstance(d, Dimension) else d for d in kdims + vdims] if isinstance(data, tuple): data = {d: v for d, v in zip(dimensions, data)} elif isinstance(data, list) and data == []: data = OrderedDict([(d, []) for d in dimensions]) elif not any(isinstance(data, tuple(t for t in interface.types if t is not None)) for interface in cls.interfaces.values()): data = {k: v for k, v in zip(dimensions, zip(*data))} elif isinstance(data, np.ndarray): if data.ndim == 1: if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1: data = np.column_stack([np.arange(len(data)), data]) else: data = np.atleast_2d(data).T data = {k: data[:,i] for i,k in enumerate(dimensions)} elif isinstance(data, list) and data == []: data = {d: np.array([]) for d in dimensions[:ndims]} data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]}) elif not isinstance(data, dict): raise TypeError('GridInterface must be instantiated as a ' 'dictionary or tuple') for dim in kdims+vdims: name = dim.name if isinstance(dim, Dimension) else dim if name not in data: raise ValueError("Values for dimension %s not found" % dim) if not isinstance(data[name], array_types): data[name] = np.array(data[name]) kdim_names = [d.name if isinstance(d, Dimension) else d for d in kdims] vdim_names = [d.name if isinstance(d, Dimension) else d for d in vdims] expected = tuple([len(data[kd]) for kd in kdim_names]) irregular_shape = data[kdim_names[0]].shape if kdim_names else () valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1] shapes = tuple([data[kd].shape for kd in kdim_names]) for vdim in vdim_names: shape = data[vdim].shape error = DataError if len(shape) > 1 else ValueError if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1): # If empty or an irregular mesh pass elif len(shape) != len(expected): raise error('The shape of the %s value array does not ' 'match the expected 
dimensionality indicated ' 'by the key dimensions. Expected %d-D array, ' 'found %d-D array.' % (vdim, len(expected), len(shape))) elif any((s!=e and (s+1)!=e) for s, e in zip(shape, valid_shape)): raise error('Key dimension values and value array %s ' 'shapes do not match. Expected shape %s, ' 'actual shape: %s' % (vdim, valid_shape, shape), cls) return data, {'kdims':kdims, 'vdims':vdims}, {} @classmethod def irregular(cls, dataset, dim): return dataset.data[dim.name if isinstance(dim, Dimension) else dim].ndim > 1 @classmethod def isscalar(cls, dataset, dim): return np.unique(cls.values(dataset, dim, expanded=False)) == 1 @classmethod def validate(cls, dataset, vdims=True): Interface.validate(dataset, vdims) @classmethod def dimension_type(cls, dataset, dim): if dim in dataset.dimensions(): arr = cls.values(dataset, dim, False, False) else: return None return arr.dtype.type @classmethod def shape(cls, dataset, gridded=False): shape = dataset.data[dataset.vdims[0].name].shape if gridded: return shape else: return (np.product(shape), len(dataset.dimensions())) @classmethod def length(cls, dataset): return cls.shape(dataset)[0] @classmethod def _infer_interval_breaks(cls, coord, axis=0): """ >>> GridInterface._infer_interval_breaks(np.arange(5)) array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]) >>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1) array([[-0.5, 0.5, 1.5], [ 2.5, 3.5, 4.5]]) """ coord = np.asarray(coord) deltas = 0.5 * np.diff(coord, axis=axis) first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis) last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis) trim_last = tuple(slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)) return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis) @classmethod def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False): """ Returns the coordinates along a dimension. Ordered ensures coordinates are in ascending order and expanded creates ND-array matching the dimensionality of the dataset. """ dim = dataset.get_dimension(dim, strict=True) irregular = cls.irregular(dataset, dim) if irregular or expanded: if irregular: data = dataset.data[dim.name] else: data = util.expand_grid_coords(dataset, dim) if edges and data.shape == dataset.data[dataset.vdims[0].name].shape: data = cls._infer_interval_breaks(data, axis=1) data = cls._infer_interval_breaks(data, axis=0) return data data = dataset.data[dim.name] if ordered and np.all(data[1:] < data[:-1]): data = data[::-1] shape = cls.shape(dataset, True) if dim in dataset.kdims: idx = dataset.get_dimension_index(dim) isedges = (dim in dataset.kdims and len(shape) == dataset.ndims and len(data) == (shape[dataset.ndims-idx-1]+1)) else: isedges = False if edges and not isedges: data = cls._infer_interval_breaks(data) elif not edges and isedges: data = np.convolve(data, [0.5, 0.5], 'valid') return data @classmethod def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]): """ Canonicalize takes an array of values as input and reorients and transposes it to match the canonical format expected by plotting functions. In certain cases the dimensions defined via the kdims of an Element may not match the dimensions of the underlying data. A set of data_coords may be passed in to define the dimensionality of the data, which can then be used to np.squeeze the data to remove any constant dimensions. If the data is also irregular, i.e. 
contains multi-dimensional coordinates, a set of virtual_coords can be supplied, required by some interfaces (e.g. xarray) to index irregular datasets with a virtual integer index. This ensures these coordinates are not simply dropped. """ if data_coords is None: data_coords = dataset.dimensions('key', label='name')[::-1] # Reorient data invert = False slices = [] for d in data_coords: coords = cls.coords(dataset, d) if np.all(coords[1:] < coords[:-1]): slices.append(slice(None, None, -1)) invert = True else: slices.append(slice(None)) data = data[tuple(slices)] if invert else data # Transpose data dims = [name for name in data_coords if isinstance(cls.coords(dataset, name), array_types)] dropped = [dims.index(d) for d in dims if d not in dataset.kdims+virtual_coords] if dropped: data = np.squeeze(data, axis=tuple(dropped)) if not any(cls.irregular(dataset, d) for d in dataset.kdims): inds = [dims.index(kd.name) for kd in dataset.kdims] inds = [i - sum([1 for d in dropped if i>=d]) for i in inds] if inds: data = data.transpose(inds[::-1]) # Allow lower dimensional views into data if len(dataset.kdims) < 2: data = data.flatten() return data @classmethod def invert_index(cls, index, length): if np.isscalar(index): return length - index elif isinstance(index, slice): start, stop = index.start, index.stop new_start, new_stop = None, None if start is not None: new_stop = length - start if stop is not None: new_start = length - stop return slice(new_start-1, new_stop-1) elif isinstance(index, Iterable): new_index = [] for ind in index: new_index.append(length-ind) return new_index @classmethod def ndloc(cls, dataset, indices): selected = {} adjusted_inds = [] all_scalar = True for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)): coords = cls.coords(dataset, kd.name, True) if np.isscalar(ind): ind = [ind] else: all_scalar = False selected[kd.name] = coords[ind] adjusted_inds.append(ind) for kd in dataset.kdims: if kd.name not in selected: coords = cls.coords(dataset, kd.name) selected[kd.name] = coords all_scalar = False for d in dataset.dimensions(): if d in dataset.kdims and not cls.irregular(dataset, d): continue arr = cls.values(dataset, d, flat=False, compute=False) if all_scalar and len(dataset.vdims) == 1: return arr[tuple(ind[0] for ind in adjusted_inds)] selected[d.name] = arr[tuple(adjusted_inds)] return tuple(selected[d.name] for d in dataset.dimensions()) @classmethod def values(cls, dataset, dim, expanded=True, flat=True, compute=True): dim = dataset.get_dimension(dim, strict=True) if dim in dataset.vdims or dataset.data[dim.name].ndim > 1: data = dataset.data[dim.name] data = cls.canonicalize(dataset, data) if compute and da and isinstance(data, da.Array): data = data.compute() return data.T.flatten() if flat else data elif expanded: data = cls.coords(dataset, dim.name, expanded=True) return data.T.flatten() if flat else data else: return cls.coords(dataset, dim.name, ordered=True) @classmethod def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs): # Get dimensions information dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names] if 'kdims' in kwargs: kdims = kwargs['kdims'] else: kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions] kwargs['kdims'] = kdims invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1] if invalid: if len(invalid) == 1: invalid = "'%s'" % invalid[0] raise ValueError("Cannot groupby irregularly sampled dimension(s) %s." 
% invalid) # Update the kwargs appropriately for Element group types group_kwargs = {} group_type = dict if group_type == 'raw' else group_type if issubclass(group_type, Element): group_kwargs.update(util.get_param_values(dataset)) else: kwargs.pop('kdims') group_kwargs.update(kwargs) drop_dim = any(d not in group_kwargs['kdims'] for d in kdims) # Find all the keys along supplied dimensions keys = [cls.coords(dataset, d.name) for d in dimensions] transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims] transpose += [i for i in range(dataset.ndims) if i not in transpose] # Iterate over the unique entries applying selection masks grouped_data = [] for unique_key in zip(*util.cartesian_product(keys)): select = dict(zip(dim_names, unique_key)) if drop_dim: group_data = dataset.select(**select) group_data = group_data if np.isscalar(group_data) else group_data.columns() else: group_data = cls.select(dataset, **select) if np.isscalar(group_data) or (isinstance(group_data, array_types) and group_data.shape == ()): group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)} for dim, v in zip(dim_names, unique_key): group_data[dim] = np.atleast_1d(v) elif not drop_dim: if isinstance(group_data, array_types): group_data = {dataset.vdims[0].name: group_data} for vdim in dataset.vdims: data = group_data[vdim.name] data = data.transpose(transpose[::-1]) group_data[vdim.name] = np.squeeze(data) group_data = group_type(group_data, **group_kwargs) grouped_data.append((tuple(unique_key), group_data)) if issubclass(container_type, NdMapping): with item_check(False): return container_type(grouped_data, kdims=dimensions) else: return container_type(grouped_data) @classmethod def key_select_mask(cls, dataset, values, ind): if isinstance(ind, tuple): ind = slice(*ind) if isinstance(ind, array_types): mask = ind elif isinstance(ind, slice): mask = True if ind.start is not None: mask &= ind.start <= values if ind.stop is not None: mask &= values < ind.stop # Expand empty mask if mask is True: mask = np.ones(values.shape, dtype=np.bool) elif isinstance(ind, (set, list)): iter_slcs = [] for ik in ind: iter_slcs.append(values == ik) mask = np.logical_or.reduce(iter_slcs) elif callable(ind): mask = ind(values) elif ind is None: mask = None else: index_mask = values == ind if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0: data_index = np.argmin(np.abs(values - ind)) mask = np.zeros(len(values), dtype=np.bool) mask[data_index] = True else: mask = index_mask if mask is None: mask = np.ones(values.shape, dtype=bool) return mask @classmethod def select(cls, dataset, selection_mask=None, **selection): dimensions = dataset.kdims val_dims = [vdim for vdim in dataset.vdims if vdim in selection] if val_dims: raise IndexError('Cannot slice value dimensions in compressed format, ' 'convert to expanded format before slicing.') indexed = cls.indexed(dataset, selection) full_selection = [(d, selection.get(d.name, selection.get(d.label))) for d in dimensions] data = {} value_select = [] for i, (dim, ind) in enumerate(full_selection): irregular = cls.irregular(dataset, dim) values = cls.coords(dataset, dim, irregular) mask = cls.key_select_mask(dataset, values, ind) if irregular: if np.isscalar(ind) or isinstance(ind, (set, list)): raise IndexError("Indexing not supported for irregularly " "sampled data. %s value along %s dimension." "must be a slice or 2D boolean mask." 
% (ind, dim)) mask = mask.max(axis=i) elif dataset._binned: edges = cls.coords(dataset, dim, False, edges=True) inds = np.argwhere(mask) if np.isscalar(ind): emin, emax = edges.min(), edges.max() if ind < emin: raise IndexError("Index %s less than lower bound " "of %s for %s dimension." % (ind, emin, dim)) elif ind >= emax: raise IndexError("Index %s more than or equal to upper bound " "of %s for %s dimension." % (ind, emax, dim)) idx = max([np.digitize([ind], edges)[0]-1, 0]) mask = np.zeros(len(values), dtype=np.bool) mask[idx] = True values = edges[idx:idx+2] elif len(inds): values = edges[inds.min(): inds.max()+2] else: values = edges[0:0] else: values = values[mask] values, mask = np.asarray(values), np.asarray(mask) value_select.append(mask) data[dim.name] = np.array([values]) if np.isscalar(values) else values int_inds = [np.argwhere(v) for v in value_select][::-1] index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind) for ind in int_inds]) for kdim in dataset.kdims: if cls.irregular(dataset, dim): if da and isinstance(dataset.data[kdim.name], da.Array): data[kdim.name] = dataset.data[kdim.name].vindex[index] else: data[kdim.name] = np.asarray(data[kdim.name])[index] for vdim in dataset.vdims: if da and isinstance(dataset.data[vdim.name], da.Array): data[vdim.name] = dataset.data[vdim.name].vindex[index] else: data[vdim.name] = np.asarray(dataset.data[vdim.name])[index] if indexed: if len(dataset.vdims) == 1: arr = np.squeeze(data[dataset.vdims[0].name]) if da and isinstance(arr, da.Array): arr = arr.compute() return arr if np.isscalar(arr) else arr[()] else: return np.array([np.squeeze(data[vd.name]) for vd in dataset.vdims]) return data @classmethod def sample(cls, dataset, samples=[]): """ Samples the gridded data into dataset of samples. 
""" ndims = dataset.ndims dimensions = dataset.dimensions(label='name') arrays = [dataset.data[vdim.name] for vdim in dataset.vdims] data = defaultdict(list) for sample in samples: if np.isscalar(sample): sample = [sample] if len(sample) != ndims: sample = [sample[i] if i < len(sample) else None for i in range(ndims)] sampled, int_inds = [], [] for d, ind in zip(dimensions, sample): cdata = dataset.data[d] mask = cls.key_select_mask(dataset, cdata, ind) inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask) int_inds.append(inds) sampled.append(cdata[mask]) for d, arr in zip(dimensions, np.meshgrid(*sampled)): data[d].append(arr) for vdim, array in zip(dataset.vdims, arrays): flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape) if da and isinstance(array, da.Array): data[vdim.name].append(array.flatten().vindex[tuple(flat_index)]) else: data[vdim.name].append(array.flat[flat_index]) concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()} return concatenated @classmethod def aggregate(cls, dataset, kdims, function, **kwargs): kdims = [kd.name if isinstance(kd, Dimension) else kd for kd in kdims] data = {kdim: dataset.data[kdim] for kdim in kdims} axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1 for kdim in dataset.kdims if kdim not in kdims) for vdim in dataset.vdims: data[vdim.name] = np.atleast_1d(function(dataset.data[vdim.name], axis=axes, **kwargs)) return data @classmethod def reindex(cls, dataset, kdims, vdims): dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims] dropped_vdims = ([vdim for vdim in dataset.vdims if vdim not in vdims] if vdims else []) constant = {} for kd in dropped_kdims: vals = cls.values(dataset, kd.name, expanded=False) if len(vals) == 1: constant[kd.name] = vals[0] data = {k: values for k, values in dataset.data.items() if k not in dropped_kdims+dropped_vdims} if len(constant) == len(dropped_kdims): joined_dims = kdims+dropped_kdims axes = tuple(dataset.ndims-dataset.kdims.index(d)-1 for d in joined_dims) dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1 for d in dropped_kdims) for vdim in vdims: vdata = data[vdim.name] if len(axes) > 1: vdata = vdata.transpose(axes[::-1]) if dropped_axes: vdata = np.squeeze(vdata, axis=dropped_axes) data[vdim.name] = vdata return data elif dropped_kdims: return tuple(dataset.columns(kdims+vdims).values()) return data @classmethod def add_dimension(cls, dataset, dimension, dim_pos, values, vdim): if not vdim: raise Exception("Cannot add key dimension to a dense representation.") dim = dimension.name if isinstance(dimension, Dimension) else dimension return dict(dataset.data, **{dim: values}) @classmethod def sort(cls, dataset, by=[], reverse=False): if not by or by in [dataset.kdims, dataset.dimensions()]: return dataset.data else: raise Exception('Compressed format cannot be sorted, either instantiate ' 'in the desired order or use the expanded format.') @classmethod def iloc(cls, dataset, index): rows, cols = index scalar = False if np.isscalar(cols): scalar = np.isscalar(rows) cols = [dataset.get_dimension(cols, strict=True)] elif isinstance(cols, slice): cols = dataset.dimensions()[cols] else: cols = [dataset.get_dimension(d, strict=True) for d in cols] if np.isscalar(rows): rows = [rows] new_data = [] for d in cols: new_data.append(cls.values(dataset, d, compute=False)[rows]) if scalar: if new_data and isinstance(new_data[0], da.Array): return new_data[0].compute()[0] return new_data[0][0] return tuple(new_data) @classmethod def 
range(cls, dataset, dimension): if dataset._binned and dimension in dataset.kdims: expanded = cls.irregular(dataset, dimension) column = cls.coords(dataset, dimension, expanded=expanded, edges=True) else: column = cls.values(dataset, dimension, flat=False) if column.dtype.kind == 'M': dmin, dmax = column.min(), column.max() if da and isinstance(column, da.Array): return da.compute(dmin, dmax) return dmin, dmax elif len(column) == 0: return np.NaN, np.NaN else: try: dmin, dmax = (np.nanmin(column), np.nanmax(column)) if da and isinstance(column, da.Array): return da.compute(dmin, dmax) return dmin, dmax except TypeError: column.sort() return column[0], column[-1] Interface.register(GridInterface)
1
21,055
Much simpler than a weird and confusing ``convolve`` call!
holoviz-holoviews
py
@@ -739,8 +739,8 @@ describe('suspense', () => { expect(scratch.innerHTML).to.eql( `<div>Hello first 2</div><div>Hello second 2</div>` ); - expect(Suspender1.prototype.render).to.have.been.calledThrice; - expect(Suspender2.prototype.render).to.have.been.calledThrice; + expect(Suspender1.prototype.render).to.have.been.calledTwice; + expect(Suspender2.prototype.render).to.have.been.calledTwice; }); }); });
1
import { setupRerender } from 'preact/test-utils'; import React, { createElement, render, Component, Suspense, lazy, Fragment, createContext } from 'preact/compat'; import { setupScratch, teardown } from '../../../test/_util/helpers'; const h = React.createElement; /* eslint-env browser, mocha */ /** * @typedef {import('../../../src').ComponentType<any>} ComponentType * @returns {[typeof Component, (c: ComponentType) => Promise<void>, (c: ComponentType) => void]} */ function createLazy() { /** @type {(c: ComponentType) => Promise<void>} */ let resolver, rejecter; const Lazy = lazy(() => { let promise = new Promise((resolve, reject) => { resolver = c => { resolve({ default: c }); return promise; }; rejecter = () => { reject(); return promise; }; }); return promise; }); return [Lazy, c => resolver(c), e => rejecter(e)]; } /** * @typedef {[(c: ComponentType) => Promise<void>, (error: Error) => Promise<void>]} Resolvers * @param {ComponentType} DefaultComponent * @returns {[typeof Component, () => Resolvers]} */ export function createSuspender(DefaultComponent) { /** @type {(lazy: React.JSX.Element) => void} */ let renderLazy; class Suspender extends Component { constructor(props, context) { super(props, context); this.state = { Lazy: null }; renderLazy = Lazy => this.setState({ Lazy }); } render(props, state) { return state.Lazy ? h(state.Lazy, props) : h(DefaultComponent, props); } } sinon.spy(Suspender.prototype, 'render'); /** * @returns {Resolvers} */ function suspend() { const [Lazy, resolve, reject] = createLazy(); renderLazy(Lazy); return [resolve, reject]; } return [Suspender, suspend]; } class Catcher extends Component { constructor(props) { super(props); this.state = { error: false }; } componentDidCatch(e) { if (e.then) { this.setState({ error: { message: '{Promise}' } }); } else { this.setState({ error: e }); } } render(props, state) { return state.error ? 
( <div>Catcher did catch: {state.error.message}</div> ) : ( props.children ); } } describe('suspense', () => { /** @type {HTMLDivElement} */ let scratch, rerender, unhandledEvents = []; function onUnhandledRejection(event) { unhandledEvents.push(event); } beforeEach(() => { scratch = setupScratch(); rerender = setupRerender(); unhandledEvents = []; if ('onunhandledrejection' in window) { window.addEventListener('unhandledrejection', onUnhandledRejection); } }); afterEach(() => { teardown(scratch); if ('onunhandledrejection' in window) { window.removeEventListener('unhandledrejection', onUnhandledRejection); if (unhandledEvents.length) { throw unhandledEvents[0].reason; } } }); it('should support lazy', () => { const LazyComp = ({ name }) => <div>Hello from {name}</div>; /** @type {() => Promise<void>} */ let resolve; const Lazy = lazy(() => { const p = new Promise(res => { resolve = () => { res({ default: LazyComp }); return p; }; }); return p; }); render( <Suspense fallback={<div>Suspended...</div>}> <Lazy name="LazyComp" /> </Suspense>, scratch ); // Render initial state rerender(); // Re-render with fallback cuz lazy threw expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve().then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>Hello from LazyComp</div>`); }); }); it('lazy should forward refs', () => { const LazyComp = () => <div>Hello from LazyComp</div>; let ref = {}; /** @type {() => Promise<void>} */ let resolve; const Lazy = lazy(() => { const p = new Promise(res => { resolve = () => { res({ default: LazyComp }); return p; }; }); return p; }); render( <Suspense fallback={<div>Suspended...</div>}> <Lazy ref={ref} /> </Suspense>, scratch ); rerender(); return resolve().then(() => { rerender(); expect(ref.current.constructor).to.equal(LazyComp); }); }); it('should suspend when a promise is thrown', () => { class ClassWrapper extends Component { render(props) { return <div id="class-wrapper">{props.children}</div>; } } const FuncWrapper = props => <div id="func-wrapper">{props.children}</div>; const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Suspense fallback={<div>Suspended...</div>}> <ClassWrapper> <FuncWrapper> <Suspender /> </FuncWrapper> </ClassWrapper> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div id="class-wrapper"><div id="func-wrapper"><div>Hello</div></div></div>` ); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Hello2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div id="class-wrapper"><div id="func-wrapper"><div>Hello2</div></div></div>` ); }); }); it('should not call lifecycle methods of an initially suspending component', () => { let componentWillMount = sinon.spy(); let componentDidMount = sinon.spy(); let componentWillUnmount = sinon.spy(); /** @type {() => Promise<void>} */ let resolve; let resolved = false; const promise = new Promise(_resolve => { resolve = () => { resolved = true; _resolve(); return promise; }; }); class LifecycleSuspender extends Component { render() { if (!resolved) { throw promise; } return <div>Lifecycle</div>; } componentWillMount() { componentWillMount(); } componentDidMount() { componentDidMount(); } componentWillUnmount() { componentWillUnmount(); } } render( <Suspense fallback={<div>Suspended...</div>}> <LifecycleSuspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(``); expect(componentWillMount).to.have.been.calledOnce; 
expect(componentDidMount).to.not.have.been.called; expect(componentWillUnmount).to.not.have.been.called; rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.not.have.been.called; expect(componentWillUnmount).to.not.have.been.called; return resolve().then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>Lifecycle</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; }); }); it('should properly call lifecycle methods and maintain state of a delayed suspending component', () => { let componentWillMount = sinon.spy(); let componentDidMount = sinon.spy(); let componentDidUpdate = sinon.spy(); let componentWillUnmount = sinon.spy(); /** @type {() => void} */ let increment; /** @type {() => Promise<void>} */ let resolve; let resolved = false; const promise = new Promise(_resolve => { resolve = () => { resolved = true; _resolve(); return promise; }; }); class LifecycleSuspender extends Component { constructor(props) { super(props); this.state = { count: 0 }; increment = () => this.setState(({ count }) => ({ count: count + 1 })); } render() { if (this.state.count == 2 && !resolved) { throw promise; } return ( <Fragment> <p>Count: {this.state.count}</p> </Fragment> ); } componentWillMount() { componentWillMount(); } componentDidMount() { componentDidMount(); } componentWillUnmount() { componentWillUnmount(); } componentDidUpdate() { componentDidUpdate(); } } render( <Suspense fallback={<div>Suspended...</div>}> <LifecycleSuspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<p>Count: 0</p>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentDidUpdate).to.not.have.been.called; expect(componentWillUnmount).to.not.have.been.called; increment(); rerender(); expect(scratch.innerHTML).to.eql(`<p>Count: 1</p>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentDidUpdate).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; increment(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentDidUpdate).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; return resolve().then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<p>Count: 2</p>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; // TODO: This is called thrice since the cDU queued up after the second // increment is never cleared once the component suspends. So when it // resumes and the component is rerendered, we queue up another cDU so // cDU is called an extra time. 
expect(componentDidUpdate).to.have.been.calledThrice; expect(componentWillUnmount).to.not.have.been.called; }); }); it('should not call lifecycle methods when a sibling suspends', () => { let componentWillMount = sinon.spy(); let componentDidMount = sinon.spy(); let componentWillUnmount = sinon.spy(); class LifecycleLogger extends Component { render() { return <div>Lifecycle</div>; } componentWillMount() { componentWillMount(); } componentDidMount() { componentDidMount(); } componentWillUnmount() { componentWillUnmount(); } } const [Suspender, suspend] = createSuspender(() => <div>Suspense</div>); render( <Suspense fallback={<div>Suspended...</div>}> <Suspender /> <LifecycleLogger /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>Suspense</div><div>Lifecycle</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; return resolve(() => <div>Suspense 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense 2</div><div>Lifecycle</div>` ); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; }); }); it("should call fallback's lifecycle methods when suspending", () => { class LifecycleLogger extends Component { render() { return <div>Lifecycle</div>; } componentWillMount() {} componentDidMount() {} componentWillUnmount() {} } const componentWillMount = sinon.spy( LifecycleLogger.prototype, 'componentWillMount' ); const componentDidMount = sinon.spy( LifecycleLogger.prototype, 'componentDidMount' ); const componentWillUnmount = sinon.spy( LifecycleLogger.prototype, 'componentWillUnmount' ); const [Suspender, suspend] = createSuspender(() => <div>Suspense</div>); render( <Suspense fallback={<LifecycleLogger />}> <Suspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>Suspense</div>`); expect(componentWillMount).to.not.have.been.called; expect(componentDidMount).to.not.have.been.called; expect(componentWillUnmount).to.not.have.been.called; const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Lifecycle</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.not.have.been.called; return resolve(() => <div>Suspense 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspense 2</div>`); expect(componentWillMount).to.have.been.calledOnce; expect(componentDidMount).to.have.been.calledOnce; expect(componentWillUnmount).to.have.been.calledOnce; }); }); it('should keep state of siblings when suspending', () => { /** @type {(state: { s: string }) => void} */ let setState; class Stateful extends Component { constructor(props) { super(props); setState = this.setState.bind(this); this.state = { s: 'initial' }; } render(props, state) { return <div>Stateful: {state.s}</div>; } } const [Suspender, suspend] = createSuspender(() => <div>Suspense</div>); render( <Suspense fallback={<div>Suspended...</div>}> <Suspender /> <Stateful /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: initial</div>` ); setState({ s: 
'first' }); rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: first</div>` ); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Suspense 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense 2</div><div>Stateful: first</div>` ); }); }); // TODO: Fix this test it.skip('should allow children to update state while suspending', () => { /** @type {(state: { s: string }) => void} */ let setState; class Stateful extends Component { constructor(props) { super(props); setState = this.setState.bind(this); this.state = { s: 'initial' }; } render(props, state) { return <div>Stateful: {state.s}</div>; } } const [Suspender, suspend] = createSuspender(() => <div>Suspense</div>); render( <Suspense fallback={<div>Suspended...</div>}> <Suspender /> <Stateful /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: initial</div>` ); setState({ s: 'first' }); rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: first</div>` ); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); setState({ s: 'second' }); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Suspense 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense 2</div><div>Stateful: second</div>` ); }); }); it('should allow siblings of Suspense to update state while suspending', () => { /** @type {(state: { s: string }) => void} */ let setState; class Stateful extends Component { constructor(props) { super(props); setState = this.setState.bind(this); this.state = { s: 'initial' }; } render(props, state) { return <div>Stateful: {state.s}</div>; } } const [Suspender, suspend] = createSuspender(() => <div>Suspense</div>); render( <Fragment> <Suspense fallback={<div>Suspended...</div>}> <Suspender /> </Suspense> <Stateful /> </Fragment>, scratch ); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: initial</div>` ); setState({ s: 'first' }); rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense</div><div>Stateful: first</div>` ); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspended...</div><div>Stateful: first</div>` ); setState({ s: 'second' }); rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspended...</div><div>Stateful: second</div>` ); return resolve(() => <div>Suspense 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Suspense 2</div><div>Stateful: second</div>` ); }); }); it('should suspend with custom error boundary', () => { const [Suspender, suspend] = createSuspender(() => ( <div>within error boundary</div> )); render( <Suspense fallback={<div>Suspended...</div>}> <Catcher> <Suspender /> </Catcher> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>within error boundary</div>`); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>within error boundary 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>within error boundary 2</div>`); }); }); it('should allow multiple sibling children to suspend', () => { const [Suspender1, suspend1] = createSuspender(() => ( <div>Hello first</div> )); const [Suspender2, suspend2] = createSuspender(() => ( <div>Hello second</div> )); render( <Suspense fallback={<div>Suspended...</div>}> 
<Catcher> <Suspender1 /> <Suspender2 /> </Catcher> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div>Hello first</div><div>Hello second</div>` ); expect(Suspender1.prototype.render).to.have.been.calledOnce; expect(Suspender2.prototype.render).to.have.been.calledOnce; const [resolve1] = suspend1(); const [resolve2] = suspend2(); expect(Suspender1.prototype.render).to.have.been.calledOnce; expect(Suspender2.prototype.render).to.have.been.calledOnce; rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(Suspender1.prototype.render).to.have.been.calledTwice; expect(Suspender2.prototype.render).to.have.been.calledTwice; return resolve1(() => <div>Hello first 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(Suspender1.prototype.render).to.have.been.calledTwice; expect(Suspender2.prototype.render).to.have.been.calledTwice; return resolve2(() => <div>Hello second 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Hello first 2</div><div>Hello second 2</div>` ); expect(Suspender1.prototype.render).to.have.been.calledThrice; expect(Suspender2.prototype.render).to.have.been.calledThrice; }); }); }); it('should call multiple nested sibling suspending components render in one go', () => { const [Suspender1, suspend1] = createSuspender(() => ( <div>Hello first</div> )); const [Suspender2, suspend2] = createSuspender(() => ( <div>Hello second</div> )); render( <Suspense fallback={<div>Suspended...</div>}> <Catcher> <Suspender1 /> <div> <Suspender2 /> </div> </Catcher> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div>Hello first</div><div><div>Hello second</div></div>` ); expect(Suspender1.prototype.render).to.have.been.calledOnce; expect(Suspender2.prototype.render).to.have.been.calledOnce; const [resolve1] = suspend1(); const [resolve2] = suspend2(); expect(Suspender1.prototype.render).to.have.been.calledOnce; expect(Suspender2.prototype.render).to.have.been.calledOnce; rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(Suspender1.prototype.render).to.have.been.calledTwice; expect(Suspender2.prototype.render).to.have.been.calledTwice; return resolve1(() => <div>Hello first 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); expect(Suspender1.prototype.render).to.have.been.calledTwice; expect(Suspender2.prototype.render).to.have.been.calledTwice; return resolve2(() => <div>Hello second 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Hello first 2</div><div><div>Hello second 2</div></div>` ); expect(Suspender1.prototype.render).to.have.been.calledThrice; expect(Suspender2.prototype.render).to.have.been.calledThrice; }); }); }); it('should support text directly under Suspense', () => { const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Suspense fallback={<div>Suspended...</div>}> Text {/* Adding a <div> here will make things work... 
*/} <Suspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`Text<div>Hello</div>`); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Hello 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`Text<div>Hello 2</div>`); }); }); it('should support to change DOM tag directly under suspense', () => { /** @type {(state: {tag: string}) => void} */ let setState; class StatefulComp extends Component { constructor(props) { super(props); setState = this.setState.bind(this); this.state = { tag: props.defaultTag }; } render(props, { tag: Tag }) { return <Tag>Stateful</Tag>; } } const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Suspense fallback={<div>Suspended...</div>}> <StatefulComp defaultTag="div" /> <Suspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>Stateful</div><div>Hello</div>`); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); setState({ tag: 'article' }); return resolve(() => <div>Hello 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<article>Stateful</article><div>Hello 2</div>` ); }); }); it('should only suspend the most inner Suspend', () => { const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Suspense fallback={<div>Suspended... 1</div>}> Not suspended... <Suspense fallback={<div>Suspended... 2</div>}> <Catcher> <Suspender /> </Catcher> </Suspense> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`Not suspended...<div>Hello</div>`); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql( `Not suspended...<div>Suspended... 2</div>` ); return resolve(() => <div>Hello 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`Not suspended...<div>Hello 2</div>`); }); }); it('should throw when missing Suspense', () => { const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Catcher> <Suspender /> </Catcher>, scratch ); rerender(); expect(scratch.innerHTML).to.eql(`<div>Hello</div>`); suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Catcher did catch: {Promise}</div>`); }); it("should throw when lazy's loader throws", () => { /** @type {() => Promise<any>} */ let reject; const ThrowingLazy = lazy(() => { const prom = new Promise((res, rej) => { reject = () => { rej(new Error("Thrown in lazy's loader...")); return prom; }; }); return prom; }); render( <Suspense fallback={<div>Suspended...</div>}> <Catcher> <ThrowingLazy /> </Catcher> </Suspense>, scratch ); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return reject().then( () => { expect.fail('Suspended promises resolved instead of rejected.'); }, () => { rerender(); expect(scratch.innerHTML).to.eql( `<div>Catcher did catch: Thrown in lazy's loader...</div>` ); } ); }); it('should support null fallback', () => { const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <div id="wrapper"> <Suspense fallback={null}> <div id="inner"> <Suspender /> </div> </Suspense> </div>, scratch ); expect(scratch.innerHTML).to.equal( `<div id="wrapper"><div id="inner"><div>Hello</div></div></div>` ); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.equal(`<div id="wrapper"></div>`); return resolve(() => <div>Hello2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.equal( `<div id="wrapper"><div id="inner"><div>Hello2</div></div></div>` ); }); }); 
it('should support suspending multiple times', () => { const [Suspender, suspend] = createSuspender(() => ( <div>initial render</div> )); const Loading = () => <div>Suspended...</div>; render( <Suspense fallback={<Loading />}> <Suspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>initial render</div>`); let [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Hello1</div>) .then(() => { // Rerender promise resolution rerender(); expect(scratch.innerHTML).to.eql(`<div>Hello1</div>`); // suspend again [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); return resolve(() => <div>Hello2</div>); }) .then(() => { // Rerender promise resolution rerender(); expect(scratch.innerHTML).to.eql(`<div>Hello2</div>`); }); }); it("should correctly render when a suspended component's child also suspends", () => { const [Suspender1, suspend1] = createSuspender(() => <div>Hello1</div>); const [LazyChild, resolveChild] = createLazy(); render( <Suspense fallback={<div>Suspended...</div>}> <Suspender1 /> </Suspense>, scratch ); expect(scratch.innerHTML).to.equal(`<div>Hello1</div>`); let [resolve1] = suspend1(); rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolve1(() => <LazyChild />) .then(() => { rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolveChild(() => <div>All done!</div>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.equal('<div>All done!</div>'); }); }); it('should correctly render nested Suspense components', () => { // Inspired by the nested-suspense demo from #1865 // TODO: Explore writing a test that varies the loading orders const [Lazy1, resolve1] = createLazy(); const [Lazy2, resolve2] = createLazy(); const [Lazy3, resolve3] = createLazy(); const Loading = () => <div>Suspended...</div>; const loadingHtml = `<div>Suspended...</div>`; render( <Suspense fallback={<Loading />}> <Lazy1 /> <div> <Suspense fallback={<Loading />}> <Lazy2 /> </Suspense> <Lazy3 /> </div> <b>4</b> </Suspense>, scratch ); rerender(); // Rerender with the fallback HTML expect(scratch.innerHTML).to.equal(loadingHtml); return resolve1(() => <b>1</b>) .then(() => { rerender(); expect(scratch.innerHTML).to.equal(loadingHtml); return resolve3(() => <b>3</b>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.equal( `<b>1</b><div>${loadingHtml}<b>3</b></div><b>4</b>` ); return resolve2(() => <b>2</b>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.equal( `<b>1</b><div><b>2</b><b>3</b></div><b>4</b>` ); }); }); it('should correctly render Suspense components inside Fragments', () => { // Issue #2106. 
const [Lazy1, resolve1] = createLazy(); const [Lazy2, resolve2] = createLazy(); const [Lazy3, resolve3] = createLazy(); const Loading = () => <div>Suspended...</div>; const loadingHtml = `<div>Suspended...</div>`; render( <Fragment> <Suspense fallback={<Loading />}> <Lazy1 /> </Suspense> <Fragment> <Suspense fallback={<Loading />}> <Lazy2 /> </Suspense> </Fragment> <Suspense fallback={<Loading />}> <Lazy3 /> </Suspense> </Fragment>, scratch ); rerender(); expect(scratch.innerHTML).to.eql( `${loadingHtml}${loadingHtml}${loadingHtml}` ); return resolve2(() => <span>2</span>) .then(() => { return resolve1(() => <span>1</span>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<span>1</span><span>2</span>${loadingHtml}` ); return resolve3(() => <span>3</span>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.eql( `<span>1</span><span>2</span><span>3</span>` ); }); }); it('should not render any of the children if one child suspends', () => { const [Lazy, resolve] = createLazy(); const Loading = () => <div>Suspended...</div>; const loadingHtml = `<div>Suspended...</div>`; render( <Suspense fallback={<Loading />}> <Lazy /> <div>World</div> </Suspense>, scratch ); rerender(); expect(scratch.innerHTML).to.eql(loadingHtml); return resolve(() => <div>Hello</div>).then(() => { rerender(); expect(scratch.innerHTML).to.equal(`<div>Hello</div><div>World</div>`); }); }); it('should render correctly when multiple children suspend with the same promise', () => { /** @type {() => Promise<void>} */ let resolve; let resolved = false; const promise = new Promise(_resolve => { resolve = () => { resolved = true; _resolve(); return promise; }; }); const Child = props => { if (!resolved) { throw promise; } return props.children; }; const Loading = () => <div>Suspended...</div>; const loadingHtml = `<div>Suspended...</div>`; render( <Suspense fallback={<Loading />}> <Child> <div>A</div> </Child> <Child> <div>B</div> </Child> </Suspense>, scratch ); rerender(); expect(scratch.innerHTML).to.eql(loadingHtml); return resolve().then(() => { resolved = true; rerender(); expect(scratch.innerHTML).to.equal(`<div>A</div><div>B</div>`); }); }); it('should un-suspend when suspender unmounts', () => { const [Suspender, suspend] = createSuspender(() => <div>Suspender</div>); let hide; class Conditional extends Component { constructor(props) { super(props); this.state = { show: true }; hide = () => { this.setState({ show: false }); }; } render(props, { show }) { return ( <div> conditional {show ? 
'show' : 'hide'} {show && <Suspender />} </div> ); } } render( <Suspense fallback={<div>Suspended...</div>}> <Conditional /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql( `<div>conditional show<div>Suspender</div></div>` ); expect(Suspender.prototype.render).to.have.been.calledOnce; suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); hide(); rerender(); expect(scratch.innerHTML).to.eql(`<div>conditional hide</div>`); }); it('should call componentWillUnmount on a suspended component', () => { const cWUSpy = sinon.spy(); // eslint-disable-next-line react/require-render-return class Suspender extends Component { render() { throw new Promise(() => {}); } } Suspender.prototype.componentWillUnmount = cWUSpy; let hide; let suspender = null; let suspenderRef = s => { // skip null values as we want to keep the ref even after unmount if (s) { suspender = s; } }; class Conditional extends Component { constructor(props) { super(props); this.state = { show: true }; hide = () => { this.setState({ show: false }); }; } render(props, { show }) { return ( <div> conditional {show ? 'show' : 'hide'} {show && <Suspender ref={suspenderRef} />} </div> ); } } render( <Suspense fallback={<div>Suspended...</div>}> <Conditional /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`<div>conditional show</div>`); expect(cWUSpy).to.not.have.been.called; hide(); rerender(); expect(cWUSpy).to.have.been.calledOnce; expect(suspender).not.to.be.undefined; expect(suspender).not.to.be.null; expect(cWUSpy.getCall(0).thisValue).to.eql(suspender); expect(scratch.innerHTML).to.eql(`<div>conditional hide</div>`); }); xit('should support sCU=false when un-suspending', () => { // See #2176 #2125 const [Suspender, suspend] = createSuspender(() => <div>Hello</div>); render( <Suspense fallback={<div>Suspended...</div>}> Text {/* Adding a <div> here will make things work... */} <Suspender /> </Suspense>, scratch ); expect(scratch.innerHTML).to.eql(`Text<div>Hello</div>`); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.eql(`<div>Suspended...</div>`); Suspender.prototype.shouldComponentUpdate = () => false; return resolve(() => <div>Hello 2</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`Text<div>Hello 2</div>`); }); }); xit('should allow suspended children to update', () => { const log = []; class Logger extends Component { constructor(props) { super(props); log.push('construct'); } render({ children }) { log.push('render'); return children; } } let suspender; class Suspender extends Component { constructor(props) { super(props); this.state = { promise: new Promise(() => {}) }; suspender = this; } unsuspend() { this.setState({ promise: null }); } render() { if (this.state.promise) { throw this.state.promise; } return 'hello'; } } render( <section> <Suspense fallback={<div>fallback</div>}> <Suspender /> <Logger /> </Suspense> </section>, scratch ); expect(log).to.eql(['construct', 'render']); expect(scratch.innerHTML).to.eql('<section></section>'); // this rerender is needed because of Suspense issuing a forceUpdate itself rerender(); expect(scratch.innerHTML).to.eql('<section><div>fallback</div></section>'); suspender.unsuspend(); rerender(); /** * These currently failing assertion shows the issue that we currently unmount * the suspended tree (unlike react, which adds a display="none") and block any * further processing on that tree. Thus updates below a suspended Suspense are * getting lost. 
*/ expect(log).to.eql(['construct', 'render', 'render']); /** * When the above assertion will hold true we will certainly run into the second issue * here. The problem is that we do not remove suspensions from an instance of Suspense * when one of its suspending children no longer throws because of a state * update. */ expect(scratch.innerHTML).to.eql( '<section><div>Suspender un-suspended</div></section>' ); }); it('should render delayed lazy components through components using shouldComponentUpdate', () => { const [Suspender1, suspend1] = createSuspender(() => <i>1</i>); const [Suspender2, suspend2] = createSuspender(() => <i>2</i>); class Blocker extends Component { shouldComponentUpdate() { return false; } render(props) { return ( <b> <i>a</i> {props.children} <i>d</i> </b> ); } } render( <Suspense fallback={<div>Suspended...</div>}> <Blocker> <Suspender1 /> <Suspender2 /> </Blocker> </Suspense>, scratch ); expect(scratch.innerHTML).to.equal( '<b><i>a</i><i>1</i><i>2</i><i>d</i></b>' ); const [resolve1] = suspend1(); const [resolve2] = suspend2(); rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolve1(() => <i>b</i>) .then(() => { rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolve2(() => <i>c</i>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.equal( '<b><i>a</i><i>b</i><i>c</i><i>d</i></b>' ); }); }); it('should render initially lazy components through components using shouldComponentUpdate', () => { const [Lazy1, resolve1] = createLazy(); const [Lazy2, resolve2] = createLazy(); class Blocker extends Component { shouldComponentUpdate() { return false; } render(props) { return ( <b> <i>a</i> {props.children} <i>d</i> </b> ); } } render( <Suspense fallback={<div>Suspended...</div>}> <Blocker> <Lazy1 /> <Lazy2 /> </Blocker> </Suspense>, scratch ); rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolve1(() => <i>b</i>) .then(() => { rerender(); expect(scratch.innerHTML).to.equal('<div>Suspended...</div>'); return resolve2(() => <i>c</i>); }) .then(() => { rerender(); expect(scratch.innerHTML).to.equal( '<b><i>a</i><i>b</i><i>c</i><i>d</i></b>' ); }); }); it('should render initially lazy components through createContext', () => { const ctx = createContext(null); const [Lazy, resolve] = createLazy(); const suspense = ( <Suspense fallback={<div>Suspended...</div>}> <ctx.Provider value="123"> <ctx.Consumer>{value => <Lazy value={value} />}</ctx.Consumer> </ctx.Provider> </Suspense> ); render(suspense, scratch); rerender(); expect(scratch.innerHTML).to.equal(`<div>Suspended...</div>`); return resolve(props => <div>{props.value}</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>123</div>`); }); }); it('should render delayed lazy components through createContext', () => { const ctx = createContext(null); const [Suspender, suspend] = createSuspender(({ value }) => ( <span>{value}</span> )); const suspense = ( <Suspense fallback={<div>Suspended...</div>}> <ctx.Provider value="123"> <ctx.Consumer>{value => <Suspender value={value} />}</ctx.Consumer> </ctx.Provider> </Suspense> ); render(suspense, scratch); expect(scratch.innerHTML).to.equal('<span>123</span>'); const [resolve] = suspend(); rerender(); expect(scratch.innerHTML).to.equal(`<div>Suspended...</div>`); return resolve(props => <div>{props.value}</div>).then(() => { rerender(); expect(scratch.innerHTML).to.eql(`<div>123</div>`); }); }); });
1
15,387
Huh, this seems weird to me... Surely the suspenders' render was called again in order to get the new `<div>Hello second 2</div>` output... I'll take a peek at these tests to understand what's going on
preactjs-preact
js
@@ -80,6 +80,10 @@ class ApplicationController < ActionController::Base def failed_destroy_error(obj, obj_name) "#{_('Could not delete the %{o}.') % {o: obj_name}} #{errors_to_s(obj)}" end + + def success_message(obj_name, action) + "#{_('Successfully %{action} your %{object}.') % {object: obj_name, action: action}}" + end private # Override rails default render action to look for a branded version of a
1
class ApplicationController < ActionController::Base protect_from_forgery with: :exception # Look for template overrides before rendering before_filter :prepend_view_paths include GlobalHelpers include Pundit helper_method GlobalHelpers.instance_methods rescue_from Pundit::NotAuthorizedError, with: :user_not_authorized def user_not_authorized if user_signed_in? redirect_to plans_url, notice: _('You are not authorized to perform this action.') else redirect_to root_url, alert: _('You need to sign in or sign up before continuing.') end end before_filter :set_gettext_locale after_filter :store_location # Sets FastGettext locale for every request made def set_gettext_locale FastGettext.locale = session[:locale] || FastGettext.default_locale end # PATCH /locale/:locale REST method def set_locale_session if FastGettext.default_available_locales.include?(params[:locale]) session[:locale] = params[:locale] end redirect_to(request.referer || root_path) #redirects the user to URL where she/he was when the request to this resource was made or root if none is encountered end def store_location # store last url - this is needed for post-login redirect to whatever the user last visited. unless ["/users/sign_in", "/users/sign_up", "/users/password", "/users/invitation/accept", ].any? { |ur| request.fullpath.include?(ur) } \ or request.xhr? # don't store ajax calls session[:previous_url] = request.fullpath end end def after_sign_in_path_for(resource) session[:previous_url] || root_path end def after_sign_up_path_for(resource) session[:previous_url] || root_path end def after_sign_in_error_path_for(resource) session[:previous_url] || root_path end def after_sign_up_error_path_for(resource) session[:previous_url] || root_path end def authenticate_admin! # currently if admin has any super-admin task, they can view the super-admin redirect_to root_path unless user_signed_in? && (current_user.can_add_orgs? || current_user.can_change_org? || current_user.can_super_admin?) end def failed_create_error(obj, obj_name) "#{_('Could not create your %{o}.') % {o: obj_name}} #{errors_to_s(obj)}" end def failed_update_error(obj, obj_name) "#{_('Could not update your %{o}.') % {o: obj_name}} #{errors_to_s(obj)}" end def failed_destroy_error(obj, obj_name) "#{_('Could not delete the %{o}.') % {o: obj_name}} #{errors_to_s(obj)}" end private # Override rails default render action to look for a branded version of a # template instead of using the default one. If no override exists, the # default version in ./app/views/[:controller]/[:action] will be used # # The path in the app/views/branded/ directory must match the the file it is # replacing. For example: # app/views/branded/layouts/_header.html.erb -> app/views/layouts/_header.html.erb def prepend_view_paths prepend_view_path "app/views/branded" end def errors_to_s(obj) if obj.errors.count > 0 msg = "<br />" obj.errors.each do |e,m| if m.include?('empty') || m.include?('blank') msg += "#{_(e)} - #{_(m)}<br />" else msg += "'#{obj[e]}' - #{_(m)}<br />" end end msg end end ## # Sign out of Shibboleth SP local session too. # ------------------------------------------------------------- def after_sign_out_path_for(resource_or_scope) if Rails.application.config.shibboleth_enabled return Rails.application.config.shibboleth_logout_url + root_url super else super end end # ------------------------------------------------------------- end
1
16,752
Since this is just a helper function to create text, perhaps this could live in a helper? I noticed that we have a few other error/message creators in the application controller as well, and I think this type of function should be refactored into a helper (see the sketch after this record).
DMPRoadmap-roadmap
rb
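The review comment above suggests moving the message builders out of ApplicationController. Below is a minimal sketch of what that might look like, assuming a hypothetical `MessagesHelper` module (the module name and file location are assumptions, not part of the original change); the `_()` call is the same FastGettext translation helper the controller already uses.

```ruby
# Hypothetical app/helpers/messages_helper.rb -- a sketch, not the project's actual code.
module MessagesHelper
  # Mirrors the success_message added to ApplicationController in the patch above.
  def success_message(obj_name, action)
    _('Successfully %{action} your %{object}.') % { object: obj_name, action: action }
  end
end
```

The controller could then mix the module in with `include MessagesHelper`, and the other `failed_*` message builders could move alongside it if the refactor goes ahead.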
@@ -92,7 +92,7 @@ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / stateme # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details -#msg-template= +msg-template=[{msg_id}] {path}:{line:3d}:{column}: {msg} [SPELLING]
1
[MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). init-hook='import sys; sys.path.append("build/thrift/v6/gen-py"); sys.path.append("tools/plist_to_html"); sys.path.append("analyzer"); sys.path.append("web"); sys.path.append("web/client"); sys.path.append("web/server");' # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Use multiple processes to speed up Pylint. jobs=1 # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code extension-pkg-whitelist= # Allow optimization of some AST trees. This will activate a peephole AST # optimizer, which will apply various small optimizations. For instance, it can # be used to obtain the result of joining multiple strings with the addition # operator. Joining a lot of strings can lead to a maximum recursion error in # Pylint and this flag can prevent that. It has one side effect, the resulting # AST will be different than the one from reality. optimize-ast=no [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED confidence= # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time. See also the "--disable" option for examples. #enable= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once).You can also use "--disable=all" to # disable everything first and then reenable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html. 
You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=text # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells whether to display a full report or only the messages reports=yes # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= [SPELLING] # Spelling dictionary name. Available dictionaries: none. To make it working # install python-enchant package. spelling-dict= # List of comma separated words that should not be checked. spelling-ignore-words= # A path to a file that contains private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to indicated private dictionary in # --spelling-private-dict-file option instead of raising a message. spelling-store-unknown-words=no [LOGGING] # Logging modules to check that the string format arguments are in logging # function parameter format logging-modules=logging [FORMAT] # Maximum number of characters on a single line. max-line-length=80 # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )?<?https?://\S+>?$ # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no # List of optional constructs for which whitespace checking is disabled. `dict- # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. # `trailing-comma` allows a space between comma and closing bracket: (a, ). # `empty-line` allows space-only lines. no-space-check=trailing-comma,dict-separator # Maximum number of lines in a module max-module-lines=2000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format= [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. #notes=FIXME,XXX,TODO [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the name of dummy variables (i.e. expectedly # not used). dummy-variables-rgx=_$|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. 
callbacks=cb_,_cb [BASIC] # List of builtins function names that should not be used, separated by a comma bad-functions= # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= # Include a hint for the correct naming format with invalid-name include-naming-hint=no # Regular expression matching correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for function names function-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for variable names variable-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct constant names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Naming hint for constant names const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression matching correct attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for attribute names attr-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for argument names argument-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct class attribute names class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Naming hint for class attribute names class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Regular expression matching correct inline iteration names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Naming hint for inline iteration names inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ # Regular expression matching correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Naming hint for class names class-name-hint=[A-Z_][a-zA-Z0-9]+$ # Regular expression matching correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Naming hint for module names module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression matching correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for method names method-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match function or class names that do # not require a docstring. no-docstring-rgx=^_ # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=50 [ELIF] # Maximum number of nested blocks for function / method body max-nested-blocks=5 [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. ignored-modules= # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). This supports can work # with qualified names. ignored-classes= # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. 
generated-members= [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. ignore-imports=no [DESIGN] # Maximum number of arguments for function / method max-args=8 # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.* # Maximum number of locals for function / method body max-locals=20 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body max-branches=12 # Maximum number of statements in function / method body max-statements=50 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 # Maximum number of boolean expressions in a if statement max-bool-expr=5 [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= [CLASSES] # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls # List of valid names for the first argument in a metaclass class method. valid-metaclass-classmethod-first-arg=mcs # List of member names, which should be excluded from the protected access # warning. exclude-protected=_asdict,_fields,_replace,_source,_make [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=Exception
1
10,511
Will this mess up parsing of the messages by other editors (VS Code, Vim, ...)?
Ericsson-codechecker
c
@@ -1,4 +1,3 @@ -# Set up gems listed in the Gemfile. -ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__) +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__) -require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE']) +require 'bundler/setup' # Set up gems listed in the Gemfile.
1
# Set up gems listed in the Gemfile. ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__) require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE'])
1
18,619
Style/StringLiterals: Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
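The Style/StringLiterals note above applies to the single-quoted strings introduced by the patch. As a minimal sketch, here are the same two patched lines rewritten with double quotes, which is all the cop asks for; behaviour is unchanged.

```ruby
# Same lines as the patch, quote style adjusted per Style/StringLiterals.
ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__)

require "bundler/setup" # Set up gems listed in the Gemfile.
```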
@@ -28,8 +28,13 @@ namespace Microsoft.AspNet.Server.Kestrel.Filter _filteredStream = filteredStream; _socketInputStream = new SocketInputStream(SocketInput); - _filteredStream.CopyToAsync(_socketInputStream).ContinueWith((task, state) => + var block = memory.Lease(); + // Use pooled block for copy + _filteredStream.CopyToAsync(_socketInputStream, block).ContinueWith((task, state) => { + var returnedBlock = task.Result; + returnedBlock.Pool?.Return(returnedBlock); + ((FilteredStreamAdapter)state).OnStreamClose(task); }, this); }
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.IO; using System.Threading.Tasks; using Microsoft.AspNet.Server.Kestrel.Http; using Microsoft.AspNet.Server.Kestrel.Infrastructure; using Microsoft.Extensions.Logging; namespace Microsoft.AspNet.Server.Kestrel.Filter { public class FilteredStreamAdapter { private readonly Stream _filteredStream; private readonly Stream _socketInputStream; private readonly IKestrelTrace _log; public FilteredStreamAdapter( Stream filteredStream, MemoryPool2 memory, IKestrelTrace logger) { SocketInput = new SocketInput(memory); SocketOutput = new StreamSocketOutput(filteredStream, memory); _log = logger; _filteredStream = filteredStream; _socketInputStream = new SocketInputStream(SocketInput); _filteredStream.CopyToAsync(_socketInputStream).ContinueWith((task, state) => { ((FilteredStreamAdapter)state).OnStreamClose(task); }, this); } public SocketInput SocketInput { get; private set; } public ISocketOutput SocketOutput { get; private set; } private void OnStreamClose(Task copyAsyncTask) { if (copyAsyncTask.IsFaulted) { _log.LogError("FilteredStreamAdapter.CopyToAsync", copyAsyncTask.Exception); } else if (copyAsyncTask.IsCanceled) { _log.LogError("FilteredStreamAdapter.CopyToAsync canceled."); } try { _filteredStream.Dispose(); _socketInputStream.Dispose(); } catch (Exception ex) { _log.LogError("FilteredStreamAdapter.OnStreamClose", ex); } } } }
1
7,272
Why can the pool be null?
aspnet-KestrelHttpServer
.cs
@@ -74,6 +74,15 @@ class ToggleButton(ia2Web.Ia2Web): return states +class PresentationalList(ia2Web.Ia2Web): + """ Ensures that lists like UL, DL and OL always have the readonly state.""" + + def _get_states(self): + states = super().states + states.add(controlTypes.STATE_READONLY) + return states + + def findExtraOverlayClasses(obj, clsList): """Determine the most appropriate class(es) for Chromium objects. This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.
1
#NVDAObjects/IAccessible/chromium.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. # Copyright (C) 2010-2013 NV Access Limited """NVDAObjects for the Chromium browser project """ from comtypes import COMError import oleacc import controlTypes import IAccessibleHandler from NVDAObjects.IAccessible import IAccessible from virtualBuffers.gecko_ia2 import Gecko_ia2 as GeckoVBuf, Gecko_ia2_TextInfo as GeckoVBufTextInfo from . import ia2Web class ChromeVBufTextInfo(GeckoVBufTextInfo): def _normalizeControlField(self, attrs): attrs = super()._normalizeControlField(attrs) if attrs['role'] == controlTypes.ROLE_TOGGLEBUTTON and controlTypes.STATE_CHECKABLE in attrs['states']: # In Chromium, the checkable state is exposed erroneously on toggle buttons. attrs['states'].discard(controlTypes.STATE_CHECKABLE) return attrs class ChromeVBuf(GeckoVBuf): TextInfo = ChromeVBufTextInfo def __contains__(self, obj): if obj.windowHandle != self.rootNVDAObject.windowHandle: return False if not isinstance(obj,ia2Web.Ia2Web): # #4080: Input composition NVDAObjects are the same window but not IAccessible2! return False accId = obj.IA2UniqueID if accId == self.rootID: return True try: self.rootNVDAObject.IAccessibleObject.accChild(accId) except COMError: return False return not self._isNVDAObjectInApplication(obj) class Document(ia2Web.Document): def _get_treeInterceptorClass(self): states = self.states if controlTypes.STATE_EDITABLE not in states and controlTypes.STATE_BUSY not in states: return ChromeVBuf return super(Document, self).treeInterceptorClass class ComboboxListItem(IAccessible): """ Represents a list item inside a combo box. """ def _get_focusRedirect(self): # Chrome 68 and below fires focus on the active list item of combo boxes even when the combo box is collapsed. # We get around this by redirecting focus back up to the combo box itself if the list inside is invisible (I.e. the combo box is collapsed). if self.parent and controlTypes.STATE_INVISIBLE in self.parent.states: return self.parent.parent class ToggleButton(ia2Web.Ia2Web): def _get_states(self): # In Chromium, the checkable state is exposed erroneously on toggle buttons. states = super().states states.discard(controlTypes.STATE_CHECKABLE) return states def findExtraOverlayClasses(obj, clsList): """Determine the most appropriate class(es) for Chromium objects. This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method. """ if obj.role==controlTypes.ROLE_LISTITEM and obj.parent and obj.parent.parent and obj.parent.parent.role==controlTypes.ROLE_COMBOBOX: clsList.append(ComboboxListItem) elif obj.role == controlTypes.ROLE_TOGGLEBUTTON: clsList.append(ToggleButton) ia2Web.findExtraOverlayClasses(obj, clsList, documentClass=Document)
1
30,669
It might be good to have a note here: > work-around for issue #7562, allowing us to differentiate presentational lists from interactive lists (such as those of size greater than 1, and ARIA list boxes). In Firefox, this is possible due to the presence of a read-only state, even in content editable.
nvaccess-nvda
py
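As a sketch of the reviewer's request (illustrative only, not necessarily the wording that was committed), the suggested note could be folded into the docstring of the new overlay class from the patch above:

class PresentationalList(ia2Web.Ia2Web):
	"""Ensures that lists like UL, DL and OL always have the read-only state.
	Work-around for issue #7562: the read-only state allows us to differentiate
	presentational lists from interactive lists (such as those of size greater
	than 1, and ARIA list boxes). In Firefox this is possible because the
	read-only state is present even in content editable.
	"""

	def _get_states(self):
		states = super().states
		states.add(controlTypes.STATE_READONLY)
		return states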
@@ -66,16 +66,16 @@ import java.util.Set; * the current node) may be queried. * */ -class DigraphNode implements Cloneable, Serializable { +class DigraphNode<E> implements Cloneable, Serializable { /** The data associated with this node. */ - protected Object data; + protected E data; /** * A <code>Set</code> of neighboring nodes pointed to by this * node. */ - protected Set outNodes = new HashSet(); + protected Set<DigraphNode<E>> outNodes = new HashSet<>(); /** The in-degree of the node. */ protected int inDegree = 0;
1
/* Copyright (C) 2005-2012, by the President and Fellows of Harvard College. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Dataverse Network - A web application to share, preserve and analyze research data. Developed at the Institute for Quantitative Social Science, Harvard University. Version 3.0. */ package edu.harvard.iq.dataverse.ingest.plugin.spi; // This file was Taken out from openjdk-6-src-b16-24_apr_2009.tar.gz // http://download.java.net/openjdk/jdk6/promoted/b16/openjdk-6-src-b16-24_apr_2009.tar.gz // downloaded: 2009-05-07 /* * Copyright 2000 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Sun designates this * particular file as subject to the "Classpath" exception as provided * by Sun in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. */ //package javax.imageio.spi; import java.io.Serializable; import java.util.HashSet; import java.util.Iterator; import java.util.Set; /** * A node in a directed graph. In addition to an arbitrary * <code>Object</code> containing user data associated with the node, * each node maintains a <code>Set</code>s of nodes which are pointed * to by the current node (available from <code>getOutNodes</code>). * The in-degree of the node (that is, number of nodes that point to * the current node) may be queried. * */ class DigraphNode implements Cloneable, Serializable { /** The data associated with this node. */ protected Object data; /** * A <code>Set</code> of neighboring nodes pointed to by this * node. */ protected Set outNodes = new HashSet(); /** The in-degree of the node. */ protected int inDegree = 0; /** * A <code>Set</code> of neighboring nodes that point to this * node. */ private Set inNodes = new HashSet(); public DigraphNode(Object data) { this.data = data; } /** Returns the <code>Object</code> referenced by this node. */ public Object getData() { return data; } /** * Returns an <code>Iterator</code> containing the nodes pointed * to by this node. */ public Iterator getOutNodes() { return outNodes.iterator(); } /** * Adds a directed edge to the graph. 
The outNodes list of this * node is updated and the in-degree of the other node is incremented. * * @param node a <code>DigraphNode</code>. * * @return <code>true</code> if the node was not previously the * target of an edge. */ public boolean addEdge(DigraphNode node) { if (outNodes.contains(node)) { return false; } outNodes.add(node); node.inNodes.add(this); node.incrementInDegree(); return true; } /** * Returns <code>true</code> if an edge exists between this node * and the given node. * * @param node a <code>DigraphNode</code>. * * @return <code>true</code> if the node is the target of an edge. */ public boolean hasEdge(DigraphNode node) { return outNodes.contains(node); } /** * Removes a directed edge from the graph. The outNodes list of this * node is updated and the in-degree of the other node is decremented. * * @return <code>true</code> if the node was previously the target * of an edge. */ public boolean removeEdge(DigraphNode node) { if (!outNodes.contains(node)) { return false; } outNodes.remove(node); node.inNodes.remove(this); node.decrementInDegree(); return true; } /** * Removes this node from the graph, updating neighboring nodes * appropriately. */ public void dispose() { Object[] inNodesArray = inNodes.toArray(); for(int i=0; i<inNodesArray.length; i++) { DigraphNode node = (DigraphNode) inNodesArray[i]; node.removeEdge(this); } Object[] outNodesArray = outNodes.toArray(); for(int i=0; i<outNodesArray.length; i++) { DigraphNode node = (DigraphNode) outNodesArray[i]; removeEdge(node); } } /** Returns the in-degree of this node. */ public int getInDegree() { return inDegree; } /** Increments the in-degree of this node. */ private void incrementInDegree() { ++inDegree; } /** Decrements the in-degree of this node. */ private void decrementInDegree() { --inDegree; } }
1
36,251
This set of changes is the one part of this that I'm not completely sure is valid. Can someone look over this to make sure I got the E's right?
IQSS-dataverse
java
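For the reviewer's question about the type parameters, here is a minimal self-contained sketch of how the rest of DigraphNode might be genericized consistently with the hunk above; it shows one consistent way to thread E through the fields and edge methods, and is not necessarily the exact change in the pull request:

import java.io.Serializable;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

class DigraphNode<E> implements Cloneable, Serializable {
    /** The data associated with this node. */
    protected E data;
    /** Nodes pointed to by this node. */
    protected Set<DigraphNode<E>> outNodes = new HashSet<>();
    /** Nodes that point to this node. */
    private Set<DigraphNode<E>> inNodes = new HashSet<>();
    /** The in-degree of the node. */
    protected int inDegree = 0;

    public DigraphNode(E data) {
        this.data = data;
    }

    public E getData() {
        return data;
    }

    public Iterator<DigraphNode<E>> getOutNodes() {
        return outNodes.iterator();
    }

    /** Adds a directed edge and increments the other node's in-degree. */
    public boolean addEdge(DigraphNode<E> node) {
        if (!outNodes.add(node)) {
            return false; // edge already existed
        }
        node.inNodes.add(this);
        ++node.inDegree;
        return true;
    }
}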
@@ -282,8 +282,12 @@ void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex> void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a) { nano::pull_info pull (pull_a); - nano::lock_guard<std::mutex> lock (mutex); - frontier_pulls.push_back (pull); + // Prevent incorrent or malicious pulls with frontier 0 insertion + if (!pull.head.is_zero ()) + { + nano::lock_guard<std::mutex> lock (mutex); + frontier_pulls.push_back (pull); + } } void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
1
#include <nano/crypto_lib/random_pool.hpp> #include <nano/node/bootstrap/bootstrap.hpp> #include <nano/node/bootstrap/bootstrap_attempt.hpp> #include <nano/node/bootstrap/bootstrap_bulk_push.hpp> #include <nano/node/bootstrap/bootstrap_frontier.hpp> #include <nano/node/common.hpp> #include <nano/node/node.hpp> #include <nano/node/transport/tcp.hpp> #include <nano/node/websocket.hpp> #include <boost/format.hpp> #include <algorithm> constexpr size_t nano::bootstrap_limits::bootstrap_max_confirm_frontiers; constexpr double nano::bootstrap_limits::required_frontier_confirmation_ratio; constexpr unsigned nano::bootstrap_limits::frontier_confirmation_blocks_limit; constexpr unsigned nano::bootstrap_limits::requeued_pulls_limit; constexpr unsigned nano::bootstrap_limits::requeued_pulls_limit_dev; nano::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<nano::node> node_a, nano::bootstrap_mode mode_a, uint64_t incremental_id_a, std::string id_a) : node (node_a), incremental_id (incremental_id_a), id (id_a), mode (mode_a) { if (id.empty ()) { nano::random_constants constants; id = constants.random_128.to_string (); } node->logger.always_log (boost::str (boost::format ("Starting %1% bootstrap attempt with ID %2%") % mode_text () % id)); node->bootstrap_initiator.notify_listeners (true); if (node->websocket_server) { nano::websocket::message_builder builder; node->websocket_server->broadcast (builder.bootstrap_started (id, mode_text ())); } } nano::bootstrap_attempt::~bootstrap_attempt () { node->logger.always_log (boost::str (boost::format ("Exiting %1% bootstrap attempt with ID %2%") % mode_text () % id)); node->bootstrap_initiator.notify_listeners (false); if (node->websocket_server) { nano::websocket::message_builder builder; node->websocket_server->broadcast (builder.bootstrap_exited (id, mode_text (), attempt_start, total_blocks)); } } bool nano::bootstrap_attempt::should_log () { nano::lock_guard<std::mutex> guard (next_log_mutex); auto result (false); auto now (std::chrono::steady_clock::now ()); if (next_log < now) { result = true; next_log = now + std::chrono::seconds (15); } return result; } bool nano::bootstrap_attempt::still_pulling () { debug_assert (!mutex.try_lock ()); auto running (!stopped); auto still_pulling (pulling > 0); return running && still_pulling; } void nano::bootstrap_attempt::pull_started () { { nano::lock_guard<std::mutex> guard (mutex); ++pulling; } condition.notify_all (); } void nano::bootstrap_attempt::pull_finished () { { nano::lock_guard<std::mutex> guard (mutex); --pulling; } condition.notify_all (); } void nano::bootstrap_attempt::stop () { { nano::lock_guard<std::mutex> lock (mutex); stopped = true; } condition.notify_all (); node->bootstrap_initiator.connections->clear_pulls (incremental_id); } std::string nano::bootstrap_attempt::mode_text () { std::string mode_text; if (mode == nano::bootstrap_mode::legacy) { mode_text = "legacy"; } else if (mode == nano::bootstrap_mode::lazy) { mode_text = "lazy"; } else if (mode == nano::bootstrap_mode::wallet_lazy) { mode_text = "wallet_lazy"; } return mode_text; } void nano::bootstrap_attempt::restart_condition () { debug_assert (mode == nano::bootstrap_mode::legacy); } void nano::bootstrap_attempt::add_frontier (nano::pull_info const &) { debug_assert (mode == nano::bootstrap_mode::legacy); } void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const &, nano::block_hash const &) { debug_assert (mode == nano::bootstrap_mode::legacy); } bool nano::bootstrap_attempt::request_bulk_push_target 
(std::pair<nano::block_hash, nano::block_hash> &) { debug_assert (mode == nano::bootstrap_mode::legacy); return true; } void nano::bootstrap_attempt::add_recent_pull (nano::block_hash const &) { debug_assert (mode == nano::bootstrap_mode::legacy); } bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_a, nano::account const & known_account_a, uint64_t pull_blocks, nano::bulk_pull::count_t max_blocks, bool block_expected, unsigned retry_limit) { nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown); node->block_processor.add (info); return false; } void nano::bootstrap_attempt::lazy_start (nano::hash_or_account const &, bool) { debug_assert (mode == nano::bootstrap_mode::lazy); } void nano::bootstrap_attempt::lazy_add (nano::pull_info const &) { debug_assert (mode == nano::bootstrap_mode::lazy); } void nano::bootstrap_attempt::lazy_requeue (nano::block_hash const &, nano::block_hash const &, bool) { debug_assert (mode == nano::bootstrap_mode::lazy); } uint32_t nano::bootstrap_attempt::lazy_batch_size () { debug_assert (mode == nano::bootstrap_mode::lazy); return node->network_params.bootstrap.lazy_min_pull_blocks; } bool nano::bootstrap_attempt::lazy_processed_or_exists (nano::block_hash const &) { debug_assert (mode == nano::bootstrap_mode::lazy); return false; } bool nano::bootstrap_attempt::lazy_has_expired () const { debug_assert (mode == nano::bootstrap_mode::lazy); return true; } void nano::bootstrap_attempt::requeue_pending (nano::account const &) { debug_assert (mode == nano::bootstrap_mode::wallet_lazy); } void nano::bootstrap_attempt::wallet_start (std::deque<nano::account> &) { debug_assert (mode == nano::bootstrap_mode::wallet_lazy); } size_t nano::bootstrap_attempt::wallet_size () { debug_assert (mode == nano::bootstrap_mode::wallet_lazy); return 0; } nano::bootstrap_attempt_legacy::bootstrap_attempt_legacy (std::shared_ptr<nano::node> node_a, uint64_t incremental_id_a, std::string id_a) : nano::bootstrap_attempt (node_a, nano::bootstrap_mode::legacy, incremental_id_a, id_a) { node->bootstrap_initiator.notify_listeners (true); } bool nano::bootstrap_attempt_legacy::consume_future (std::future<bool> & future_a) { bool result; try { result = future_a.get (); } catch (std::future_error &) { result = true; } return result; } void nano::bootstrap_attempt_legacy::stop () { nano::unique_lock<std::mutex> lock (mutex); stopped = true; lock.unlock (); condition.notify_all (); lock.lock (); if (auto i = frontiers.lock ()) { try { i->promise.set_value (true); } catch (std::future_error &) { } } if (auto i = push.lock ()) { try { i->promise.set_value (true); } catch (std::future_error &) { } } lock.unlock (); node->bootstrap_initiator.connections->clear_pulls (incremental_id); } void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex> & lock_a) { bool error (false); lock_a.unlock (); auto connection_l (node->bootstrap_initiator.connections->find_connection (endpoint_frontier_request)); lock_a.lock (); if (connection_l) { std::future<bool> future; { auto this_l (shared_from_this ()); auto client (std::make_shared<nano::bulk_push_client> (connection_l, this_l)); client->start (); push = client; future = client->promise.get_future (); } lock_a.unlock (); error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception. 
lock_a.lock (); } if (node->config.logging.network_logging ()) { node->logger.try_log ("Exiting bulk push client"); if (error) { node->logger.try_log ("Bulk push client failed"); } } } void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a) { nano::pull_info pull (pull_a); nano::lock_guard<std::mutex> lock (mutex); frontier_pulls.push_back (pull); } void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end) { nano::lock_guard<std::mutex> lock (mutex); bulk_push_targets.emplace_back (head, end); } bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> & current_target_a) { nano::lock_guard<std::mutex> lock (mutex); auto empty (bulk_push_targets.empty ()); if (!empty) { current_target_a = bulk_push_targets.back (); bulk_push_targets.pop_back (); } return empty; } void nano::bootstrap_attempt_legacy::add_recent_pull (nano::block_hash const & head_a) { nano::lock_guard<std::mutex> lock (mutex); recent_pulls_head.push_back (head_a); if (recent_pulls_head.size () > nano::bootstrap_limits::bootstrap_max_confirm_frontiers) { recent_pulls_head.pop_front (); } } void nano::bootstrap_attempt_legacy::restart_condition () { /* Conditions to start frontiers confirmation: - not completed frontiers confirmation - more than 256 pull retries usually indicating issues with requested pulls - or 128k processed blocks indicating large bootstrap */ if (!frontiers_confirmation_pending && !frontiers_confirmed && (requeued_pulls > (!node->network_params.network.is_dev_network () ? nano::bootstrap_limits::requeued_pulls_limit : nano::bootstrap_limits::requeued_pulls_limit_dev) || total_blocks > nano::bootstrap_limits::frontier_confirmation_blocks_limit)) { frontiers_confirmation_pending = true; } } void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<std::mutex> & lock_a) { if (frontiers_confirmation_pending) { auto confirmed (confirm_frontiers (lock_a)); debug_assert (lock_a.owns_lock ()); if (!confirmed) { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_failed, nano::stat::dir::in); auto score (node->network.excluded_peers.add (endpoint_frontier_request, node->network.size ())); if (score >= nano::peer_exclusion::score_limit) { node->logger.always_log (boost::str (boost::format ("Adding peer %1% to excluded peers list with score %2% after %3% seconds bootstrap attempt") % endpoint_frontier_request % score % std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt_start).count ())); auto channel = node->network.find_channel (nano::transport::map_tcp_to_endpoint (endpoint_frontier_request)); if (channel != nullptr) { node->network.erase (*channel); } } lock_a.unlock (); stop (); lock_a.lock (); // Start new bootstrap connection auto node_l (node->shared ()); auto this_l (shared_from_this ()); node->background ([node_l, this_l]() { node_l->bootstrap_initiator.remove_attempt (this_l); // Delay after removing current attempt node_l->alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (50), [node_l]() { node_l->bootstrap_initiator.bootstrap (true); }); }); } else { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_successful, nano::stat::dir::in); } frontiers_confirmed = confirmed; frontiers_confirmation_pending = false; } } bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::mutex> & lock_a) { 
bool confirmed (false); debug_assert (!frontiers_confirmed); condition.wait (lock_a, [& stopped = stopped] { return !stopped; }); auto this_l (shared_from_this ()); std::vector<nano::block_hash> frontiers; lock_a.unlock (); nano::unique_lock<std::mutex> pulls_lock (node->bootstrap_initiator.connections->mutex); for (auto i (node->bootstrap_initiator.connections->pulls.begin ()), end (node->bootstrap_initiator.connections->pulls.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i) { if (!i->head.is_zero () && i->bootstrap_id == incremental_id && std::find (frontiers.begin (), frontiers.end (), i->head) == frontiers.end ()) { frontiers.push_back (i->head); } } pulls_lock.unlock (); lock_a.lock (); for (auto i (recent_pulls_head.begin ()), end (recent_pulls_head.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i) { if (!i->is_zero () && std::find (frontiers.begin (), frontiers.end (), *i) == frontiers.end ()) { frontiers.push_back (*i); } } lock_a.unlock (); auto frontiers_count (frontiers.size ()); if (frontiers_count > 0) { const size_t reps_limit = 20; auto representatives (node->rep_crawler.representatives ()); auto reps_weight (node->rep_crawler.total_weight ()); auto representatives_copy (representatives); nano::uint128_t total_weight (0); // Select random peers from bottom 50% of principal representatives if (representatives.size () > 1) { std::reverse (representatives.begin (), representatives.end ()); representatives.resize (representatives.size () / 2); for (auto i = static_cast<CryptoPP::word32> (representatives.size () - 1); i > 0; --i) { auto k = nano::random_pool::generate_word32 (0, i); std::swap (representatives[i], representatives[k]); } if (representatives.size () > reps_limit) { representatives.resize (reps_limit); } } for (auto const & rep : representatives) { total_weight += rep.weight.number (); } // Select peers with total 25% of reps stake from top 50% of principal representatives representatives_copy.resize (representatives_copy.size () / 2); while (total_weight < reps_weight / 4) // 25% { auto k = nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (representatives_copy.size () - 1)); auto rep (representatives_copy[k]); if (std::find (representatives.begin (), representatives.end (), rep) == representatives.end ()) { representatives.push_back (rep); total_weight += rep.weight.number (); } } // Start requests for (auto i (0), max_requests (20); i <= max_requests && !confirmed && !stopped; ++i) { std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> batched_confirm_req_bundle; std::deque<std::pair<nano::block_hash, nano::root>> request; // Find confirmed frontiers (tally > 12.5% of reps stake, 60% of requestsed reps responded for (auto ii (frontiers.begin ()); ii != frontiers.end ();) { if (node->ledger.block_or_pruned_exists (*ii)) { ii = frontiers.erase (ii); } else { auto existing (node->active.find_inactive_votes_cache (*ii)); nano::uint128_t tally; for (auto & voter : existing.voters) { tally += node->ledger.weight (voter); } if (existing.status.confirmed || (tally > reps_weight / 8 && existing.voters.size () >= representatives.size () * 0.6)) // 12.5% of weight, 60% of reps { ii = frontiers.erase (ii); } else { for (auto const & rep : representatives) { if (std::find (existing.voters.begin (), existing.voters.end (), rep.account) == existing.voters.end ()) { release_assert 
(!ii->is_zero ()); auto rep_request (batched_confirm_req_bundle.find (rep.channel)); if (rep_request == batched_confirm_req_bundle.end ()) { std::deque<std::pair<nano::block_hash, nano::root>> insert_root_hash = { std::make_pair (*ii, *ii) }; batched_confirm_req_bundle.emplace (rep.channel, insert_root_hash); } else { rep_request->second.emplace_back (*ii, *ii); } } } ++ii; } } } auto confirmed_count (frontiers_count - frontiers.size ()); if (confirmed_count >= frontiers_count * nano::bootstrap_limits::required_frontier_confirmation_ratio) // 80% of frontiers confirmed { confirmed = true; } else if (i < max_requests) { node->network.broadcast_confirm_req_batched_many (batched_confirm_req_bundle); std::this_thread::sleep_for (std::chrono::milliseconds (!node->network_params.network.is_dev_network () ? 500 : 25)); } } if (!confirmed) { node->logger.always_log (boost::str (boost::format ("Failed to confirm frontiers for bootstrap attempt. %1% of %2% frontiers were not confirmed") % frontiers.size () % frontiers_count)); } } lock_a.lock (); return confirmed; } bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mutex> & lock_a, bool first_attempt) { auto result (true); lock_a.unlock (); auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this (), first_attempt)); lock_a.lock (); if (connection_l && !stopped) { endpoint_frontier_request = connection_l->channel->get_tcp_endpoint (); std::future<bool> future; { auto this_l (shared_from_this ()); auto client (std::make_shared<nano::frontier_req_client> (connection_l, this_l)); client->run (); frontiers = client; future = client->promise.get_future (); } lock_a.unlock (); result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception. 
lock_a.lock (); if (result) { frontier_pulls.clear (); } else { account_count = nano::narrow_cast<unsigned int> (frontier_pulls.size ()); // Shuffle pulls release_assert (std::numeric_limits<CryptoPP::word32>::max () > frontier_pulls.size ()); if (!frontier_pulls.empty ()) { for (auto i = static_cast<CryptoPP::word32> (frontier_pulls.size () - 1); i > 0; --i) { auto k = nano::random_pool::generate_word32 (0, i); std::swap (frontier_pulls[i], frontier_pulls[k]); } } // Add to regular pulls while (!frontier_pulls.empty ()) { auto pull (frontier_pulls.front ()); lock_a.unlock (); node->bootstrap_initiator.connections->add_pull (pull); lock_a.lock (); ++pulling; frontier_pulls.pop_front (); } } if (node->config.logging.network_logging ()) { if (!result) { node->logger.try_log (boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % account_count % connection_l->channel->to_string ())); } else { node->stats.inc (nano::stat::type::error, nano::stat::detail::frontier_req, nano::stat::dir::out); } } } return result; } void nano::bootstrap_attempt_legacy::run_start (nano::unique_lock<std::mutex> & lock_a) { frontiers_received = false; frontiers_confirmed = false; total_blocks = 0; requeued_pulls = 0; recent_pulls_head.clear (); auto frontier_failure (true); uint64_t frontier_attempts (0); while (!stopped && frontier_failure) { ++frontier_attempts; frontier_failure = request_frontier (lock_a, frontier_attempts == 1); } frontiers_received = true; } void nano::bootstrap_attempt_legacy::run () { debug_assert (started); debug_assert (!node->flags.disable_legacy_bootstrap); node->bootstrap_initiator.connections->populate_connections (false); nano::unique_lock<std::mutex> lock (mutex); run_start (lock); while (still_pulling ()) { while (still_pulling ()) { // clang-format off condition.wait (lock, [&stopped = stopped, &pulling = pulling, &frontiers_confirmation_pending = frontiers_confirmation_pending] { return stopped || pulling == 0 || frontiers_confirmation_pending; }); // clang-format on attempt_restart_check (lock); } // Flushing may resolve forks which can add more pulls node->logger.try_log ("Flushing unchecked blocks"); lock.unlock (); node->block_processor.flush (); lock.lock (); node->logger.try_log ("Finished flushing unchecked blocks"); } if (!stopped) { node->logger.try_log ("Completed legacy pulls"); if (!node->flags.disable_bootstrap_bulk_push_client) { request_push (lock); } if (!stopped) { node->unchecked_cleanup (); } } lock.unlock (); stop (); condition.notify_all (); } void nano::bootstrap_attempt_legacy::get_information (boost::property_tree::ptree & tree_a) { nano::lock_guard<std::mutex> lock (mutex); tree_a.put ("frontier_pulls", std::to_string (frontier_pulls.size ())); tree_a.put ("frontiers_received", static_cast<bool> (frontiers_received)); tree_a.put ("frontiers_confirmed", static_cast<bool> (frontiers_confirmed)); tree_a.put ("frontiers_confirmation_pending", static_cast<bool> (frontiers_confirmation_pending)); }
1
16,598
There doesn't seem to be a reason to copy this here.
nanocurrency-nano-node
cpp
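A minimal sketch of the reviewer's point about the copy in add_frontier: the zero-frontier check from the patch above can be performed on pull_a directly, letting push_back make the single necessary copy (illustrative only, not the committed change):

void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a)
{
	// Reject incorrect or malicious pulls whose frontier is zero, without
	// first taking a local copy of the pull_info.
	if (!pull_a.head.is_zero ())
	{
		nano::lock_guard<std::mutex> lock (mutex);
		frontier_pulls.push_back (pull_a); // the deque copies pull_a here
	}
}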
@@ -1774,6 +1774,7 @@ std::string h2o_raw_tracer::bpf_text() { #include <linux/sched.h> #include <linux/limits.h> +#include "include/h2o/ebpf.h" #define STR_LEN 64
1
// Generated code. Do not edit it here! extern "C" { #include <sys/time.h> #include "quicly.h" #include "h2o/ebpf.h" } #include <cstdlib> #include <cstdint> #include <cstdio> #include <cstring> #include <string> #include <algorithm> #include "h2olog.h" #include "json.h" #include "raw_tracer.cc.h" #define STR_LEN 64 #define STR_LIT(s) s, strlen(s) using namespace std; // This is enough for here. See `quicly.c` for the full definition. struct st_quicly_conn_t { struct _st_quicly_conn_public_t super; }; using typeof_st_quicly_stream_t__stream_id = decltype(st_quicly_stream_t::stream_id); using typeof_quicly_rtt_t__minimum = decltype(quicly_rtt_t::minimum); using typeof_quicly_rtt_t__smoothed = decltype(quicly_rtt_t::smoothed); using typeof_quicly_rtt_t__variance = decltype(quicly_rtt_t::variance); using typeof_quicly_rtt_t__latest = decltype(quicly_rtt_t::latest); using typeof_st_quicly_conn_t__master_id = decltype(st_quicly_conn_t::super.local.cid_set.plaintext.master_id); #define GEN_FIELD_INFO(type, field, name) gen_field_info(#type, #field, &((type *)NULL)->field, name) #define DEFINE_RESOLVE_FUNC(field_type) \ std::string gen_field_info(const char *struct_type, const char *field_name, const field_type *field_ptr, const char *name) \ { \ return do_resolve(struct_type, field_name, #field_type, field_ptr, name); \ } template <typename FieldType> static std::string do_resolve(const char *struct_type, const char *field_name, const char *field_type, const FieldType *field_ptr, const char *name) { char *buff = NULL; size_t buff_len = 0; FILE *mem = open_memstream(&buff, &buff_len); fprintf(mem, "/* %s (%s#%s) */\n", name, struct_type, field_name); fprintf(mem, "#define offsetof_%s %zd\n", name, (const char *)field_ptr - (const char *)NULL); fprintf(mem, "#define typeof_%s %s\n", name, field_type); fprintf(mem, "#define get_%s(st) *((const %s *) ((const char*)st + offsetof_%s))\n", name, field_type, name); fprintf(mem, "\n"); fflush(mem); std::string s(buff, buff_len); fclose(mem); return s; } DEFINE_RESOLVE_FUNC(int16_t); DEFINE_RESOLVE_FUNC(uint16_t); DEFINE_RESOLVE_FUNC(int32_t); DEFINE_RESOLVE_FUNC(uint32_t); DEFINE_RESOLVE_FUNC(int64_t); DEFINE_RESOLVE_FUNC(uint64_t); static std::string gen_bpf_header() { std::string bpf; bpf += "#define sizeof_st_quicly_stream_t " + std::to_string(std::min<size_t>(sizeof(struct st_quicly_stream_t), 100)) + "\n"; bpf += GEN_FIELD_INFO(struct st_quicly_stream_t, stream_id, "st_quicly_stream_t__stream_id"); bpf += "#define sizeof_quicly_rtt_t " + std::to_string(std::min<size_t>(sizeof(struct quicly_rtt_t), 100)) + "\n"; bpf += GEN_FIELD_INFO(struct quicly_rtt_t, minimum, "quicly_rtt_t__minimum"); bpf += GEN_FIELD_INFO(struct quicly_rtt_t, smoothed, "quicly_rtt_t__smoothed"); bpf += GEN_FIELD_INFO(struct quicly_rtt_t, variance, "quicly_rtt_t__variance"); bpf += GEN_FIELD_INFO(struct quicly_rtt_t, latest, "quicly_rtt_t__latest"); bpf += "#define sizeof_st_quicly_conn_t " + std::to_string(std::min<size_t>(sizeof(struct st_quicly_conn_t), 100)) + "\n"; bpf += GEN_FIELD_INFO(struct st_quicly_conn_t, super.local.cid_set.plaintext.master_id, "st_quicly_conn_t__master_id"); bpf += "#define sizeof_st_h2o_ebpf_map_key_t " + std::to_string(std::min<size_t>(sizeof(struct st_h2o_ebpf_map_key_t), 100)) + "\n"; bpf += "#define sizeof_sockaddr " + std::to_string(std::min<size_t>(sizeof(struct sockaddr), 100)) + "\n"; bpf += "#define sizeof_sockaddr_in " + std::to_string(std::min<size_t>(sizeof(struct sockaddr_in), 100)) + "\n"; bpf += "#define sizeof_sockaddr_in6 " + 
std::to_string(std::min<size_t>(sizeof(struct sockaddr_in6), 100)) + "\n"; bpf += GEN_FIELD_INFO(struct sockaddr, sa_family, "sockaddr__sa_family"); bpf += "#define AF_INET " + std::to_string(AF_INET) + "\n"; bpf += "#define AF_INET6 " + std::to_string(AF_INET6) + "\n"; return bpf; } enum h2olog_event_id_t { H2OLOG_EVENT_ID_SCHED_SCHED_PROCESS_EXIT, H2OLOG_EVENT_ID_QUICLY_CONNECT, H2OLOG_EVENT_ID_QUICLY_ACCEPT, H2OLOG_EVENT_ID_QUICLY_FREE, H2OLOG_EVENT_ID_QUICLY_SEND, H2OLOG_EVENT_ID_QUICLY_RECEIVE, H2OLOG_EVENT_ID_QUICLY_VERSION_SWITCH, H2OLOG_EVENT_ID_QUICLY_IDLE_TIMEOUT, H2OLOG_EVENT_ID_QUICLY_STATELESS_RESET_RECEIVE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_HANDSHAKE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_UPDATE_SECRET, H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE_CONFIRMED, H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE_PREPARE, H2OLOG_EVENT_ID_QUICLY_PACKET_SENT, H2OLOG_EVENT_ID_QUICLY_PACKET_RECEIVED, H2OLOG_EVENT_ID_QUICLY_PACKET_PREPARE, H2OLOG_EVENT_ID_QUICLY_PACKET_ACKED, H2OLOG_EVENT_ID_QUICLY_PACKET_LOST, H2OLOG_EVENT_ID_QUICLY_PACKET_DECRYPTION_FAILED, H2OLOG_EVENT_ID_QUICLY_PTO, H2OLOG_EVENT_ID_QUICLY_CC_ACK_RECEIVED, H2OLOG_EVENT_ID_QUICLY_CC_CONGESTION, H2OLOG_EVENT_ID_QUICLY_ACK_BLOCK_RECEIVED, H2OLOG_EVENT_ID_QUICLY_ACK_DELAY_RECEIVED, H2OLOG_EVENT_ID_QUICLY_ACK_SEND, H2OLOG_EVENT_ID_QUICLY_PING_SEND, H2OLOG_EVENT_ID_QUICLY_PING_RECEIVE, H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_SEND, H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_SEND, H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_SEND, H2OLOG_EVENT_ID_QUICLY_STREAM_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_ACKED, H2OLOG_EVENT_ID_QUICLY_STREAM_LOST, H2OLOG_EVENT_ID_QUICLY_MAX_DATA_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_DATA_RECEIVE, H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_RECEIVE, H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_RECEIVE, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_SEND, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_ACKED, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_RECEIVE, H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_SEND, H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_SEND, H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_RECEIVE, H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_SEND, H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_RECEIVE, H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_DATAGRAM_SEND, H2OLOG_EVENT_ID_QUICLY_DATAGRAM_RECEIVE, H2OLOG_EVENT_ID_QUICLY_ACK_FREQUENCY_RECEIVE, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_SEND_STREAM, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_RECV_STREAM, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_ACK, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_LOST, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_OPEN, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_DESTROY, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_SHIFT, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_EMIT, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_STOP, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE_RESET, H2OLOG_EVENT_ID_QUICLY_CONN_STATS, H2OLOG_EVENT_ID_H2O__PRIVATE_SOCKET_LOOKUP_FLAGS, H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST, H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST_HEADER, H2OLOG_EVENT_ID_H2O_SEND_RESPONSE, 
H2OLOG_EVENT_ID_H2O_SEND_RESPONSE_HEADER, H2OLOG_EVENT_ID_H2O_H1_ACCEPT, H2OLOG_EVENT_ID_H2O_H1_CLOSE, H2OLOG_EVENT_ID_H2O_H2_UNKNOWN_FRAME_TYPE, H2OLOG_EVENT_ID_H2O_H3S_ACCEPT, H2OLOG_EVENT_ID_H2O_H3S_DESTROY, H2OLOG_EVENT_ID_H2O_H3S_STREAM_SET_STATE, H2OLOG_EVENT_ID_H2O_H3_FRAME_RECEIVE, H2OLOG_EVENT_ID_H2O_H3_PACKET_RECEIVE, H2OLOG_EVENT_ID_H2O_H3_PACKET_FORWARD, H2OLOG_EVENT_ID_H2O_H3_FORWARDED_PACKET_RECEIVE, H2OLOG_EVENT_ID_H2O_H3C_TUNNEL_CREATE, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_DESTROY, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_READ, H2OLOG_EVENT_ID_H2O_TUNNEL_PROCEED_READ, H2OLOG_EVENT_ID_H2O_TUNNEL_WRITE, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_WRITE_COMPLETE, H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_CREATE, H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_START, }; struct h2olog_event_t { enum h2olog_event_id_t id; union { struct { // quicly:connect typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint32_t version; } connect; struct { // quicly:accept typeof_st_quicly_conn_t__master_id master_id; int64_t at; char dcid[STR_LEN]; struct st_quicly_address_token_plaintext_t * address_token; } accept; struct { // quicly:free typeof_st_quicly_conn_t__master_id master_id; int64_t at; } free; struct { // quicly:send typeof_st_quicly_conn_t__master_id master_id; int64_t at; int state; char dcid[STR_LEN]; } send; struct { // quicly:receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; char dcid[STR_LEN]; uint8_t bytes[STR_LEN]; size_t bytes_len; } receive; struct { // quicly:version_switch typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint32_t new_version; } version_switch; struct { // quicly:idle_timeout typeof_st_quicly_conn_t__master_id master_id; int64_t at; } idle_timeout; struct { // quicly:stateless_reset_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } stateless_reset_receive; struct { // quicly:crypto_handshake typeof_st_quicly_conn_t__master_id master_id; int64_t at; int ret; } crypto_handshake; struct { // quicly:crypto_update_secret typeof_st_quicly_conn_t__master_id master_id; int64_t at; int is_enc; uint8_t epoch; char label[STR_LEN]; char secret[STR_LEN]; // appdata } crypto_update_secret; struct { // quicly:crypto_send_key_update typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_send_key_update; struct { // quicly:crypto_send_key_update_confirmed typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t next_pn; } crypto_send_key_update_confirmed; struct { // quicly:crypto_receive_key_update typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_receive_key_update; struct { // quicly:crypto_receive_key_update_prepare typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_receive_key_update_prepare; struct { // quicly:packet_sent typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; size_t len; uint8_t packet_type; int ack_only; } packet_sent; struct { // quicly:packet_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; uint8_t decrypted[STR_LEN]; // appdata size_t decrypted_len; uint8_t packet_type; } packet_received; struct { // quicly:packet_prepare typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t first_octet; char dcid[STR_LEN]; } packet_prepare; struct { // quicly:packet_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; int is_late_ack; } packet_acked; struct { // quicly:packet_lost 
typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; uint8_t packet_type; } packet_lost; struct { // quicly:packet_decryption_failed typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; } packet_decryption_failed; struct { // quicly:pto typeof_st_quicly_conn_t__master_id master_id; int64_t at; size_t inflight; uint32_t cwnd; int8_t pto_count; } pto; struct { // quicly:cc_ack_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t largest_acked; size_t bytes_acked; uint32_t cwnd; size_t inflight; } cc_ack_received; struct { // quicly:cc_congestion typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t max_lost_pn; size_t inflight; uint32_t cwnd; } cc_congestion; struct { // quicly:ack_block_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t ack_block_begin; uint64_t ack_block_end; } ack_block_received; struct { // quicly:ack_delay_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t ack_delay; } ack_delay_received; struct { // quicly:ack_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t largest_acked; uint64_t ack_delay; } ack_send; struct { // quicly:ping_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; } ping_send; struct { // quicly:ping_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } ping_receive; struct { // quicly:transport_close_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; uint64_t frame_type; char reason_phrase[STR_LEN]; } transport_close_send; struct { // quicly:transport_close_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; uint64_t frame_type; char reason_phrase[STR_LEN]; } transport_close_receive; struct { // quicly:application_close_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; char reason_phrase[STR_LEN]; } application_close_send; struct { // quicly:application_close_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; char reason_phrase[STR_LEN]; } application_close_receive; struct { // quicly:stream_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; int is_fin; } stream_send; struct { // quicly:stream_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; } stream_receive; struct { // quicly:stream_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; } stream_acked; struct { // quicly:stream_lost typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; } stream_lost; struct { // quicly:max_data_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; } max_data_send; struct { // quicly:max_data_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; } max_data_receive; struct { // quicly:max_streams_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } max_streams_send; struct { // quicly:max_streams_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } max_streams_receive; struct { // quicly:max_stream_data_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t maximum; } 
max_stream_data_send; struct { // quicly:max_stream_data_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t maximum; } max_stream_data_receive; struct { // quicly:new_token_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t token[STR_LEN]; size_t token_len; uint64_t generation; } new_token_send; struct { // quicly:new_token_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t generation; } new_token_acked; struct { // quicly:new_token_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t token[STR_LEN]; size_t token_len; } new_token_receive; struct { // quicly:handshake_done_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; } handshake_done_send; struct { // quicly:handshake_done_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } handshake_done_receive; struct { // quicly:streams_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } streams_blocked_send; struct { // quicly:streams_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } streams_blocked_receive; struct { // quicly:new_connection_id_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t retire_prior_to; char cid[STR_LEN]; char stateless_reset_token[STR_LEN]; } new_connection_id_send; struct { // quicly:new_connection_id_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t retire_prior_to; char cid[STR_LEN]; char stateless_reset_token[STR_LEN]; } new_connection_id_receive; struct { // quicly:retire_connection_id_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; } retire_connection_id_send; struct { // quicly:retire_connection_id_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; } retire_connection_id_receive; struct { // quicly:data_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t off; } data_blocked_send; struct { // quicly:data_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t off; } data_blocked_receive; struct { // quicly:stream_data_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t maximum; } stream_data_blocked_send; struct { // quicly:stream_data_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t maximum; } stream_data_blocked_receive; struct { // quicly:datagram_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t payload[STR_LEN]; // appdata size_t payload_len; } datagram_send; struct { // quicly:datagram_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t payload[STR_LEN]; // appdata size_t payload_len; } datagram_receive; struct { // quicly:ack_frequency_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t packet_tolerance; uint64_t max_ack_delay; int ignore_order; } ack_frequency_receive; struct { // quicly:quictrace_send_stream typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; int fin; } quictrace_send_stream; struct { // quicly:quictrace_recv_stream typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; int fin; } quictrace_recv_stream; struct { // 
quicly:quictrace_cc_ack typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_quicly_rtt_t__minimum minimum; typeof_quicly_rtt_t__smoothed smoothed; typeof_quicly_rtt_t__variance variance; typeof_quicly_rtt_t__latest latest; uint32_t cwnd; size_t inflight; } quictrace_cc_ack; struct { // quicly:quictrace_cc_lost typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_quicly_rtt_t__minimum minimum; typeof_quicly_rtt_t__smoothed smoothed; typeof_quicly_rtt_t__variance variance; typeof_quicly_rtt_t__latest latest; uint32_t cwnd; size_t inflight; } quictrace_cc_lost; struct { // quicly:stream_on_open typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; } stream_on_open; struct { // quicly:stream_on_destroy typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_destroy; struct { // quicly:stream_on_send_shift typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t delta; } stream_on_send_shift; struct { // quicly:stream_on_send_emit typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t off; size_t capacity; } stream_on_send_emit; struct { // quicly:stream_on_send_stop typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_send_stop; struct { // quicly:stream_on_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t off; uint8_t src[STR_LEN]; // appdata size_t src_len; } stream_on_receive; struct { // quicly:stream_on_receive_reset typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_receive_reset; struct { // quicly:conn_stats typeof_st_quicly_conn_t__master_id master_id; int64_t at; struct st_quicly_stats_t * stats; size_t size; } conn_stats; struct { // h2o:_private_socket_lookup_flags pid_t tid; uint64_t original_flags; struct st_h2o_ebpf_map_key_t info; } _private_socket_lookup_flags; struct { // h2o:receive_request uint64_t conn_id; uint64_t req_id; int http_version; } receive_request; struct { // h2o:receive_request_header uint64_t conn_id; uint64_t req_id; char name[STR_LEN]; // appdata size_t name_len; char value[STR_LEN]; // appdata size_t value_len; } receive_request_header; struct { // h2o:send_response uint64_t conn_id; uint64_t req_id; int status; struct st_h2o_tunnel_t * tunnel; } send_response; struct { // h2o:send_response_header uint64_t conn_id; uint64_t req_id; char name[STR_LEN]; // appdata size_t name_len; char value[STR_LEN]; // appdata size_t value_len; } send_response_header; struct { // h2o:h1_accept uint64_t conn_id; struct st_h2o_socket_t * sock; struct st_h2o_conn_t * conn; } h1_accept; struct { // h2o:h1_close uint64_t conn_id; } h1_close; struct { // h2o:h2_unknown_frame_type uint64_t conn_id; uint8_t frame_type; } h2_unknown_frame_type; struct { // h2o:h3s_accept uint64_t conn_id; struct st_h2o_conn_t * conn; typeof_st_quicly_conn_t__master_id master_id; } h3s_accept; struct { // h2o:h3s_destroy uint64_t conn_id; } h3s_destroy; struct { // h2o:h3s_stream_set_state uint64_t conn_id; uint64_t req_id; unsigned state; } h3s_stream_set_state; struct { // h2o:h3_frame_receive uint64_t frame_type; uint8_t bytes[STR_LEN]; // appdata size_t bytes_len; } h3_frame_receive; struct { // h2o:h3_packet_receive 
quicly_address_t dest; quicly_address_t src; uint8_t bytes[STR_LEN]; size_t bytes_len; } h3_packet_receive; struct { // h2o:h3_packet_forward quicly_address_t dest; quicly_address_t src; size_t num_packets; size_t num_bytes; int fd; } h3_packet_forward; struct { // h2o:h3_forwarded_packet_receive quicly_address_t dest; quicly_address_t src; size_t num_bytes; } h3_forwarded_packet_receive; struct { // h2o:h3c_tunnel_create struct st_h2o_tunnel_t * tunnel; } h3c_tunnel_create; struct { // h2o:tunnel_on_destroy struct st_h2o_tunnel_t * tunnel; } tunnel_on_destroy; struct { // h2o:tunnel_on_read struct st_h2o_tunnel_t * tunnel; char err[STR_LEN]; uint8_t bytes[STR_LEN]; // appdata size_t bytes_len; } tunnel_on_read; struct { // h2o:tunnel_proceed_read struct st_h2o_tunnel_t * tunnel; } tunnel_proceed_read; struct { // h2o:tunnel_write struct st_h2o_tunnel_t * tunnel; uint8_t bytes[STR_LEN]; // appdata size_t bytes_len; } tunnel_write; struct { // h2o:tunnel_on_write_complete struct st_h2o_tunnel_t * tunnel; char err[STR_LEN]; } tunnel_on_write_complete; struct { // h2o:socket_tunnel_create struct st_h2o_tunnel_t * tunnel; } socket_tunnel_create; struct { // h2o:socket_tunnel_start struct st_h2o_tunnel_t * tunnel; size_t bytes_to_consume; } socket_tunnel_start; }; }; void h2o_raw_tracer::initialize() { available_usdts.assign({ h2o_tracer::usdt("quicly", "connect", "trace_quicly__connect"), h2o_tracer::usdt("quicly", "accept", "trace_quicly__accept"), h2o_tracer::usdt("quicly", "free", "trace_quicly__free"), h2o_tracer::usdt("quicly", "send", "trace_quicly__send"), h2o_tracer::usdt("quicly", "receive", "trace_quicly__receive"), h2o_tracer::usdt("quicly", "version_switch", "trace_quicly__version_switch"), h2o_tracer::usdt("quicly", "idle_timeout", "trace_quicly__idle_timeout"), h2o_tracer::usdt("quicly", "stateless_reset_receive", "trace_quicly__stateless_reset_receive"), h2o_tracer::usdt("quicly", "crypto_handshake", "trace_quicly__crypto_handshake"), h2o_tracer::usdt("quicly", "crypto_update_secret", "trace_quicly__crypto_update_secret"), h2o_tracer::usdt("quicly", "crypto_send_key_update", "trace_quicly__crypto_send_key_update"), h2o_tracer::usdt("quicly", "crypto_send_key_update_confirmed", "trace_quicly__crypto_send_key_update_confirmed"), h2o_tracer::usdt("quicly", "crypto_receive_key_update", "trace_quicly__crypto_receive_key_update"), h2o_tracer::usdt("quicly", "crypto_receive_key_update_prepare", "trace_quicly__crypto_receive_key_update_prepare"), h2o_tracer::usdt("quicly", "packet_sent", "trace_quicly__packet_sent"), h2o_tracer::usdt("quicly", "packet_received", "trace_quicly__packet_received"), h2o_tracer::usdt("quicly", "packet_prepare", "trace_quicly__packet_prepare"), h2o_tracer::usdt("quicly", "packet_acked", "trace_quicly__packet_acked"), h2o_tracer::usdt("quicly", "packet_lost", "trace_quicly__packet_lost"), h2o_tracer::usdt("quicly", "packet_decryption_failed", "trace_quicly__packet_decryption_failed"), h2o_tracer::usdt("quicly", "pto", "trace_quicly__pto"), h2o_tracer::usdt("quicly", "cc_ack_received", "trace_quicly__cc_ack_received"), h2o_tracer::usdt("quicly", "cc_congestion", "trace_quicly__cc_congestion"), h2o_tracer::usdt("quicly", "ack_block_received", "trace_quicly__ack_block_received"), h2o_tracer::usdt("quicly", "ack_delay_received", "trace_quicly__ack_delay_received"), h2o_tracer::usdt("quicly", "ack_send", "trace_quicly__ack_send"), h2o_tracer::usdt("quicly", "ping_send", "trace_quicly__ping_send"), h2o_tracer::usdt("quicly", "ping_receive", 
"trace_quicly__ping_receive"), h2o_tracer::usdt("quicly", "transport_close_send", "trace_quicly__transport_close_send"), h2o_tracer::usdt("quicly", "transport_close_receive", "trace_quicly__transport_close_receive"), h2o_tracer::usdt("quicly", "application_close_send", "trace_quicly__application_close_send"), h2o_tracer::usdt("quicly", "application_close_receive", "trace_quicly__application_close_receive"), h2o_tracer::usdt("quicly", "stream_send", "trace_quicly__stream_send"), h2o_tracer::usdt("quicly", "stream_receive", "trace_quicly__stream_receive"), h2o_tracer::usdt("quicly", "stream_acked", "trace_quicly__stream_acked"), h2o_tracer::usdt("quicly", "stream_lost", "trace_quicly__stream_lost"), h2o_tracer::usdt("quicly", "max_data_send", "trace_quicly__max_data_send"), h2o_tracer::usdt("quicly", "max_data_receive", "trace_quicly__max_data_receive"), h2o_tracer::usdt("quicly", "max_streams_send", "trace_quicly__max_streams_send"), h2o_tracer::usdt("quicly", "max_streams_receive", "trace_quicly__max_streams_receive"), h2o_tracer::usdt("quicly", "max_stream_data_send", "trace_quicly__max_stream_data_send"), h2o_tracer::usdt("quicly", "max_stream_data_receive", "trace_quicly__max_stream_data_receive"), h2o_tracer::usdt("quicly", "new_token_send", "trace_quicly__new_token_send"), h2o_tracer::usdt("quicly", "new_token_acked", "trace_quicly__new_token_acked"), h2o_tracer::usdt("quicly", "new_token_receive", "trace_quicly__new_token_receive"), h2o_tracer::usdt("quicly", "handshake_done_send", "trace_quicly__handshake_done_send"), h2o_tracer::usdt("quicly", "handshake_done_receive", "trace_quicly__handshake_done_receive"), h2o_tracer::usdt("quicly", "streams_blocked_send", "trace_quicly__streams_blocked_send"), h2o_tracer::usdt("quicly", "streams_blocked_receive", "trace_quicly__streams_blocked_receive"), h2o_tracer::usdt("quicly", "new_connection_id_send", "trace_quicly__new_connection_id_send"), h2o_tracer::usdt("quicly", "new_connection_id_receive", "trace_quicly__new_connection_id_receive"), h2o_tracer::usdt("quicly", "retire_connection_id_send", "trace_quicly__retire_connection_id_send"), h2o_tracer::usdt("quicly", "retire_connection_id_receive", "trace_quicly__retire_connection_id_receive"), h2o_tracer::usdt("quicly", "data_blocked_send", "trace_quicly__data_blocked_send"), h2o_tracer::usdt("quicly", "data_blocked_receive", "trace_quicly__data_blocked_receive"), h2o_tracer::usdt("quicly", "stream_data_blocked_send", "trace_quicly__stream_data_blocked_send"), h2o_tracer::usdt("quicly", "stream_data_blocked_receive", "trace_quicly__stream_data_blocked_receive"), h2o_tracer::usdt("quicly", "datagram_send", "trace_quicly__datagram_send"), h2o_tracer::usdt("quicly", "datagram_receive", "trace_quicly__datagram_receive"), h2o_tracer::usdt("quicly", "ack_frequency_receive", "trace_quicly__ack_frequency_receive"), h2o_tracer::usdt("quicly", "quictrace_send_stream", "trace_quicly__quictrace_send_stream"), h2o_tracer::usdt("quicly", "quictrace_recv_stream", "trace_quicly__quictrace_recv_stream"), h2o_tracer::usdt("quicly", "quictrace_cc_ack", "trace_quicly__quictrace_cc_ack"), h2o_tracer::usdt("quicly", "quictrace_cc_lost", "trace_quicly__quictrace_cc_lost"), h2o_tracer::usdt("quicly", "stream_on_open", "trace_quicly__stream_on_open"), h2o_tracer::usdt("quicly", "stream_on_destroy", "trace_quicly__stream_on_destroy"), h2o_tracer::usdt("quicly", "stream_on_send_shift", "trace_quicly__stream_on_send_shift"), h2o_tracer::usdt("quicly", "stream_on_send_emit", "trace_quicly__stream_on_send_emit"), 
h2o_tracer::usdt("quicly", "stream_on_send_stop", "trace_quicly__stream_on_send_stop"), h2o_tracer::usdt("quicly", "stream_on_receive", "trace_quicly__stream_on_receive"), h2o_tracer::usdt("quicly", "stream_on_receive_reset", "trace_quicly__stream_on_receive_reset"), h2o_tracer::usdt("quicly", "conn_stats", "trace_quicly__conn_stats"), h2o_tracer::usdt("h2o", "_private_socket_lookup_flags", "trace_h2o___private_socket_lookup_flags"), h2o_tracer::usdt("h2o", "receive_request", "trace_h2o__receive_request"), h2o_tracer::usdt("h2o", "receive_request_header", "trace_h2o__receive_request_header"), h2o_tracer::usdt("h2o", "send_response", "trace_h2o__send_response"), h2o_tracer::usdt("h2o", "send_response_header", "trace_h2o__send_response_header"), h2o_tracer::usdt("h2o", "h1_accept", "trace_h2o__h1_accept"), h2o_tracer::usdt("h2o", "h1_close", "trace_h2o__h1_close"), h2o_tracer::usdt("h2o", "h2_unknown_frame_type", "trace_h2o__h2_unknown_frame_type"), h2o_tracer::usdt("h2o", "h3s_accept", "trace_h2o__h3s_accept"), h2o_tracer::usdt("h2o", "h3s_destroy", "trace_h2o__h3s_destroy"), h2o_tracer::usdt("h2o", "h3s_stream_set_state", "trace_h2o__h3s_stream_set_state"), h2o_tracer::usdt("h2o", "h3_frame_receive", "trace_h2o__h3_frame_receive"), h2o_tracer::usdt("h2o", "h3_packet_receive", "trace_h2o__h3_packet_receive"), h2o_tracer::usdt("h2o", "h3_packet_forward", "trace_h2o__h3_packet_forward"), h2o_tracer::usdt("h2o", "h3_forwarded_packet_receive", "trace_h2o__h3_forwarded_packet_receive"), h2o_tracer::usdt("h2o", "h3c_tunnel_create", "trace_h2o__h3c_tunnel_create"), h2o_tracer::usdt("h2o", "tunnel_on_destroy", "trace_h2o__tunnel_on_destroy"), h2o_tracer::usdt("h2o", "tunnel_on_read", "trace_h2o__tunnel_on_read"), h2o_tracer::usdt("h2o", "tunnel_proceed_read", "trace_h2o__tunnel_proceed_read"), h2o_tracer::usdt("h2o", "tunnel_write", "trace_h2o__tunnel_write"), h2o_tracer::usdt("h2o", "tunnel_on_write_complete", "trace_h2o__tunnel_on_write_complete"), h2o_tracer::usdt("h2o", "socket_tunnel_create", "trace_h2o__socket_tunnel_create"), h2o_tracer::usdt("h2o", "socket_tunnel_start", "trace_h2o__socket_tunnel_start"), }); } void h2o_raw_tracer::do_handle_event(const void *data, int data_len) { const h2olog_event_t *event = static_cast<const h2olog_event_t*>(data); if (event->id == H2OLOG_EVENT_ID_SCHED_SCHED_PROCESS_EXIT) { exit(0); } // output JSON fprintf(out_, "{"); switch (event->id) { case H2OLOG_EVENT_ID_QUICLY_CONNECT: { // quicly:connect json_write_pair_n(out_, STR_LIT("type"), STR_LIT("connect")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->connect.master_id); json_write_pair_c(out_, STR_LIT("time"), event->connect.at); json_write_pair_c(out_, STR_LIT("version"), event->connect.version); break; } case H2OLOG_EVENT_ID_QUICLY_ACCEPT: { // quicly:accept json_write_pair_n(out_, STR_LIT("type"), STR_LIT("accept")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->accept.master_id); json_write_pair_c(out_, STR_LIT("time"), event->accept.at); json_write_pair_c(out_, STR_LIT("dcid"), event->accept.dcid, strlen(event->accept.dcid)); json_write_pair_c(out_, STR_LIT("address-token"), event->accept.address_token); break; } case H2OLOG_EVENT_ID_QUICLY_FREE: { // quicly:free json_write_pair_n(out_, STR_LIT("type"), STR_LIT("free")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->free.master_id); json_write_pair_c(out_, STR_LIT("time"), event->free.at); break; } case 
H2OLOG_EVENT_ID_QUICLY_SEND: { // quicly:send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->send.at); json_write_pair_c(out_, STR_LIT("state"), event->send.state); json_write_pair_c(out_, STR_LIT("dcid"), event->send.dcid, strlen(event->send.dcid)); break; } case H2OLOG_EVENT_ID_QUICLY_RECEIVE: { // quicly:receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->receive.at); json_write_pair_c(out_, STR_LIT("dcid"), event->receive.dcid, strlen(event->receive.dcid)); json_write_pair_c(out_, STR_LIT("bytes"), event->receive.bytes, (event->receive.bytes_len < STR_LEN ? event->receive.bytes_len : STR_LEN)); json_write_pair_c(out_, STR_LIT("bytes-len"), event->receive.bytes_len); break; } case H2OLOG_EVENT_ID_QUICLY_VERSION_SWITCH: { // quicly:version_switch json_write_pair_n(out_, STR_LIT("type"), STR_LIT("version-switch")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->version_switch.master_id); json_write_pair_c(out_, STR_LIT("time"), event->version_switch.at); json_write_pair_c(out_, STR_LIT("new-version"), event->version_switch.new_version); break; } case H2OLOG_EVENT_ID_QUICLY_IDLE_TIMEOUT: { // quicly:idle_timeout json_write_pair_n(out_, STR_LIT("type"), STR_LIT("idle-timeout")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->idle_timeout.master_id); json_write_pair_c(out_, STR_LIT("time"), event->idle_timeout.at); break; } case H2OLOG_EVENT_ID_QUICLY_STATELESS_RESET_RECEIVE: { // quicly:stateless_reset_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stateless-reset-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stateless_reset_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stateless_reset_receive.at); break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_HANDSHAKE: { // quicly:crypto_handshake json_write_pair_n(out_, STR_LIT("type"), STR_LIT("crypto-handshake")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_handshake.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_handshake.at); json_write_pair_c(out_, STR_LIT("ret"), event->crypto_handshake.ret); break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_UPDATE_SECRET: { // quicly:crypto_update_secret json_write_pair_n(out_, STR_LIT("type"), STR_LIT("crypto-update-secret")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_update_secret.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_update_secret.at); json_write_pair_c(out_, STR_LIT("is-enc"), event->crypto_update_secret.is_enc); json_write_pair_c(out_, STR_LIT("epoch"), event->crypto_update_secret.epoch); json_write_pair_c(out_, STR_LIT("label"), event->crypto_update_secret.label, strlen(event->crypto_update_secret.label)); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("secret"), event->crypto_update_secret.secret, strlen(event->crypto_update_secret.secret)); } break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE: { // quicly:crypto_send_key_update json_write_pair_n(out_, STR_LIT("type"), 
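// Key material is sensitive, so the "secret" field of the crypto_* events is only
// written when application-data logging was requested (include_appdata_); otherwise
// only the metadata (phase, epoch, label, ...) is emitted.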
STR_LIT("crypto-send-key-update")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_send_key_update.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_send_key_update.at); json_write_pair_c(out_, STR_LIT("phase"), event->crypto_send_key_update.phase); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("secret"), event->crypto_send_key_update.secret, strlen(event->crypto_send_key_update.secret)); } break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE_CONFIRMED: { // quicly:crypto_send_key_update_confirmed json_write_pair_n(out_, STR_LIT("type"), STR_LIT("crypto-send-key-update-confirmed")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_send_key_update_confirmed.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_send_key_update_confirmed.at); json_write_pair_c(out_, STR_LIT("next-pn"), event->crypto_send_key_update_confirmed.next_pn); break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE: { // quicly:crypto_receive_key_update json_write_pair_n(out_, STR_LIT("type"), STR_LIT("crypto-receive-key-update")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_receive_key_update.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_receive_key_update.at); json_write_pair_c(out_, STR_LIT("phase"), event->crypto_receive_key_update.phase); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("secret"), event->crypto_receive_key_update.secret, strlen(event->crypto_receive_key_update.secret)); } break; } case H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE_PREPARE: { // quicly:crypto_receive_key_update_prepare json_write_pair_n(out_, STR_LIT("type"), STR_LIT("crypto-receive-key-update-prepare")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->crypto_receive_key_update_prepare.master_id); json_write_pair_c(out_, STR_LIT("time"), event->crypto_receive_key_update_prepare.at); json_write_pair_c(out_, STR_LIT("phase"), event->crypto_receive_key_update_prepare.phase); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("secret"), event->crypto_receive_key_update_prepare.secret, strlen(event->crypto_receive_key_update_prepare.secret)); } break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_SENT: { // quicly:packet_sent json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-sent")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_sent.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_sent.at); json_write_pair_c(out_, STR_LIT("pn"), event->packet_sent.pn); json_write_pair_c(out_, STR_LIT("len"), event->packet_sent.len); json_write_pair_c(out_, STR_LIT("packet-type"), event->packet_sent.packet_type); json_write_pair_c(out_, STR_LIT("ack-only"), event->packet_sent.ack_only); break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_RECEIVED: { // quicly:packet_received json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-received")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_received.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_received.at); json_write_pair_c(out_, STR_LIT("pn"), event->packet_received.pn); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("decrypted"), event->packet_received.decrypted, (event->packet_received.decrypted_len < STR_LEN ? 
event->packet_received.decrypted_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("decrypted-len"), event->packet_received.decrypted_len); json_write_pair_c(out_, STR_LIT("packet-type"), event->packet_received.packet_type); break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_PREPARE: { // quicly:packet_prepare json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-prepare")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_prepare.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_prepare.at); json_write_pair_c(out_, STR_LIT("first-octet"), event->packet_prepare.first_octet); json_write_pair_c(out_, STR_LIT("dcid"), event->packet_prepare.dcid, strlen(event->packet_prepare.dcid)); break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_ACKED: { // quicly:packet_acked json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-acked")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_acked.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_acked.at); json_write_pair_c(out_, STR_LIT("pn"), event->packet_acked.pn); json_write_pair_c(out_, STR_LIT("is-late-ack"), event->packet_acked.is_late_ack); break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_LOST: { // quicly:packet_lost json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-lost")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_lost.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_lost.at); json_write_pair_c(out_, STR_LIT("pn"), event->packet_lost.pn); json_write_pair_c(out_, STR_LIT("packet-type"), event->packet_lost.packet_type); break; } case H2OLOG_EVENT_ID_QUICLY_PACKET_DECRYPTION_FAILED: { // quicly:packet_decryption_failed json_write_pair_n(out_, STR_LIT("type"), STR_LIT("packet-decryption-failed")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->packet_decryption_failed.master_id); json_write_pair_c(out_, STR_LIT("time"), event->packet_decryption_failed.at); json_write_pair_c(out_, STR_LIT("pn"), event->packet_decryption_failed.pn); break; } case H2OLOG_EVENT_ID_QUICLY_PTO: { // quicly:pto json_write_pair_n(out_, STR_LIT("type"), STR_LIT("pto")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->pto.master_id); json_write_pair_c(out_, STR_LIT("time"), event->pto.at); json_write_pair_c(out_, STR_LIT("inflight"), event->pto.inflight); json_write_pair_c(out_, STR_LIT("cwnd"), event->pto.cwnd); json_write_pair_c(out_, STR_LIT("pto-count"), event->pto.pto_count); break; } case H2OLOG_EVENT_ID_QUICLY_CC_ACK_RECEIVED: { // quicly:cc_ack_received json_write_pair_n(out_, STR_LIT("type"), STR_LIT("cc-ack-received")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->cc_ack_received.master_id); json_write_pair_c(out_, STR_LIT("time"), event->cc_ack_received.at); json_write_pair_c(out_, STR_LIT("largest-acked"), event->cc_ack_received.largest_acked); json_write_pair_c(out_, STR_LIT("bytes-acked"), event->cc_ack_received.bytes_acked); json_write_pair_c(out_, STR_LIT("cwnd"), event->cc_ack_received.cwnd); json_write_pair_c(out_, STR_LIT("inflight"), event->cc_ack_received.inflight); break; } case H2OLOG_EVENT_ID_QUICLY_CC_CONGESTION: { // quicly:cc_congestion json_write_pair_n(out_, STR_LIT("type"), STR_LIT("cc-congestion")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), 
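// The congestion-control events (cc_ack_received / cc_congestion) snapshot the sender
// state at the time the probe fired: congestion window ("cwnd") and bytes in flight
// ("inflight").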
event->cc_congestion.master_id); json_write_pair_c(out_, STR_LIT("time"), event->cc_congestion.at); json_write_pair_c(out_, STR_LIT("max-lost-pn"), event->cc_congestion.max_lost_pn); json_write_pair_c(out_, STR_LIT("inflight"), event->cc_congestion.inflight); json_write_pair_c(out_, STR_LIT("cwnd"), event->cc_congestion.cwnd); break; } case H2OLOG_EVENT_ID_QUICLY_ACK_BLOCK_RECEIVED: { // quicly:ack_block_received json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ack-block-received")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ack_block_received.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ack_block_received.at); json_write_pair_c(out_, STR_LIT("ack-block-begin"), event->ack_block_received.ack_block_begin); json_write_pair_c(out_, STR_LIT("ack-block-end"), event->ack_block_received.ack_block_end); break; } case H2OLOG_EVENT_ID_QUICLY_ACK_DELAY_RECEIVED: { // quicly:ack_delay_received json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ack-delay-received")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ack_delay_received.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ack_delay_received.at); json_write_pair_c(out_, STR_LIT("ack-delay"), event->ack_delay_received.ack_delay); break; } case H2OLOG_EVENT_ID_QUICLY_ACK_SEND: { // quicly:ack_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ack-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ack_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ack_send.at); json_write_pair_c(out_, STR_LIT("largest-acked"), event->ack_send.largest_acked); json_write_pair_c(out_, STR_LIT("ack-delay"), event->ack_send.ack_delay); break; } case H2OLOG_EVENT_ID_QUICLY_PING_SEND: { // quicly:ping_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ping-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ping_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ping_send.at); break; } case H2OLOG_EVENT_ID_QUICLY_PING_RECEIVE: { // quicly:ping_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ping-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ping_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ping_receive.at); break; } case H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_SEND: { // quicly:transport_close_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("transport-close-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->transport_close_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->transport_close_send.at); json_write_pair_c(out_, STR_LIT("error-code"), event->transport_close_send.error_code); json_write_pair_c(out_, STR_LIT("frame-type"), event->transport_close_send.frame_type); json_write_pair_c(out_, STR_LIT("reason-phrase"), event->transport_close_send.reason_phrase, strlen(event->transport_close_send.reason_phrase)); break; } case H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_RECEIVE: { // quicly:transport_close_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("transport-close-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->transport_close_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->transport_close_receive.at); json_write_pair_c(out_, STR_LIT("error-code"), 
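// Both transport- and application-level CONNECTION_CLOSE events carry the error code
// and the reason phrase; the transport-level ones additionally record the frame type
// that caused the close.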
event->transport_close_receive.error_code); json_write_pair_c(out_, STR_LIT("frame-type"), event->transport_close_receive.frame_type); json_write_pair_c(out_, STR_LIT("reason-phrase"), event->transport_close_receive.reason_phrase, strlen(event->transport_close_receive.reason_phrase)); break; } case H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_SEND: { // quicly:application_close_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("application-close-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->application_close_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->application_close_send.at); json_write_pair_c(out_, STR_LIT("error-code"), event->application_close_send.error_code); json_write_pair_c(out_, STR_LIT("reason-phrase"), event->application_close_send.reason_phrase, strlen(event->application_close_send.reason_phrase)); break; } case H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_RECEIVE: { // quicly:application_close_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("application-close-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->application_close_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->application_close_receive.at); json_write_pair_c(out_, STR_LIT("error-code"), event->application_close_receive.error_code); json_write_pair_c(out_, STR_LIT("reason-phrase"), event->application_close_receive.reason_phrase, strlen(event->application_close_receive.reason_phrase)); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_SEND: { // quicly:stream_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_send.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_send.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_send.off); json_write_pair_c(out_, STR_LIT("len"), event->stream_send.len); json_write_pair_c(out_, STR_LIT("is-fin"), event->stream_send.is_fin); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_RECEIVE: { // quicly:stream_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_receive.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_receive.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_receive.off); json_write_pair_c(out_, STR_LIT("len"), event->stream_receive.len); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ACKED: { // quicly:stream_acked json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-acked")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_acked.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_acked.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_acked.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_acked.off); json_write_pair_c(out_, STR_LIT("len"), event->stream_acked.len); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_LOST: { // quicly:stream_lost json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-lost")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_lost.master_id); json_write_pair_c(out_, STR_LIT("time"), 
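// stream_send / stream_receive / stream_acked / stream_lost all describe a contiguous
// byte range of one stream ("stream-id", "off", "len"), which is enough to follow
// retransmissions of individual stream chunks across a trace.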
event->stream_lost.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_lost.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_lost.off); json_write_pair_c(out_, STR_LIT("len"), event->stream_lost.len); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_DATA_SEND: { // quicly:max_data_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-data-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_data_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_data_send.at); json_write_pair_c(out_, STR_LIT("maximum"), event->max_data_send.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_DATA_RECEIVE: { // quicly:max_data_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-data-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_data_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_data_receive.at); json_write_pair_c(out_, STR_LIT("maximum"), event->max_data_receive.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_SEND: { // quicly:max_streams_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-streams-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_streams_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_streams_send.at); json_write_pair_c(out_, STR_LIT("maximum"), event->max_streams_send.maximum); json_write_pair_c(out_, STR_LIT("is-unidirectional"), event->max_streams_send.is_unidirectional); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_RECEIVE: { // quicly:max_streams_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-streams-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_streams_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_streams_receive.at); json_write_pair_c(out_, STR_LIT("maximum"), event->max_streams_receive.maximum); json_write_pair_c(out_, STR_LIT("is-unidirectional"), event->max_streams_receive.is_unidirectional); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_SEND: { // quicly:max_stream_data_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-stream-data-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_stream_data_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_stream_data_send.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->max_stream_data_send.stream_id); json_write_pair_c(out_, STR_LIT("maximum"), event->max_stream_data_send.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_RECEIVE: { // quicly:max_stream_data_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("max-stream-data-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->max_stream_data_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->max_stream_data_receive.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->max_stream_data_receive.stream_id); json_write_pair_c(out_, STR_LIT("maximum"), event->max_stream_data_receive.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_SEND: { // quicly:new_token_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("new-token-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->new_token_send.master_id); json_write_pair_c(out_, STR_LIT("time"), 
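// NEW_TOKEN frames: the token bytes are dumped (clamped to STR_LEN) together with the
// full "token-len" and a "generation" counter that reappears in new_token_acked.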
event->new_token_send.at); json_write_pair_c(out_, STR_LIT("token"), event->new_token_send.token, (event->new_token_send.token_len < STR_LEN ? event->new_token_send.token_len : STR_LEN)); json_write_pair_c(out_, STR_LIT("token-len"), event->new_token_send.token_len); json_write_pair_c(out_, STR_LIT("generation"), event->new_token_send.generation); break; } case H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_ACKED: { // quicly:new_token_acked json_write_pair_n(out_, STR_LIT("type"), STR_LIT("new-token-acked")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->new_token_acked.master_id); json_write_pair_c(out_, STR_LIT("time"), event->new_token_acked.at); json_write_pair_c(out_, STR_LIT("generation"), event->new_token_acked.generation); break; } case H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_RECEIVE: { // quicly:new_token_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("new-token-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->new_token_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->new_token_receive.at); json_write_pair_c(out_, STR_LIT("token"), event->new_token_receive.token, (event->new_token_receive.token_len < STR_LEN ? event->new_token_receive.token_len : STR_LEN)); json_write_pair_c(out_, STR_LIT("token-len"), event->new_token_receive.token_len); break; } case H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_SEND: { // quicly:handshake_done_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("handshake-done-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->handshake_done_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->handshake_done_send.at); break; } case H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_RECEIVE: { // quicly:handshake_done_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("handshake-done-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->handshake_done_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->handshake_done_receive.at); break; } case H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_SEND: { // quicly:streams_blocked_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("streams-blocked-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->streams_blocked_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->streams_blocked_send.at); json_write_pair_c(out_, STR_LIT("maximum"), event->streams_blocked_send.maximum); json_write_pair_c(out_, STR_LIT("is-unidirectional"), event->streams_blocked_send.is_unidirectional); break; } case H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_RECEIVE: { // quicly:streams_blocked_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("streams-blocked-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->streams_blocked_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->streams_blocked_receive.at); json_write_pair_c(out_, STR_LIT("maximum"), event->streams_blocked_receive.maximum); json_write_pair_c(out_, STR_LIT("is-unidirectional"), event->streams_blocked_receive.is_unidirectional); break; } case H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_SEND: { // quicly:new_connection_id_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("new-connection-id-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), 
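// Connection-ID management: the new_connection_id_* events log the CID sequence number,
// the retire-prior-to threshold, and the CID plus its stateless reset token as
// NUL-terminated strings; retire_connection_id_* only logs the sequence number.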
event->new_connection_id_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->new_connection_id_send.at); json_write_pair_c(out_, STR_LIT("sequence"), event->new_connection_id_send.sequence); json_write_pair_c(out_, STR_LIT("retire-prior-to"), event->new_connection_id_send.retire_prior_to); json_write_pair_c(out_, STR_LIT("cid"), event->new_connection_id_send.cid, strlen(event->new_connection_id_send.cid)); json_write_pair_c(out_, STR_LIT("stateless-reset-token"), event->new_connection_id_send.stateless_reset_token, strlen(event->new_connection_id_send.stateless_reset_token)); break; } case H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_RECEIVE: { // quicly:new_connection_id_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("new-connection-id-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->new_connection_id_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->new_connection_id_receive.at); json_write_pair_c(out_, STR_LIT("sequence"), event->new_connection_id_receive.sequence); json_write_pair_c(out_, STR_LIT("retire-prior-to"), event->new_connection_id_receive.retire_prior_to); json_write_pair_c(out_, STR_LIT("cid"), event->new_connection_id_receive.cid, strlen(event->new_connection_id_receive.cid)); json_write_pair_c(out_, STR_LIT("stateless-reset-token"), event->new_connection_id_receive.stateless_reset_token, strlen(event->new_connection_id_receive.stateless_reset_token)); break; } case H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_SEND: { // quicly:retire_connection_id_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("retire-connection-id-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->retire_connection_id_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->retire_connection_id_send.at); json_write_pair_c(out_, STR_LIT("sequence"), event->retire_connection_id_send.sequence); break; } case H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_RECEIVE: { // quicly:retire_connection_id_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("retire-connection-id-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->retire_connection_id_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->retire_connection_id_receive.at); json_write_pair_c(out_, STR_LIT("sequence"), event->retire_connection_id_receive.sequence); break; } case H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_SEND: { // quicly:data_blocked_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("data-blocked-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->data_blocked_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->data_blocked_send.at); json_write_pair_c(out_, STR_LIT("off"), event->data_blocked_send.off); break; } case H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_RECEIVE: { // quicly:data_blocked_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("data-blocked-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->data_blocked_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->data_blocked_receive.at); json_write_pair_c(out_, STR_LIT("off"), event->data_blocked_receive.off); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_SEND: { // quicly:stream_data_blocked_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-data-blocked-send")); json_write_pair_c(out_, 
STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_data_blocked_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_data_blocked_send.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_data_blocked_send.stream_id); json_write_pair_c(out_, STR_LIT("maximum"), event->stream_data_blocked_send.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_RECEIVE: { // quicly:stream_data_blocked_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-data-blocked-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_data_blocked_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_data_blocked_receive.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_data_blocked_receive.stream_id); json_write_pair_c(out_, STR_LIT("maximum"), event->stream_data_blocked_receive.maximum); break; } case H2OLOG_EVENT_ID_QUICLY_DATAGRAM_SEND: { // quicly:datagram_send json_write_pair_n(out_, STR_LIT("type"), STR_LIT("datagram-send")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->datagram_send.master_id); json_write_pair_c(out_, STR_LIT("time"), event->datagram_send.at); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("payload"), event->datagram_send.payload, (event->datagram_send.payload_len < STR_LEN ? event->datagram_send.payload_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("payload-len"), event->datagram_send.payload_len); break; } case H2OLOG_EVENT_ID_QUICLY_DATAGRAM_RECEIVE: { // quicly:datagram_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("datagram-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->datagram_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->datagram_receive.at); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("payload"), event->datagram_receive.payload, (event->datagram_receive.payload_len < STR_LEN ? 
event->datagram_receive.payload_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("payload-len"), event->datagram_receive.payload_len); break; } case H2OLOG_EVENT_ID_QUICLY_ACK_FREQUENCY_RECEIVE: { // quicly:ack_frequency_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("ack-frequency-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->ack_frequency_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->ack_frequency_receive.at); json_write_pair_c(out_, STR_LIT("sequence"), event->ack_frequency_receive.sequence); json_write_pair_c(out_, STR_LIT("packet-tolerance"), event->ack_frequency_receive.packet_tolerance); json_write_pair_c(out_, STR_LIT("max-ack-delay"), event->ack_frequency_receive.max_ack_delay); json_write_pair_c(out_, STR_LIT("ignore-order"), event->ack_frequency_receive.ignore_order); break; } case H2OLOG_EVENT_ID_QUICLY_QUICTRACE_SEND_STREAM: { // quicly:quictrace_send_stream json_write_pair_n(out_, STR_LIT("type"), STR_LIT("quictrace-send-stream")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->quictrace_send_stream.master_id); json_write_pair_c(out_, STR_LIT("time"), event->quictrace_send_stream.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->quictrace_send_stream.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->quictrace_send_stream.off); json_write_pair_c(out_, STR_LIT("len"), event->quictrace_send_stream.len); json_write_pair_c(out_, STR_LIT("fin"), event->quictrace_send_stream.fin); break; } case H2OLOG_EVENT_ID_QUICLY_QUICTRACE_RECV_STREAM: { // quicly:quictrace_recv_stream json_write_pair_n(out_, STR_LIT("type"), STR_LIT("quictrace-recv-stream")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->quictrace_recv_stream.master_id); json_write_pair_c(out_, STR_LIT("time"), event->quictrace_recv_stream.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->quictrace_recv_stream.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->quictrace_recv_stream.off); json_write_pair_c(out_, STR_LIT("len"), event->quictrace_recv_stream.len); json_write_pair_c(out_, STR_LIT("fin"), event->quictrace_recv_stream.fin); break; } case H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_ACK: { // quicly:quictrace_cc_ack json_write_pair_n(out_, STR_LIT("type"), STR_LIT("quictrace-cc-ack")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->quictrace_cc_ack.master_id); json_write_pair_c(out_, STR_LIT("time"), event->quictrace_cc_ack.at); json_write_pair_c(out_, STR_LIT("min-rtt"), event->quictrace_cc_ack.minimum); json_write_pair_c(out_, STR_LIT("smoothed-rtt"), event->quictrace_cc_ack.smoothed); json_write_pair_c(out_, STR_LIT("variance-rtt"), event->quictrace_cc_ack.variance); json_write_pair_c(out_, STR_LIT("latest-rtt"), event->quictrace_cc_ack.latest); json_write_pair_c(out_, STR_LIT("cwnd"), event->quictrace_cc_ack.cwnd); json_write_pair_c(out_, STR_LIT("inflight"), event->quictrace_cc_ack.inflight); break; } case H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_LOST: { // quicly:quictrace_cc_lost json_write_pair_n(out_, STR_LIT("type"), STR_LIT("quictrace-cc-lost")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->quictrace_cc_lost.master_id); json_write_pair_c(out_, STR_LIT("time"), event->quictrace_cc_lost.at); json_write_pair_c(out_, STR_LIT("min-rtt"), event->quictrace_cc_lost.minimum); json_write_pair_c(out_, 
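// quictrace_cc_ack / quictrace_cc_lost expose the loss-recovery RTT estimator; the
// struct fields (minimum/smoothed/variance/latest) are renamed to the "*-rtt" JSON
// keys, alongside "cwnd" and "inflight".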
STR_LIT("smoothed-rtt"), event->quictrace_cc_lost.smoothed); json_write_pair_c(out_, STR_LIT("variance-rtt"), event->quictrace_cc_lost.variance); json_write_pair_c(out_, STR_LIT("latest-rtt"), event->quictrace_cc_lost.latest); json_write_pair_c(out_, STR_LIT("cwnd"), event->quictrace_cc_lost.cwnd); json_write_pair_c(out_, STR_LIT("inflight"), event->quictrace_cc_lost.inflight); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_OPEN: { // quicly:stream_on_open json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-open")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_open.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_open.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_open.stream_id); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_DESTROY: { // quicly:stream_on_destroy json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-destroy")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_destroy.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_destroy.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_destroy.stream_id); json_write_pair_c(out_, STR_LIT("err"), event->stream_on_destroy.err); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_SHIFT: { // quicly:stream_on_send_shift json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-send-shift")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_send_shift.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_send_shift.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_send_shift.stream_id); json_write_pair_c(out_, STR_LIT("delta"), event->stream_on_send_shift.delta); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_EMIT: { // quicly:stream_on_send_emit json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-send-emit")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_send_emit.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_send_emit.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_send_emit.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_on_send_emit.off); json_write_pair_c(out_, STR_LIT("capacity"), event->stream_on_send_emit.capacity); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_STOP: { // quicly:stream_on_send_stop json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-send-stop")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_send_stop.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_send_stop.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_send_stop.stream_id); json_write_pair_c(out_, STR_LIT("err"), event->stream_on_send_stop.err); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE: { // quicly:stream_on_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_receive.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_receive.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_receive.stream_id); json_write_pair_c(out_, STR_LIT("off"), event->stream_on_receive.off); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("src"), 
event->stream_on_receive.src, (event->stream_on_receive.src_len < STR_LEN ? event->stream_on_receive.src_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("src-len"), event->stream_on_receive.src_len); break; } case H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE_RESET: { // quicly:stream_on_receive_reset json_write_pair_n(out_, STR_LIT("type"), STR_LIT("stream-on-receive-reset")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->stream_on_receive_reset.master_id); json_write_pair_c(out_, STR_LIT("time"), event->stream_on_receive_reset.at); json_write_pair_c(out_, STR_LIT("stream-id"), event->stream_on_receive_reset.stream_id); json_write_pair_c(out_, STR_LIT("err"), event->stream_on_receive_reset.err); break; } case H2OLOG_EVENT_ID_QUICLY_CONN_STATS: { // quicly:conn_stats json_write_pair_n(out_, STR_LIT("type"), STR_LIT("conn-stats")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn"), event->conn_stats.master_id); json_write_pair_c(out_, STR_LIT("time"), event->conn_stats.at); json_write_pair_c(out_, STR_LIT("stats"), event->conn_stats.stats); json_write_pair_c(out_, STR_LIT("size"), event->conn_stats.size); break; } case H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST: { // h2o:receive_request json_write_pair_n(out_, STR_LIT("type"), STR_LIT("receive-request")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->receive_request.conn_id); json_write_pair_c(out_, STR_LIT("req-id"), event->receive_request.req_id); json_write_pair_c(out_, STR_LIT("http-version"), event->receive_request.http_version); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST_HEADER: { // h2o:receive_request_header json_write_pair_n(out_, STR_LIT("type"), STR_LIT("receive-request-header")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->receive_request_header.conn_id); json_write_pair_c(out_, STR_LIT("req-id"), event->receive_request_header.req_id); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("name"), event->receive_request_header.name, (event->receive_request_header.name_len < STR_LEN ? event->receive_request_header.name_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("name-len"), event->receive_request_header.name_len); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("value"), event->receive_request_header.value, (event->receive_request_header.value_len < STR_LEN ? 
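// Unlike the quicly probes, the h2o:* probes carry no "at" timestamp argument, so the
// tracer stamps these records with time_milliseconds(), i.e. the time at which the
// event was read in userspace.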
event->receive_request_header.value_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("value-len"), event->receive_request_header.value_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_SEND_RESPONSE: { // h2o:send_response json_write_pair_n(out_, STR_LIT("type"), STR_LIT("send-response")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->send_response.conn_id); json_write_pair_c(out_, STR_LIT("req-id"), event->send_response.req_id); json_write_pair_c(out_, STR_LIT("status"), event->send_response.status); json_write_pair_c(out_, STR_LIT("tunnel"), event->send_response.tunnel); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_SEND_RESPONSE_HEADER: { // h2o:send_response_header json_write_pair_n(out_, STR_LIT("type"), STR_LIT("send-response-header")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->send_response_header.conn_id); json_write_pair_c(out_, STR_LIT("req-id"), event->send_response_header.req_id); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("name"), event->send_response_header.name, (event->send_response_header.name_len < STR_LEN ? event->send_response_header.name_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("name-len"), event->send_response_header.name_len); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("value"), event->send_response_header.value, (event->send_response_header.value_len < STR_LEN ? event->send_response_header.value_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("value-len"), event->send_response_header.value_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H1_ACCEPT: { // h2o:h1_accept json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h1-accept")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h1_accept.conn_id); json_write_pair_c(out_, STR_LIT("sock"), event->h1_accept.sock); json_write_pair_c(out_, STR_LIT("conn"), event->h1_accept.conn); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H1_CLOSE: { // h2o:h1_close json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h1-close")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h1_close.conn_id); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H2_UNKNOWN_FRAME_TYPE: { // h2o:h2_unknown_frame_type json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h2-unknown-frame-type")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h2_unknown_frame_type.conn_id); json_write_pair_c(out_, STR_LIT("frame-type"), event->h2_unknown_frame_type.frame_type); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3S_ACCEPT: { // h2o:h3s_accept json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3s-accept")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h3s_accept.conn_id); json_write_pair_c(out_, STR_LIT("conn"), event->h3s_accept.conn); json_write_pair_c(out_, STR_LIT("conn"), event->h3s_accept.master_id); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3S_DESTROY: { // h2o:h3s_destroy json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3s-destroy")); 
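// HTTP/3 server events (h3s_*) are keyed by the h2o connection ID; h3s_accept also
// emits the underlying quicly master ID under "conn", which lets these records be
// correlated with the transport-level quicly events above.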
json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h3s_destroy.conn_id); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3S_STREAM_SET_STATE: { // h2o:h3s_stream_set_state json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3s-stream-set-state")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("conn-id"), event->h3s_stream_set_state.conn_id); json_write_pair_c(out_, STR_LIT("req-id"), event->h3s_stream_set_state.req_id); json_write_pair_c(out_, STR_LIT("state"), event->h3s_stream_set_state.state); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3_FRAME_RECEIVE: { // h2o:h3_frame_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3-frame-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("frame-type"), event->h3_frame_receive.frame_type); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("bytes"), event->h3_frame_receive.bytes, (event->h3_frame_receive.bytes_len < STR_LEN ? event->h3_frame_receive.bytes_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("bytes-len"), event->h3_frame_receive.bytes_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3_PACKET_RECEIVE: { // h2o:h3_packet_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3-packet-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("dest"), event->h3_packet_receive.dest); json_write_pair_c(out_, STR_LIT("src"), event->h3_packet_receive.src); json_write_pair_c(out_, STR_LIT("bytes"), event->h3_packet_receive.bytes, (event->h3_packet_receive.bytes_len < STR_LEN ? event->h3_packet_receive.bytes_len : STR_LEN)); json_write_pair_c(out_, STR_LIT("bytes-len"), event->h3_packet_receive.bytes_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3_PACKET_FORWARD: { // h2o:h3_packet_forward json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3-packet-forward")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("dest"), event->h3_packet_forward.dest); json_write_pair_c(out_, STR_LIT("src"), event->h3_packet_forward.src); json_write_pair_c(out_, STR_LIT("num-packets"), event->h3_packet_forward.num_packets); json_write_pair_c(out_, STR_LIT("bytes-len"), event->h3_packet_forward.num_bytes); json_write_pair_c(out_, STR_LIT("fd"), event->h3_packet_forward.fd); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3_FORWARDED_PACKET_RECEIVE: { // h2o:h3_forwarded_packet_receive json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3-forwarded-packet-receive")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("dest"), event->h3_forwarded_packet_receive.dest); json_write_pair_c(out_, STR_LIT("src"), event->h3_forwarded_packet_receive.src); json_write_pair_c(out_, STR_LIT("bytes-len"), event->h3_forwarded_packet_receive.num_bytes); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_H3C_TUNNEL_CREATE: { // h2o:h3c_tunnel_create json_write_pair_n(out_, STR_LIT("type"), STR_LIT("h3c-tunnel-create")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->h3c_tunnel_create.tunnel); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case 
H2OLOG_EVENT_ID_H2O_TUNNEL_ON_DESTROY: { // h2o:tunnel_on_destroy json_write_pair_n(out_, STR_LIT("type"), STR_LIT("tunnel-on-destroy")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->tunnel_on_destroy.tunnel); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_TUNNEL_ON_READ: { // h2o:tunnel_on_read json_write_pair_n(out_, STR_LIT("type"), STR_LIT("tunnel-on-read")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->tunnel_on_read.tunnel); json_write_pair_c(out_, STR_LIT("err"), event->tunnel_on_read.err, strlen(event->tunnel_on_read.err)); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("bytes"), event->tunnel_on_read.bytes, (event->tunnel_on_read.bytes_len < STR_LEN ? event->tunnel_on_read.bytes_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("bytes-len"), event->tunnel_on_read.bytes_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_TUNNEL_PROCEED_READ: { // h2o:tunnel_proceed_read json_write_pair_n(out_, STR_LIT("type"), STR_LIT("tunnel-proceed-read")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->tunnel_proceed_read.tunnel); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_TUNNEL_WRITE: { // h2o:tunnel_write json_write_pair_n(out_, STR_LIT("type"), STR_LIT("tunnel-write")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->tunnel_write.tunnel); if (include_appdata_) { json_write_pair_c(out_, STR_LIT("bytes"), event->tunnel_write.bytes, (event->tunnel_write.bytes_len < STR_LEN ? event->tunnel_write.bytes_len : STR_LEN)); } json_write_pair_c(out_, STR_LIT("bytes-len"), event->tunnel_write.bytes_len); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_TUNNEL_ON_WRITE_COMPLETE: { // h2o:tunnel_on_write_complete json_write_pair_n(out_, STR_LIT("type"), STR_LIT("tunnel-on-write-complete")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->tunnel_on_write_complete.tunnel); json_write_pair_c(out_, STR_LIT("err"), event->tunnel_on_write_complete.err, strlen(event->tunnel_on_write_complete.err)); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_CREATE: { // h2o:socket_tunnel_create json_write_pair_n(out_, STR_LIT("type"), STR_LIT("socket-tunnel-create")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->socket_tunnel_create.tunnel); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } case H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_START: { // h2o:socket_tunnel_start json_write_pair_n(out_, STR_LIT("type"), STR_LIT("socket-tunnel-start")); json_write_pair_c(out_, STR_LIT("seq"), seq_); json_write_pair_c(out_, STR_LIT("tunnel"), event->socket_tunnel_start.tunnel); json_write_pair_c(out_, STR_LIT("bytes-to-consume"), event->socket_tunnel_start.bytes_to_consume); json_write_pair_c(out_, STR_LIT("time"), time_milliseconds()); break; } default: std::abort(); } fprintf(out_, "}\n"); } std::string h2o_raw_tracer::bpf_text() { // language=c return gen_bpf_header() + R"( #include <linux/sched.h> #include <linux/limits.h> #define STR_LEN 64 typedef union quicly_address_t { uint8_t sa[sizeof_sockaddr]; uint8_t sin[sizeof_sockaddr_in]; uint8_t 
sin6[sizeof_sockaddr_in6]; } quicly_address_t; struct st_h2o_ebpf_map_key_t { uint8_t payload[sizeof_st_h2o_ebpf_map_key_t]; }; enum h2olog_event_id_t { H2OLOG_EVENT_ID_SCHED_SCHED_PROCESS_EXIT, H2OLOG_EVENT_ID_QUICLY_CONNECT, H2OLOG_EVENT_ID_QUICLY_ACCEPT, H2OLOG_EVENT_ID_QUICLY_FREE, H2OLOG_EVENT_ID_QUICLY_SEND, H2OLOG_EVENT_ID_QUICLY_RECEIVE, H2OLOG_EVENT_ID_QUICLY_VERSION_SWITCH, H2OLOG_EVENT_ID_QUICLY_IDLE_TIMEOUT, H2OLOG_EVENT_ID_QUICLY_STATELESS_RESET_RECEIVE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_HANDSHAKE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_UPDATE_SECRET, H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE_CONFIRMED, H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE, H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE_PREPARE, H2OLOG_EVENT_ID_QUICLY_PACKET_SENT, H2OLOG_EVENT_ID_QUICLY_PACKET_RECEIVED, H2OLOG_EVENT_ID_QUICLY_PACKET_PREPARE, H2OLOG_EVENT_ID_QUICLY_PACKET_ACKED, H2OLOG_EVENT_ID_QUICLY_PACKET_LOST, H2OLOG_EVENT_ID_QUICLY_PACKET_DECRYPTION_FAILED, H2OLOG_EVENT_ID_QUICLY_PTO, H2OLOG_EVENT_ID_QUICLY_CC_ACK_RECEIVED, H2OLOG_EVENT_ID_QUICLY_CC_CONGESTION, H2OLOG_EVENT_ID_QUICLY_ACK_BLOCK_RECEIVED, H2OLOG_EVENT_ID_QUICLY_ACK_DELAY_RECEIVED, H2OLOG_EVENT_ID_QUICLY_ACK_SEND, H2OLOG_EVENT_ID_QUICLY_PING_SEND, H2OLOG_EVENT_ID_QUICLY_PING_RECEIVE, H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_SEND, H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_SEND, H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_SEND, H2OLOG_EVENT_ID_QUICLY_STREAM_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_ACKED, H2OLOG_EVENT_ID_QUICLY_STREAM_LOST, H2OLOG_EVENT_ID_QUICLY_MAX_DATA_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_DATA_RECEIVE, H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_RECEIVE, H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_SEND, H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_RECEIVE, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_SEND, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_ACKED, H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_RECEIVE, H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_SEND, H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_SEND, H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_RECEIVE, H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_SEND, H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_RECEIVE, H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_SEND, H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_RECEIVE, H2OLOG_EVENT_ID_QUICLY_DATAGRAM_SEND, H2OLOG_EVENT_ID_QUICLY_DATAGRAM_RECEIVE, H2OLOG_EVENT_ID_QUICLY_ACK_FREQUENCY_RECEIVE, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_SEND_STREAM, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_RECV_STREAM, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_ACK, H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_LOST, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_OPEN, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_DESTROY, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_SHIFT, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_EMIT, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_STOP, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE, H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE_RESET, H2OLOG_EVENT_ID_QUICLY_CONN_STATS, H2OLOG_EVENT_ID_H2O__PRIVATE_SOCKET_LOOKUP_FLAGS, H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST, H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST_HEADER, H2OLOG_EVENT_ID_H2O_SEND_RESPONSE, H2OLOG_EVENT_ID_H2O_SEND_RESPONSE_HEADER, H2OLOG_EVENT_ID_H2O_H1_ACCEPT, H2OLOG_EVENT_ID_H2O_H1_CLOSE, H2OLOG_EVENT_ID_H2O_H2_UNKNOWN_FRAME_TYPE, 
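  // The event IDs in this enum must stay in sync with the userspace h2olog_event_id_t
  // consumed by do_handle_event(): the raw id value is passed to userspace and
  // switched on verbatim.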
H2OLOG_EVENT_ID_H2O_H3S_ACCEPT, H2OLOG_EVENT_ID_H2O_H3S_DESTROY, H2OLOG_EVENT_ID_H2O_H3S_STREAM_SET_STATE, H2OLOG_EVENT_ID_H2O_H3_FRAME_RECEIVE, H2OLOG_EVENT_ID_H2O_H3_PACKET_RECEIVE, H2OLOG_EVENT_ID_H2O_H3_PACKET_FORWARD, H2OLOG_EVENT_ID_H2O_H3_FORWARDED_PACKET_RECEIVE, H2OLOG_EVENT_ID_H2O_H3C_TUNNEL_CREATE, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_DESTROY, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_READ, H2OLOG_EVENT_ID_H2O_TUNNEL_PROCEED_READ, H2OLOG_EVENT_ID_H2O_TUNNEL_WRITE, H2OLOG_EVENT_ID_H2O_TUNNEL_ON_WRITE_COMPLETE, H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_CREATE, H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_START, }; struct h2olog_event_t { enum h2olog_event_id_t id; union { struct { // quicly:connect typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint32_t version; } connect; struct { // quicly:accept typeof_st_quicly_conn_t__master_id master_id; int64_t at; char dcid[STR_LEN]; struct st_quicly_address_token_plaintext_t * address_token; } accept; struct { // quicly:free typeof_st_quicly_conn_t__master_id master_id; int64_t at; } free; struct { // quicly:send typeof_st_quicly_conn_t__master_id master_id; int64_t at; int state; char dcid[STR_LEN]; } send; struct { // quicly:receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; char dcid[STR_LEN]; uint8_t bytes[STR_LEN]; size_t bytes_len; } receive; struct { // quicly:version_switch typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint32_t new_version; } version_switch; struct { // quicly:idle_timeout typeof_st_quicly_conn_t__master_id master_id; int64_t at; } idle_timeout; struct { // quicly:stateless_reset_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } stateless_reset_receive; struct { // quicly:crypto_handshake typeof_st_quicly_conn_t__master_id master_id; int64_t at; int ret; } crypto_handshake; struct { // quicly:crypto_update_secret typeof_st_quicly_conn_t__master_id master_id; int64_t at; int is_enc; uint8_t epoch; char label[STR_LEN]; char secret[STR_LEN]; // appdata } crypto_update_secret; struct { // quicly:crypto_send_key_update typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_send_key_update; struct { // quicly:crypto_send_key_update_confirmed typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t next_pn; } crypto_send_key_update_confirmed; struct { // quicly:crypto_receive_key_update typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_receive_key_update; struct { // quicly:crypto_receive_key_update_prepare typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t phase; char secret[STR_LEN]; // appdata } crypto_receive_key_update_prepare; struct { // quicly:packet_sent typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; size_t len; uint8_t packet_type; int ack_only; } packet_sent; struct { // quicly:packet_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; uint8_t decrypted[STR_LEN]; // appdata size_t decrypted_len; uint8_t packet_type; } packet_received; struct { // quicly:packet_prepare typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t first_octet; char dcid[STR_LEN]; } packet_prepare; struct { // quicly:packet_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; int is_late_ack; } packet_acked; struct { // quicly:packet_lost typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; uint8_t packet_type; } packet_lost; struct { // 
quicly:packet_decryption_failed typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t pn; } packet_decryption_failed; struct { // quicly:pto typeof_st_quicly_conn_t__master_id master_id; int64_t at; size_t inflight; uint32_t cwnd; int8_t pto_count; } pto; struct { // quicly:cc_ack_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t largest_acked; size_t bytes_acked; uint32_t cwnd; size_t inflight; } cc_ack_received; struct { // quicly:cc_congestion typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t max_lost_pn; size_t inflight; uint32_t cwnd; } cc_congestion; struct { // quicly:ack_block_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t ack_block_begin; uint64_t ack_block_end; } ack_block_received; struct { // quicly:ack_delay_received typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t ack_delay; } ack_delay_received; struct { // quicly:ack_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t largest_acked; uint64_t ack_delay; } ack_send; struct { // quicly:ping_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; } ping_send; struct { // quicly:ping_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } ping_receive; struct { // quicly:transport_close_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; uint64_t frame_type; char reason_phrase[STR_LEN]; } transport_close_send; struct { // quicly:transport_close_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; uint64_t frame_type; char reason_phrase[STR_LEN]; } transport_close_receive; struct { // quicly:application_close_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; char reason_phrase[STR_LEN]; } application_close_send; struct { // quicly:application_close_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t error_code; char reason_phrase[STR_LEN]; } application_close_receive; struct { // quicly:stream_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; int is_fin; } stream_send; struct { // quicly:stream_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; } stream_receive; struct { // quicly:stream_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; } stream_acked; struct { // quicly:stream_lost typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; } stream_lost; struct { // quicly:max_data_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; } max_data_send; struct { // quicly:max_data_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; } max_data_receive; struct { // quicly:max_streams_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } max_streams_send; struct { // quicly:max_streams_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } max_streams_receive; struct { // quicly:max_stream_data_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t maximum; } max_stream_data_send; struct { // quicly:max_stream_data_receive typeof_st_quicly_conn_t__master_id master_id; int64_t 
at; int64_t stream_id; uint64_t maximum; } max_stream_data_receive; struct { // quicly:new_token_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t token[STR_LEN]; size_t token_len; uint64_t generation; } new_token_send; struct { // quicly:new_token_acked typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t generation; } new_token_acked; struct { // quicly:new_token_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t token[STR_LEN]; size_t token_len; } new_token_receive; struct { // quicly:handshake_done_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; } handshake_done_send; struct { // quicly:handshake_done_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; } handshake_done_receive; struct { // quicly:streams_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } streams_blocked_send; struct { // quicly:streams_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t maximum; int is_unidirectional; } streams_blocked_receive; struct { // quicly:new_connection_id_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t retire_prior_to; char cid[STR_LEN]; char stateless_reset_token[STR_LEN]; } new_connection_id_send; struct { // quicly:new_connection_id_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t retire_prior_to; char cid[STR_LEN]; char stateless_reset_token[STR_LEN]; } new_connection_id_receive; struct { // quicly:retire_connection_id_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; } retire_connection_id_send; struct { // quicly:retire_connection_id_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; } retire_connection_id_receive; struct { // quicly:data_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t off; } data_blocked_send; struct { // quicly:data_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t off; } data_blocked_receive; struct { // quicly:stream_data_blocked_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t maximum; } stream_data_blocked_send; struct { // quicly:stream_data_blocked_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t maximum; } stream_data_blocked_receive; struct { // quicly:datagram_send typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t payload[STR_LEN]; // appdata size_t payload_len; } datagram_send; struct { // quicly:datagram_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint8_t payload[STR_LEN]; // appdata size_t payload_len; } datagram_receive; struct { // quicly:ack_frequency_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; uint64_t sequence; uint64_t packet_tolerance; uint64_t max_ack_delay; int ignore_order; } ack_frequency_receive; struct { // quicly:quictrace_send_stream typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; uint64_t off; size_t len; int fin; } quictrace_send_stream; struct { // quicly:quictrace_recv_stream typeof_st_quicly_conn_t__master_id master_id; int64_t at; int64_t stream_id; uint64_t off; size_t len; int fin; } quictrace_recv_stream; struct { // quicly:quictrace_cc_ack typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_quicly_rtt_t__minimum minimum; 
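/* Descriptive note: these RTT fields (minimum, smoothed, variance, latest) mirror
 * struct quicly_rtt_t; the probe handler copies the rtt argument and unpacks it
 * with the get_quicly_rtt_t__* accessors, as seen in trace_quicly__quictrace_cc_ack. */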
typeof_quicly_rtt_t__smoothed smoothed; typeof_quicly_rtt_t__variance variance; typeof_quicly_rtt_t__latest latest; uint32_t cwnd; size_t inflight; } quictrace_cc_ack; struct { // quicly:quictrace_cc_lost typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_quicly_rtt_t__minimum minimum; typeof_quicly_rtt_t__smoothed smoothed; typeof_quicly_rtt_t__variance variance; typeof_quicly_rtt_t__latest latest; uint32_t cwnd; size_t inflight; } quictrace_cc_lost; struct { // quicly:stream_on_open typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; } stream_on_open; struct { // quicly:stream_on_destroy typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_destroy; struct { // quicly:stream_on_send_shift typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t delta; } stream_on_send_shift; struct { // quicly:stream_on_send_emit typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t off; size_t capacity; } stream_on_send_emit; struct { // quicly:stream_on_send_stop typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_send_stop; struct { // quicly:stream_on_receive typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; size_t off; uint8_t src[STR_LEN]; // appdata size_t src_len; } stream_on_receive; struct { // quicly:stream_on_receive_reset typeof_st_quicly_conn_t__master_id master_id; int64_t at; typeof_st_quicly_stream_t__stream_id stream_id; int err; } stream_on_receive_reset; struct { // quicly:conn_stats typeof_st_quicly_conn_t__master_id master_id; int64_t at; struct st_quicly_stats_t * stats; size_t size; } conn_stats; struct { // h2o:_private_socket_lookup_flags pid_t tid; uint64_t original_flags; struct st_h2o_ebpf_map_key_t info; } _private_socket_lookup_flags; struct { // h2o:receive_request uint64_t conn_id; uint64_t req_id; int http_version; } receive_request; struct { // h2o:receive_request_header uint64_t conn_id; uint64_t req_id; char name[STR_LEN]; // appdata size_t name_len; char value[STR_LEN]; // appdata size_t value_len; } receive_request_header; struct { // h2o:send_response uint64_t conn_id; uint64_t req_id; int status; struct st_h2o_tunnel_t * tunnel; } send_response; struct { // h2o:send_response_header uint64_t conn_id; uint64_t req_id; char name[STR_LEN]; // appdata size_t name_len; char value[STR_LEN]; // appdata size_t value_len; } send_response_header; struct { // h2o:h1_accept uint64_t conn_id; struct st_h2o_socket_t * sock; struct st_h2o_conn_t * conn; } h1_accept; struct { // h2o:h1_close uint64_t conn_id; } h1_close; struct { // h2o:h2_unknown_frame_type uint64_t conn_id; uint8_t frame_type; } h2_unknown_frame_type; struct { // h2o:h3s_accept uint64_t conn_id; struct st_h2o_conn_t * conn; typeof_st_quicly_conn_t__master_id master_id; } h3s_accept; struct { // h2o:h3s_destroy uint64_t conn_id; } h3s_destroy; struct { // h2o:h3s_stream_set_state uint64_t conn_id; uint64_t req_id; unsigned state; } h3s_stream_set_state; struct { // h2o:h3_frame_receive uint64_t frame_type; uint8_t bytes[STR_LEN]; // appdata size_t bytes_len; } h3_frame_receive; struct { // h2o:h3_packet_receive quicly_address_t dest; quicly_address_t src; uint8_t bytes[STR_LEN]; size_t bytes_len; } h3_packet_receive; struct { // 
h2o:h3_packet_forward
      quicly_address_t dest;
      quicly_address_t src;
      size_t num_packets;
      size_t num_bytes;
      int fd;
    } h3_packet_forward;
    struct { // h2o:h3_forwarded_packet_receive
      quicly_address_t dest;
      quicly_address_t src;
      size_t num_bytes;
    } h3_forwarded_packet_receive;
    struct { // h2o:h3c_tunnel_create
      struct st_h2o_tunnel_t * tunnel;
    } h3c_tunnel_create;
    struct { // h2o:tunnel_on_destroy
      struct st_h2o_tunnel_t * tunnel;
    } tunnel_on_destroy;
    struct { // h2o:tunnel_on_read
      struct st_h2o_tunnel_t * tunnel;
      char err[STR_LEN];
      uint8_t bytes[STR_LEN]; // appdata
      size_t bytes_len;
    } tunnel_on_read;
    struct { // h2o:tunnel_proceed_read
      struct st_h2o_tunnel_t * tunnel;
    } tunnel_proceed_read;
    struct { // h2o:tunnel_write
      struct st_h2o_tunnel_t * tunnel;
      uint8_t bytes[STR_LEN]; // appdata
      size_t bytes_len;
    } tunnel_write;
    struct { // h2o:tunnel_on_write_complete
      struct st_h2o_tunnel_t * tunnel;
      char err[STR_LEN];
    } tunnel_on_write_complete;
    struct { // h2o:socket_tunnel_create
      struct st_h2o_tunnel_t * tunnel;
    } socket_tunnel_create;
    struct { // h2o:socket_tunnel_start
      struct st_h2o_tunnel_t * tunnel;
      size_t bytes_to_consume;
    } socket_tunnel_start;
  };
};

BPF_PERF_OUTPUT(events);

// A pinned BPF object to return a value to h2o.
// The table size must be larger than the number of threads in h2o.
BPF_TABLE_PINNED("lru_hash", pid_t, uint64_t, h2o_return, H2O_EBPF_RETURN_MAP_SIZE, H2O_EBPF_RETURN_MAP_PATH);

// HTTP/3 tracing
BPF_HASH(h2o_to_quicly_conn, u64, u32);

// tracepoint sched:sched_process_exit
int trace_sched_process_exit(struct tracepoint__sched__sched_process_exit *ctx) {
  const struct task_struct *task = (const struct task_struct*)bpf_get_current_task();
  pid_t h2o_pid = task->tgid;
  pid_t h2o_tid = task->pid;
  if (!(h2o_pid == H2OLOG_H2O_PID && h2o_tid == H2OLOG_H2O_PID)) {
    return 0;
  }
  struct h2olog_event_t ev = { .id = H2OLOG_EVENT_ID_SCHED_SCHED_PROCESS_EXIT };
  events.perf_submit(ctx, &ev, sizeof(ev));
  return 0;
}

// quicly:connect
int trace_quicly__connect(struct pt_regs *ctx) {
  const void *buf = NULL;
  struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CONNECT };
  // struct st_quicly_conn_t * conn
  uint8_t conn[sizeof_st_quicly_conn_t] = {};
  bpf_usdt_readarg(1, ctx, &buf);
  bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf);
  event.connect.master_id = get_st_quicly_conn_t__master_id(conn);
  // int64_t at
  bpf_usdt_readarg(2, ctx, &event.connect.at);
  // uint32_t version
  bpf_usdt_readarg(3, ctx, &event.connect.version);
  if (events.perf_submit(ctx, &event, sizeof(event)) != 0)
    bpf_trace_printk("failed to perf_submit in trace_quicly__connect\n");
  return 0;
}

// quicly:accept
int trace_quicly__accept(struct pt_regs *ctx) {
  const void *buf = NULL;
  struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_ACCEPT };
  // struct st_quicly_conn_t * conn
  uint8_t conn[sizeof_st_quicly_conn_t] = {};
  bpf_usdt_readarg(1, ctx, &buf);
  bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf);
  event.accept.master_id = get_st_quicly_conn_t__master_id(conn);
  // int64_t at
  bpf_usdt_readarg(2, ctx, &event.accept.at);
  // const char * dcid
  bpf_usdt_readarg(3, ctx, &buf);
  bpf_probe_read(&event.accept.dcid, sizeof(event.accept.dcid), buf);
  // struct st_quicly_address_token_plaintext_t * address_token
  bpf_usdt_readarg(4, ctx, &event.accept.address_token);
  if (events.perf_submit(ctx, &event, sizeof(event)) != 0)
    bpf_trace_printk("failed to perf_submit in trace_quicly__accept\n");
  return 0;
}

// quicly:free
int trace_quicly__free(struct pt_regs *ctx) {
  const void *buf = NULL;
  struct h2olog_event_t event = { .id =
H2OLOG_EVENT_ID_QUICLY_FREE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.free.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.free.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__free\n"); return 0; } // quicly:send int trace_quicly__send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.send.at); // int state bpf_usdt_readarg(3, ctx, &event.send.state); // const char * dcid bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.send.dcid, sizeof(event.send.dcid), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__send\n"); return 0; } // quicly:receive int trace_quicly__receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.receive.at); // const char * dcid bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.receive.dcid, sizeof(event.receive.dcid), buf); // const void * bytes bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.receive.bytes, sizeof(event.receive.bytes), buf); // size_t bytes_len bpf_usdt_readarg(5, ctx, &event.receive.bytes_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__receive\n"); return 0; } // quicly:version_switch int trace_quicly__version_switch(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_VERSION_SWITCH }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.version_switch.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.version_switch.at); // uint32_t new_version bpf_usdt_readarg(3, ctx, &event.version_switch.new_version); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__version_switch\n"); return 0; } // quicly:idle_timeout int trace_quicly__idle_timeout(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_IDLE_TIMEOUT }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.idle_timeout.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.idle_timeout.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__idle_timeout\n"); return 0; } // quicly:stateless_reset_receive int trace_quicly__stateless_reset_receive(struct pt_regs *ctx) { const void *buf 
= NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STATELESS_RESET_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stateless_reset_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stateless_reset_receive.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stateless_reset_receive\n"); return 0; } // quicly:crypto_handshake int trace_quicly__crypto_handshake(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CRYPTO_HANDSHAKE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_handshake.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_handshake.at); // int ret bpf_usdt_readarg(3, ctx, &event.crypto_handshake.ret); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_handshake\n"); return 0; } // quicly:crypto_update_secret int trace_quicly__crypto_update_secret(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CRYPTO_UPDATE_SECRET }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_update_secret.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_update_secret.at); // int is_enc bpf_usdt_readarg(3, ctx, &event.crypto_update_secret.is_enc); // uint8_t epoch bpf_usdt_readarg(4, ctx, &event.crypto_update_secret.epoch); // const char * label bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.crypto_update_secret.label, sizeof(event.crypto_update_secret.label), buf); // const char * secret (appdata) bpf_usdt_readarg(6, ctx, &buf); bpf_probe_read(&event.crypto_update_secret.secret, sizeof(event.crypto_update_secret.secret), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_update_secret\n"); return 0; } // quicly:crypto_send_key_update int trace_quicly__crypto_send_key_update(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_send_key_update.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_send_key_update.at); // uint64_t phase bpf_usdt_readarg(3, ctx, &event.crypto_send_key_update.phase); // const char * secret (appdata) bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.crypto_send_key_update.secret, sizeof(event.crypto_send_key_update.secret), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_send_key_update\n"); return 0; } // quicly:crypto_send_key_update_confirmed int trace_quicly__crypto_send_key_update_confirmed(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = 
H2OLOG_EVENT_ID_QUICLY_CRYPTO_SEND_KEY_UPDATE_CONFIRMED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_send_key_update_confirmed.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_send_key_update_confirmed.at); // uint64_t next_pn bpf_usdt_readarg(3, ctx, &event.crypto_send_key_update_confirmed.next_pn); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_send_key_update_confirmed\n"); return 0; } // quicly:crypto_receive_key_update int trace_quicly__crypto_receive_key_update(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_receive_key_update.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_receive_key_update.at); // uint64_t phase bpf_usdt_readarg(3, ctx, &event.crypto_receive_key_update.phase); // const char * secret (appdata) bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.crypto_receive_key_update.secret, sizeof(event.crypto_receive_key_update.secret), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_receive_key_update\n"); return 0; } // quicly:crypto_receive_key_update_prepare int trace_quicly__crypto_receive_key_update_prepare(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CRYPTO_RECEIVE_KEY_UPDATE_PREPARE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.crypto_receive_key_update_prepare.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.crypto_receive_key_update_prepare.at); // uint64_t phase bpf_usdt_readarg(3, ctx, &event.crypto_receive_key_update_prepare.phase); // const char * secret (appdata) bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.crypto_receive_key_update_prepare.secret, sizeof(event.crypto_receive_key_update_prepare.secret), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__crypto_receive_key_update_prepare\n"); return 0; } // quicly:packet_sent int trace_quicly__packet_sent(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_SENT }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_sent.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_sent.at); // uint64_t pn bpf_usdt_readarg(3, ctx, &event.packet_sent.pn); // size_t len bpf_usdt_readarg(4, ctx, &event.packet_sent.len); // uint8_t packet_type bpf_usdt_readarg(5, ctx, &event.packet_sent.packet_type); // int ack_only bpf_usdt_readarg(6, ctx, &event.packet_sent.ack_only); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_sent\n"); return 0; } // 
quicly:packet_received int trace_quicly__packet_received(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_RECEIVED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_received.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_received.at); // uint64_t pn bpf_usdt_readarg(3, ctx, &event.packet_received.pn); // const void * decrypted (appdata) bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.packet_received.decrypted, sizeof(event.packet_received.decrypted), buf); // size_t decrypted_len bpf_usdt_readarg(5, ctx, &event.packet_received.decrypted_len); // uint8_t packet_type bpf_usdt_readarg(6, ctx, &event.packet_received.packet_type); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_received\n"); return 0; } // quicly:packet_prepare int trace_quicly__packet_prepare(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_PREPARE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_prepare.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_prepare.at); // uint8_t first_octet bpf_usdt_readarg(3, ctx, &event.packet_prepare.first_octet); // const char * dcid bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.packet_prepare.dcid, sizeof(event.packet_prepare.dcid), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_prepare\n"); return 0; } // quicly:packet_acked int trace_quicly__packet_acked(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_ACKED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_acked.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_acked.at); // uint64_t pn bpf_usdt_readarg(3, ctx, &event.packet_acked.pn); // int is_late_ack bpf_usdt_readarg(4, ctx, &event.packet_acked.is_late_ack); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_acked\n"); return 0; } // quicly:packet_lost int trace_quicly__packet_lost(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_LOST }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_lost.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_lost.at); // uint64_t pn bpf_usdt_readarg(3, ctx, &event.packet_lost.pn); // uint8_t packet_type bpf_usdt_readarg(4, ctx, &event.packet_lost.packet_type); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_lost\n"); return 0; } // quicly:packet_decryption_failed int trace_quicly__packet_decryption_failed(struct pt_regs *ctx) { const void *buf = NULL; struct 
h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PACKET_DECRYPTION_FAILED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.packet_decryption_failed.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.packet_decryption_failed.at); // uint64_t pn bpf_usdt_readarg(3, ctx, &event.packet_decryption_failed.pn); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__packet_decryption_failed\n"); return 0; } // quicly:pto int trace_quicly__pto(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PTO }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.pto.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.pto.at); // size_t inflight bpf_usdt_readarg(3, ctx, &event.pto.inflight); // uint32_t cwnd bpf_usdt_readarg(4, ctx, &event.pto.cwnd); // int8_t pto_count bpf_usdt_readarg(5, ctx, &event.pto.pto_count); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__pto\n"); return 0; } // quicly:cc_ack_received int trace_quicly__cc_ack_received(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CC_ACK_RECEIVED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.cc_ack_received.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.cc_ack_received.at); // uint64_t largest_acked bpf_usdt_readarg(3, ctx, &event.cc_ack_received.largest_acked); // size_t bytes_acked bpf_usdt_readarg(4, ctx, &event.cc_ack_received.bytes_acked); // uint32_t cwnd bpf_usdt_readarg(5, ctx, &event.cc_ack_received.cwnd); // size_t inflight bpf_usdt_readarg(6, ctx, &event.cc_ack_received.inflight); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__cc_ack_received\n"); return 0; } // quicly:cc_congestion int trace_quicly__cc_congestion(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CC_CONGESTION }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.cc_congestion.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.cc_congestion.at); // uint64_t max_lost_pn bpf_usdt_readarg(3, ctx, &event.cc_congestion.max_lost_pn); // size_t inflight bpf_usdt_readarg(4, ctx, &event.cc_congestion.inflight); // uint32_t cwnd bpf_usdt_readarg(5, ctx, &event.cc_congestion.cwnd); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__cc_congestion\n"); return 0; } // quicly:ack_block_received int trace_quicly__ack_block_received(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_ACK_BLOCK_RECEIVED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, 
sizeof_st_quicly_conn_t, buf); event.ack_block_received.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ack_block_received.at); // uint64_t ack_block_begin bpf_usdt_readarg(3, ctx, &event.ack_block_received.ack_block_begin); // uint64_t ack_block_end bpf_usdt_readarg(4, ctx, &event.ack_block_received.ack_block_end); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ack_block_received\n"); return 0; } // quicly:ack_delay_received int trace_quicly__ack_delay_received(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_ACK_DELAY_RECEIVED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.ack_delay_received.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ack_delay_received.at); // int64_t ack_delay bpf_usdt_readarg(3, ctx, &event.ack_delay_received.ack_delay); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ack_delay_received\n"); return 0; } // quicly:ack_send int trace_quicly__ack_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_ACK_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.ack_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ack_send.at); // uint64_t largest_acked bpf_usdt_readarg(3, ctx, &event.ack_send.largest_acked); // uint64_t ack_delay bpf_usdt_readarg(4, ctx, &event.ack_send.ack_delay); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ack_send\n"); return 0; } // quicly:ping_send int trace_quicly__ping_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PING_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.ping_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ping_send.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ping_send\n"); return 0; } // quicly:ping_receive int trace_quicly__ping_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_PING_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.ping_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ping_receive.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ping_receive\n"); return 0; } // quicly:transport_close_send int trace_quicly__transport_close_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); 
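/* Descriptive note: USDT arg 1 is the `struct st_quicly_conn_t *conn` pointer; the object
 * is copied into the local `conn` buffer below so that get_st_quicly_conn_t__master_id()
 * can extract the connection's master_id without another dereference of user memory. */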
bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.transport_close_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.transport_close_send.at); // uint64_t error_code bpf_usdt_readarg(3, ctx, &event.transport_close_send.error_code); // uint64_t frame_type bpf_usdt_readarg(4, ctx, &event.transport_close_send.frame_type); // const char * reason_phrase bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.transport_close_send.reason_phrase, sizeof(event.transport_close_send.reason_phrase), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__transport_close_send\n"); return 0; } // quicly:transport_close_receive int trace_quicly__transport_close_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_TRANSPORT_CLOSE_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.transport_close_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.transport_close_receive.at); // uint64_t error_code bpf_usdt_readarg(3, ctx, &event.transport_close_receive.error_code); // uint64_t frame_type bpf_usdt_readarg(4, ctx, &event.transport_close_receive.frame_type); // const char * reason_phrase bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.transport_close_receive.reason_phrase, sizeof(event.transport_close_receive.reason_phrase), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__transport_close_receive\n"); return 0; } // quicly:application_close_send int trace_quicly__application_close_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.application_close_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.application_close_send.at); // uint64_t error_code bpf_usdt_readarg(3, ctx, &event.application_close_send.error_code); // const char * reason_phrase bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.application_close_send.reason_phrase, sizeof(event.application_close_send.reason_phrase), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__application_close_send\n"); return 0; } // quicly:application_close_receive int trace_quicly__application_close_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_APPLICATION_CLOSE_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.application_close_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.application_close_receive.at); // uint64_t error_code bpf_usdt_readarg(3, ctx, &event.application_close_receive.error_code); // const char * reason_phrase bpf_usdt_readarg(4, ctx, &buf); bpf_probe_read(&event.application_close_receive.reason_phrase, sizeof(event.application_close_receive.reason_phrase), buf); if 
(events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__application_close_receive\n"); return 0; } // quicly:stream_send int trace_quicly__stream_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_send.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_send.stream_id = get_st_quicly_stream_t__stream_id(stream); // uint64_t off bpf_usdt_readarg(4, ctx, &event.stream_send.off); // size_t len bpf_usdt_readarg(5, ctx, &event.stream_send.len); // int is_fin bpf_usdt_readarg(6, ctx, &event.stream_send.is_fin); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_send\n"); return 0; } // quicly:stream_receive int trace_quicly__stream_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_receive.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_receive.stream_id = get_st_quicly_stream_t__stream_id(stream); // uint64_t off bpf_usdt_readarg(4, ctx, &event.stream_receive.off); // size_t len bpf_usdt_readarg(5, ctx, &event.stream_receive.len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_receive\n"); return 0; } // quicly:stream_acked int trace_quicly__stream_acked(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ACKED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_acked.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_acked.at); // int64_t stream_id bpf_usdt_readarg(3, ctx, &event.stream_acked.stream_id); // uint64_t off bpf_usdt_readarg(4, ctx, &event.stream_acked.off); // size_t len bpf_usdt_readarg(5, ctx, &event.stream_acked.len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_acked\n"); return 0; } // quicly:stream_lost int trace_quicly__stream_lost(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_LOST }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_lost.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_lost.at); // int64_t stream_id 
bpf_usdt_readarg(3, ctx, &event.stream_lost.stream_id); // uint64_t off bpf_usdt_readarg(4, ctx, &event.stream_lost.off); // size_t len bpf_usdt_readarg(5, ctx, &event.stream_lost.len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_lost\n"); return 0; } // quicly:max_data_send int trace_quicly__max_data_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_MAX_DATA_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_data_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_data_send.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.max_data_send.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_data_send\n"); return 0; } // quicly:max_data_receive int trace_quicly__max_data_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_MAX_DATA_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_data_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_data_receive.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.max_data_receive.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_data_receive\n"); return 0; } // quicly:max_streams_send int trace_quicly__max_streams_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_streams_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_streams_send.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.max_streams_send.maximum); // int is_unidirectional bpf_usdt_readarg(4, ctx, &event.max_streams_send.is_unidirectional); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_streams_send\n"); return 0; } // quicly:max_streams_receive int trace_quicly__max_streams_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_MAX_STREAMS_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_streams_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_streams_receive.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.max_streams_receive.maximum); // int is_unidirectional bpf_usdt_readarg(4, ctx, &event.max_streams_receive.is_unidirectional); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_streams_receive\n"); return 0; } // quicly:max_stream_data_send int trace_quicly__max_stream_data_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = 
{ .id = H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_stream_data_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_stream_data_send.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.max_stream_data_send.stream_id = get_st_quicly_stream_t__stream_id(stream); // uint64_t maximum bpf_usdt_readarg(4, ctx, &event.max_stream_data_send.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_stream_data_send\n"); return 0; } // quicly:max_stream_data_receive int trace_quicly__max_stream_data_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_MAX_STREAM_DATA_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.max_stream_data_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.max_stream_data_receive.at); // int64_t stream_id bpf_usdt_readarg(3, ctx, &event.max_stream_data_receive.stream_id); // uint64_t maximum bpf_usdt_readarg(4, ctx, &event.max_stream_data_receive.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__max_stream_data_receive\n"); return 0; } // quicly:new_token_send int trace_quicly__new_token_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.new_token_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.new_token_send.at); // uint8_t * token bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.new_token_send.token, sizeof(event.new_token_send.token), buf); // size_t token_len bpf_usdt_readarg(4, ctx, &event.new_token_send.token_len); // uint64_t generation bpf_usdt_readarg(5, ctx, &event.new_token_send.generation); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__new_token_send\n"); return 0; } // quicly:new_token_acked int trace_quicly__new_token_acked(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_ACKED }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.new_token_acked.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.new_token_acked.at); // uint64_t generation bpf_usdt_readarg(3, ctx, &event.new_token_acked.generation); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__new_token_acked\n"); return 0; } // quicly:new_token_receive int trace_quicly__new_token_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = 
H2OLOG_EVENT_ID_QUICLY_NEW_TOKEN_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.new_token_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.new_token_receive.at); // uint8_t * token bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.new_token_receive.token, sizeof(event.new_token_receive.token), buf); // size_t token_len bpf_usdt_readarg(4, ctx, &event.new_token_receive.token_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__new_token_receive\n"); return 0; } // quicly:handshake_done_send int trace_quicly__handshake_done_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.handshake_done_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.handshake_done_send.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__handshake_done_send\n"); return 0; } // quicly:handshake_done_receive int trace_quicly__handshake_done_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_HANDSHAKE_DONE_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.handshake_done_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.handshake_done_receive.at); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__handshake_done_receive\n"); return 0; } // quicly:streams_blocked_send int trace_quicly__streams_blocked_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.streams_blocked_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.streams_blocked_send.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.streams_blocked_send.maximum); // int is_unidirectional bpf_usdt_readarg(4, ctx, &event.streams_blocked_send.is_unidirectional); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__streams_blocked_send\n"); return 0; } // quicly:streams_blocked_receive int trace_quicly__streams_blocked_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAMS_BLOCKED_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.streams_blocked_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.streams_blocked_receive.at); // uint64_t maximum bpf_usdt_readarg(3, ctx, &event.streams_blocked_receive.maximum); // int is_unidirectional 
bpf_usdt_readarg(4, ctx, &event.streams_blocked_receive.is_unidirectional); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__streams_blocked_receive\n"); return 0; } // quicly:new_connection_id_send int trace_quicly__new_connection_id_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.new_connection_id_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.new_connection_id_send.at); // uint64_t sequence bpf_usdt_readarg(3, ctx, &event.new_connection_id_send.sequence); // uint64_t retire_prior_to bpf_usdt_readarg(4, ctx, &event.new_connection_id_send.retire_prior_to); // const char * cid bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.new_connection_id_send.cid, sizeof(event.new_connection_id_send.cid), buf); // const char * stateless_reset_token bpf_usdt_readarg(6, ctx, &buf); bpf_probe_read(&event.new_connection_id_send.stateless_reset_token, sizeof(event.new_connection_id_send.stateless_reset_token), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__new_connection_id_send\n"); return 0; } // quicly:new_connection_id_receive int trace_quicly__new_connection_id_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_NEW_CONNECTION_ID_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.new_connection_id_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.new_connection_id_receive.at); // uint64_t sequence bpf_usdt_readarg(3, ctx, &event.new_connection_id_receive.sequence); // uint64_t retire_prior_to bpf_usdt_readarg(4, ctx, &event.new_connection_id_receive.retire_prior_to); // const char * cid bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.new_connection_id_receive.cid, sizeof(event.new_connection_id_receive.cid), buf); // const char * stateless_reset_token bpf_usdt_readarg(6, ctx, &buf); bpf_probe_read(&event.new_connection_id_receive.stateless_reset_token, sizeof(event.new_connection_id_receive.stateless_reset_token), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__new_connection_id_receive\n"); return 0; } // quicly:retire_connection_id_send int trace_quicly__retire_connection_id_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.retire_connection_id_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.retire_connection_id_send.at); // uint64_t sequence bpf_usdt_readarg(3, ctx, &event.retire_connection_id_send.sequence); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__retire_connection_id_send\n"); return 0; } // quicly:retire_connection_id_receive int 
trace_quicly__retire_connection_id_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_RETIRE_CONNECTION_ID_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.retire_connection_id_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.retire_connection_id_receive.at); // uint64_t sequence bpf_usdt_readarg(3, ctx, &event.retire_connection_id_receive.sequence); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__retire_connection_id_receive\n"); return 0; } // quicly:data_blocked_send int trace_quicly__data_blocked_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.data_blocked_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.data_blocked_send.at); // uint64_t off bpf_usdt_readarg(3, ctx, &event.data_blocked_send.off); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__data_blocked_send\n"); return 0; } // quicly:data_blocked_receive int trace_quicly__data_blocked_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_DATA_BLOCKED_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.data_blocked_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.data_blocked_receive.at); // uint64_t off bpf_usdt_readarg(3, ctx, &event.data_blocked_receive.off); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__data_blocked_receive\n"); return 0; } // quicly:stream_data_blocked_send int trace_quicly__stream_data_blocked_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_data_blocked_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_data_blocked_send.at); // int64_t stream_id bpf_usdt_readarg(3, ctx, &event.stream_data_blocked_send.stream_id); // uint64_t maximum bpf_usdt_readarg(4, ctx, &event.stream_data_blocked_send.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_data_blocked_send\n"); return 0; } // quicly:stream_data_blocked_receive int trace_quicly__stream_data_blocked_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_DATA_BLOCKED_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_data_blocked_receive.master_id = 
get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_data_blocked_receive.at); // int64_t stream_id bpf_usdt_readarg(3, ctx, &event.stream_data_blocked_receive.stream_id); // uint64_t maximum bpf_usdt_readarg(4, ctx, &event.stream_data_blocked_receive.maximum); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_data_blocked_receive\n"); return 0; } // quicly:datagram_send int trace_quicly__datagram_send(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_DATAGRAM_SEND }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.datagram_send.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.datagram_send.at); // const void * payload (appdata) bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.datagram_send.payload, sizeof(event.datagram_send.payload), buf); // size_t payload_len bpf_usdt_readarg(4, ctx, &event.datagram_send.payload_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__datagram_send\n"); return 0; } // quicly:datagram_receive int trace_quicly__datagram_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_DATAGRAM_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.datagram_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.datagram_receive.at); // const void * payload (appdata) bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.datagram_receive.payload, sizeof(event.datagram_receive.payload), buf); // size_t payload_len bpf_usdt_readarg(4, ctx, &event.datagram_receive.payload_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__datagram_receive\n"); return 0; } // quicly:ack_frequency_receive int trace_quicly__ack_frequency_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_ACK_FREQUENCY_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.ack_frequency_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.ack_frequency_receive.at); // uint64_t sequence bpf_usdt_readarg(3, ctx, &event.ack_frequency_receive.sequence); // uint64_t packet_tolerance bpf_usdt_readarg(4, ctx, &event.ack_frequency_receive.packet_tolerance); // uint64_t max_ack_delay bpf_usdt_readarg(5, ctx, &event.ack_frequency_receive.max_ack_delay); // int ignore_order bpf_usdt_readarg(6, ctx, &event.ack_frequency_receive.ignore_order); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__ack_frequency_receive\n"); return 0; } // quicly:quictrace_send_stream int trace_quicly__quictrace_send_stream(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_QUICTRACE_SEND_STREAM }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; 
bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.quictrace_send_stream.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.quictrace_send_stream.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.quictrace_send_stream.stream_id = get_st_quicly_stream_t__stream_id(stream); // uint64_t off bpf_usdt_readarg(4, ctx, &event.quictrace_send_stream.off); // size_t len bpf_usdt_readarg(5, ctx, &event.quictrace_send_stream.len); // int fin bpf_usdt_readarg(6, ctx, &event.quictrace_send_stream.fin); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__quictrace_send_stream\n"); return 0; } // quicly:quictrace_recv_stream int trace_quicly__quictrace_recv_stream(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_QUICTRACE_RECV_STREAM }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.quictrace_recv_stream.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.quictrace_recv_stream.at); // int64_t stream_id bpf_usdt_readarg(3, ctx, &event.quictrace_recv_stream.stream_id); // uint64_t off bpf_usdt_readarg(4, ctx, &event.quictrace_recv_stream.off); // size_t len bpf_usdt_readarg(5, ctx, &event.quictrace_recv_stream.len); // int fin bpf_usdt_readarg(6, ctx, &event.quictrace_recv_stream.fin); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__quictrace_recv_stream\n"); return 0; } // quicly:quictrace_cc_ack int trace_quicly__quictrace_cc_ack(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_ACK }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.quictrace_cc_ack.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.quictrace_cc_ack.at); // struct quicly_rtt_t * rtt uint8_t rtt[sizeof_quicly_rtt_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&rtt, sizeof_quicly_rtt_t, buf); event.quictrace_cc_ack.minimum = get_quicly_rtt_t__minimum(rtt); event.quictrace_cc_ack.smoothed = get_quicly_rtt_t__smoothed(rtt); event.quictrace_cc_ack.variance = get_quicly_rtt_t__variance(rtt); event.quictrace_cc_ack.latest = get_quicly_rtt_t__latest(rtt); // uint32_t cwnd bpf_usdt_readarg(4, ctx, &event.quictrace_cc_ack.cwnd); // size_t inflight bpf_usdt_readarg(5, ctx, &event.quictrace_cc_ack.inflight); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__quictrace_cc_ack\n"); return 0; } // quicly:quictrace_cc_lost int trace_quicly__quictrace_cc_lost(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_QUICTRACE_CC_LOST }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.quictrace_cc_lost.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.quictrace_cc_lost.at); 
// struct quicly_rtt_t * rtt uint8_t rtt[sizeof_quicly_rtt_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&rtt, sizeof_quicly_rtt_t, buf); event.quictrace_cc_lost.minimum = get_quicly_rtt_t__minimum(rtt); event.quictrace_cc_lost.smoothed = get_quicly_rtt_t__smoothed(rtt); event.quictrace_cc_lost.variance = get_quicly_rtt_t__variance(rtt); event.quictrace_cc_lost.latest = get_quicly_rtt_t__latest(rtt); // uint32_t cwnd bpf_usdt_readarg(4, ctx, &event.quictrace_cc_lost.cwnd); // size_t inflight bpf_usdt_readarg(5, ctx, &event.quictrace_cc_lost.inflight); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__quictrace_cc_lost\n"); return 0; } // quicly:stream_on_open int trace_quicly__stream_on_open(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_OPEN }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_open.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_open.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_open.stream_id = get_st_quicly_stream_t__stream_id(stream); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_open\n"); return 0; } // quicly:stream_on_destroy int trace_quicly__stream_on_destroy(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_DESTROY }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_destroy.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_destroy.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_destroy.stream_id = get_st_quicly_stream_t__stream_id(stream); // int err bpf_usdt_readarg(4, ctx, &event.stream_on_destroy.err); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_destroy\n"); return 0; } // quicly:stream_on_send_shift int trace_quicly__stream_on_send_shift(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_SHIFT }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_send_shift.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_send_shift.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_send_shift.stream_id = get_st_quicly_stream_t__stream_id(stream); // size_t delta bpf_usdt_readarg(4, ctx, &event.stream_on_send_shift.delta); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in 
trace_quicly__stream_on_send_shift\n"); return 0; } // quicly:stream_on_send_emit int trace_quicly__stream_on_send_emit(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_EMIT }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_send_emit.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_send_emit.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_send_emit.stream_id = get_st_quicly_stream_t__stream_id(stream); // size_t off bpf_usdt_readarg(4, ctx, &event.stream_on_send_emit.off); // size_t capacity bpf_usdt_readarg(5, ctx, &event.stream_on_send_emit.capacity); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_send_emit\n"); return 0; } // quicly:stream_on_send_stop int trace_quicly__stream_on_send_stop(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_SEND_STOP }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_send_stop.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_send_stop.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_send_stop.stream_id = get_st_quicly_stream_t__stream_id(stream); // int err bpf_usdt_readarg(4, ctx, &event.stream_on_send_stop.err); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_send_stop\n"); return 0; } // quicly:stream_on_receive int trace_quicly__stream_on_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_receive.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_receive.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_receive.stream_id = get_st_quicly_stream_t__stream_id(stream); // size_t off bpf_usdt_readarg(4, ctx, &event.stream_on_receive.off); // const void * src (appdata) bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.stream_on_receive.src, sizeof(event.stream_on_receive.src), buf); // size_t src_len bpf_usdt_readarg(6, ctx, &event.stream_on_receive.src_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_receive\n"); return 0; } // quicly:stream_on_receive_reset int trace_quicly__stream_on_receive_reset(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_STREAM_ON_RECEIVE_RESET }; // struct st_quicly_conn_t * conn 
uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.stream_on_receive_reset.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.stream_on_receive_reset.at); // struct st_quicly_stream_t * stream uint8_t stream[sizeof_st_quicly_stream_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&stream, sizeof_st_quicly_stream_t, buf); event.stream_on_receive_reset.stream_id = get_st_quicly_stream_t__stream_id(stream); // int err bpf_usdt_readarg(4, ctx, &event.stream_on_receive_reset.err); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__stream_on_receive_reset\n"); return 0; } // quicly:conn_stats int trace_quicly__conn_stats(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_QUICLY_CONN_STATS }; // struct st_quicly_conn_t * conn uint8_t conn[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&conn, sizeof_st_quicly_conn_t, buf); event.conn_stats.master_id = get_st_quicly_conn_t__master_id(conn); // int64_t at bpf_usdt_readarg(2, ctx, &event.conn_stats.at); // struct st_quicly_stats_t * stats bpf_usdt_readarg(3, ctx, &event.conn_stats.stats); // size_t size bpf_usdt_readarg(4, ctx, &event.conn_stats.size); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_quicly__conn_stats\n"); return 0; } // h2o:_private_socket_lookup_flags int trace_h2o___private_socket_lookup_flags(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O__PRIVATE_SOCKET_LOOKUP_FLAGS }; // pid_t tid bpf_usdt_readarg(1, ctx, &event._private_socket_lookup_flags.tid); // uint64_t original_flags bpf_usdt_readarg(2, ctx, &event._private_socket_lookup_flags.original_flags); // struct st_h2o_ebpf_map_key_t * info bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event._private_socket_lookup_flags.info, sizeof_st_h2o_ebpf_map_key_t, buf); #ifdef H2OLOG_SAMPLING_RATE_U32 uint64_t flags = event._private_socket_lookup_flags.original_flags; int skip_tracing = bpf_get_prandom_u32() > H2OLOG_SAMPLING_RATE_U32; if (skip_tracing) { flags |= H2O_EBPF_FLAGS_SKIP_TRACING_BIT; } int64_t ret = h2o_return.insert(&event._private_socket_lookup_flags.tid, &flags); if (ret != 0) bpf_trace_printk("failed to insert 0x%llx in trace_h2o___private_socket_lookup_flags with errno=%lld\n", flags, -ret); #endif return 0; } // h2o:receive_request int trace_h2o__receive_request(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.receive_request.conn_id); // uint64_t req_id bpf_usdt_readarg(2, ctx, &event.receive_request.req_id); // int http_version bpf_usdt_readarg(3, ctx, &event.receive_request.http_version); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__receive_request\n"); return 0; } // h2o:receive_request_header int trace_h2o__receive_request_header(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_RECEIVE_REQUEST_HEADER }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.receive_request_header.conn_id); // uint64_t req_id bpf_usdt_readarg(2, ctx, &event.receive_request_header.req_id); // const char * name (appdata) bpf_usdt_readarg(3, ctx, &buf); 
bpf_probe_read(&event.receive_request_header.name, sizeof(event.receive_request_header.name), buf); // size_t name_len bpf_usdt_readarg(4, ctx, &event.receive_request_header.name_len); // const char * value (appdata) bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.receive_request_header.value, sizeof(event.receive_request_header.value), buf); // size_t value_len bpf_usdt_readarg(6, ctx, &event.receive_request_header.value_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__receive_request_header\n"); return 0; } // h2o:send_response int trace_h2o__send_response(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_SEND_RESPONSE }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.send_response.conn_id); // uint64_t req_id bpf_usdt_readarg(2, ctx, &event.send_response.req_id); // int status bpf_usdt_readarg(3, ctx, &event.send_response.status); // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(4, ctx, &event.send_response.tunnel); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__send_response\n"); return 0; } // h2o:send_response_header int trace_h2o__send_response_header(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_SEND_RESPONSE_HEADER }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.send_response_header.conn_id); // uint64_t req_id bpf_usdt_readarg(2, ctx, &event.send_response_header.req_id); // const char * name (appdata) bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.send_response_header.name, sizeof(event.send_response_header.name), buf); // size_t name_len bpf_usdt_readarg(4, ctx, &event.send_response_header.name_len); // const char * value (appdata) bpf_usdt_readarg(5, ctx, &buf); bpf_probe_read(&event.send_response_header.value, sizeof(event.send_response_header.value), buf); // size_t value_len bpf_usdt_readarg(6, ctx, &event.send_response_header.value_len); #ifdef CHECK_ALLOWED_RES_HEADER_NAME if (!CHECK_ALLOWED_RES_HEADER_NAME(event.send_response_header.name, event.send_response_header.name_len)) return 0; #endif if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__send_response_header\n"); return 0; } // h2o:h1_accept int trace_h2o__h1_accept(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H1_ACCEPT }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h1_accept.conn_id); // struct st_h2o_socket_t * sock bpf_usdt_readarg(2, ctx, &event.h1_accept.sock); // struct st_h2o_conn_t * conn bpf_usdt_readarg(3, ctx, &event.h1_accept.conn); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h1_accept\n"); return 0; } // h2o:h1_close int trace_h2o__h1_close(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H1_CLOSE }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h1_close.conn_id); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h1_close\n"); return 0; } // h2o:h2_unknown_frame_type int trace_h2o__h2_unknown_frame_type(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H2_UNKNOWN_FRAME_TYPE }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h2_unknown_frame_type.conn_id); // uint8_t 
frame_type bpf_usdt_readarg(2, ctx, &event.h2_unknown_frame_type.frame_type); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h2_unknown_frame_type\n"); return 0; } // h2o:h3s_accept int trace_h2o__h3s_accept(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3S_ACCEPT }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h3s_accept.conn_id); // struct st_h2o_conn_t * conn bpf_usdt_readarg(2, ctx, &event.h3s_accept.conn); // struct st_quicly_conn_t * quic uint8_t quic[sizeof_st_quicly_conn_t] = {}; bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&quic, sizeof_st_quicly_conn_t, buf); event.h3s_accept.master_id = get_st_quicly_conn_t__master_id(quic); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3s_accept\n"); return 0; } // h2o:h3s_destroy int trace_h2o__h3s_destroy(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3S_DESTROY }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h3s_destroy.conn_id); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3s_destroy\n"); return 0; } // h2o:h3s_stream_set_state int trace_h2o__h3s_stream_set_state(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3S_STREAM_SET_STATE }; // uint64_t conn_id bpf_usdt_readarg(1, ctx, &event.h3s_stream_set_state.conn_id); // uint64_t req_id bpf_usdt_readarg(2, ctx, &event.h3s_stream_set_state.req_id); // unsigned state bpf_usdt_readarg(3, ctx, &event.h3s_stream_set_state.state); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3s_stream_set_state\n"); return 0; } // h2o:h3_frame_receive int trace_h2o__h3_frame_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3_FRAME_RECEIVE }; // uint64_t frame_type bpf_usdt_readarg(1, ctx, &event.h3_frame_receive.frame_type); // const void * bytes (appdata) bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.h3_frame_receive.bytes, sizeof(event.h3_frame_receive.bytes), buf); // size_t bytes_len bpf_usdt_readarg(3, ctx, &event.h3_frame_receive.bytes_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3_frame_receive\n"); return 0; } // h2o:h3_packet_receive int trace_h2o__h3_packet_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3_PACKET_RECEIVE }; // struct sockaddr * dest bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&event.h3_packet_receive.dest, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_packet_receive.dest) == AF_INET) { bpf_probe_read(&event.h3_packet_receive.dest, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_packet_receive.dest) == AF_INET6) { bpf_probe_read(&event.h3_packet_receive.dest, sizeof_sockaddr_in6, buf); } // struct sockaddr * src bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.h3_packet_receive.src, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_packet_receive.src) == AF_INET) { bpf_probe_read(&event.h3_packet_receive.src, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_packet_receive.src) == AF_INET6) { bpf_probe_read(&event.h3_packet_receive.src, sizeof_sockaddr_in6, 
buf); } // const void * bytes bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.h3_packet_receive.bytes, sizeof(event.h3_packet_receive.bytes), buf); // size_t bytes_len bpf_usdt_readarg(4, ctx, &event.h3_packet_receive.bytes_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3_packet_receive\n"); return 0; } // h2o:h3_packet_forward int trace_h2o__h3_packet_forward(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3_PACKET_FORWARD }; // struct sockaddr * dest bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&event.h3_packet_forward.dest, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_packet_forward.dest) == AF_INET) { bpf_probe_read(&event.h3_packet_forward.dest, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_packet_forward.dest) == AF_INET6) { bpf_probe_read(&event.h3_packet_forward.dest, sizeof_sockaddr_in6, buf); } // struct sockaddr * src bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.h3_packet_forward.src, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_packet_forward.src) == AF_INET) { bpf_probe_read(&event.h3_packet_forward.src, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_packet_forward.src) == AF_INET6) { bpf_probe_read(&event.h3_packet_forward.src, sizeof_sockaddr_in6, buf); } // size_t num_packets bpf_usdt_readarg(3, ctx, &event.h3_packet_forward.num_packets); // size_t num_bytes bpf_usdt_readarg(4, ctx, &event.h3_packet_forward.num_bytes); // int fd bpf_usdt_readarg(5, ctx, &event.h3_packet_forward.fd); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3_packet_forward\n"); return 0; } // h2o:h3_forwarded_packet_receive int trace_h2o__h3_forwarded_packet_receive(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3_FORWARDED_PACKET_RECEIVE }; // struct sockaddr * dest bpf_usdt_readarg(1, ctx, &buf); bpf_probe_read(&event.h3_forwarded_packet_receive.dest, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_forwarded_packet_receive.dest) == AF_INET) { bpf_probe_read(&event.h3_forwarded_packet_receive.dest, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_forwarded_packet_receive.dest) == AF_INET6) { bpf_probe_read(&event.h3_forwarded_packet_receive.dest, sizeof_sockaddr_in6, buf); } // struct sockaddr * src bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.h3_forwarded_packet_receive.src, sizeof_sockaddr, buf); if (get_sockaddr__sa_family(&event.h3_forwarded_packet_receive.src) == AF_INET) { bpf_probe_read(&event.h3_forwarded_packet_receive.src, sizeof_sockaddr_in, buf); } else if (get_sockaddr__sa_family(&event.h3_forwarded_packet_receive.src) == AF_INET6) { bpf_probe_read(&event.h3_forwarded_packet_receive.src, sizeof_sockaddr_in6, buf); } // size_t num_bytes bpf_usdt_readarg(3, ctx, &event.h3_forwarded_packet_receive.num_bytes); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__h3_forwarded_packet_receive\n"); return 0; } // h2o:h3c_tunnel_create int trace_h2o__h3c_tunnel_create(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_H3C_TUNNEL_CREATE }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.h3c_tunnel_create.tunnel); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) 
bpf_trace_printk("failed to perf_submit in trace_h2o__h3c_tunnel_create\n"); return 0; } // h2o:tunnel_on_destroy int trace_h2o__tunnel_on_destroy(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_TUNNEL_ON_DESTROY }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.tunnel_on_destroy.tunnel); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__tunnel_on_destroy\n"); return 0; } // h2o:tunnel_on_read int trace_h2o__tunnel_on_read(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_TUNNEL_ON_READ }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.tunnel_on_read.tunnel); // const char * err bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.tunnel_on_read.err, sizeof(event.tunnel_on_read.err), buf); // const void * bytes (appdata) bpf_usdt_readarg(3, ctx, &buf); bpf_probe_read(&event.tunnel_on_read.bytes, sizeof(event.tunnel_on_read.bytes), buf); // size_t bytes_len bpf_usdt_readarg(4, ctx, &event.tunnel_on_read.bytes_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__tunnel_on_read\n"); return 0; } // h2o:tunnel_proceed_read int trace_h2o__tunnel_proceed_read(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_TUNNEL_PROCEED_READ }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.tunnel_proceed_read.tunnel); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__tunnel_proceed_read\n"); return 0; } // h2o:tunnel_write int trace_h2o__tunnel_write(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_TUNNEL_WRITE }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.tunnel_write.tunnel); // const void * bytes (appdata) bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.tunnel_write.bytes, sizeof(event.tunnel_write.bytes), buf); // size_t bytes_len bpf_usdt_readarg(3, ctx, &event.tunnel_write.bytes_len); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__tunnel_write\n"); return 0; } // h2o:tunnel_on_write_complete int trace_h2o__tunnel_on_write_complete(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_TUNNEL_ON_WRITE_COMPLETE }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.tunnel_on_write_complete.tunnel); // const char * err bpf_usdt_readarg(2, ctx, &buf); bpf_probe_read(&event.tunnel_on_write_complete.err, sizeof(event.tunnel_on_write_complete.err), buf); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__tunnel_on_write_complete\n"); return 0; } // h2o:socket_tunnel_create int trace_h2o__socket_tunnel_create(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_CREATE }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.socket_tunnel_create.tunnel); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__socket_tunnel_create\n"); return 0; } // h2o:socket_tunnel_start int trace_h2o__socket_tunnel_start(struct pt_regs *ctx) { const void *buf = NULL; struct h2olog_event_t event = { .id = 
H2OLOG_EVENT_ID_H2O_SOCKET_TUNNEL_START }; // struct st_h2o_tunnel_t * tunnel bpf_usdt_readarg(1, ctx, &event.socket_tunnel_start.tunnel); // size_t bytes_to_consume bpf_usdt_readarg(2, ctx, &event.socket_tunnel_start.bytes_to_consume); if (events.perf_submit(ctx, &event, sizeof(event)) != 0) bpf_trace_printk("failed to perf_submit in trace_h2o__socket_tunnel_start\n"); return 0; } )"; }
1
15,200
We've avoided including h2o headers in BPF programs because they become a runtime dependency. However, IIRC, that was only because h2olog was maintained in a separate repository, so it did not know where h2o was installed. Now that h2olog can use `H2O_ROOT`, we should add the corresponding include path to BCC's `cflags` so that BPF programs can include h2o headers.
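For illustration only (this is not h2olog's actual code): assuming the include path is derived from `H2O_ROOT` at startup, it could be forwarded to BCC's embedded Clang through the `cflags` argument of `ebpf::BPF::init()`, which is what would let the generated BPF program `#include` h2o headers. The `load_bpf_program` helper and the environment lookup below are hypothetical.

#include <bcc/BPF.h>   // BCC C++ API; header location may vary by install
#include <cstdlib>
#include <string>
#include <vector>

// Hypothetical helper: compile a BPF program with h2o headers on the include path.
static bool load_bpf_program(const std::string &bpf_text) {
    std::vector<std::string> cflags;
    if (const char *h2o_root = std::getenv("H2O_ROOT")) {
        cflags.push_back("-I" + std::string(h2o_root) + "/include");
    }
    ebpf::BPF bpf;
    ebpf::StatusTuple ret = bpf.init(bpf_text, cflags);  // cflags are passed to BCC's Clang invocation
    return ret.code() == 0;
}

In real use the third `init()` argument would also carry the USDT probe definitions, but that is omitted here to keep the sketch focused on the include path.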
h2o-h2o
c
@@ -170,6 +170,10 @@ type ThanosRulerSpec struct { // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. // Maps to the '--grpc-server-tls-*' CLI args. GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + // The external Query URL the Thanos Ruler will set in the 'Source' field + // of all alerts. + // Maps to the '--alert.query-url' CLI arg. + AlertQueryURL string `json:"alertQueryUrl,omitempty"` } // ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. Not
1
// Copyright 2020 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) const ( ThanosRulerKind = "ThanosRuler" ThanosRulerName = "thanosrulers" ThanosRulerKindKey = "thanosrulers" ) // ThanosRuler defines a ThanosRuler deployment. // +genclient // +k8s:openapi-gen=true type ThanosRuler struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the ThanosRuler cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec ThanosRulerSpec `json:"spec"` // Most recent observed status of the ThanosRuler cluster. Read-only. Not // included when requesting from the apiserver, only from the ThanosRuler // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Status *ThanosRulerStatus `json:"status,omitempty"` } // ThanosRulerList is a list of ThanosRulers. // +k8s:openapi-gen=true type ThanosRulerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Prometheuses Items []*ThanosRuler `json:"items"` } // ThanosRulerSpec is a specification of the desired behavior of the ThanosRuler. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type ThanosRulerSpec struct { // PodMetadata contains Labels and Annotations gets propagated to the thanos ruler pods. PodMetadata *PodMeta `json:"podMetadata,omitempty"` // Thanos container image URL. Image string `json:"image,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling thanos images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // When a ThanosRuler deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` // Number of thanos ruler instances to deploy. Replicas *int32 `json:"replicas,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Resources defines the resource requirements for single Pods. // If not provided, no requests/limits will be set Resources v1.ResourceRequirements `json:"resources,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. 
Tolerations []v1.Toleration `json:"tolerations,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` // Priority class assigned to the Pods PriorityClassName string `json:"priorityClassName,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Thanos Ruler Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // Storage spec to specify how storage shall be used. Storage *StorageSpec `json:"storage,omitempty"` // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will // be appended to other volumes that are generated as a result of StorageSpec objects. Volumes []v1.Volume `json:"volumes,omitempty"` // ObjectStorageConfig configures object storage in Thanos. ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` // ListenLocal makes the Thanos ruler listen on loopback, so that it // does not bind against the Pod IP. ListenLocal bool `json:"listenLocal,omitempty"` // QueryEndpoints defines Thanos querier endpoints from which to query metrics. // Maps to the --query flag of thanos ruler. QueryEndpoints []string `json:"queryEndpoints,omitempty"` // Define configuration for connecting to thanos query instances. // If this is defined, the QueryEndpoints field will be ignored. // Maps to the `query.config` CLI argument. // Only available with thanos v0.11.0 and higher. QueryConfig *v1.SecretKeySelector `json:"queryConfig,omitempty"` // Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, // AlertManagersConfig should be used instead. Note: this field will be ignored // if AlertManagersConfig is specified. // Maps to the `alertmanagers.url` arg. AlertManagersURL []string `json:"alertmanagersUrl,omitempty"` // Define configuration for connecting to alertmanager. Only available with thanos v0.10.0 // and higher. Maps to the `alertmanagers.config` arg. AlertManagersConfig *v1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` // A label selector to select which PrometheusRules to mount for alerting and // recording. RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` // Namespaces to be selected for Rules discovery. If unspecified, only // the same namespace as the ThanosRuler object is in is used. RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` // EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert // and metric that is user created. The label value will always be the namespace of the object that is // being created. EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` // Log level for ThanosRuler to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for ThanosRuler to be configured with. LogFormat string `json:"logFormat,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` // Interval between consecutive evaluations. EvaluationInterval string `json:"evaluationInterval,omitempty"` // Time duration ThanosRuler shall retain data for. Default is '24h', // and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). 
Retention string `json:"retention,omitempty"` // Containers allows injecting additional containers or modifying operator generated // containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or // to change the behavior of an operator generated container. Containers described here modify // an operator generated container if they share the same name and modifications are done via a // strategic merge patch. The current container names are: `thanos-ruler` and `rules-configmap-reloader`. // Overriding containers is entirely outside the scope of what the maintainers will support and by doing // so, you accept that this behaviour may break at any time without notice. Containers []v1.Container `json:"containers,omitempty"` // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. // fetch secrets for injection into the ThanosRuler configuration from external sources. Any // errors during the execution of an initContainer will lead to a restart of the Pod. // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // Using initContainers for any use case other then secret fetching is entirely outside the scope // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. InitContainers []v1.Container `json:"initContainers,omitempty"` // TracingConfig configures tracing in Thanos. This is an experimental feature, it may change in any upcoming release in a breaking way. TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` // Labels configure the external label pairs to ThanosRuler. If not provided, default replica label // `thanos_ruler_replica` will be added as a label and be dropped in alerts. Labels map[string]string `json:"labels,omitempty"` // AlertDropLabels configure the label names which should be dropped in ThanosRuler alerts. // If `labels` field is not provided, `thanos_ruler_replica` will be dropped in alerts by default. AlertDropLabels []string `json:"alertDropLabels,omitempty"` // The external URL the Thanos Ruler instances will be available under. This is // necessary to generate correct URLs. This is necessary if Thanos Ruler is not // served from root of a DNS name. ExternalPrefix string `json:"externalPrefix,omitempty"` // The route prefix ThanosRuler registers HTTP handlers for. This allows thanos UI to be served on a sub-path. RoutePrefix string `json:"routePrefix,omitempty"` // GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads // recorded rule data. // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. // Maps to the '--grpc-server-tls-*' CLI args. GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` } // ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type ThanosRulerStatus struct { // Represents whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this ThanosRuler deployment // (their labels match the selector). 
Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this ThanosRuler deployment // that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this ThanosRuler deployment. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this ThanosRuler deployment. UnavailableReplicas int32 `json:"unavailableReplicas"` } // DeepCopyObject implements the runtime.Object interface. func (l *ThanosRuler) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ThanosRulerList) DeepCopyObject() runtime.Object { return l.DeepCopy() }
1
13,980
I think the docstring here should include the CLI arg `--alert.query-url` just to make it clear to users which setting this uses.
prometheus-operator-prometheus-operator
go
@@ -224,9 +224,7 @@ func (m *ipipManager) CompleteDeferredWork() error { for _, ip := range m.activeHostnameToIP { members = append(members, ip) } - for _, ip := range m.externalNodeCIDRs { - members = append(members, ip) - } + members = append(members, m.externalNodeCIDRs...) m.ipsetsDataplane.AddOrReplaceIPSet(m.ipSetMetadata, members) m.ipSetInSync = true }
1
// Copyright (c) 2016-2017, 2019 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intdataplane import ( "net" "time" log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/projectcalico/felix/ipsets" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/rules" "github.com/projectcalico/libcalico-go/lib/set" ) // ipipManager manages the all-hosts IP set, which is used by some rules in our static chains // when IPIP is enabled. It doesn't actually program the rules, because they are part of the // top-level static chains. // // ipipManager also takes care of the configuration of the IPIP tunnel device. type ipipManager struct { ipsetsDataplane ipsetsDataplane // activeHostnameToIP maps hostname to string IP address. We don't bother to parse into // net.IPs because we're going to pass them directly to the IPSet API. activeHostnameToIP map[string]string ipSetInSync bool // Config for creating/refreshing the IP set. ipSetMetadata ipsets.IPSetMetadata // Dataplane shim. dataplane ipipDataplane // Configured list of external node ip cidr's to be added to the ipset. externalNodeCIDRs []string } func newIPIPManager( ipsetsDataplane ipsetsDataplane, maxIPSetSize int, externalNodeCidrs []string, ) *ipipManager { return newIPIPManagerWithShim(ipsetsDataplane, maxIPSetSize, realIPIPNetlink{}, externalNodeCidrs) } func newIPIPManagerWithShim( ipsetsDataplane ipsetsDataplane, maxIPSetSize int, dataplane ipipDataplane, externalNodeCIDRs []string, ) *ipipManager { ipipMgr := &ipipManager{ ipsetsDataplane: ipsetsDataplane, activeHostnameToIP: map[string]string{}, dataplane: dataplane, ipSetMetadata: ipsets.IPSetMetadata{ MaxSize: maxIPSetSize, SetID: rules.IPSetIDAllHostNets, Type: ipsets.IPSetTypeHashNet, }, externalNodeCIDRs: externalNodeCIDRs, } return ipipMgr } // KeepIPIPDeviceInSync is a goroutine that configures the IPIP tunnel device, then periodically // checks that it is still correctly configured. func (d *ipipManager) KeepIPIPDeviceInSync(mtu int, address net.IP) { log.Info("IPIP thread started.") for { err := d.configureIPIPDevice(mtu, address) if err != nil { log.WithError(err).Warn("Failed configure IPIP tunnel device, retrying...") time.Sleep(1 * time.Second) continue } time.Sleep(10 * time.Second) } } // configureIPIPDevice ensures the IPIP tunnel device is up and configures correctly. func (d *ipipManager) configureIPIPDevice(mtu int, address net.IP) error { logCxt := log.WithFields(log.Fields{ "mtu": mtu, "tunnelAddr": address, }) logCxt.Debug("Configuring IPIP tunnel") link, err := d.dataplane.LinkByName("tunl0") if err != nil { log.WithError(err).Info("Failed to get IPIP tunnel device, assuming it isn't present") // We call out to "ip tunnel", which takes care of loading the kernel module if // needed. The tunl0 device is actually created automatically by the kernel // module. 
err := d.dataplane.RunCmd("ip", "tunnel", "add", "tunl0", "mode", "ipip") if err != nil { log.WithError(err).Warning("Failed to add IPIP tunnel device") return err } link, err = d.dataplane.LinkByName("tunl0") if err != nil { log.WithError(err).Warning("Failed to get tunnel device") return err } } attrs := link.Attrs() oldMTU := attrs.MTU if oldMTU != mtu { logCxt.WithField("oldMTU", oldMTU).Info("Tunnel device MTU needs to be updated") if err := d.dataplane.LinkSetMTU(link, mtu); err != nil { log.WithError(err).Warn("Failed to set tunnel device MTU") return err } logCxt.Info("Updated tunnel MTU") } if attrs.Flags&net.FlagUp == 0 { logCxt.WithField("flags", attrs.Flags).Info("Tunnel wasn't admin up, enabling it") if err := d.dataplane.LinkSetUp(link); err != nil { log.WithError(err).Warn("Failed to set tunnel device up") return err } logCxt.Info("Set tunnel admin up") } if err := d.setLinkAddressV4("tunl0", address); err != nil { log.WithError(err).Warn("Failed to set tunnel device IP") return err } return nil } // setLinkAddressV4 updates the given link to set its local IP address. It removes any other // addresses. func (d *ipipManager) setLinkAddressV4(linkName string, address net.IP) error { logCxt := log.WithFields(log.Fields{ "link": linkName, "addr": address, }) logCxt.Debug("Setting local IPv4 address on link.") link, err := d.dataplane.LinkByName(linkName) if err != nil { log.WithError(err).WithField("name", linkName).Warning("Failed to get device") return err } addrs, err := d.dataplane.AddrList(link, netlink.FAMILY_V4) if err != nil { log.WithError(err).Warn("Failed to list interface addresses") return err } found := false for _, oldAddr := range addrs { if address != nil && oldAddr.IP.Equal(address) { logCxt.Debug("Address already present.") found = true continue } logCxt.WithField("oldAddr", oldAddr).Info("Removing old address") if err := d.dataplane.AddrDel(link, &oldAddr); err != nil { log.WithError(err).Warn("Failed to delete address") return err } } if !found && address != nil { logCxt.Info("Address wasn't present, adding it.") mask := net.CIDRMask(32, 32) ipNet := net.IPNet{ IP: address.Mask(mask), // Mask the IP to match ParseCIDR()'s behaviour. Mask: mask, } addr := &netlink.Addr{ IPNet: &ipNet, } if err := d.dataplane.AddrAdd(link, addr); err != nil { log.WithError(err).WithField("addr", address).Warn("Failed to add address") return err } } logCxt.Debug("Address set.") return nil } func (d *ipipManager) OnUpdate(msg interface{}) { switch msg := msg.(type) { case *proto.HostMetadataUpdate: log.WithField("hostanme", msg.Hostname).Debug("Host update/create") d.activeHostnameToIP[msg.Hostname] = msg.Ipv4Addr d.ipSetInSync = false case *proto.HostMetadataRemove: log.WithField("hostname", msg.Hostname).Debug("Host removed") delete(d.activeHostnameToIP, msg.Hostname) d.ipSetInSync = false } } func (m *ipipManager) CompleteDeferredWork() error { if !m.ipSetInSync { // For simplicity (and on the assumption that host add/removes are rare) rewrite // the whole IP set whenever we get a change. To replace this with delta handling // would require reference counting the IPs because it's possible for two hosts // to (at least transiently) share an IP. That would add occupancy and make the // code more complex. 
log.Info("All-hosts IP set out-of sync, refreshing it.") members := make([]string, 0, len(m.activeHostnameToIP)+len(m.externalNodeCIDRs)) for _, ip := range m.activeHostnameToIP { members = append(members, ip) } for _, ip := range m.externalNodeCIDRs { members = append(members, ip) } m.ipsetsDataplane.AddOrReplaceIPSet(m.ipSetMetadata, members) m.ipSetInSync = true } return nil } // ipsetsDataplane is a shim interface for mocking the IPSets object. type ipsetsDataplane interface { AddOrReplaceIPSet(setMetadata ipsets.IPSetMetadata, members []string) AddMembers(setID string, newMembers []string) RemoveMembers(setID string, removedMembers []string) RemoveIPSet(setID string) GetIPFamily() ipsets.IPFamily GetTypeOf(setID string) (ipsets.IPSetType, error) GetMembers(setID string) (set.Set, error) }
1
17,206
Same change just above?
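As an illustrative aside (not the Felix source): the variadic form only applies to the slice-valued field; the loop just above it in the diff ranges over a `map[string]string`, whose values still have to be appended one by one. The variable values below are hypothetical stand-ins.

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for the ipipManager fields.
	activeHostnameToIP := map[string]string{"node-a": "10.0.0.1"}
	externalNodeCIDRs := []string{"192.168.0.0/24"}

	members := make([]string, 0, len(activeHostnameToIP)+len(externalNodeCIDRs))
	for _, ip := range activeHostnameToIP { // map values cannot be spread with "..."
		members = append(members, ip)
	}
	members = append(members, externalNodeCIDRs...) // a []string can be appended variadically
	fmt.Println(members)
}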
projectcalico-felix
c
@@ -511,4 +511,14 @@ describe('services_SearchEngine', function() { expect((await engine.search('"- [ ]"', { searchType: SearchEngine.SEARCH_TYPE_BASIC })).length).toBe(1); expect((await engine.search('"[ ]"', { searchType: SearchEngine.SEARCH_TYPE_BASIC })).length).toBe(2); })); + + it('should not mistake cyrillic "l" for latin "n"', asyncTest(async () => { + const n1 = await Note.save({ title: 'latin n', body: 'n' }); + const n2 = await Note.save({ title: 'cyrillic l', body: 'л' }); + + await engine.syncTables(); + + expect((await engine.search('n')).length).toBe(1); + expect((await engine.search('л')).length).toBe(1); + })); });
1
/* eslint-disable no-unused-vars */ /* eslint prefer-const: 0*/ const time = require('@joplin/lib/time').default; const { fileContentEqual, setupDatabase, setupDatabaseAndSynchronizer, db, synchronizer, fileApi, sleep, clearDatabase, switchClient, syncTargetId, objectsEqual, checkThrowAsync, restoreDate } = require('./test-utils.js'); const SearchEngine = require('@joplin/lib/services/searchengine/SearchEngine'); const Note = require('@joplin/lib/models/Note'); const ItemChange = require('@joplin/lib/models/ItemChange'); const Setting = require('@joplin/lib/models/Setting').default; let engine = null; const IDF = (N, n) => Math.max(Math.log((N - n + 0.5) / (n + 0.5)), 0); const frequency = (word, string) => { const re = new RegExp(`\\b(${word})\\b`, 'g'); return (string.match(re) || []).length; }; const calculateScore = (searchString, notes) => { const K1 = 1.2; const B = 0.75; const freqTitle = notes.map(note => frequency(searchString, note.title)); const notesWithWord = freqTitle.filter(count => count !== 0).length; const numTokens = notes.map(note => note.title.split(' ').length); const avgTokens = Math.round(numTokens.reduce((a, b) => a + b, 0) / notes.length); const msSinceEpoch = Math.round(new Date().getTime()); const msPerDay = 86400000; const weightForDaysSinceLastUpdate = (row) => { // BM25 weights typically range 0-10, and last updated date should weight similarly, though prioritizing recency logarithmically. // An alpha of 200 ensures matches in the last week will show up front (11.59) and often so for matches within 2 weeks (5.99), // but is much less of a factor at 30 days (2.84) or very little after 90 days (0.95), focusing mostly on content at that point. if (!row.user_updated_time) { return 0; } const alpha = 200; const daysSinceLastUpdate = (msSinceEpoch - row.user_updated_time) / msPerDay; return alpha * Math.log(1 + 1 / Math.max(daysSinceLastUpdate, 0.5)); }; let titleBM25WeightedByLastUpdate = new Array(notes.length).fill(-1); if (avgTokens != 0) { for (let i = 0; i < notes.length; i++) { titleBM25WeightedByLastUpdate[i] = IDF(notes.length, notesWithWord) * ((freqTitle[i] * (K1 + 1)) / (freqTitle[i] + K1 * (1 - B + B * (numTokens[i] / avgTokens)))); titleBM25WeightedByLastUpdate[i] += weightForDaysSinceLastUpdate(notes[i]); } } const scores = []; for (let i = 0; i < notes.length; i++) { if (freqTitle[i]) scores.push(titleBM25WeightedByLastUpdate[i]); } scores.sort().reverse(); return scores; }; describe('services_SearchEngine', function() { beforeEach(async (done) => { await setupDatabaseAndSynchronizer(1); await switchClient(1); engine = new SearchEngine(); engine.setDb(db()); done(); }); it('should keep the content and FTS table in sync', (async () => { let rows, n1, n2, n3; n1 = await Note.save({ title: 'a' }); n2 = await Note.save({ title: 'b' }); await engine.syncTables(); rows = await engine.search('a'); expect(rows.length).toBe(1); expect(rows[0].title).toBe('a'); await Note.delete(n1.id); await engine.syncTables(); rows = await engine.search('a'); expect(rows.length).toBe(0); rows = await engine.search('b'); expect(rows[0].title).toBe('b'); await Note.save({ id: n2.id, title: 'c' }); await engine.syncTables(); rows = await engine.search('b'); expect(rows.length).toBe(0); rows = await engine.search('c'); expect(rows[0].title).toBe('c'); await Note.save({ id: n2.id, encryption_applied: 1 }); await engine.syncTables(); rows = await engine.search('c'); expect(rows.length).toBe(0); await Note.save({ id: n2.id, encryption_applied: 0 }); await engine.syncTables(); 
rows = await engine.search('c'); expect(rows.length).toBe(1); })); it('should, after initial indexing, save the last change ID', (async () => { const n1 = await Note.save({ title: 'abcd efgh' }); // 3 const n2 = await Note.save({ title: 'abcd aaaaa abcd abcd' }); // 1 expect(Setting.value('searchEngine.initialIndexingDone')).toBe(false); await ItemChange.waitForAllSaved(); const lastChangeId = await ItemChange.lastChangeId(); await engine.syncTables(); expect(Setting.value('searchEngine.lastProcessedChangeId')).toBe(lastChangeId); expect(Setting.value('searchEngine.initialIndexingDone')).toBe(true); })); it('should order search results by relevance BM25', (async () => { // BM25 is based on term frequency - inverse document frequency // The tf–idf value increases proportionally to the number of times a word appears in the document // and is offset by the number of documents in the corpus that contain the word, which helps to adjust // for the fact that some words appear more frequently in general. // BM25 returns weight zero for search term which occurs in more than half the notes. // So terms that are abundant in all notes to have zero relevance w.r.t BM25. const n1 = await Note.save({ title: 'abcd efgh' }); // 3 const n2 = await Note.save({ title: 'abcd efgh abcd abcd' }); // 1 const n3 = await Note.save({ title: 'abcd aaaaa bbbb eeee abcd' }); // 2 const n4 = await Note.save({ title: 'xyz xyz' }); const n5 = await Note.save({ title: 'xyz xyz xyz xyz' }); const n6 = await Note.save({ title: 'xyz xyz xyz xyz xyz xyz' }); const n7 = await Note.save({ title: 'xyz xyz xyz xyz xyz xyz' }); const n8 = await Note.save({ title: 'xyz xyz xyz xyz xyz xyz xyz xyz' }); await engine.syncTables(); let rows = await engine.search('abcd'); expect(rows[0].id).toBe(n2.id); expect(rows[1].id).toBe(n3.id); expect(rows[2].id).toBe(n1.id); rows = await engine.search('abcd efgh'); expect(rows[0].id).toBe(n1.id); // shorter note; also 'efgh' is more rare than 'abcd'. 
expect(rows[1].id).toBe(n2.id); })); // TODO: Need to update and replace jasmine.mockDate() calls with Jest // equivalent // it('should correctly weigh notes using BM25 and user_updated_time', (async () => { // await mockDate(2020, 9, 30, 50); // const noteData = [ // { // title: 'abc test2 test2', // updated_time: 1601425064756, // user_updated_time: 1601425064756, // created_time: 1601425064756, // user_created_time: 1601425064756, // }, // { // title: 'foo foo', // updated_time: 1601425064758, // user_updated_time: 1601425064758, // created_time: 1601425064758, // user_created_time: 1601425064758, // }, // { // title: 'dead beef', // updated_time: 1601425064760, // user_updated_time: 1601425064760, // created_time: 1601425064760, // user_created_time: 1601425064760, // }, // { // title: 'test2 bar', // updated_time: 1601425064761, // user_updated_time: 1601425064761, // created_time: 1601425064761, // user_created_time: 1601425064761, // }, // { // title: 'blah blah abc', // updated_time: 1601425064763, // user_updated_time: 1601425064763, // created_time: 1601425064763, // user_created_time: 1601425064763, // }, // ]; // const n0 = await Note.save(noteData[0], { autoTimestamp: false }); // const n1 = await Note.save(noteData[1], { autoTimestamp: false }); // const n2 = await Note.save(noteData[2], { autoTimestamp: false }); // const n3 = await Note.save(noteData[3], { autoTimestamp: false }); // const n4 = await Note.save(noteData[4], { autoTimestamp: false }); // restoreDate(); // await engine.syncTables(); // await mockDate(2020, 9, 30, 50); // let searchString = 'abc'; // let scores = calculateScore(searchString, noteData); // let rows = await engine.search(searchString); // expect(rows[0].weight).toEqual(scores[0]); // expect(rows[1].weight).toEqual(scores[1]); // // console.log(rows); // // console.log(scores); // searchString = 'test2'; // scores = calculateScore(searchString, noteData); // rows = await engine.search(searchString); // // console.log(rows); // // console.log(scores); // expect(rows[0].weight).toEqual(scores[0]); // expect(rows[1].weight).toEqual(scores[1]); // searchString = 'foo'; // scores = calculateScore(searchString, noteData); // rows = await engine.search(searchString); // // console.log(rows); // // console.log(scores); // expect(rows[0].weight).toEqual(scores[0]); // await restoreDate(); // })); it('should tell where the results are found', (async () => { const notes = [ await Note.save({ title: 'abcd efgh', body: 'abcd' }), await Note.save({ title: 'abcd' }), await Note.save({ title: 'efgh', body: 'abcd' }), ]; await engine.syncTables(); const testCases = [ ['abcd', ['title', 'body'], ['title'], ['body']], ['efgh', ['title'], [], ['title']], ]; for (const testCase of testCases) { const rows = await engine.search(testCase[0]); for (let i = 0; i < notes.length; i++) { const row = rows.find(row => row.id === notes[i].id); const actual = row ? 
row.fields.sort().join(',') : ''; const expected = testCase[i + 1].sort().join(','); expect(expected).toBe(actual); } } })); it('should order search results by relevance (last updated first)', (async () => { let rows; const n1 = await Note.save({ title: 'abcd' }); await sleep(0.1); const n2 = await Note.save({ title: 'abcd' }); await sleep(0.1); const n3 = await Note.save({ title: 'abcd' }); await sleep(0.1); await engine.syncTables(); rows = await engine.search('abcd'); expect(rows[0].id).toBe(n3.id); expect(rows[1].id).toBe(n2.id); expect(rows[2].id).toBe(n1.id); await Note.save({ id: n1.id, title: 'abcd' }); await engine.syncTables(); rows = await engine.search('abcd'); expect(rows[0].id).toBe(n1.id); expect(rows[1].id).toBe(n3.id); expect(rows[2].id).toBe(n2.id); })); it('should order search results by relevance (completed to-dos last)', (async () => { let rows; const n1 = await Note.save({ title: 'abcd', is_todo: 1 }); await sleep(0.1); const n2 = await Note.save({ title: 'abcd', is_todo: 1 }); await sleep(0.1); const n3 = await Note.save({ title: 'abcd', is_todo: 1 }); await sleep(0.1); await engine.syncTables(); rows = await engine.search('abcd'); expect(rows[0].id).toBe(n3.id); expect(rows[1].id).toBe(n2.id); expect(rows[2].id).toBe(n1.id); await Note.save({ id: n3.id, todo_completed: Date.now() }); await engine.syncTables(); rows = await engine.search('abcd'); expect(rows[0].id).toBe(n2.id); expect(rows[1].id).toBe(n1.id); expect(rows[2].id).toBe(n3.id); })); it('should supports various query types', (async () => { let rows; const n1 = await Note.save({ title: 'abcd efgh ijkl', body: 'aaaa bbbb' }); const n2 = await Note.save({ title: 'iiii efgh bbbb', body: 'aaaa bbbb' }); const n3 = await Note.save({ title: 'Агентство Рейтер' }); const n4 = await Note.save({ title: 'Dog' }); const n5 = await Note.save({ title: 'СООБЩИЛО' }); await engine.syncTables(); rows = await engine.search('abcd ijkl'); expect(rows.length).toBe(1); rows = await engine.search('"abcd ijkl"'); expect(rows.length).toBe(0); rows = await engine.search('"abcd efgh"'); expect(rows.length).toBe(1); rows = await engine.search('title:abcd'); expect(rows.length).toBe(1); rows = await engine.search('title:efgh'); expect(rows.length).toBe(2); rows = await engine.search('body:abcd'); expect(rows.length).toBe(0); rows = await engine.search('body:bbbb'); expect(rows.length).toBe(2); rows = await engine.search('body:bbbb iiii'); expect(rows.length).toBe(1); rows = await engine.search('Рейтер'); expect(rows.length).toBe(1); rows = await engine.search('рейтер'); expect(rows.length).toBe(1); rows = await engine.search('Dog'); expect(rows.length).toBe(1); rows = await engine.search('dog'); expect(rows.length).toBe(1); rows = await engine.search('сообщило'); expect(rows.length).toBe(1); })); it('should support queries with or without accents', (async () => { let rows; const n1 = await Note.save({ title: 'père noël' }); await engine.syncTables(); expect((await engine.search('père')).length).toBe(1); expect((await engine.search('pere')).length).toBe(1); expect((await engine.search('noe*')).length).toBe(1); expect((await engine.search('noë*')).length).toBe(1); })); it('should support queries with Chinese characters', (async () => { let rows; const n1 = await Note.save({ title: '我是法国人', body: '中文测试' }); await engine.syncTables(); expect((await engine.search('我')).length).toBe(1); expect((await engine.search('法国人')).length).toBe(1); expect((await engine.search('法国人*'))[0].fields.sort()).toEqual(['body', 'title']); // usually assume 
that keyword was matched in body expect((await engine.search('测试')).length).toBe(1); expect((await engine.search('测试'))[0].fields).toEqual(['body']); expect((await engine.search('测试*'))[0].fields).toEqual(['body']); })); it('should support queries with Japanese characters', (async () => { let rows; const n1 = await Note.save({ title: '私は日本語を話すことができません', body: 'テスト' }); await engine.syncTables(); expect((await engine.search('日本')).length).toBe(1); expect((await engine.search('できません')).length).toBe(1); expect((await engine.search('できません*'))[0].fields.sort()).toEqual(['body', 'title']); // usually assume that keyword was matched in body expect((await engine.search('テスト'))[0].fields.sort()).toEqual(['body']); })); it('should support queries with Korean characters', (async () => { let rows; const n1 = await Note.save({ title: '이것은 한국말이다' }); await engine.syncTables(); expect((await engine.search('이것은')).length).toBe(1); expect((await engine.search('말')).length).toBe(1); })); it('should support queries with Thai characters', (async () => { let rows; const n1 = await Note.save({ title: 'นี่คือคนไทย' }); await engine.syncTables(); expect((await engine.search('นี่คือค')).length).toBe(1); expect((await engine.search('ไทย')).length).toBe(1); })); it('should support field restricted queries with Chinese characters', (async () => { let rows; const n1 = await Note.save({ title: '你好', body: '我是法国人' }); await engine.syncTables(); expect((await engine.search('title:你好*')).length).toBe(1); expect((await engine.search('title:你好*'))[0].fields).toEqual(['title']); expect((await engine.search('body:法国人')).length).toBe(1); expect((await engine.search('body:法国人'))[0].fields).toEqual(['body']); expect((await engine.search('body:你好')).length).toBe(0); expect((await engine.search('title:你好 body:法国人')).length).toBe(1); expect((await engine.search('title:你好 body:法国人'))[0].fields.sort()).toEqual(['body', 'title']); expect((await engine.search('title:你好 body:bla')).length).toBe(0); expect((await engine.search('title:你好 我是')).length).toBe(1); expect((await engine.search('title:你好 我是'))[0].fields.sort()).toEqual(['body', 'title']); expect((await engine.search('title:bla 我是')).length).toBe(0); // For non-alpha char, only the first field is looked at, the following ones are ignored // expect((await engine.search('title:你好 title:hello')).length).toBe(1); })); it('should parse normal query strings', (async () => { let rows; const testCases = [ ['abcd efgh', { _: ['abcd', 'efgh'] }], ['abcd efgh', { _: ['abcd', 'efgh'] }], ['title:abcd efgh', { _: ['efgh'], title: ['abcd'] }], ['title:abcd', { title: ['abcd'] }], ['"abcd efgh"', { _: ['abcd efgh'] }], ['title:abcd title:efgh', { title: ['abcd', 'efgh'] }], ]; for (let i = 0; i < testCases.length; i++) { const t = testCases[i]; const input = t[0]; const expected = t[1]; const actual = await engine.parseQuery(input); const _Values = actual.terms._ ? actual.terms._.map(v => v.value) : undefined; const titleValues = actual.terms.title ? actual.terms.title.map(v => v.value) : undefined; const bodyValues = actual.terms.body ? 
actual.terms.body.map(v => v.value) : undefined; expect(JSON.stringify(_Values)).toBe(JSON.stringify(expected._), `Test case (_) ${i}`); expect(JSON.stringify(titleValues)).toBe(JSON.stringify(expected.title), `Test case (title) ${i}`); expect(JSON.stringify(bodyValues)).toBe(JSON.stringify(expected.body), `Test case (body) ${i}`); } })); it('should handle queries with special characters', (async () => { let rows; const testCases = [ // "-" is considered a word delimiter so it is stripped off // when indexing the notes. "did-not-match" is translated to // three word "did", "not", "match" ['did-not-match', 'did not match'], ['did-not-match', '"did-not-match"'], ['does match', 'does match'], ]; for (let i = 0; i < testCases.length; i++) { const t = testCases[i]; const content = t[0]; const query = t[1]; const n = await Note.save({ title: content }); await engine.syncTables(); rows = await engine.search(query); expect(rows.length).toBe(1); await Note.delete(n.id); } })); it('should allow using basic search', (async () => { const n1 = await Note.save({ title: '- [ ] abcd' }); const n2 = await Note.save({ title: '[ ] abcd' }); await engine.syncTables(); expect((await engine.search('"- [ ]"', { searchType: SearchEngine.SEARCH_TYPE_FTS })).length).toBe(0); expect((await engine.search('"- [ ]"', { searchType: SearchEngine.SEARCH_TYPE_BASIC })).length).toBe(1); expect((await engine.search('"[ ]"', { searchType: SearchEngine.SEARCH_TYPE_BASIC })).length).toBe(2); })); });
1
15679
Could you check the result content rather than just the number of search results, please? For example, with this test, if the search engine suddenly starts returning "latin n" for both queries, we won't know about it.
laurent22-joplin
js
@@ -47,9 +47,10 @@ const ( otherDomainID = "spiffe://otherdomain.test" - serverID = "spiffe://example.org/spire/server" - agentID = "spiffe://example.org/spire/agent/test/id" - workloadID = "spiffe://example.org/workload" + serverID = "spiffe://example.org/spire/server" + agentID = "spiffe://example.org/spire/agent/test/id" + agentlessID = "spiffe://example.org/test/id" + workloadID = "spiffe://example.org/workload" // used to cancel stream operations on test failure instead of blocking the // full go test timeout period (i.e. 10 minutes)
1
package node import ( "crypto/rand" "crypto/tls" "crypto/x509" "errors" "fmt" "math/big" "net" "sync" "testing" "time" "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/spire/pkg/common/auth" "github.com/spiffe/spire/pkg/common/bundleutil" "github.com/spiffe/spire/pkg/common/idutil" "github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server/ca" "github.com/spiffe/spire/proto/api/node" "github.com/spiffe/spire/proto/common" "github.com/spiffe/spire/proto/server/datastore" "github.com/spiffe/spire/proto/server/nodeattestor" "github.com/spiffe/spire/proto/server/noderesolver" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakedatastore" "github.com/spiffe/spire/test/fakes/fakenoderesolver" "github.com/spiffe/spire/test/fakes/fakeserverca" "github.com/spiffe/spire/test/fakes/fakeservercatalog" "github.com/spiffe/spire/test/fakes/fakeservernodeattestor" "github.com/stretchr/testify/suite" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) const ( trustDomain = "example.org" trustDomainID = "spiffe://example.org" otherDomainID = "spiffe://otherdomain.test" serverID = "spiffe://example.org/spire/server" agentID = "spiffe://example.org/spire/agent/test/id" workloadID = "spiffe://example.org/workload" // used to cancel stream operations on test failure instead of blocking the // full go test timeout period (i.e. 10 minutes) testTimeout = time.Minute ) var ( trustDomainURL, _ = idutil.ParseSpiffeID(trustDomainID, idutil.AllowAnyTrustDomain()) otherDomainBundle = &common.Bundle{ TrustDomainId: otherDomainID, } testKey, _ = pemutil.ParseECPrivateKey([]byte(` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgUdF3LNDNZWKYQHFj UIs5TNt4LXDawuZFFj2J7D1T9mehRANCAASEhjkDbIFdNaZ9EneJaSXKfLiBDqt2 l37cUGNqRvIYDhSH/IJycqxLTtvHoYMHLSV9N5UHIFgPJ/30RCBQiH3t -----END PRIVATE KEY----- `)) ) func TestHandler(t *testing.T) { suite.Run(t, new(HandlerSuite)) } type HandlerSuite struct { suite.Suite server *grpc.Server logHook *test.Hook limiter *fakeLimiter handler *Handler unattestedClient node.NodeClient attestedClient node.NodeClient ds *fakedatastore.DataStore catalog *fakeservercatalog.Catalog clock *clock.Mock bundle *common.Bundle agentSVID []*x509.Certificate serverCA *fakeserverca.ServerCA } func (s *HandlerSuite) SetupTest() { s.clock = clock.NewMock(s.T()) log, logHook := test.NewNullLogger() s.logHook = logHook s.limiter = new(fakeLimiter) s.ds = fakedatastore.New() s.catalog = fakeservercatalog.New() s.catalog.SetDataStores(s.ds) s.serverCA = fakeserverca.New(s.T(), trustDomain, &fakeserverca.Options{ Clock: s.clock, }) s.bundle = bundleutil.BundleProtoFromRootCAs(trustDomainID, s.serverCA.Bundle()) s.createBundle(s.bundle) // Create server and agent SVIDs for TLS communication serverSVID := s.makeSVID(serverID) s.agentSVID = s.makeSVID(agentID) handler := NewHandler(HandlerConfig{ Log: log, Metrics: telemetry.Blackhole{}, Catalog: s.catalog, ServerCA: s.serverCA, TrustDomain: *trustDomainURL, Clock: s.clock, }) handler.limiter = s.limiter // Streaming methods and auth are easier to test from the client point of view. // TODO: share the setup done by the "endpoints" code so these don't go out // of sync. 
rootCAs := x509.NewCertPool() for _, bundleCert := range s.serverCA.Bundle() { rootCAs.AddCert(bundleCert) } var tlsCertificate [][]byte for _, serverCert := range serverSVID { tlsCertificate = append(tlsCertificate, serverCert.Raw) } server := grpc.NewServer( grpc.UnaryInterceptor(auth.UnaryAuthorizeCall), grpc.StreamInterceptor(auth.StreamAuthorizeCall), grpc.Creds(credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{ { Certificate: tlsCertificate, PrivateKey: testKey, }, }, ClientCAs: rootCAs, ClientAuth: tls.VerifyClientCertIfGiven, }))) node.RegisterNodeServer(server, handler) listener, err := net.Listen("tcp", "localhost:0") s.Require().NoError(err) go server.Serve(listener) unattestedConn, err := grpc.Dial(listener.Addr().String(), grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{ // skip verification of the server certificate. otherwise we'd // need SANs to allow the connection over localhost. this isn't // important for these tests. InsecureSkipVerify: true, }))) s.Require().NoError(err) attestedConn, err := grpc.Dial(listener.Addr().String(), grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{ // skip verification of the server certificate. otherwise we'd // need SANs to allow the connection over localhost. this isn't // important for these tests. InsecureSkipVerify: true, GetClientCertificate: s.getClientCertificate, }))) s.Require().NoError(err) s.handler = handler s.server = server s.unattestedClient = node.NewNodeClient(unattestedConn) s.attestedClient = node.NewNodeClient(attestedConn) } func (s *HandlerSuite) TearDownTest() { s.server.Stop() } func (s *HandlerSuite) TestAttestLimits() { s.limiter.setNextError(errors.New("limit exceeded")) s.requireAttestFailure(&node.AttestRequest{}, codes.ResourceExhausted, "limit exceeded") // Attest always adds 1 count s.Equal(1, s.limiter.callsFor(AttestMsg)) } func (s *HandlerSuite) TestAttestWithNoAttestationData() { s.requireAttestFailure(&node.AttestRequest{}, codes.InvalidArgument, "request missing attestation data") } func (s *HandlerSuite) TestAttestWithNoAttestationDataType() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: &common.AttestationData{}, }, codes.InvalidArgument, "request missing attestation data type") } func (s *HandlerSuite) TestAttestWithNoCSR() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), }, codes.InvalidArgument, "request missing CSR") } func (s *HandlerSuite) TestAttestWithMalformedCSR() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: []byte("MALFORMED"), }, codes.InvalidArgument, "request CSR is invalid: failed to parse CSR") } func (s *HandlerSuite) TestAttestWithCSRMissingURISAN() { csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ SignatureAlgorithm: x509.ECDSAWithSHA256, }, testKey) s.Require().NoError(err) s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: csr, }, codes.InvalidArgument, "request CSR is invalid: the CSR must have exactly one URI SAN") } func (s *HandlerSuite) TestAttestWithAgentIDFromWrongTrustDomainInCSR() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: s.makeCSR("spiffe://otherdomain.test/spire/agent/test/id"), }, codes.InvalidArgument, `request CSR is invalid: "spiffe://otherdomain.test/spire/agent/test/id" does not belong to trust domain`) } func (s *HandlerSuite) TestAttestWithNonAgentIDInCSR() { 
s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: s.makeCSR("spiffe://example.org"), }, codes.InvalidArgument, `request CSR is invalid: "spiffe://example.org" is not a valid agent SPIFFE ID`) } func (s *HandlerSuite) TestAttestWhenAgentAlreadyAttested() { s.addAttestor("test", fakeservernodeattestor.Config{}) s.createAttestedNode(&common.AttestedNode{ SpiffeId: "spiffe://example.org/spire/agent/test/id", }) s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/id"), }, codes.Unknown, "reattestation is not permitted") } func (s *HandlerSuite) TestAttestWithUnknownAttestor() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", ""), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/id"), }, codes.Unknown, `could not find node attestor type "test"`) } func (s *HandlerSuite) TestAttestWithMismatchedAgentID() { s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, }) s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/other"), }, codes.Unknown, "attestor returned unexpected response") s.assertLastLogMessage("attested SPIFFE ID does not match CSR") } func (s *HandlerSuite) TestAttestSuccess() { // Create a federated bundle to return with the SVID update s.createBundle(otherDomainBundle) // Create a registration entry to return with the SVID update entry := s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, FederatesWith: []string{otherDomainID}, }) s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, }) upd := s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR(agentID), }) // assert update contents s.Equal([]*common.RegistrationEntry{entry}, upd.RegistrationEntries) s.assertBundlesInUpdate(upd, otherDomainBundle) svidChain := s.assertSVIDsInUpdate(upd, agentID)[0] // Assert an attested node entry has been created attestedNode := s.fetchAttestedNode(agentID) s.Require().NotNil(attestedNode) s.Equal("test", attestedNode.AttestationDataType) s.Equal(agentID, attestedNode.SpiffeId) s.Equal(svidChain[0].SerialNumber.String(), attestedNode.CertSerialNumber) s.WithinDuration(svidChain[0].NotAfter, time.Unix(attestedNode.CertNotAfter, 0), 0) // No selectors were returned and no resolvers were available, so the node // selectors should be empty. 
s.Empty(s.getNodeSelectors(agentID)) } func (s *HandlerSuite) TestAttestReattestation() { // Make sure reattestation is allowed by the attestor s.addAttestor("test", fakeservernodeattestor.Config{ CanReattest: true, Data: map[string]string{"data": "id"}, }) // Create an attested node entry s.createAttestedNode(&common.AttestedNode{ SpiffeId: agentID, }) // Reattest s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR(agentID), }) // Assert the attested node entry has been updated attestedNode := s.fetchAttestedNode(agentID) s.Require().NotNil(attestedNode) s.Equal(agentID, attestedNode.SpiffeId) s.NotEmpty(attestedNode.CertSerialNumber) s.NotEqual(0, attestedNode.CertNotAfter) // Attestation data type is NOT updatable s.Equal("", attestedNode.AttestationDataType) } func (s *HandlerSuite) TestAttestChallengeResponseSuccess() { // Make sure reattestation is allowed by the attestor s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, Challenges: map[string][]string{ "id": {"one", "two", "three"}, }, }) // Attest via challenge response s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR(agentID), }, "one", "two", "three") } func (s *HandlerSuite) TestAttestWithUnknownJoinToken() { s.requireAttestFailure(&node.AttestRequest{ AttestationData: &common.AttestationData{Type: "join_token", Data: []byte("TOKEN")}, Csr: s.makeCSR("spiffe://example.org/spire/agent/join_token/TOKEN"), }, codes.Unknown, "failed to attest: no such token") } func (s *HandlerSuite) TestAttestWithAlreadyUsedJoinToken() { s.createAttestedNode(&common.AttestedNode{ SpiffeId: "spiffe://example.org/spire/agent/join_token/TOKEN", }) s.requireAttestFailure(&node.AttestRequest{ AttestationData: &common.AttestationData{Type: "join_token", Data: []byte("TOKEN")}, Csr: s.makeCSR("spiffe://example.org/spire/agent/join_token/TOKEN"), }, codes.Unknown, "failed to attest: join token has already been used") } func (s *HandlerSuite) TestAttestWithExpiredJoinToken() { s.createJoinToken("TOKEN", s.clock.Now().Add(-time.Second)) s.requireAttestFailure(&node.AttestRequest{ AttestationData: makeAttestationData("join_token", "TOKEN"), Csr: s.makeCSR("spiffe://example.org/spire/agent/join_token/TOKEN"), }, codes.Unknown, "failed to attest: join token expired") // join token should be removed from the datastore even if attestation failed s.Nil(s.fetchJoinToken("TOKEN")) } func (s *HandlerSuite) TestAttestWithValidJoinToken() { s.createJoinToken("TOKEN", s.clock.Now().Add(time.Second)) s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("join_token", "TOKEN"), Csr: s.makeCSR("spiffe://example.org/spire/agent/join_token/TOKEN"), }) // join token should be removed for successful attestation s.Nil(s.fetchJoinToken("TOKEN")) } func (s *HandlerSuite) TestAttestWithOnlyAttestorSelectors() { // configure the attestor to return selectors s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, Selectors: map[string][]string{ "id": {"test-attestor-value"}, }, }) s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/id"), }) s.Equal([]*common.Selector{ {Type: "test", Value: "test-attestor-value"}, }, s.getNodeSelectors("spiffe://example.org/spire/agent/test/id")) } func (s *HandlerSuite) TestAttestWithOnlyResolverSelectors() { // configure the 
attestor to return selectors s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, }) // this resolver does not match the attestor type and should be ignored s.addResolver("other", fakenoderesolver.Config{ Selectors: map[string][]string{ "spiffe://example.org/spire/agent/test/id": {"other-resolver-value"}, }, }) // this resolver matches the attestor type and should be used s.addResolver("test", fakenoderesolver.Config{ Selectors: map[string][]string{ "spiffe://example.org/spire/agent/test/id": {"test-resolver-value"}, }, }) s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/id"), }) s.Equal([]*common.Selector{ {Type: "test", Value: "test-resolver-value"}, }, s.getNodeSelectors("spiffe://example.org/spire/agent/test/id")) } func (s *HandlerSuite) TestAttestWithBothAttestorAndResolverSelectors() { // configure the attestor to return selectors s.addAttestor("test", fakeservernodeattestor.Config{ Data: map[string]string{"data": "id"}, Selectors: map[string][]string{ "id": {"test-attestor-value"}, }, }) s.addResolver("test", fakenoderesolver.Config{ Selectors: map[string][]string{ "spiffe://example.org/spire/agent/test/id": {"test-resolver-value"}, }, }) s.requireAttestSuccess(&node.AttestRequest{ AttestationData: makeAttestationData("test", "data"), Csr: s.makeCSR("spiffe://example.org/spire/agent/test/id"), }) s.Equal([]*common.Selector{ {Type: "test", Value: "test-resolver-value"}, {Type: "test", Value: "test-attestor-value"}, }, s.getNodeSelectors("spiffe://example.org/spire/agent/test/id")) } func (s *HandlerSuite) TestFetchX509SVIDWithUnattestedAgent() { s.requireFetchX509SVIDAuthFailure() } func (s *HandlerSuite) TestFetchX509SVIDLimits() { s.attestAgent() // Test with no CSRs (no count should be added) s.limiter.setNextError(errors.New("limit exceeded")) s.requireFetchX509SVIDFailure(&node.FetchX509SVIDRequest{}, codes.ResourceExhausted, "limit exceeded") s.Equal(0, s.limiter.callsFor(CSRMsg)) // Test with 5 CSRs (5 count should be added) s.limiter.setNextError(errors.New("limit exceeded")) s.requireFetchX509SVIDFailure(&node.FetchX509SVIDRequest{Csrs: make([][]byte, 5)}, codes.ResourceExhausted, "limit exceeded") s.Equal(5, s.limiter.callsFor(CSRMsg)) } func (s *HandlerSuite) TestFetchX509SVIDWithNoRegistrationEntries() { s.attestAgent() upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{}) s.assertBundlesInUpdate(upd) } func (s *HandlerSuite) TestFetchX509SVIDWithNoCSRs() { s.attestAgent() s.createBundle(otherDomainBundle) entry := s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, FederatesWith: []string{otherDomainID}, }) upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{}) s.Equal([]*common.RegistrationEntry{entry}, upd.RegistrationEntries) s.assertBundlesInUpdate(upd, otherDomainBundle) s.Empty(upd.Svids) } func (s *HandlerSuite) TestFetchX509SVIDWithMalformedCSR() { s.attestAgent() s.requireFetchX509SVIDFailure(&node.FetchX509SVIDRequest{ Csrs: [][]byte{[]byte("MALFORMED")}, }, codes.Unknown, "failed to sign CSRs") s.assertLastLogMessageContains("failed to parse CSR") } func (s *HandlerSuite) TestFetchX509SVIDWithUnauthorizedCSR() { s.attestAgent() s.requireFetchX509SVIDFailure(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(workloadID), }, codes.Unknown, "failed to sign CSRs") s.assertLastLogMessageContains(`not entitled to sign CSR for "spiffe://example.org/workload"`) } func 
(s *HandlerSuite) TestFetchX509SVIDWithAgentCSR() { s.attestAgent() upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(agentID), }) s.Empty(upd.RegistrationEntries) s.assertBundlesInUpdate(upd) svidChain := s.assertSVIDsInUpdate(upd, agentID)[0] // Assert an attested node entry has been updated attestedNode := s.fetchAttestedNode(agentID) s.Require().NotNil(attestedNode) s.Equal("test", attestedNode.AttestationDataType) s.Equal(agentID, attestedNode.SpiffeId) s.Equal(svidChain[0].SerialNumber.String(), attestedNode.CertSerialNumber) s.WithinDuration(svidChain[0].NotAfter, time.Unix(attestedNode.CertNotAfter, 0), 0) } func (s *HandlerSuite) TestFetchX509SVIDWithStaleAgent() { // make a copy of the agent SVID and tweak the serial number // before "attesting" agentSVID := *s.agentSVID[0] agentSVID.SerialNumber = big.NewInt(9999999999) s.Require().NoError(createAttestationEntry(context.Background(), s.ds, &agentSVID, "test")) s.requireFetchX509SVIDAuthFailure() } func (s *HandlerSuite) TestFetchX509SVIDWithUnauthorizedDownstreamCSR() { s.attestAgent() s.requireFetchX509SVIDFailure(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs("spiffe://example.org"), }, codes.Unknown, "failed to sign CSRs") s.assertLastLogMessageContains(`"spiffe://example.org/spire/agent/test/id" is not an authorized downstream workload`) } func (s *HandlerSuite) TestFetchX509SVIDWithDownstreamCSR() { s.attestAgent() s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: trustDomainID, SpiffeId: agentID, Downstream: true, DnsNames: []string{"ca-dns1"}, }) upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(trustDomainID), }) // Downstream responses don't contain the downstream registration entry // since downstream entries aren't intended for workloads. 
s.Empty(upd.RegistrationEntries) s.assertBundlesInUpdate(upd) chains := s.assertSVIDsInUpdate(upd, trustDomainID) for _, chain := range chains { // CA certs should not have DNS names associated with them s.Empty(chain[0].DNSNames) // CA certs should not CN based on DNS names s.Empty(chain[0].Subject.CommonName) } } func (s *HandlerSuite) TestFetchX509SVIDWithWorkloadCSR() { s.attestAgent() entry := s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, }) upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(workloadID), }) s.Equal([]*common.RegistrationEntry{entry}, upd.RegistrationEntries) s.assertBundlesInUpdate(upd) s.assertSVIDsInUpdate(upd, workloadID) } func (s *HandlerSuite) TestFetchX509SVIDWithSingleDNS() { dnsList := []string{"somehost1"} s.attestAgent() entry := s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, DnsNames: dnsList, }) upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(workloadID), }) s.Equal([]*common.RegistrationEntry{entry}, upd.RegistrationEntries) s.assertBundlesInUpdate(upd) chains := s.assertSVIDsInUpdate(upd, workloadID) s.Equal(dnsList, chains[0][0].DNSNames) s.Equal("somehost1", chains[0][0].Subject.CommonName) } func (s *HandlerSuite) TestFetchX509SVIDWithMultipleDNS() { dnsList := []string{"somehost1", "somehost2", "somehost3"} s.attestAgent() entry := s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, DnsNames: dnsList, }) upd := s.requireFetchX509SVIDSuccess(&node.FetchX509SVIDRequest{ Csrs: s.makeCSRs(workloadID), }) s.Equal([]*common.RegistrationEntry{entry}, upd.RegistrationEntries) s.assertBundlesInUpdate(upd) chains := s.assertSVIDsInUpdate(upd, workloadID) s.Equal(dnsList, chains[0][0].DNSNames) s.Equal("somehost1", chains[0][0].Subject.CommonName) } func (s *HandlerSuite) TestFetchJWTSVIDWithUnattestedAgent() { s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{}, codes.PermissionDenied, "agent is not attested or no longer valid") } func (s *HandlerSuite) TestFetchJWTSVIDLimits() { s.attestAgent() s.limiter.setNextError(errors.New("limit exceeded")) s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{}, codes.ResourceExhausted, "limit exceeded") // FetchJWTSVID always adds 1 count s.Equal(1, s.limiter.callsFor(JSRMsg)) } func (s *HandlerSuite) TestFetchJWTSVIDWithMissingJSR() { s.attestAgent() s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{}, codes.InvalidArgument, "request missing JSR") } func (s *HandlerSuite) TestFetchJWTSVIDWithMissingSpiffeID() { s.attestAgent() s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{ Jsr: &node.JSR{ Audience: []string{"audience"}, }, }, codes.InvalidArgument, "request missing SPIFFE ID") } func (s *HandlerSuite) TestFetchJWTSVIDWithMissingAudience() { s.attestAgent() s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{ Jsr: &node.JSR{ SpiffeId: workloadID, }, }, codes.InvalidArgument, "request missing audience") } func (s *HandlerSuite) TestFetchJWTSVIDWithAgentID() { s.attestAgent() s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{ Jsr: &node.JSR{ SpiffeId: agentID, Audience: []string{"audience"}, }, }, codes.Unknown, `caller "spiffe://example.org/spire/agent/test/id" is not authorized for "spiffe://example.org/spire/agent/test/id"`) } func (s *HandlerSuite) TestFetchJWTSVIDWithUnauthorizedSPIFFEID() { s.attestAgent() s.requireFetchJWTSVIDFailure(&node.FetchJWTSVIDRequest{ Jsr: &node.JSR{ SpiffeId: 
workloadID, Audience: []string{"audience"}, }, }, codes.Unknown, `caller "spiffe://example.org/spire/agent/test/id" is not authorized for "spiffe://example.org/workload"`) } func (s *HandlerSuite) TestFetchJWTSVIDWithWorkloadID() { s.attestAgent() s.createRegistrationEntry(&common.RegistrationEntry{ ParentId: agentID, SpiffeId: workloadID, }) svid := s.requireFetchJWTSVIDSuccess(&node.FetchJWTSVIDRequest{ Jsr: &node.JSR{ SpiffeId: workloadID, Audience: []string{"audience"}, }, }) s.NotEmpty(svid.Token) s.Equal(s.clock.Now().Unix(), svid.IssuedAt) s.Equal(s.clock.Now().Add(s.serverCA.DefaultTTL()).Unix(), svid.ExpiresAt) } func (s *HandlerSuite) TestAuthorizeCallUnhandledMethod() { ctx, err := s.handler.AuthorizeCall(context.Background(), "/spire.api.node.Node/Foo") s.Require().Error(err) s.Equal(codes.PermissionDenied, status.Code(err)) s.Equal(`authorization not implemented for method "/spire.api.node.Node/Foo"`, status.Convert(err).Message()) s.Require().Nil(ctx) } func (s *HandlerSuite) TestAuthorizeCallForAlwaysAuthorizedCalls() { // Attest() is always authorized (context is not embellished) ctx, err := s.handler.AuthorizeCall(context.Background(), "/spire.api.node.Node/Attest") s.Require().NoError(err) s.Require().Equal(context.Background(), ctx) } func (s *HandlerSuite) TestAuthorizeCallForFetchX509SVID() { s.testAuthorizeCallRequiringAgentSVID("FetchX509SVID") } func (s *HandlerSuite) TestAuthorizeCallForFetchJWTSVID() { s.testAuthorizeCallRequiringAgentSVID("FetchJWTSVID") } func (s *HandlerSuite) testAuthorizeCallRequiringAgentSVID(method string) { peerCert := s.agentSVID[0] peerCtx := withPeerCert(context.Background(), s.agentSVID) fullMethod := fmt.Sprintf("/spire.api.node.Node/%s", method) // no peer context ctx, err := s.handler.AuthorizeCall(context.Background(), fullMethod) s.Require().Error(err) s.Equal("agent SVID is required for this request", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.Require().Nil(ctx) s.assertLastLogMessage("no peer information") // non-TLS peer context ctx, err = s.handler.AuthorizeCall(peer.NewContext(context.Background(), &peer.Peer{}), fullMethod) s.Require().Error(err) s.Equal("agent SVID is required for this request", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.Require().Nil(ctx) s.assertLastLogMessage("no TLS auth info for peer") // no verified chains on TLS peer context ctx, err = s.handler.AuthorizeCall(peer.NewContext(context.Background(), &peer.Peer{ AuthInfo: credentials.TLSInfo{}, }), fullMethod) s.Require().Error(err) s.Equal("agent SVID is required for this request", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.Require().Nil(ctx) s.assertLastLogMessage("no verified client certificate presented by peer") // no attested certificate with matching SPIFFE ID ctx, err = s.handler.AuthorizeCall(peerCtx, fullMethod) s.Require().Error(err) s.Equal("agent is not attested or no longer valid", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.assertLastLogMessage(`agent "spiffe://example.org/spire/agent/test/id" is not attested`) s.Require().Nil(ctx) // good certificate s.attestAgent() ctx, err = s.handler.AuthorizeCall(peerCtx, fullMethod) s.Require().NoError(err) actualCert, ok := getPeerCertificate(ctx) s.Require().True(ok, "context has peer certificate") s.Require().True(peerCert.Equal(actualCert), "peer certificate matches") // expired certificate s.clock.Set(peerCert.NotAfter.Add(time.Second)) 
ctx, err = s.handler.AuthorizeCall(peerCtx, fullMethod) s.Require().Error(err) s.Equal("agent is not attested or no longer valid", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.assertLastLogMessage(`agent "spiffe://example.org/spire/agent/test/id" SVID has expired`) s.Require().Nil(ctx) s.clock.Set(peerCert.NotAfter) // serial number does not match s.updateAttestedNode(agentID, "SERIAL NUMBER", peerCert.NotAfter) ctx, err = s.handler.AuthorizeCall(peerCtx, fullMethod) s.Require().Error(err) s.Equal("agent is not attested or no longer valid", status.Convert(err).Message()) s.Equal(codes.PermissionDenied, status.Code(err)) s.Require().Nil(ctx) s.assertLastLogMessage(`agent "spiffe://example.org/spire/agent/test/id" SVID does not match expected serial number`) } func (s *HandlerSuite) addAttestor(name string, config fakeservernodeattestor.Config) { attestor := nodeattestor.NewBuiltIn(fakeservernodeattestor.New(name, config)) s.catalog.AddNodeAttestorNamed(name, attestor) } func (s *HandlerSuite) addResolver(name string, config fakenoderesolver.Config) { resolver := noderesolver.NewBuiltIn(fakenoderesolver.New(name, config)) s.catalog.AddNodeResolverNamed(name, resolver) } func (s *HandlerSuite) createBundle(bundle *common.Bundle) { _, err := s.ds.CreateBundle(context.Background(), &datastore.CreateBundleRequest{ Bundle: bundle, }) s.Require().NoError(err) } func (s *HandlerSuite) createJoinToken(token string, expiresAt time.Time) { _, err := s.ds.CreateJoinToken(context.Background(), &datastore.CreateJoinTokenRequest{ JoinToken: &datastore.JoinToken{ Token: token, Expiry: expiresAt.Unix(), }, }) s.Require().NoError(err) } func (s *HandlerSuite) fetchJoinToken(token string) *datastore.JoinToken { resp, err := s.ds.FetchJoinToken(context.Background(), &datastore.FetchJoinTokenRequest{ Token: token, }) s.Require().NoError(err) return resp.JoinToken } func (s *HandlerSuite) attestAgent() { s.Require().NoError(createAttestationEntry(context.Background(), s.ds, s.agentSVID[0], "test")) } func (s *HandlerSuite) createAttestedNode(n *common.AttestedNode) { _, err := s.ds.CreateAttestedNode(context.Background(), &datastore.CreateAttestedNodeRequest{ Node: n, }) s.Require().NoError(err) } func (s *HandlerSuite) updateAttestedNode(spiffeID, serialNumber string, notAfter time.Time) { _, err := s.ds.UpdateAttestedNode(context.Background(), &datastore.UpdateAttestedNodeRequest{ SpiffeId: spiffeID, CertSerialNumber: serialNumber, CertNotAfter: notAfter.Unix(), }) s.Require().NoError(err) } func (s *HandlerSuite) fetchAttestedNode(spiffeID string) *common.AttestedNode { resp, err := s.ds.FetchAttestedNode(context.Background(), &datastore.FetchAttestedNodeRequest{ SpiffeId: spiffeID, }) s.Require().NoError(err) s.Require().NotNil(resp) return resp.Node } func (s *HandlerSuite) getNodeSelectors(spiffeID string) []*common.Selector { resp, err := s.ds.GetNodeSelectors(context.Background(), &datastore.GetNodeSelectorsRequest{ SpiffeId: spiffeID, }) s.Require().NoError(err) s.Require().NotNil(resp) s.Require().NotNil(resp.Selectors) s.Require().Equal(spiffeID, resp.Selectors.SpiffeId) return resp.Selectors.Selectors } func (s *HandlerSuite) createRegistrationEntry(entry *common.RegistrationEntry) *common.RegistrationEntry { resp, err := s.ds.CreateRegistrationEntry(context.Background(), &datastore.CreateRegistrationEntryRequest{ Entry: entry, }) s.Require().NoError(err) s.Require().NotNil(resp.Entry) return resp.Entry } func (s *HandlerSuite) requireAttestSuccess(req 
*node.AttestRequest, responses ...string) *node.X509SVIDUpdate { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() stream, err := s.unattestedClient.Attest(ctx) s.Require().NoError(err) s.Require().NoError(stream.Send(req)) for _, response := range responses { resp, err := stream.Recv() s.Require().NoError(err) s.Require().NotNil(resp) s.Require().NotEmpty(resp.Challenge, "expected a challenge") s.Require().Nil(resp.SvidUpdate, "expected a challenge, which shouldn't contain an update") s.Require().NoError(stream.Send(&node.AttestRequest{ Response: []byte(response), })) } stream.CloseSend() resp, err := stream.Recv() s.Require().NoError(err) s.Require().NotNil(resp) s.Require().NotNil(resp.SvidUpdate) return resp.SvidUpdate } func (s *HandlerSuite) requireAttestFailure(req *node.AttestRequest, errorCode codes.Code, errorContains string) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() stream, err := s.unattestedClient.Attest(ctx) s.Require().NoError(err) s.Require().NoError(stream.Send(req)) stream.CloseSend() resp, err := stream.Recv() s.requireErrorContains(err, errorContains) s.Require().Equal(errorCode, status.Code(err)) s.Require().Nil(resp) } func (s *HandlerSuite) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { c := &tls.Certificate{ PrivateKey: testKey, } for _, cert := range s.agentSVID { c.Certificate = append(c.Certificate, cert.Raw) } return c, nil } func (s *HandlerSuite) requireFetchX509SVIDSuccess(req *node.FetchX509SVIDRequest) *node.X509SVIDUpdate { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() stream, err := s.attestedClient.FetchX509SVID(ctx) s.Require().NoError(err) s.Require().NoError(stream.Send(req)) stream.CloseSend() resp, err := stream.Recv() s.Require().NoError(err) s.Require().NotNil(resp) s.Require().NotNil(resp.SvidUpdate) return resp.SvidUpdate } func (s *HandlerSuite) requireFetchX509SVIDFailure(req *node.FetchX509SVIDRequest, errorCode codes.Code, errorContains string) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() stream, err := s.attestedClient.FetchX509SVID(ctx) s.Require().NoError(err) s.Require().NoError(stream.Send(req)) stream.CloseSend() resp, err := stream.Recv() s.Require().Contains(errorContains, status.Convert(err).Message()) s.Require().Equal(errorCode, status.Code(err)) s.Require().Nil(resp) } func (s *HandlerSuite) requireFetchX509SVIDAuthFailure() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() stream, err := s.attestedClient.FetchX509SVID(ctx) s.Require().NoError(err) // the auth failure will come back on the Recv(). we shouldn't have to send // on the stream to get this to happen. 
resp, err := stream.Recv() s.Require().Contains("agent is not attested or no longer valid", status.Convert(err).Message()) s.Require().Equal(codes.PermissionDenied, status.Code(err)) s.Require().Nil(resp) } func (s *HandlerSuite) requireFetchJWTSVIDSuccess(req *node.FetchJWTSVIDRequest) *node.JWTSVID { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() resp, err := s.attestedClient.FetchJWTSVID(ctx, req) s.Require().NoError(err) s.Require().NotNil(resp) s.Require().NotNil(resp.Svid) return resp.Svid } func (s *HandlerSuite) requireFetchJWTSVIDFailure(req *node.FetchJWTSVIDRequest, errorCode codes.Code, errorContains string) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() resp, err := s.attestedClient.FetchJWTSVID(ctx, req) s.Require().Contains(errorContains, status.Convert(err).Message()) s.Require().Equal(errorCode, status.Code(err)) s.Require().Nil(resp) } func (s *HandlerSuite) assertBundlesInUpdate(upd *node.X509SVIDUpdate, federatedBundles ...*common.Bundle) { // DEPRECATEDBundle field should contain the trust domain bundle certs s.Equal(upd.DEPRECATEDBundle, s.bundle.RootCas[0].DerBytes) // DEPRECATEDBundles should have an entry for the trust domain and each // federated domain s.Len(upd.DEPRECATEDBundles, 1+len(federatedBundles)) s.True(proto.Equal(upd.DEPRECATEDBundles[trustDomainID], &node.Bundle{ Id: s.bundle.TrustDomainId, CaCerts: s.bundle.RootCas[0].DerBytes, })) for _, federatedBundle := range federatedBundles { s.True(proto.Equal( upd.DEPRECATEDBundles[federatedBundle.TrustDomainId], makeDeprecatedBundle(federatedBundle), )) } // Bundles should have an entry for the trust domain and each federated domain s.Len(upd.Bundles, 1+len(federatedBundles)) s.True(proto.Equal(upd.Bundles[trustDomainID], s.bundle)) for _, federatedBundle := range federatedBundles { s.True(proto.Equal( upd.Bundles[federatedBundle.TrustDomainId], federatedBundle, )) } } func (s *HandlerSuite) assertSVIDsInUpdate(upd *node.X509SVIDUpdate, spiffeIDs ...string) [][]*x509.Certificate { s.Len(upd.Svids, len(spiffeIDs), "number of SVIDs in update") var svidChains [][]*x509.Certificate for _, spiffeID := range spiffeIDs { svidEntry := upd.Svids[spiffeID] if !s.NotNil(svidEntry, "svid entry") { continue } // Assert SVID chain is well formed svidChain, err := x509.ParseCertificates(svidEntry.CertChain) if !s.NoError(err, "parsing svid cert chain") { continue } s.Len(svidChain, 1) // DEPRECATEDCert should match first certificate in SVID chain deprecatedCert, err := x509.ParseCertificate(svidEntry.DEPRECATEDCert) if s.NoError(err, "parsing deprecated cert") { s.True(svidChain[0].Equal(deprecatedCert)) } // ExpiresAt should match NotAfter in first certificate in SVID chain s.WithinDuration(svidChain[0].NotAfter, time.Unix(svidEntry.ExpiresAt, 0), 0) svidChains = append(svidChains, svidChain) } s.Require().Len(svidChains, len(spiffeIDs), "# of good svids in update") return svidChains } func (s *HandlerSuite) requireErrorContains(err error, contains string) { s.Require().Error(err) s.Require().Contains(err.Error(), contains) } func (s *HandlerSuite) assertLastLogMessage(message string) { entry := s.logHook.LastEntry() if s.NotNil(entry) { s.Equal(message, entry.Message) } } func (s *HandlerSuite) assertLastLogMessageContains(contains string) { entry := s.logHook.LastEntry() if s.NotNil(entry) { s.Contains(entry.Message, contains) } } func (s *HandlerSuite) makeSVID(spiffeID string) []*x509.Certificate { svid, err := 
s.serverCA.SignX509SVID(context.Background(), s.makeCSR(spiffeID), ca.X509Params{}) s.Require().NoError(err) return svid } func (s *HandlerSuite) makeCSR(spiffeID string) []byte { csr, err := util.MakeCSR(testKey, spiffeID) s.Require().NoError(err) return csr } func (s *HandlerSuite) makeCSRs(spiffeIDs ...string) [][]byte { var csrs [][]byte for _, spiffeID := range spiffeIDs { csrs = append(csrs, s.makeCSR(spiffeID)) } return csrs } type fakeLimiter struct { callsForAttest int callsForCSR int callsForJSR int nextError error mtx sync.Mutex } func (fl *fakeLimiter) Limit(_ context.Context, msgType, count int) error { fl.mtx.Lock() defer fl.mtx.Unlock() switch msgType { case AttestMsg: fl.callsForAttest += count case CSRMsg: fl.callsForCSR += count case JSRMsg: fl.callsForJSR += count } if fl.nextError != nil { err := fl.nextError fl.nextError = nil return err } return nil } func (fl *fakeLimiter) setNextError(err error) { fl.mtx.Lock() defer fl.mtx.Unlock() fl.nextError = err } func (fl *fakeLimiter) callsFor(msgType int) int { fl.mtx.Lock() defer fl.mtx.Unlock() switch msgType { case AttestMsg: return fl.callsForAttest case CSRMsg: return fl.callsForCSR case JSRMsg: return fl.callsForJSR } return 0 } func makeAttestationData(typ, data string) *common.AttestationData { return &common.AttestationData{Type: typ, Data: []byte(data)} } func withPeerCert(ctx context.Context, certChain []*x509.Certificate) context.Context { addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:12345") return peer.NewContext(ctx, &peer.Peer{ Addr: addr, AuthInfo: credentials.TLSInfo{ State: tls.ConnectionState{ VerifiedChains: [][]*x509.Certificate{certChain}, }, }, }) }
1
10922
nit: I think that `workloadID` should suffice for this test... that's what we'd be issuing anyway
spiffe-spire
go
@@ -1,4 +1,4 @@ -require 'spec_helper' +require 'rails_helper' feature 'Admin manages mentors' do scenario 'creating a new mentor' do
1
require 'spec_helper' feature 'Admin manages mentors' do scenario 'creating a new mentor' do user = create(:admin) visit admin_path(as: user) click_link 'Mentors' click_link 'Add new' select(user.name, from: 'User') click_button 'Save' expect(page).to have_content('Mentor successfully created') end end
1
10609
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
@@ -204,11 +204,7 @@ public class ClassTypeResolver extends JavaParserVisitorAdapter { if (className != null) { populateClassName(node, className); } - } catch (ClassNotFoundException e) { - if (LOG.isLoggable(Level.FINE)) { - LOG.log(Level.FINE, "Could not find class " + className + ", due to: " + e); - } - } catch (NoClassDefFoundError e) { + } catch (ClassNotFoundException | NoClassDefFoundError e) { if (LOG.isLoggable(Level.FINE)) { LOG.log(Level.FINE, "Could not find class " + className + ", due to: " + e); }
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.typeresolution; import static net.sourceforge.pmd.lang.java.typeresolution.MethodTypeResolution.getApplicableMethods; import static net.sourceforge.pmd.lang.java.typeresolution.MethodTypeResolution.getBestMethodReturnType; import static net.sourceforge.pmd.lang.java.typeresolution.MethodTypeResolution.getMethodExplicitTypeArugments; import static net.sourceforge.pmd.lang.java.typeresolution.MethodTypeResolution.isMemberVisibleFromClass; import static net.sourceforge.pmd.lang.java.typeresolution.typedefinition.TypeDefinitionType.LOWER_WILDCARD; import static net.sourceforge.pmd.lang.java.typeresolution.typedefinition.TypeDefinitionType.UPPER_BOUND; import static net.sourceforge.pmd.lang.java.typeresolution.typedefinition.TypeDefinitionType.UPPER_WILDCARD; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.lang.java.ast.ASTAdditiveExpression; import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression; import net.sourceforge.pmd.lang.java.ast.ASTAndExpression; import net.sourceforge.pmd.lang.java.ast.ASTAnnotationTypeDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTArgumentList; import net.sourceforge.pmd.lang.java.ast.ASTArguments; import net.sourceforge.pmd.lang.java.ast.ASTArrayDimsAndInits; import net.sourceforge.pmd.lang.java.ast.ASTBooleanLiteral; import net.sourceforge.pmd.lang.java.ast.ASTCastExpression; import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceBody; import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceType; import net.sourceforge.pmd.lang.java.ast.ASTCompilationUnit; import net.sourceforge.pmd.lang.java.ast.ASTConditionalAndExpression; import net.sourceforge.pmd.lang.java.ast.ASTConditionalExpression; import net.sourceforge.pmd.lang.java.ast.ASTConditionalOrExpression; import net.sourceforge.pmd.lang.java.ast.ASTConstructorDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTEnumBody; import net.sourceforge.pmd.lang.java.ast.ASTEnumDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTEqualityExpression; import net.sourceforge.pmd.lang.java.ast.ASTExclusiveOrExpression; import net.sourceforge.pmd.lang.java.ast.ASTExpression; import net.sourceforge.pmd.lang.java.ast.ASTExtendsList; import net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTImportDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTInclusiveOrExpression; import net.sourceforge.pmd.lang.java.ast.ASTInstanceOfExpression; import net.sourceforge.pmd.lang.java.ast.ASTLiteral; import net.sourceforge.pmd.lang.java.ast.ASTMarkerAnnotation; import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTMultiplicativeExpression; import net.sourceforge.pmd.lang.java.ast.ASTName; import net.sourceforge.pmd.lang.java.ast.ASTNormalAnnotation; import net.sourceforge.pmd.lang.java.ast.ASTNullLiteral; import net.sourceforge.pmd.lang.java.ast.ASTPackageDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTPostfixExpression; import net.sourceforge.pmd.lang.java.ast.ASTPreDecrementExpression; import 
net.sourceforge.pmd.lang.java.ast.ASTPreIncrementExpression; import net.sourceforge.pmd.lang.java.ast.ASTPrimaryExpression; import net.sourceforge.pmd.lang.java.ast.ASTPrimaryPrefix; import net.sourceforge.pmd.lang.java.ast.ASTPrimitiveType; import net.sourceforge.pmd.lang.java.ast.ASTReferenceType; import net.sourceforge.pmd.lang.java.ast.ASTRelationalExpression; import net.sourceforge.pmd.lang.java.ast.ASTShiftExpression; import net.sourceforge.pmd.lang.java.ast.ASTSingleMemberAnnotation; import net.sourceforge.pmd.lang.java.ast.ASTStatementExpression; import net.sourceforge.pmd.lang.java.ast.ASTType; import net.sourceforge.pmd.lang.java.ast.ASTTypeArgument; import net.sourceforge.pmd.lang.java.ast.ASTTypeArguments; import net.sourceforge.pmd.lang.java.ast.ASTTypeBound; import net.sourceforge.pmd.lang.java.ast.ASTTypeDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTTypeParameter; import net.sourceforge.pmd.lang.java.ast.ASTTypeParameters; import net.sourceforge.pmd.lang.java.ast.ASTUnaryExpression; import net.sourceforge.pmd.lang.java.ast.ASTUnaryExpressionNotPlusMinus; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclarator; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclaratorId; import net.sourceforge.pmd.lang.java.ast.ASTWildcardBounds; import net.sourceforge.pmd.lang.java.ast.AbstractJavaTypeNode; import net.sourceforge.pmd.lang.java.ast.JavaNode; import net.sourceforge.pmd.lang.java.ast.JavaParserVisitorAdapter; import net.sourceforge.pmd.lang.java.ast.TypeNode; import net.sourceforge.pmd.lang.java.symboltable.ClassScope; import net.sourceforge.pmd.lang.java.symboltable.VariableNameDeclaration; import net.sourceforge.pmd.lang.java.typeresolution.typedefinition.JavaTypeDefinition; import net.sourceforge.pmd.lang.symboltable.NameOccurrence; import net.sourceforge.pmd.lang.symboltable.Scope; // // Helpful reading: // http://www.janeg.ca/scjp/oper/promotions.html // http://java.sun.com/docs/books/jls/second_edition/html/conversions.doc.html // public class ClassTypeResolver extends JavaParserVisitorAdapter { private static final Logger LOG = Logger.getLogger(ClassTypeResolver.class.getName()); private static final Map<String, Class<?>> PRIMITIVE_TYPES; private static final Map<String, String> JAVA_LANG; private Map<String, JavaTypeDefinition> staticFieldImageToTypeDef; private Map<String, List<JavaTypeDefinition>> staticNamesToClasses; private List<JavaTypeDefinition> importOnDemandStaticClasses; private ASTCompilationUnit currentAcu; static { // Note: Assumption here that primitives come from same parent // ClassLoader regardless of what ClassLoader we are passed Map<String, Class<?>> thePrimitiveTypes = new HashMap<>(); thePrimitiveTypes.put("void", Void.TYPE); thePrimitiveTypes.put("boolean", Boolean.TYPE); thePrimitiveTypes.put("byte", Byte.TYPE); thePrimitiveTypes.put("char", Character.TYPE); thePrimitiveTypes.put("short", Short.TYPE); thePrimitiveTypes.put("int", Integer.TYPE); thePrimitiveTypes.put("long", Long.TYPE); thePrimitiveTypes.put("float", Float.TYPE); thePrimitiveTypes.put("double", Double.TYPE); PRIMITIVE_TYPES = Collections.unmodifiableMap(thePrimitiveTypes); Map<String, String> theJavaLang = new HashMap<>(); theJavaLang.put("Boolean", "java.lang.Boolean"); theJavaLang.put("Byte", "java.lang.Byte"); theJavaLang.put("Character", "java.lang.Character"); theJavaLang.put("CharSequence", "java.lang.CharSequence"); theJavaLang.put("Class", "java.lang.Class"); theJavaLang.put("ClassLoader", "java.lang.ClassLoader"); theJavaLang.put("Cloneable", 
"java.lang.Cloneable"); theJavaLang.put("Comparable", "java.lang.Comparable"); theJavaLang.put("Compiler", "java.lang.Compiler"); theJavaLang.put("Double", "java.lang.Double"); theJavaLang.put("Float", "java.lang.Float"); theJavaLang.put("InheritableThreadLocal", "java.lang.InheritableThreadLocal"); theJavaLang.put("Integer", "java.lang.Integer"); theJavaLang.put("Long", "java.lang.Long"); theJavaLang.put("Math", "java.lang.Math"); theJavaLang.put("Number", "java.lang.Number"); theJavaLang.put("Object", "java.lang.Object"); theJavaLang.put("Package", "java.lang.Package"); theJavaLang.put("Process", "java.lang.Process"); theJavaLang.put("Runnable", "java.lang.Runnable"); theJavaLang.put("Runtime", "java.lang.Runtime"); theJavaLang.put("RuntimePermission", "java.lang.RuntimePermission"); theJavaLang.put("SecurityManager", "java.lang.SecurityManager"); theJavaLang.put("Short", "java.lang.Short"); theJavaLang.put("StackTraceElement", "java.lang.StackTraceElement"); theJavaLang.put("StrictMath", "java.lang.StrictMath"); theJavaLang.put("String", "java.lang.String"); theJavaLang.put("StringBuffer", "java.lang.StringBuffer"); theJavaLang.put("System", "java.lang.System"); theJavaLang.put("Thread", "java.lang.Thread"); theJavaLang.put("ThreadGroup", "java.lang.ThreadGroup"); theJavaLang.put("ThreadLocal", "java.lang.ThreadLocal"); theJavaLang.put("Throwable", "java.lang.Throwable"); theJavaLang.put("Void", "java.lang.Void"); JAVA_LANG = Collections.unmodifiableMap(theJavaLang); } private final PMDASMClassLoader pmdClassLoader; private Map<String, String> importedClasses; private List<String> importedOnDemand; private Map<Node, AnonymousClassMetadata> anonymousClassMetadata = new HashMap<>(); private static class AnonymousClassMetadata { public final String name; public int anonymousClassCounter; AnonymousClassMetadata(final String className) { this.name = className; } } public ClassTypeResolver() { this(ClassTypeResolver.class.getClassLoader()); } public ClassTypeResolver(ClassLoader classLoader) { pmdClassLoader = PMDASMClassLoader.getInstance(classLoader); } // FUTURE ASTCompilationUnit should not be a TypeNode. Clean this up // accordingly. 
@Override public Object visit(ASTCompilationUnit node, Object data) { String className = null; try { currentAcu = node; importedOnDemand = new ArrayList<>(); importedClasses = new HashMap<>(); staticFieldImageToTypeDef = new HashMap<>(); staticNamesToClasses = new HashMap<>(); importOnDemandStaticClasses = new ArrayList<>(); // TODO: this fails to account for multiple classes in the same file // later classes (in the ACU) won't have their Nested classes registered className = getClassName(node); if (className != null) { populateClassName(node, className); } } catch (ClassNotFoundException e) { if (LOG.isLoggable(Level.FINE)) { LOG.log(Level.FINE, "Could not find class " + className + ", due to: " + e); } } catch (NoClassDefFoundError e) { if (LOG.isLoggable(Level.FINE)) { LOG.log(Level.FINE, "Could not find class " + className + ", due to: " + e); } } catch (LinkageError e) { if (LOG.isLoggable(Level.WARNING)) { LOG.log(Level.WARNING, "Could not find class " + className + ", due to: " + e); } } finally { populateImports(node); } return super.visit(node, data); } @Override public Object visit(ASTPackageDeclaration node, Object data) { // no need to visit children, the only child, ASTName, will have no type return data; } @Override public Object visit(ASTImportDeclaration node, Object data) { ASTName importedType = (ASTName) node.jjtGetChild(0); if (importedType.getType() != null) { node.setType(importedType.getType()); } else { populateType(node, importedType.getImage()); } if (node.getType() != null) { node.setPackage(node.getType().getPackage()); } // no need to visit children, the only child, ASTName, will have no type return data; } @Override public Object visit(ASTTypeDeclaration node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTClassOrInterfaceType node, Object data) { super.visit(node, data); String typeName = node.getImage(); if (node.isAnonymousClass()) { final AnonymousClassMetadata parentAnonymousClassMetadata = getParentAnonymousClassMetadata(node); if (parentAnonymousClassMetadata != null) { typeName = parentAnonymousClassMetadata.name + "$" + ++parentAnonymousClassMetadata .anonymousClassCounter; anonymousClassMetadata.put(node, new AnonymousClassMetadata(typeName)); } } populateType(node, typeName, node.getArrayDepth()); ASTTypeArguments typeArguments = node.getFirstChildOfType(ASTTypeArguments.class); if (typeArguments != null) { final JavaTypeDefinition[] boundGenerics = new JavaTypeDefinition[typeArguments.jjtGetNumChildren()]; for (int i = 0; i < typeArguments.jjtGetNumChildren(); ++i) { boundGenerics[i] = ((TypeNode) typeArguments.jjtGetChild(i)).getTypeDefinition(); } node.setTypeDefinition(JavaTypeDefinition.forClass(node.getType(), boundGenerics)); } return data; } private AnonymousClassMetadata getParentAnonymousClassMetadata(final ASTClassOrInterfaceType node) { Node parent = node; do { parent = parent.jjtGetParent(); } while (parent != null && !(parent instanceof ASTClassOrInterfaceBody) && !(parent instanceof ASTEnumBody)); // TODO : Should never happen, but add this for safety until we are sure to cover all possible scenarios in // unit testing if (parent == null) { return null; } parent = parent.jjtGetParent(); TypeNode typedParent; // The parent may now be an ASTEnumConstant, an ASTAllocationExpression, an ASTEnumDeclaration or an // ASTClassOrInterfaceDeclaration if (parent instanceof ASTAllocationExpression) { typedParent = parent.getFirstChildOfType(ASTClassOrInterfaceType.class); } else if 
(parent instanceof ASTClassOrInterfaceDeclaration || parent instanceof ASTEnumDeclaration) { typedParent = (TypeNode) parent; } else { typedParent = parent.getFirstParentOfType(ASTEnumDeclaration.class); } final AnonymousClassMetadata metadata = anonymousClassMetadata.get(typedParent); if (metadata != null) { return metadata; } final AnonymousClassMetadata newMetadata; if (typedParent instanceof ASTClassOrInterfaceType) { ASTClassOrInterfaceType parentTypeNode = (ASTClassOrInterfaceType) typedParent; if (parentTypeNode.isAnonymousClass()) { final AnonymousClassMetadata parentMetadata = getParentAnonymousClassMetadata(parentTypeNode); newMetadata = new AnonymousClassMetadata(parentMetadata.name + "$" + ++parentMetadata .anonymousClassCounter); } else { newMetadata = new AnonymousClassMetadata(parentTypeNode.getImage()); } } else { newMetadata = new AnonymousClassMetadata(typedParent.getImage()); } anonymousClassMetadata.put(typedParent, newMetadata); return newMetadata; } @Override public Object visit(ASTClassOrInterfaceDeclaration node, Object data) { populateType(node, node.getImage()); return super.visit(node, data); } @Override public Object visit(ASTEnumDeclaration node, Object data) { populateType(node, node.getImage()); return super.visit(node, data); } @Override public Object visit(ASTAnnotationTypeDeclaration node, Object data) { populateType(node, node.getImage()); return super.visit(node, data); } /** * Set's the node's type to the found Class in the node's name (if there is a class to be found). * * @param node * @return The index in the array produced by splitting the node's name by '.', which is not part of the * class name found. Example: com.package.SomeClass.staticField.otherField, return would be 3 */ private int searchNodeNameForClass(TypeNode node) { // this is the index from which field/method names start in the dotSplitImage array int startIndex = node.getImage().split("\\.").length; // tries to find a class in the node's image by omitting the parts after each '.', example: // First try: com.package.SomeClass.staticField.otherField // Second try: com.package.SomeClass.staticField // Third try: com.package.SomeClass <- found a class! for (String reducedImage = node.getImage();;) { populateType(node, reducedImage); if (node.getType() != null) { break; // we found a class! 
} // update the start index, so that code below knows where to start in the dotSplitImage array --startIndex; int lastDotIndex = reducedImage.lastIndexOf('.'); if (lastDotIndex != -1) { reducedImage = reducedImage.substring(0, lastDotIndex); } else { break; // there is no class } } return startIndex; } private ASTArgumentList getArgumentList(ASTArguments args) { if (args != null) { return args.getFirstChildOfType(ASTArgumentList.class); } return null; } private int getArgumentListArity(ASTArgumentList argList) { if (argList != null) { return argList.jjtGetNumChildren(); } return 0; } @Override public Object visit(ASTName node, Object data) { Class<?> accessingClass = getEnclosingTypeDeclarationClass(node); String[] dotSplitImage = node.getImage().split("\\."); int startIndex = searchNodeNameForClass(node); ASTArguments astArguments = getSuffixMethodArgs(node); ASTArgumentList astArgumentList = getArgumentList(astArguments); int methodArgsArity = getArgumentListArity(astArgumentList); JavaTypeDefinition previousType; if (node.getType() != null) { // static field or method // node.getType() has been set by the call to searchNodeNameForClass above // node.getType() will have the value equal to the Class found by that method previousType = node.getTypeDefinition(); } else { // non-static field or method if (dotSplitImage.length == 1 && astArguments != null) { // method List<MethodType> methods = getLocalApplicableMethods(node, dotSplitImage[0], Collections.<JavaTypeDefinition>emptyList(), methodArgsArity, accessingClass); TypeNode enclosingType = getEnclosingTypeDeclaration(node); if (enclosingType == null) { return data; // we can't proceed, probably uncompiled sources } previousType = getBestMethodReturnType(enclosingType.getTypeDefinition(), methods, astArgumentList); } else { // field previousType = getTypeDefinitionOfVariableFromScope(node.getScope(), dotSplitImage[0], accessingClass); } startIndex = 1; // first element's type in dotSplitImage has already been resolved } // TODO: remove this if branch, it's only purpose is to make JUnitAssertionsShouldIncludeMessage's tests pass // as the code is not compiled there and symbol table works on uncompiled code if (node.getNameDeclaration() != null && previousType == null // if it's not null, then let other code handle things && node.getNameDeclaration().getNode() instanceof TypeNode) { // Carry over the type from the declaration Class<?> nodeType = ((TypeNode) node.getNameDeclaration().getNode()).getType(); // FIXME : generic classes and class with generic super types could have the wrong type assigned here if (nodeType != null) { node.setType(nodeType); return super.visit(node, data); } } for (int i = startIndex; i < dotSplitImage.length; ++i) { if (previousType == null) { break; } if (i == dotSplitImage.length - 1 && astArguments != null) { // method List<MethodType> methods = getApplicableMethods(previousType, dotSplitImage[i], Collections.<JavaTypeDefinition>emptyList(), methodArgsArity, accessingClass); previousType = getBestMethodReturnType(previousType, methods, astArgumentList); } else { // field previousType = getFieldType(previousType, dotSplitImage[i], accessingClass); } } if (previousType != null) { node.setTypeDefinition(previousType); } return super.visit(node, data); } /** * This method looks for method invocations be simple name. * It searches outwards class declarations and their supertypes and in the end, static method imports. * Compiles a list of potentially applicable methods. 
* https://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.12.1 */ private List<MethodType> getLocalApplicableMethods(TypeNode node, String methodName, List<JavaTypeDefinition> typeArguments, int argArity, Class<?> accessingClass) { List<MethodType> foundMethods = new ArrayList<>(); if (accessingClass == null) { return foundMethods; } // we search each enclosing type declaration, looking at their supertypes as well for (node = getEnclosingTypeDeclaration(node); node != null; node = getEnclosingTypeDeclaration(node.jjtGetParent())) { foundMethods.addAll(getApplicableMethods(node.getTypeDefinition(), methodName, typeArguments, argArity, accessingClass)); } foundMethods.addAll(searchImportedStaticMethods(methodName, typeArguments, argArity, accessingClass)); return foundMethods; } private List<MethodType> searchImportedStaticMethods(String methodName, List<JavaTypeDefinition> typeArguments, int argArity, Class<?> accessingClass) { List<MethodType> foundMethods = new ArrayList<>(); // TODO: member methods must not be looked at in the code below // TODO: add support for properly dealing with shadowing List<JavaTypeDefinition> explicitImports = staticNamesToClasses.get(methodName); if (explicitImports != null) { for (JavaTypeDefinition anImport : explicitImports) { foundMethods.addAll(getApplicableMethods(anImport, methodName, typeArguments, argArity, accessingClass)); } } if (!foundMethods.isEmpty()) { // if we found an method by explicit imports, on deamand imports mustn't be searched, because // explicit imports shadow them by name, regardless of method parameters return foundMethods; } for (JavaTypeDefinition anOnDemandImport : importOnDemandStaticClasses) { foundMethods.addAll(getApplicableMethods(anOnDemandImport, methodName, typeArguments, argArity, accessingClass)); } return foundMethods; } /** * This method can be called on a prefix */ private ASTArguments getSuffixMethodArgs(Node node) { Node prefix = node.jjtGetParent(); if (prefix instanceof ASTPrimaryPrefix && prefix.jjtGetParent().jjtGetNumChildren() >= 2) { return prefix.jjtGetParent().jjtGetChild(1).getFirstChildOfType(ASTArguments.class); } return null; } /** * Searches a JavaTypeDefinition and it's superclasses until a field with name {@code fieldImage} that * is visible from the {@code accessingClass} class. Once it's found, it's possibly generic type is * resolved with the help of {@code typeToSearch} TypeDefinition. * * @param typeToSearch The type def. to search the field in. * @param fieldImage The simple name of the field. * @param accessingClass The class that is trying to access the field, some Class declared in the current ACU. * @return JavaTypeDefinition of the resolved field or null if it could not be found. */ private JavaTypeDefinition getFieldType(JavaTypeDefinition typeToSearch, String fieldImage, Class<?> accessingClass) { while (typeToSearch != null && typeToSearch.getType() != Object.class) { try { final Field field = typeToSearch.getType().getDeclaredField(fieldImage); if (isMemberVisibleFromClass(typeToSearch.getType(), field.getModifiers(), accessingClass)) { return typeToSearch.resolveTypeDefinition(field.getGenericType()); } } catch (final NoSuchFieldException ignored) { // swallow } catch (final LinkageError e) { if (LOG.isLoggable(Level.WARNING)) { LOG.log(Level.WARNING, "Error during type resolution due to: " + e); } // TODO : report a missing class once we start doing that... 
return null; } // transform the type into it's supertype typeToSearch = typeToSearch.resolveTypeDefinition(typeToSearch.getType().getGenericSuperclass()); } return null; } /** * Search for a field by it's image stating from a scope and taking into account if it's visible from the * accessingClass Class. The method takes into account that Nested inherited fields shadow outer scope fields. * * @param scope The scope to start the search from. * @param image The name of the field, local variable or method parameter. * @param accessingClass The Class (which is defined in the current ACU) that is trying to access the field. * @return Type def. of the field, or null if it could not be resolved. */ private JavaTypeDefinition getTypeDefinitionOfVariableFromScope(Scope scope, String image, Class<?> accessingClass) { if (accessingClass == null) { return null; } for (/* empty */; scope != null; scope = scope.getParent()) { // search each enclosing scope one by one for (Map.Entry<VariableNameDeclaration, List<NameOccurrence>> entry : scope.getDeclarations(VariableNameDeclaration.class).entrySet()) { if (entry.getKey().getImage().equals(image)) { ASTType typeNode = entry.getKey().getDeclaratorId().getTypeNode(); if (typeNode == null) { // TODO : Type is infered, ie, this is a lambda such as (var) -> var.equals(other) return null; } if (typeNode.jjtGetChild(0) instanceof ASTReferenceType) { return ((TypeNode) typeNode.jjtGetChild(0)).getTypeDefinition(); } else { // primitive type return JavaTypeDefinition.forClass(typeNode.getType()); } } } // Nested class' inherited fields shadow enclosing variables if (scope instanceof ClassScope) { try { // get the superclass type def. ot the Class the ClassScope belongs to JavaTypeDefinition superClass = getSuperClassTypeDefinition(((ClassScope) scope).getClassDeclaration().getNode(), null); // TODO: check if anonymous classes are class scope // try searching this type def. 
JavaTypeDefinition foundTypeDef = getFieldType(superClass, image, accessingClass); if (foundTypeDef != null) { // if null, then it's not an inherited field return foundTypeDef; } } catch (ClassCastException e) { // if there is an anonymous class, getClassDeclaration().getType() will throw // TODO: maybe there is a better way to handle this, maybe this hides bugs } } } return searchImportedStaticFields(image); // will return null if not found } private JavaTypeDefinition searchImportedStaticFields(String fieldName) { if (staticFieldImageToTypeDef.containsKey(fieldName)) { return staticFieldImageToTypeDef.get(fieldName); } for (JavaTypeDefinition anOnDemandImport : importOnDemandStaticClasses) { JavaTypeDefinition typeDef = getFieldType(anOnDemandImport, fieldName, currentAcu.getType()); if (typeDef != null) { staticFieldImageToTypeDef.put(fieldName, typeDef); return typeDef; } } return null; } @Override public Object visit(ASTFieldDeclaration node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTVariableDeclarator node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTVariableDeclaratorId node, Object data) { if (node == null || node.getNameDeclaration() == null) { return super.visit(node, data); } String name = node.getNameDeclaration().getTypeImage(); if (name != null) { populateType(node, name, node.getNameDeclaration().getArrayDepth()); } return super.visit(node, data); } @Override public Object visit(ASTType node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTReferenceType node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTPrimitiveType node, Object data) { populateType(node, node.getImage()); return super.visit(node, data); } @Override public Object visit(ASTExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTConditionalExpression node, Object data) { super.visit(node, data); if (node.isTernary()) { // TODO Rules for Ternary are complex } else { rollupTypeUnary(node); } return data; } @Override public Object visit(ASTConditionalOrExpression node, Object data) { populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTConditionalAndExpression node, Object data) { populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTInclusiveOrExpression node, Object data) { super.visit(node, data); rollupTypeBinaryNumericPromotion(node); return data; } @Override public Object visit(ASTExclusiveOrExpression node, Object data) { super.visit(node, data); rollupTypeBinaryNumericPromotion(node); return data; } @Override public Object visit(ASTAndExpression node, Object data) { super.visit(node, data); rollupTypeBinaryNumericPromotion(node); return data; } @Override public Object visit(ASTEqualityExpression node, Object data) { populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTInstanceOfExpression node, Object data) { populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTRelationalExpression node, Object data) { populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTShiftExpression node, Object data) { super.visit(node, data); // Unary promotion on LHS is type of a 
shift operation rollupTypeUnaryNumericPromotion(node); return data; } @Override public Object visit(ASTAdditiveExpression node, Object data) { super.visit(node, data); rollupTypeBinaryNumericPromotion(node); return data; } @Override public Object visit(ASTMultiplicativeExpression node, Object data) { super.visit(node, data); rollupTypeBinaryNumericPromotion(node); return data; } @Override public Object visit(ASTUnaryExpression node, Object data) { super.visit(node, data); rollupTypeUnaryNumericPromotion(node); return data; } @Override public Object visit(ASTPreIncrementExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTPreDecrementExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTUnaryExpressionNotPlusMinus node, Object data) { super.visit(node, data); if ("!".equals(node.getImage())) { populateType(node, "boolean"); } else { rollupTypeUnary(node); } return data; } @Override public Object visit(ASTPostfixExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTCastExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTPrimaryExpression primaryNode, Object data) { // visit method arguments in reverse for (int i = primaryNode.jjtGetNumChildren() - 1; i >= 0; --i) { ((JavaNode) primaryNode.jjtGetChild(i)).jjtAccept(this, data); } JavaTypeDefinition primaryNodeType = null; AbstractJavaTypeNode previousChild = null; AbstractJavaTypeNode nextChild; Class<?> accessingClass = getEnclosingTypeDeclarationClass(primaryNode); for (int childIndex = 0; childIndex < primaryNode.jjtGetNumChildren(); ++childIndex) { AbstractJavaTypeNode currentChild = (AbstractJavaTypeNode) primaryNode.jjtGetChild(childIndex); nextChild = childIndex + 1 < primaryNode.jjtGetNumChildren() ? (AbstractJavaTypeNode) primaryNode.jjtGetChild(childIndex + 1) : null; // skip children which already have their type assigned if (currentChild.getType() == null) { // Last token, because if 'this' is a Suffix, it'll have tokens '.' and 'this' if (currentChild.jjtGetLastToken().toString().equals("this")) { if (previousChild != null) { // Qualified 'this' expression currentChild.setTypeDefinition(previousChild.getTypeDefinition()); } else { // simple 'this' expression ASTClassOrInterfaceDeclaration typeDeclaration = currentChild.getFirstParentOfType(ASTClassOrInterfaceDeclaration.class); if (typeDeclaration != null) { currentChild.setTypeDefinition(typeDeclaration.getTypeDefinition()); } } // Last token, because if 'super' is a Suffix, it'll have tokens '.' 
and 'super' } else if (currentChild.jjtGetLastToken().toString().equals("super")) { if (previousChild != null) { // Qualified 'super' expression // anonymous classes can't have qualified super expression, thus // getSuperClassTypeDefinition's second argumet isn't null, but we are not // looking for enclosing super types currentChild.setTypeDefinition( getSuperClassTypeDefinition(currentChild, previousChild.getType())); } else { // simple 'super' expression currentChild.setTypeDefinition(getSuperClassTypeDefinition(currentChild, null)); } } else if (currentChild.getFirstChildOfType(ASTArguments.class) != null) { currentChild.setTypeDefinition(previousChild.getTypeDefinition()); } else if (previousChild != null && previousChild.getType() != null) { String currentChildImage = currentChild.getImage(); if (currentChildImage == null) { // this.<Something>foo(); <Something>foo would be in a Suffix and would have a null image currentChildImage = currentChild.jjtGetLastToken().toString(); } ASTArguments astArguments = nextChild != null ? nextChild.getFirstChildOfType(ASTArguments.class) : null; if (astArguments != null) { // method ASTArgumentList astArgumentList = getArgumentList(astArguments); int methodArgsArity = getArgumentListArity(astArgumentList); List<JavaTypeDefinition> typeArguments = getMethodExplicitTypeArugments(currentChild); List<MethodType> methods = getApplicableMethods(previousChild.getTypeDefinition(), currentChildImage, typeArguments, methodArgsArity, accessingClass); currentChild.setTypeDefinition(getBestMethodReturnType(previousChild.getTypeDefinition(), methods, astArgumentList)); } else { // field currentChild.setTypeDefinition(getFieldType(previousChild.getTypeDefinition(), currentChildImage, accessingClass)); } } } if (currentChild.getType() != null) { primaryNodeType = currentChild.getTypeDefinition(); } else { // avoid falsely passing tests primaryNodeType = null; break; } previousChild = currentChild; } primaryNode.setTypeDefinition(primaryNodeType); return data; } /** * Returns the the first Class declaration around the node. * * @param node The node with the enclosing Class declaration. * @return The JavaTypeDefinition of the enclosing Class declaration. */ private TypeNode getEnclosingTypeDeclaration(Node node) { Node previousNode = null; while (node != null) { if (node instanceof ASTClassOrInterfaceDeclaration) { return (TypeNode) node; // anonymous class declaration } else if (node instanceof ASTAllocationExpression // is anonymous class declaration && node.getFirstChildOfType(ASTArrayDimsAndInits.class) == null // array cant be anonymous && !(previousNode instanceof ASTArguments)) { // we might come out of the constructor return (TypeNode) node; } previousNode = node; node = node.jjtGetParent(); } return null; } private Class<?> getEnclosingTypeDeclarationClass(Node node) { TypeNode typeDecl = getEnclosingTypeDeclaration(node); if (typeDecl == null) { return null; } else { return typeDecl.getType(); } } /** * Get the type def. of the super class of the enclosing type declaration which has the same class * as the second argument, or if the second argument is null, then anonymous classes are considered * as well and the first enclosing scope's super class is returned. * * @param node The node from which to start searching. * @param clazz The type of the enclosing class. * @return The TypeDefinition of the superclass. 
*/ private JavaTypeDefinition getSuperClassTypeDefinition(Node node, Class<?> clazz) { Node previousNode = null; for (; node != null; previousNode = node, node = node.jjtGetParent()) { if (node instanceof ASTClassOrInterfaceDeclaration // class declaration // is the class we are looking for or caller requested first class && (((TypeNode) node).getType() == clazz || clazz == null)) { ASTExtendsList extendsList = node.getFirstChildOfType(ASTExtendsList.class); if (extendsList != null) { return ((TypeNode) extendsList.jjtGetChild(0)).getTypeDefinition(); } else { return JavaTypeDefinition.forClass(Object.class); } // anonymous class declaration } else if (clazz == null // callers requested any class scope && node instanceof ASTAllocationExpression // is anonymous class decl && node.getFirstChildOfType(ASTArrayDimsAndInits.class) == null // arrays can't be anonymous && !(previousNode instanceof ASTArguments)) { // we might come out of the constructor return node.getFirstChildOfType(ASTClassOrInterfaceType.class).getTypeDefinition(); } } return null; } @Override public Object visit(ASTPrimaryPrefix node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTTypeArgument node, Object data) { if (node.jjtGetNumChildren() == 0) { // if type argument is '?' node.setTypeDefinition(JavaTypeDefinition.forClass(UPPER_WILDCARD, Object.class)); } else { super.visit(node, data); rollupTypeUnary(node); } return data; } @Override public Object visit(ASTWildcardBounds node, Object data) { super.visit(node, data); JavaTypeDefinition childType = ((TypeNode) node.jjtGetChild(0)).getTypeDefinition(); if (node.jjtGetFirstToken().toString().equals("super")) { node.setTypeDefinition(JavaTypeDefinition.forClass(LOWER_WILDCARD, childType)); } else { // equals "extends" node.setTypeDefinition(JavaTypeDefinition.forClass(UPPER_WILDCARD, childType)); } return data; } @Override public Object visit(ASTTypeParameters node, Object data) { super.visit(node, data); if (node.jjtGetParent() instanceof ASTClassOrInterfaceDeclaration) { TypeNode parent = (TypeNode) node.jjtGetParent(); final JavaTypeDefinition[] boundGenerics = new JavaTypeDefinition[node.jjtGetNumChildren()]; for (int i = 0; i < node.jjtGetNumChildren(); ++i) { boundGenerics[i] = ((TypeNode) node.jjtGetChild(i)).getTypeDefinition(); } parent.setTypeDefinition(JavaTypeDefinition.forClass(parent.getType(), boundGenerics)); } return data; } @Override public Object visit(ASTTypeParameter node, Object data) { if (node.jjtGetNumChildren() == 0) { // type parameter doesn't have declared upper bounds node.setTypeDefinition(JavaTypeDefinition.forClass(UPPER_BOUND, Object.class)); } else { super.visit(node, data); rollupTypeUnary(node); } return data; } @Override public Object visit(ASTTypeBound node, Object data) { super.visit(node, data); // selecting only the type nodes, since the types can be preceded by annotations List<TypeNode> typeNodes = node.findChildrenOfType(TypeNode.class); // TypeBound will have at least one child, but maybe more JavaTypeDefinition[] bounds = new JavaTypeDefinition[typeNodes.size()]; for (int index = 0; index < typeNodes.size(); index++) { bounds[index] = typeNodes.get(index).getTypeDefinition(); } node.setTypeDefinition(JavaTypeDefinition.forClass(UPPER_BOUND, bounds)); return data; } @Override public Object visit(ASTNullLiteral node, Object data) { // No explicit type return super.visit(node, data); } @Override public Object visit(ASTBooleanLiteral node, Object data) { 
populateType(node, "boolean"); return super.visit(node, data); } @Override public Object visit(ASTLiteral node, Object data) { super.visit(node, data); if (node.jjtGetNumChildren() != 0) { rollupTypeUnary(node); } else { if (node.isIntLiteral()) { populateType(node, "int"); } else if (node.isLongLiteral()) { populateType(node, "long"); } else if (node.isFloatLiteral()) { populateType(node, "float"); } else if (node.isDoubleLiteral()) { populateType(node, "double"); } else if (node.isCharLiteral()) { populateType(node, "char"); } else if (node.isStringLiteral()) { populateType(node, "java.lang.String"); } else { throw new IllegalStateException("PMD error, unknown literal type!"); } } return data; } @Override public Object visit(ASTAllocationExpression node, Object data) { super.visit(node, data); final ASTArrayDimsAndInits dims = node.getFirstChildOfType(ASTArrayDimsAndInits.class); if (dims != null) { final Class<?> arrayType = ((TypeNode) node.jjtGetChild(0)).getType(); if (arrayType != null) { node.setType(Array.newInstance(arrayType, (int[]) Array.newInstance(int.class, dims.getArrayDepth())).getClass()); } } else { rollupTypeUnary(node); } return data; } @Override public Object visit(ASTStatementExpression node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTNormalAnnotation node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTMarkerAnnotation node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } @Override public Object visit(ASTSingleMemberAnnotation node, Object data) { super.visit(node, data); rollupTypeUnary(node); return data; } // Roll up the type based on type of the first child node. 
private void rollupTypeUnary(TypeNode typeNode) { Node node = typeNode; if (node.jjtGetNumChildren() >= 1) { Node child = node.jjtGetChild(0); if (child instanceof TypeNode) { typeNode.setTypeDefinition(((TypeNode) child).getTypeDefinition()); } } } // Roll up the type based on type of the first child node using Unary // Numeric Promotion per JLS 5.6.1 private void rollupTypeUnaryNumericPromotion(TypeNode typeNode) { Node node = typeNode; if (node.jjtGetNumChildren() >= 1) { Node child = node.jjtGetChild(0); if (child instanceof TypeNode) { Class<?> type = ((TypeNode) child).getType(); if (type != null) { if ("byte".equals(type.getName()) || "short".equals(type.getName()) || "char".equals(type.getName())) { populateType(typeNode, "int"); } else { typeNode.setType(((TypeNode) child).getType()); } } } } } // Roll up the type based on type of the first and second child nodes using // Binary Numeric Promotion per JLS 5.6.2 private void rollupTypeBinaryNumericPromotion(TypeNode typeNode) { Node node = typeNode; if (node.jjtGetNumChildren() >= 2) { Node child1 = node.jjtGetChild(0); Node child2 = node.jjtGetChild(1); if (child1 instanceof TypeNode && child2 instanceof TypeNode) { Class<?> type1 = ((TypeNode) child1).getType(); Class<?> type2 = ((TypeNode) child2).getType(); if (type1 != null && type2 != null) { // Yeah, String is not numeric, but easiest place to handle // it, only affects ASTAdditiveExpression if ("java.lang.String".equals(type1.getName()) || "java.lang.String".equals(type2.getName())) { populateType(typeNode, "java.lang.String"); } else if ("boolean".equals(type1.getName()) || "boolean".equals(type2.getName())) { populateType(typeNode, "boolean"); } else if ("double".equals(type1.getName()) || "double".equals(type2.getName())) { populateType(typeNode, "double"); } else if ("float".equals(type1.getName()) || "float".equals(type2.getName())) { populateType(typeNode, "float"); } else if ("long".equals(type1.getName()) || "long".equals(type2.getName())) { populateType(typeNode, "long"); } else { populateType(typeNode, "int"); } } else if (type1 != null || type2 != null) { // If one side is known to be a String, then the result is a // String // Yeah, String is not numeric, but easiest place to handle // it, only affects ASTAdditiveExpression if (type1 != null && "java.lang.String".equals(type1.getName()) || type2 != null && "java.lang.String".equals(type2.getName())) { populateType(typeNode, "java.lang.String"); } } } } } private void populateType(TypeNode node, String className) { populateType(node, className, 0); } private void populateType(TypeNode node, String className, int arrayDimens) { String qualifiedName = className; Class<?> myType = PRIMITIVE_TYPES.get(className); if (myType == null && importedClasses != null) { if (importedClasses.containsKey(className)) { qualifiedName = importedClasses.get(className); } else if (importedClasses.containsValue(className)) { qualifiedName = className; } if (qualifiedName != null) { try { /* * TODO - the map right now contains just class names. 
if we * use a map of classname/class then we don't have to hit * the class loader for every type - much faster */ myType = pmdClassLoader.loadClass(qualifiedName); } catch (ClassNotFoundException e) { myType = processOnDemand(qualifiedName); } catch (LinkageError e) { myType = processOnDemand(qualifiedName); } } } if (myType == null && qualifiedName != null && qualifiedName.contains(".")) { // try if the last part defines a inner class String qualifiedNameInner = qualifiedName.substring(0, qualifiedName.lastIndexOf('.')) + "$" + qualifiedName.substring(qualifiedName.lastIndexOf('.') + 1); try { myType = pmdClassLoader.loadClass(qualifiedNameInner); } catch (Exception e) { // ignored } } if (myType == null && qualifiedName != null && !qualifiedName.contains(".")) { // try again with java.lang.... try { myType = pmdClassLoader.loadClass("java.lang." + qualifiedName); } catch (Exception e) { // ignored } } // try generics // TODO: generic declarations can shadow type declarations ... :( if (myType == null) { ASTTypeParameter parameter = getTypeParameterDeclaration(node, className); if (parameter != null) { node.setTypeDefinition(parameter.getTypeDefinition()); } } else { if (arrayDimens > 0) { myType = Array.newInstance(myType, (int[]) Array.newInstance(int.class, arrayDimens)).getClass(); } node.setType(myType); } } private ASTTypeParameter getTypeParameterDeclaration(Node startNode, String image) { for (Node parent = startNode.jjtGetParent(); parent != null; parent = parent.jjtGetParent()) { ASTTypeParameters typeParameters = null; if (parent instanceof ASTTypeParameters) { // if type parameter defined in the same < > typeParameters = (ASTTypeParameters) parent; } else if (parent instanceof ASTConstructorDeclaration || parent instanceof ASTMethodDeclaration || parent instanceof ASTClassOrInterfaceDeclaration) { typeParameters = parent.getFirstChildOfType(ASTTypeParameters.class); } if (typeParameters != null) { for (int index = 0; index < typeParameters.jjtGetNumChildren(); ++index) { String imageToCompareTo = typeParameters.jjtGetChild(index).getImage(); if (imageToCompareTo != null && imageToCompareTo.equals(image)) { return (ASTTypeParameter) typeParameters.jjtGetChild(index); } } } } return null; } /** * Check whether the supplied class name exists. */ public boolean classNameExists(String fullyQualifiedClassName) { try { pmdClassLoader.loadClass(fullyQualifiedClassName); return true; // Class found } catch (ClassNotFoundException e) { return false; } catch (NoClassDefFoundError e) { return false; } } public Class<?> loadClass(String fullyQualifiedClassName) { try { return pmdClassLoader.loadClass(fullyQualifiedClassName); } catch (ClassNotFoundException e) { return null; } } private Class<?> processOnDemand(String qualifiedName) { for (String entry : importedOnDemand) { try { return pmdClassLoader.loadClass(entry + "." + qualifiedName); } catch (Throwable e) { } } return null; } private String getClassName(ASTCompilationUnit node) { ASTClassOrInterfaceDeclaration classDecl = node.getFirstDescendantOfType(ASTClassOrInterfaceDeclaration.class); if (classDecl == null) { // Happens if this compilation unit only contains an enum return null; } if (node.declarationsAreInDefaultPackage()) { return classDecl.getImage(); } ASTPackageDeclaration pkgDecl = node.getPackageDeclaration(); importedOnDemand.add(pkgDecl.getPackageNameImage()); return pkgDecl.getPackageNameImage() + "." 
+ classDecl.getImage(); } /** * If the outer class wasn't found then we'll get in here * * @param node */ private void populateImports(ASTCompilationUnit node) { List<ASTImportDeclaration> theImportDeclarations = node.findChildrenOfType(ASTImportDeclaration.class); importedClasses.putAll(JAVA_LANG); // go through the imports for (ASTImportDeclaration anImportDeclaration : theImportDeclarations) { String strPackage = anImportDeclaration.getPackageName(); if (anImportDeclaration.isStatic()) { if (anImportDeclaration.isImportOnDemand()) { importOnDemandStaticClasses.add(JavaTypeDefinition.forClass(loadClass(strPackage))); } else { // not import on-demand String strName = anImportDeclaration.getImportedName(); String fieldName = strName.substring(strName.lastIndexOf('.') + 1); Class<?> staticClassWithField = loadClass(strPackage); if (staticClassWithField != null) { JavaTypeDefinition typeDef = getFieldType(JavaTypeDefinition.forClass(staticClassWithField), fieldName, currentAcu.getType()); staticFieldImageToTypeDef.put(fieldName, typeDef); } List<JavaTypeDefinition> typeList = staticNamesToClasses.get(fieldName); if (typeList == null) { typeList = new ArrayList<>(); } typeList.add(JavaTypeDefinition.forClass(staticClassWithField)); staticNamesToClasses.put(fieldName, typeList); } } else { // non-static if (anImportDeclaration.isImportOnDemand()) { importedOnDemand.add(strPackage); } else { // not import on-demand String strName = anImportDeclaration.getImportedName(); importedClasses.put(strName, strName); importedClasses.put(strName.substring(strPackage.length() + 1), strName); } } } } private void populateClassName(ASTCompilationUnit node, String className) throws ClassNotFoundException { node.setType(pmdClassLoader.loadClass(className)); importedClasses.putAll(pmdClassLoader.getImportedClasses(className)); } }
1
13,678
We should have a rule to detect identical catch branches
pmd-pmd
java
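The review comment for this entry ("We should have a rule to detect identical catch branches") is motivated by code like the `visit(ASTCompilationUnit, ...)` method in the file above, which handles `ClassNotFoundException` and `NoClassDefFoundError` with identical bodies. The sketch below is a minimal, hypothetical illustration — class and method names are invented, not taken from the PMD sources — of the pattern such a rule would flag and the multi-catch rewrite it could suggest:

```java
// Hypothetical example of the pattern a "detect identical catch branches" rule would flag.
public class IdenticalCatchExample {

    // Flagged: both catch branches have exactly the same body.
    void load(String className) {
        try {
            Class.forName(className);
        } catch (ClassNotFoundException e) {
            System.err.println("Could not find class " + className + ", due to: " + e);
        } catch (NoClassDefFoundError e) {
            System.err.println("Could not find class " + className + ", due to: " + e);
        }
    }

    // Possible rewrite: Java 7+ multi-catch keeps a single copy of the shared body.
    void loadMerged(String className) {
        try {
            Class.forName(className);
        } catch (ClassNotFoundException | NoClassDefFoundError e) {
            System.err.println("Could not find class " + className + ", due to: " + e);
        }
    }
}
```

Note that the real `ClassTypeResolver` logs through `java.util.logging` rather than `System.err`; the example avoids the logger setup only to stay self-contained.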
@@ -28,10 +28,6 @@ namespace Microsoft.DotNet.Build.Tasks.Feed public bool PublishFlatContainer { get; set; } - public int RetryAttempts { get; set; } = 5; - - public int RetryDelayInSeconds { get; set; } = 30; - public int MaxClients { get; set; } = 8; public bool SkipCreateContainer { get; set; } = false;
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using Microsoft.Build.Framework; using Microsoft.DotNet.Build.CloudTestTasks; using System; using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; using MSBuild = Microsoft.Build.Utilities; namespace Microsoft.DotNet.Build.Tasks.Feed { public class PushToBlobFeed : MSBuild.Task { [Required] public string ExpectedFeedUrl { get; set; } [Required] public string AccountKey { get; set; } [Required] public ITaskItem[] ItemsToPush { get; set; } public bool Overwrite { get; set; } public bool PublishFlatContainer { get; set; } public int RetryAttempts { get; set; } = 5; public int RetryDelayInSeconds { get; set; } = 30; public int MaxClients { get; set; } = 8; public bool SkipCreateContainer { get; set; } = false; public override bool Execute() { return ExecuteAsync().GetAwaiter().GetResult(); } public async Task<bool> ExecuteAsync() { try { Log.LogMessage(MessageImportance.High, "Performing feed push..."); if (ItemsToPush == null) { Log.LogError($"No items to push. Please check ItemGroup ItemsToPush."); } else { BlobFeedAction blobFeedAction = new BlobFeedAction(ExpectedFeedUrl, AccountKey, Log, RetryAttempts, RetryDelayInSeconds); if (!SkipCreateContainer) { await blobFeedAction.CreateContainerAsync(this.BuildEngine); } List<string> items = ConvertToStringLists(ItemsToPush); if (!PublishFlatContainer) { await blobFeedAction.PushToFeed(items, Overwrite); } else { using (var clientThrottle = new SemaphoreSlim(this.MaxClients, this.MaxClients)) { Log.LogMessage($"Uploading {ItemsToPush.Length} items..."); await Task.WhenAll(ItemsToPush.Select(item => blobFeedAction.UploadAssets(item, clientThrottle, Overwrite))); } } } } catch (Exception e) { Log.LogErrorFromException(e, true); } return !Log.HasLoggedErrors; } private List<string> ConvertToStringLists(ITaskItem[] taskItems) { List<string> stringList = new List<string>(); foreach (var item in taskItems) { stringList.Add(item.ItemSpec); } return stringList; } } }
1
14,070
remove these from the targets file.
dotnet-buildtools
.cs
@@ -11,14 +11,13 @@ import ( "time" "github.com/ethersphere/bee/pkg/addressbook" + "github.com/ethersphere/bee/pkg/bzz" "github.com/ethersphere/bee/pkg/hive/pb" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" - - ma "github.com/multiformats/go-multiaddr" ) const (
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package hive import ( "context" "errors" "fmt" "time" "github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/hive/pb" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/protobuf" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/swarm" ma "github.com/multiformats/go-multiaddr" ) const ( protocolName = "hive" protocolVersion = "1.0.0" peersStreamName = "peers" messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written. maxBatchSize = 50 ) type Service struct { streamer p2p.Streamer addressBook addressbook.GetPutter peerHandler func(context.Context, swarm.Address) error logger logging.Logger } type Options struct { Streamer p2p.Streamer AddressBook addressbook.GetPutter Logger logging.Logger } func New(o Options) *Service { return &Service{ streamer: o.Streamer, logger: o.Logger, addressBook: o.AddressBook, } } func (s *Service) Protocol() p2p.ProtocolSpec { return p2p.ProtocolSpec{ Name: protocolName, Version: protocolVersion, StreamSpecs: []p2p.StreamSpec{ { Name: peersStreamName, Handler: s.peersHandler, }, }, } } func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error { max := maxBatchSize for len(peers) > 0 { if max > len(peers) { max = len(peers) } if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil { return err } peers = peers[max:] } return nil } func (s *Service) SetPeerAddedHandler(h func(ctx context.Context, addr swarm.Address) error) { s.peerHandler = h } func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) error { stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName) if err != nil { return fmt.Errorf("new stream: %w", err) } defer stream.Close() w, _ := protobuf.NewWriterAndReader(stream) var peersRequest pb.Peers for _, p := range peers { addr, err := s.addressBook.Get(p) if err != nil { if errors.Is(err, storage.ErrNotFound) { s.logger.Debugf("Peer not found %s", peer, err) continue } return err } peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{ Overlay: p.Bytes(), Underlay: addr.String(), }) } if err := w.WriteMsg(&peersRequest); err != nil { return fmt.Errorf("write Peers message: %w", err) } return stream.FullClose() } func (s *Service) peersHandler(_ context.Context, peer p2p.Peer, stream p2p.Stream) error { _, r := protobuf.NewWriterAndReader(stream) var peersReq pb.Peers if err := r.ReadMsgWithTimeout(messageTimeout, &peersReq); err != nil { _ = stream.Close() return fmt.Errorf("read requestPeers message: %w", err) } if err := stream.Close(); err != nil { return fmt.Errorf("close stream: %w", err) } for _, newPeer := range peersReq.Peers { addr, err := ma.NewMultiaddr(newPeer.Underlay) if err != nil { s.logger.Infof("Skipping peer in response %s: %w", newPeer, err) continue } err = s.addressBook.Put(swarm.NewAddress(newPeer.Overlay), addr) if err != nil { return err } if s.peerHandler != nil { if err := s.peerHandler(context.Background(), swarm.NewAddress(newPeer.Overlay)); err != nil { return err } } } return nil }
1
10,242
Now that we have the signature in the hive messages, this may need to be drastically reduced, since I believe there are limits on the protobuf readers/writers.
ethersphere-bee
go
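The review comment for this entry argues that `maxBatchSize = 50` may be too large once each broadcast peer record also carries a signature, because the protobuf reader/writer enforces a maximum message size. The sketch below (shown in Java like the other examples in this document, even though the hive service is Go) only illustrates that arithmetic; the byte sizes and the limit are assumptions for illustration, not values from the bee codebase:

```java
// Back-of-the-envelope sketch: derive the peer batch size from a message-size limit
// instead of hard-coding 50. All constants below are assumed values, not real limits.
public final class PeerBatchSizing {

    static final int MAX_MESSAGE_BYTES = 4 * 1024; // hypothetical protobuf reader/writer limit
    static final int OVERLAY_BYTES     = 32;       // swarm overlay address
    static final int UNDERLAY_BYTES    = 100;      // rough multiaddr string size
    static final int SIGNATURE_BYTES   = 65;       // the newly added signature

    static int maxBatchSize() {
        int perPeer = OVERLAY_BYTES + UNDERLAY_BYTES + SIGNATURE_BYTES;
        return Math.max(1, MAX_MESSAGE_BYTES / perPeer);
    }

    public static void main(String[] args) {
        // With these assumed sizes only ~20 peers fit per message, well below 50.
        System.out.println("peers per batch: " + maxBatchSize());
    }
}
```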
@@ -35,11 +35,12 @@ NAMESPACE_PACKAGES = [ REQUIRED_PACKAGES = [ # Installation related. 'anytree==2.4.3', - 'google-api-python-client==1.7.7', - 'google-auth==1.6.2', + 'google-api-python-client==1.7.10', + 'google-auth==1.6.3', 'google-auth-httplib2==0.0.3', 'Jinja2==2.10.1', 'jmespath==0.9.3', + 'mailjet-rest==1.3.3', 'netaddr==0.7.19', 'pyyaml==4.2b4', 'python-graph-core==1.8.2',
1
#!/usr/bin/env python # Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup script for Forseti Security tools.""" import os import sys from install.util import build_protos from setuptools import find_packages from setuptools import setup from setuptools.command.install import install import google.cloud.forseti FORSETI_VERSION = google.cloud.forseti.__version__ NAMESPACE_PACKAGES = [ 'google', 'google.cloud' ] REQUIRED_PACKAGES = [ # Installation related. 'anytree==2.4.3', 'google-api-python-client==1.7.7', 'google-auth==1.6.2', 'google-auth-httplib2==0.0.3', 'Jinja2==2.10.1', 'jmespath==0.9.3', 'netaddr==0.7.19', 'pyyaml==4.2b4', 'python-graph-core==1.8.2', 'python-dateutil==2.7.5', 'ratelimiter==1.2.0.post0', 'retrying==1.3.3', 'requests[security]==2.21.0', 'sendgrid==5.6.0', 'simple-crypt==4.1.7', 'unicodecsv==0.14.1', # Setup related. 'grpcio==1.18.0', 'grpcio-tools==1.18.0', 'protobuf==3.7.1', # Testing related. 'parameterized==0.6.1', 'ruamel.yaml==0.15.37', 'pylint==1.9.4', 'pylint-quotes==0.2.1', 'PyMySQL==0.9.3', 'SQLAlchemy==1.2.18', 'sqlalchemy-migrate==0.11.0' ] if sys.version_info.major < 3: sys.exit('Sorry, Python 2 is not supported.') def build_forseti_protos(clean_only=False): """Clean and optionally Build protos. Args: clean_only (boolean): Whether to only clean previously built protos. 
""" abs_path = os.path.abspath(__file__) build_protos.clean(abs_path) if not clean_only: build_protos.make_proto(abs_path) class BuildProtosCommand(install): """A command to build protos in all children directories.""" def run(self): build_forseti_protos() class CleanProtosCommand(install): """A command to clean protos in all children directories.""" def run(self): build_forseti_protos(clean_only=True) class PostInstallCommand(install): """Post installation command.""" def run(self): build_forseti_protos() install.do_egg_install(self) setup( name='forseti-security', version=FORSETI_VERSION, description='Forseti Security tools', author='Google LLC.', author_email='[email protected]', url='https://github.com/forseti-security/forseti-security', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: OSI Approved :: Apache Software License' ], cmdclass={ 'build_protos': BuildProtosCommand, 'clean_protos': CleanProtosCommand, 'install': PostInstallCommand, }, install_requires=REQUIRED_PACKAGES, setup_requires=REQUIRED_PACKAGES, tests_require=REQUIRED_PACKAGES, packages=find_packages(exclude=[ '*.tests', '*.tests.*', 'tests.*', 'tests']), include_package_data=True, package_data={ '': ['cloud/forseti/common/email_templates/*.jinja', 'cloud/forseti/common/gcp_api/discovery_documents/*.json'] }, namespace_packages=NAMESPACE_PACKAGES, license='Apache 2.0', keywords='gcp google cloud platform security tools', entry_points={ 'console_scripts': [ 'forseti_enforcer = google.cloud.forseti.stubs:RunForsetiEnforcer', 'forseti_server = google.cloud.forseti.stubs:RunForsetiServer', 'forseti = google.cloud.forseti.stubs:RunForsetiCli', ] }, zip_safe=False, # Set to False: apputils doesn't like zip_safe eggs )
1
35,042
I recommend that we move this to be optional, as other users might not need it. Can you look at the `OPTIONAL_PACKAGES` section, around line 68?
forseti-security-forseti-security
py
@@ -8,10 +8,7 @@ package javaslang.control; import javaslang.Serializables; import org.junit.Test; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.Objects; -import java.util.Optional; +import java.util.*; import static org.assertj.core.api.Assertions.assertThat;
1
/* / \____ _ ______ _____ / \____ ____ _____ * / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang * _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich * /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0 */ package javaslang.control; import javaslang.Serializables; import org.junit.Test; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; public class OptionTest { // -- construction @Test public void shouldMapNullToNone() { assertThat(Option.of(null)).isEqualTo(None.instance()); } @Test public void shouldMapNonNullToSome() { final Option<?> option = Option.of(new Object()); assertThat(option.isDefined()).isTrue(); } @Test public void shouldWrapNullInSome() { final Some<?> some = new Some<>(null); assertThat(some.get()).isEqualTo(null); } @Test public void shouldCreateNothing() { assertThat(Some.nothing()).isEqualTo(new Some<Void>(null)); } // -- get @Test public void shouldSucceedOnGetWhenValueIsPresent() { assertThat(Option.of(1).get()).isEqualTo(1); } @Test(expected = NoSuchElementException.class) public void shouldThrowOnGetWhenValueIsNotPresent() { Option.none().get(); } // -- orElse @Test public void shouldGetValueOnOrElseWhenValueIsPresent() { assertThat(Option.of(1).orElse(2)).isEqualTo(1); } @Test public void shouldGetAlternativeOnOrElseWhenValueIsNotPresent() { assertThat(Option.none().orElse(2)).isEqualTo(2); } // -- orElseGet @Test public void shouldGetValueOnOrElseGetWhenValueIsPresent() { assertThat(Option.of(1).orElseGet(() -> 2)).isEqualTo(1); } @Test public void shouldGetAlternativeOnOrElseGetWhenValueIsNotPresent() { assertThat(Option.none().orElseGet(() -> 2)).isEqualTo(2); } // -- orElseThrow @Test public void shouldGetValueOnOrElseThrowWhenValueIsPresent() { assertThat(Option.of(1).orElseThrow(() -> new RuntimeException("none"))).isEqualTo(1); } @Test(expected = RuntimeException.class) public void shouldThrowOnOrElseThrowWhenValueIsNotPresent() { Option.none().orElseThrow(() -> new RuntimeException("none")); } // -- toJavaOptional @Test public void shouldConvertNoneToJavaOptional() { final None<Object> none = None.instance(); assertThat(none.toJavaOptional()).isEqualTo(Optional.empty()); } @Test public void shouldConvertSomeToJavaOptional() { final Some<Integer> some = new Some<>(1); assertThat(some.toJavaOptional()).isEqualTo(Optional.of(1)); } // -- isPresent @Test public void shouldBePresentOnIsPresentWhenValueIsPresent() { assertThat(Option.of(1).isDefined()).isTrue(); } @Test public void shouldNotBePresentOnIsPresentWhenValueIsNotPresent() { assertThat(Option.none().isDefined()).isFalse(); } // -- isEmpty @Test public void shouldBeEmptyOnIsEmptyWhenValueIsEmpty() { assertThat(Option.none().isEmpty()).isTrue(); } @Test public void shouldBePresentOnIsEmptyWhenValueIsPresent() { assertThat(Option.of(1).isEmpty()).isFalse(); } // -- ifPresent @Test public void shouldConsumePresentValueOnIsPresentWhenValueIsPresent() { final int[] actual = new int[] { -1 }; Option.of(1).forEach(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(1); } @Test public void shouldNotConsumeAnythingOnIsPresentWhenValueIsNotPresent() { final int[] actual = new int[] { -1 }; Option.<Integer> none().forEach(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(-1); } // -- filter @Test public void shouldReturnSomeOnFilterWhenValueIsPresentAndPredicateMatches() { 
assertThat(Option.of(1).filter(i -> i == 1)).isEqualTo(Option.of(1)); } @Test public void shouldReturnNoneOnFilterWhenValueIsPresentAndPredicateNotMatches() { assertThat(Option.of(1).filter(i -> i == 2)).isEqualTo(Option.none()); } @Test public void shouldReturnNoneOnFilterWhenValueIsNotPresentAndPredicateNotMatches() { assertThat(Option.<Integer> none().filter(i -> i == 1)).isEqualTo(Option.none()); } // -- flatten() @Test public void shouldFlattenUnnestedSome() { assertThat(new Some<>(1).flatten()).isEqualTo(new Some<>(1)); } @Test public void shouldFlattenSomeOfSome() { assertThat(new Some<>(new Some<>(1)).flatten()).isEqualTo(new Some<>(1)); } @Test public void shouldFlattenSomeOfNone() { assertThat(new Some<>(None.instance()).flatten()).isEqualTo(None.instance()); } @Test public void shouldFlattenNone() { assertThat(None.instance().flatten()).isEqualTo(None.instance()); } // -- map @Test public void shouldMapSome() { assertThat(Option.of(1).map(String::valueOf)).isEqualTo(Option.of("1")); } @Test public void shouldMapNone() { assertThat(Option.<Integer> none().map(String::valueOf)).isEqualTo(Option.none()); } // -- flatMap @Test public void shouldFlatMapSome() { assertThat(Option.of(1).flatMap(i -> Option.of(String.valueOf(i)))).isEqualTo(Option.of("1")); } @Test public void shouldFlatMapNone() { assertThat(Option.<Integer> none().flatMap(i -> Option.of(String.valueOf(i)))).isEqualTo(Option.none()); } // -- exists @Test public void shouldBeAwareOfPropertyThatHoldsExistsOfSome() { assertThat(new Some<>(1).exists(i -> i == 1)).isTrue(); } @Test public void shouldBeAwareOfPropertyThatNotHoldsExistsOfSome() { assertThat(new Some<>(1).exists(i -> i == 2)).isFalse(); } @Test public void shouldNotHoldPropertyExistsOfNone() { assertThat(None.instance().exists(e -> true)).isFalse(); } // -- forall @Test public void shouldBeAwareOfPropertyThatHoldsForAllOfSome() { assertThat(new Some<>(1).forAll(i -> i == 1)).isTrue(); } @Test public void shouldBeAwareOfPropertyThatNotHoldsForAllOfSome() { assertThat(new Some<>(1).forAll(i -> i == 2)).isFalse(); } @Test // a property holds for all elements of no elements public void shouldNotHoldPropertyForAllOfNone() { assertThat(None.instance().forAll(e -> true)).isTrue(); } // -- forEach @Test public void shouldConsumePresentValueOnForEachWhenValueIsPresent() { final int[] actual = new int[] { -1 }; Option.of(1).forEach(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(1); } @Test public void shouldNotConsumeAnythingOnForEachWhenValueIsNotPresent() { final int[] actual = new int[] { -1 }; Option.<Integer> none().forEach(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(-1); } // -- peek @Test public void shouldConsumePresentValueOnPeekWhenValueIsPresent() { final int[] actual = new int[] { -1 }; final Option<Integer> testee = Option.of(1).peek(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(1); assertThat(testee).isEqualTo(Option.of(1)); } @Test public void shouldNotConsumeAnythingOnPeekWhenValueIsNotPresent() { final int[] actual = new int[] { -1 }; final Option<Integer> testee = Option.<Integer> none().peek(i -> actual[0] = i); assertThat(actual[0]).isEqualTo(-1); assertThat(testee).isEqualTo(Option.none()); } // -- iterator @Test public void shouldReturnIteratorOfSome() { assertThat((Iterator<Integer>) new Some<>(1).iterator()).isNotNull(); } @Test public void shouldReturnIteratorOfNone() { assertThat((Iterator<Object>) None.instance().iterator()).isNotNull(); } // -- equals @Test public void shouldEqualNoneIfObjectIsSame() { final None<?> none 
= None.instance(); assertThat(none).isEqualTo(none); } @Test public void shouldEqualSomeIfObjectIsSame() { final Some<?> some = new Some<>(1); assertThat(some).isEqualTo(some); } @Test public void shouldNotEqualNoneIfObjectIsNull() { assertThat(None.instance()).isNotNull(); } @Test public void shouldNotEqualSomeIfObjectIsNull() { assertThat(new Some<>(1)).isNotNull(); } @Test public void shouldNotEqualNoneIfObjectIsOfDifferentType() { final Object none = None.instance(); assertThat(none.equals(new Object())).isFalse(); } @Test public void shouldNotEqualSomeIfObjectIsOfDifferentType() { final Object some = new Some<>(1); assertThat(some.equals(new Object())).isFalse(); } @Test public void shouldEqualSome() { assertThat(new Some<>(1)).isEqualTo(new Some<>(1)); } // -- hashCode @Test public void shouldHashNone() { assertThat(None.instance().hashCode()).isEqualTo(Objects.hash()); } @Test public void shouldHashSome() { assertThat(new Some<>(1).hashCode()).isEqualTo(Objects.hashCode(1)); } // -- toString @Test public void shouldConvertSomeToString() { assertThat(new Some<>(1).toString()).isEqualTo("Some(1)"); } @Test public void shouldConvertNoneToString() { assertThat(None.instance().toString()).isEqualTo("None"); } // -- serialization @Test public void shouldPreserveSingletonWhenDeserializingNone() { final Object none = Serializables.deserialize(Serializables.serialize(None.instance())); assertThat(none == None.instance()).isTrue(); } }
1
6,476
I usually configure IDEA to never use wildcard imports such as `import java.util.*;`. For now I am using the setting from the javaslang standard.
vavr-io-vavr
java
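For readers unfamiliar with the terminology in the comment above, a tiny Java illustration of the two import styles; the class names are arbitrary examples, not code from the project.

import java.util.List;   // explicit imports, one class per line
import java.util.Map;    // (the style enforced by disabling wildcard imports in IDEA)
// import java.util.*;   // wildcard form that the reviewer's settings avoid

class ImportStyleExample {
    List<String> names;
    Map<String, Integer> counts;
}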
@@ -29,7 +29,9 @@ module Beaker v_file << " v.vm.box = '#{host['box']}'\n" v_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil? v_file << " v.vm.base_mac = '#{randmac}'\n" - v_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\"\n" + host['ips'].each do |ip| + v_file << " v.vm.network :private_network, ip: \"#{ip.to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\"\n" + end if host['disk_path'] v_file << " v.vm.provider :virtualbox do |vb|\n"
1
require 'open3' module Beaker class Vagrant < Beaker::Hypervisor # Return a random mac address # # @return [String] a random mac address def randmac "080027" + (1..3).map{"%0.2X"%rand(256)}.join end def rand_chunk (2 + rand(252)).to_s #don't want a 0, 1, or a 255 end def randip "10.255.#{rand_chunk}.#{rand_chunk}" end def make_vfile hosts, options = {} #HACK HACK HACK - add checks here to ensure that we have box + box_url #generate the VagrantFile v_file = "Vagrant.configure(\"2\") do |c|\n" hosts.each do |host| host['ip'] ||= randip #use the existing ip, otherwise default to a random ip v_file << " c.vm.define '#{host.name}' do |v|\n" v_file << " v.vm.hostname = '#{host.name}'\n" v_file << " v.vm.box = '#{host['box']}'\n" v_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil? v_file << " v.vm.base_mac = '#{randmac}'\n" v_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\", :netmask => \"#{host['netmask'] ||= "255.255.0.0"}\"\n" if host['disk_path'] v_file << " v.vm.provider :virtualbox do |vb|\n" v_file << " vb.name = '#{host.name}'\n" unless File.exist?(host['disk_path']) host['disk_path'] = File.join(host['disk_path'], "#{host.name}.vmdk") v_file << " vb.customize ['createhd', '--filename', '#{host['disk_path']}', '--size', #{host['disk_size'] ||= 5 * 1024}, '--format', 'vmdk']\n" end v_file << " vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium','#{host['disk_path']}']\n" v_file << " end\n" end if /windows/i.match(host['platform']) v_file << " v.vm.network :forwarded_port, guest: 3389, host: 3389\n" v_file << " v.vm.network :forwarded_port, guest: 5985, host: 5985, id: 'winrm', auto_correct: true\n" v_file << " v.vm.guest = :windows" end v_file << " end\n" @logger.debug "created Vagrantfile for VagrantHost #{host.name}" end v_file << " c.vm.provider :virtualbox do |vb|\n" v_file << " vb.customize [\"modifyvm\", :id, \"--memory\", \"#{options['vagrant_memsize'] ||= '1024'}\"]\n" v_file << " end\n" v_file << "end\n" File.open(@vagrant_file, 'w') do |f| f.write(v_file) end end def set_ssh_config host, user f = Tempfile.new("#{host.name}") ssh_config = Dir.chdir(@vagrant_path) do result = `vagrant ssh-config #{host.name}` if $?.to_i != 0 raise "Failed to vagrant ssh-config for #{host.name}" end result end #replace hostname with ip ssh_config = ssh_config.gsub(/#{host.name}/, host['ip']) unless not host['ip'] #set the user ssh_config = ssh_config.gsub(/User vagrant/, "User #{user}") f.write(ssh_config) f.rewind host['ssh'] = {:config => f.path()} host['user'] = user @temp_files << f end def get_ip_from_vagrant_file(hostname) ip = '' if File.file?(@vagrant_file) #we should have a vagrant file available to us for reading f = File.read(@vagrant_file) m = /#{hostname}.*?ip:\s*('|")\s*([^'"]+)('|")/m.match(f) if m ip = m[2] @logger.debug("Determined existing vagrant box #{hostname} ip to be: #{ip} ") else raise("Unable to determine ip for vagrant box #{hostname}") end else raise("No vagrant file found (should be located at #{@vagrant_file})") end ip end def initialize(vagrant_hosts, options) require 'tempfile' @options = options @logger = options[:logger] @temp_files = [] @hosts = vagrant_hosts @vagrant_path = File.expand_path(File.join(File.basename(__FILE__), '..', '.vagrant', 'beaker_vagrant_files', File.basename(options[:hosts_file]))) FileUtils.mkdir_p(@vagrant_path) @vagrant_file = File.expand_path(File.join(@vagrant_path, "Vagrantfile")) end def provision if 
!@options[:provision] and !File.file?(@vagrant_file) raise "Beaker is configured with provision = false but no vagrant file was found at #{@vagrant_file}. You need to enable provision" end if @options[:provision] #setting up new vagrant hosts #make sure that any old boxes are dead dead dead vagrant_cmd("destroy --force") if File.file?(@vagrant_file) make_vfile @hosts, @options vagrant_cmd("up") else #set host ip of already up boxes @hosts.each do |host| host[:ip] = get_ip_from_vagrant_file(host.name) end end @logger.debug "configure vagrant boxes (set ssh-config, switch to root user, hack etc/hosts)" @hosts.each do |host| default_user = host['user'] set_ssh_config host, 'vagrant' copy_ssh_to_root host, @options #shut down connection, will reconnect on next exec host.close set_ssh_config host, default_user end hack_etc_hosts @hosts, @options end def cleanup @logger.debug "removing temporory ssh-config files per-vagrant box" @temp_files.each do |f| f.close() end @logger.notify "Destroying vagrant boxes" vagrant_cmd("destroy --force") FileUtils.rm_rf(@vagrant_path) end def vagrant_cmd(args) Dir.chdir(@vagrant_path) do result = `vagrant #{args} 2>&1` result.each_line do |line| @logger.debug(line) end if $?.to_i != 0 raise "Failed to exec 'vagrant #{args}'" end end end end end
1
7,331
Where is host['ips'] coming from?
voxpupuli-beaker
rb
@@ -3,14 +3,7 @@ package userns -import ( - "strings" - - "github.com/opencontainers/runc/libcontainer/user" -) - -func FuzzUIDMap(data []byte) int { - uidmap, _ := user.ParseIDMap(strings.NewReader(string(data))) - _ = uidMapInUserNS(uidmap) +func FuzzUIDMap(uidmap []byte) int { + _ = uidMapInUserNS(string(uidmap)) return 1 }
1
//go:build gofuzz // +build gofuzz package userns import ( "strings" "github.com/opencontainers/runc/libcontainer/user" ) func FuzzUIDMap(data []byte) int { uidmap, _ := user.ParseIDMap(strings.NewReader(string(data))) _ = uidMapInUserNS(uidmap) return 1 }
1
22,884
Oh! I missed a `:` here; let me fix that. We can also get rid of the intermediate variable.
opencontainers-runc
go
@@ -1,6 +1,8 @@ describe "Display status text" do let(:proposal) { FactoryGirl.create(:proposal, :with_parallel_approvers) } before do + proposal.approvers.first.update(first_name: "Uniquely", last_name: "Named") + proposal.approvers.second.update(first_name: "Onlyof", last_name: "Itskind") login_as(proposal.requester) end
1
describe "Display status text" do let(:proposal) { FactoryGirl.create(:proposal, :with_parallel_approvers) } before do login_as(proposal.requester) end it "displays approved status" do proposal.approvals.each{|approval| approval.approve!} visit proposals_path expect(page).to have_content('Approved') end it "displays outstanding approvers" do visit proposals_path expect(page).not_to have_content('Please review') expect(page).to have_content('Waiting for review from:') proposal.approvers.each{|approver| expect(page).to have_content(approver.full_name)} end it "excludes approved approvals" do proposal.approvals.first.approve! visit proposals_path expect(page).not_to have_content('Please review') expect(page).to have_content('Waiting for review from:') proposal.approvers[1..-1].each{|approver| expect(page).to have_content(approver.full_name)} expect(page).not_to have_content(proposal.approvers.first.full_name) end context "linear" do let(:proposal) { FactoryGirl.create(:proposal, :with_serial_approvers) } it "displays the first approver" do visit proposals_path expect(page).to have_content('Waiting for review from:') proposal.approvers[1..-1].each{|approver| expect(page).not_to have_content(approver.full_name)} expect(page).to have_content(proposal.approvers.first.full_name) end it "excludes approved approvals" do proposal.approvals.first.approve! visit proposals_path expect(page).to have_content('Waiting for review from:') expect(page).not_to have_content(proposal.approvers.first.full_name) end end end
1
13,606
Is this necessary?
18F-C2
rb
@@ -1,8 +1,16 @@ -import React from 'react'; +import React, {Component} from 'react'; import 'element-theme-default'; import {i18n} from 'element-react'; import locale from 'element-react/src/locale/lang/en'; +import storage from './utils/storage'; +import logo from './utils/logo'; +import {makeLogin, isTokenExpire} from './utils/login'; + +import Header from './components/Header'; +import Footer from './components/Footer'; +import LoginModal from './components/Login'; + i18n.use(locale); import Route from './router';
1
import React from 'react'; import 'element-theme-default'; import {i18n} from 'element-react'; import locale from 'element-react/src/locale/lang/en'; i18n.use(locale); import Route from './router'; import './styles/main.scss'; import 'normalize.css'; export default class App extends React.Component { render() { return ( <Route /> ); } }
1
18,856
I'd create a different method for each render section.
verdaccio-verdaccio
js
@@ -23,6 +23,8 @@ public class EstimateGasOperationTracer implements OperationTracer { private Gas sStoreStipendNeeded = Gas.ZERO; + private boolean isReverted = false; + @Override public void traceExecution( final MessageFrame frame, final OperationTracer.ExecuteOperation executeOperation) {
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.vm; import org.hyperledger.besu.ethereum.core.Gas; import org.hyperledger.besu.ethereum.vm.operations.SStoreOperation; public class EstimateGasOperationTracer implements OperationTracer { private int maxDepth = 0; private Gas sStoreStipendNeeded = Gas.ZERO; @Override public void traceExecution( final MessageFrame frame, final OperationTracer.ExecuteOperation executeOperation) { try { executeOperation.execute(); } finally { if (frame.getCurrentOperation() instanceof SStoreOperation && sStoreStipendNeeded.compareTo(Gas.ZERO) == 0) { sStoreStipendNeeded = ((SStoreOperation) frame.getCurrentOperation()).getMinumumGasRemaining(); } if (maxDepth < frame.getMessageStackDepth()) { maxDepth = frame.getMessageStackDepth(); } } } public int getMaxDepth() { return maxDepth; } public Gas getStipendNeeded() { return sStoreStipendNeeded; } }
1
23,615
Instead of storing the revert flag in the tracer, is it possible to use org.hyperledger.besu.ethereum.mainnet.TransactionProcessor.Result#getRevertReason (via org.hyperledger.besu.ethereum.transaction.TransactionSimulatorResult#getResult)? If a TX reverts without a reason, do we get an empty revert reason or a revert reason with zero bytes?
hyperledger-besu
java
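A rough, self-contained sketch of the suggestion in the review comment above: derive "was reverted" from the simulation result instead of a boolean in the tracer. The SimulationResult type and its Optional-based accessor below are stand-ins mirroring the methods named in the comment, not the actual Besu API.

import java.util.Optional;

// Hypothetical stand-in for the result type named in the review comment.
class SimulationResult {
    private final Optional<byte[]> revertReason;

    SimulationResult(Optional<byte[]> revertReason) {
        this.revertReason = revertReason;
    }

    // Assumed accessor, mirroring TransactionProcessor.Result#getRevertReason.
    Optional<byte[]> getRevertReason() {
        return revertReason;
    }
}

class RevertCheck {
    // The estimate-gas logic could read the revert state from the result itself.
    // This also surfaces the reviewer's question: a revert without a reason may
    // appear either as Optional.empty() or as a present, zero-length byte array.
    static boolean isReverted(SimulationResult result) {
        return result.getRevertReason().isPresent();
    }

    static boolean hasEmptyReason(SimulationResult result) {
        return result.getRevertReason().map(r -> r.length == 0).orElse(false);
    }
}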
@@ -71,7 +71,7 @@ public class TestSparkOrcReader extends AvroDataTest { try (CloseableIterable<InternalRow> reader = ORC.read(Files.localInput(testFile)) .project(schema) - .createReaderFunc(SparkOrcReader::new) + .createReaderFunc(readOrcSchema -> new SparkOrcReader(schema, readOrcSchema)) .build()) { final Iterator<InternalRow> actualRows = reader.iterator(); final Iterator<InternalRow> expectedRows = expected.iterator();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.data; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.List; import org.apache.iceberg.Files; import org.apache.iceberg.Schema; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.FileAppender; import org.apache.iceberg.orc.ORC; import org.apache.iceberg.types.Types; import org.apache.spark.sql.catalyst.InternalRow; import org.junit.Assert; import org.junit.Test; import static org.apache.iceberg.spark.data.TestHelpers.assertEquals; import static org.apache.iceberg.types.Types.NestedField.required; public class TestSparkOrcReader extends AvroDataTest { @Override protected void writeAndValidate(Schema schema) throws IOException { final Iterable<InternalRow> expected = RandomData .generateSpark(schema, 100, 0L); writeAndValidateRecords(schema, expected); } @Test public void writeAndValidateRepeatingRecords() throws IOException { Schema structSchema = new Schema( required(100, "id", Types.LongType.get()), required(101, "data", Types.StringType.get()) ); List<InternalRow> expectedRepeating = Collections.nCopies(100, RandomData.generateSpark(structSchema, 1, 0L).iterator().next()); writeAndValidateRecords(structSchema, expectedRepeating); } private void writeAndValidateRecords(Schema schema, Iterable<InternalRow> expected) throws IOException { final File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<InternalRow> writer = ORC.write(Files.localOutput(testFile)) .createWriterFunc(SparkOrcWriter::new) .schema(schema) .build()) { writer.addAll(expected); } try (CloseableIterable<InternalRow> reader = ORC.read(Files.localInput(testFile)) .project(schema) .createReaderFunc(SparkOrcReader::new) .build()) { final Iterator<InternalRow> actualRows = reader.iterator(); final Iterator<InternalRow> expectedRows = expected.iterator(); while (expectedRows.hasNext()) { Assert.assertTrue("Should have expected number of rows", actualRows.hasNext()); assertEquals(schema, expectedRows.next(), actualRows.next()); } Assert.assertFalse("Should not have extra rows", actualRows.hasNext()); } } }
1
19,872
I think this should test with and without container reuse if that is implemented in this PR. Probably just make this test parameterized.
apache-iceberg
java
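The review comment above suggests parameterizing the round-trip test over container reuse. A minimal JUnit 4 sketch of that shape follows; the reuseContainers flag and the writeAndValidateRecords overload that would consume it are assumptions about how container reuse might be exposed, not existing Iceberg APIs.

import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class TestSparkOrcReaderReuse {

    // Run every test once with container reuse enabled and once without.
    @Parameterized.Parameters(name = "reuseContainers = {0}")
    public static Collection<Object[]> parameters() {
        return Arrays.asList(new Object[][] { { true }, { false } });
    }

    private final boolean reuseContainers;

    public TestSparkOrcReaderReuse(boolean reuseContainers) {
        this.reuseContainers = reuseContainers;
    }

    @Test
    public void testRoundTrip() {
        // writeAndValidateRecords(schema, expected, reuseContainers);
        // The call above is a hypothetical overload that would thread the flag
        // through to the ORC reader builder.
    }
}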
@@ -100,6 +100,10 @@ abstract class BaseFile<F> found = true; fromProjectionPos[i] = j; } + if (fields.get(i).fieldId() == ManifestFile.SPEC_ID.fieldId()) { + found = true; + fromProjectionPos[i] = 14; + } } if (!found) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; import org.apache.avro.Schema; import org.apache.avro.generic.IndexedRecord; import org.apache.avro.specific.SpecificData; import org.apache.iceberg.avro.AvroSchemaUtil; import org.apache.iceberg.relocated.com.google.common.base.MoreObjects; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; import org.apache.iceberg.util.ArrayUtil; import org.apache.iceberg.util.ByteBuffers; /** * Base class for both {@link DataFile} and {@link DeleteFile}. */ abstract class BaseFile<F> implements ContentFile<F>, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable { static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of(); static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) { @Override public PartitionData copy() { return this; // this does not change } }; private int[] fromProjectionPos; private Types.StructType partitionType; private int partitionSpecId = -1; private FileContent content = FileContent.DATA; private String filePath = null; private FileFormat format = null; private PartitionData partitionData = null; private Long recordCount = null; private long fileSizeInBytes = -1L; // optional fields private Map<Integer, Long> columnSizes = null; private Map<Integer, Long> valueCounts = null; private Map<Integer, Long> nullValueCounts = null; private Map<Integer, ByteBuffer> lowerBounds = null; private Map<Integer, ByteBuffer> upperBounds = null; private long[] splitOffsets = null; private int[] equalityIds = null; private byte[] keyMetadata = null; // cached schema private transient Schema avroSchema = null; /** * Used by Avro reflection to instantiate this class when reading manifest files. 
*/ BaseFile(Schema avroSchema) { this.avroSchema = avroSchema; Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType(); // partition type may be null if the field was not projected Type partType = schema.fieldType("partition"); if (partType != null) { this.partitionType = partType.asNestedType().asStructType(); } else { this.partitionType = EMPTY_STRUCT_TYPE; } List<Types.NestedField> fields = schema.fields(); List<Types.NestedField> allFields = DataFile.getType(partitionType).fields(); this.fromProjectionPos = new int[fields.size()]; for (int i = 0; i < fromProjectionPos.length; i += 1) { boolean found = false; for (int j = 0; j < allFields.size(); j += 1) { if (fields.get(i).fieldId() == allFields.get(j).fieldId()) { found = true; fromProjectionPos[i] = j; } } if (!found) { throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i)); } } this.partitionData = new PartitionData(partitionType); } BaseFile(int specId, FileContent content, String filePath, FileFormat format, PartitionData partition, long fileSizeInBytes, long recordCount, Map<Integer, Long> columnSizes, Map<Integer, Long> valueCounts, Map<Integer, Long> nullValueCounts, Map<Integer, ByteBuffer> lowerBounds, Map<Integer, ByteBuffer> upperBounds, List<Long> splitOffsets, int[] equalityFieldIds, ByteBuffer keyMetadata) { this.partitionSpecId = specId; this.content = content; this.filePath = filePath; this.format = format; // this constructor is used by DataFiles.Builder, which passes null for unpartitioned data if (partition == null) { this.partitionData = EMPTY_PARTITION_DATA; this.partitionType = EMPTY_PARTITION_DATA.getPartitionType(); } else { this.partitionData = partition; this.partitionType = partition.getPartitionType(); } // this will throw NPE if metrics.recordCount is null this.recordCount = recordCount; this.fileSizeInBytes = fileSizeInBytes; this.columnSizes = columnSizes; this.valueCounts = valueCounts; this.nullValueCounts = nullValueCounts; this.lowerBounds = SerializableByteBufferMap.wrap(lowerBounds); this.upperBounds = SerializableByteBufferMap.wrap(upperBounds); this.splitOffsets = ArrayUtil.toLongArray(splitOffsets); this.equalityIds = equalityFieldIds; this.keyMetadata = ByteBuffers.toByteArray(keyMetadata); } /** * Copy constructor. * * @param toCopy a generic data file to copy. * @param fullCopy whether to copy all fields or to drop column-level stats */ BaseFile(BaseFile<F> toCopy, boolean fullCopy) { this.partitionSpecId = toCopy.partitionSpecId; this.content = toCopy.content; this.filePath = toCopy.filePath; this.format = toCopy.format; this.partitionData = toCopy.partitionData.copy(); this.partitionType = toCopy.partitionType; this.recordCount = toCopy.recordCount; this.fileSizeInBytes = toCopy.fileSizeInBytes; if (fullCopy) { // TODO: support lazy conversion to/from map this.columnSizes = copy(toCopy.columnSizes); this.valueCounts = copy(toCopy.valueCounts); this.nullValueCounts = copy(toCopy.nullValueCounts); this.lowerBounds = SerializableByteBufferMap.wrap(copy(toCopy.lowerBounds)); this.upperBounds = SerializableByteBufferMap.wrap(copy(toCopy.upperBounds)); } else { this.columnSizes = null; this.valueCounts = null; this.nullValueCounts = null; this.lowerBounds = null; this.upperBounds = null; } this.fromProjectionPos = toCopy.fromProjectionPos; this.keyMetadata = toCopy.keyMetadata == null ? null : Arrays.copyOf(toCopy.keyMetadata, toCopy.keyMetadata.length); this.splitOffsets = toCopy.splitOffsets == null ? 
null : Arrays.copyOf(toCopy.splitOffsets, toCopy.splitOffsets.length); this.equalityIds = toCopy.equalityIds != null ? Arrays.copyOf(toCopy.equalityIds, toCopy.equalityIds.length) : null; } /** * Constructor for Java serialization. */ BaseFile() { } @Override public int specId() { return partitionSpecId; } void setSpecId(int specId) { this.partitionSpecId = specId; } protected abstract Schema getAvroSchema(Types.StructType partitionStruct); @Override public Schema getSchema() { if (avroSchema == null) { this.avroSchema = getAvroSchema(partitionType); } return avroSchema; } @Override @SuppressWarnings("unchecked") public void put(int i, Object value) { int pos = i; // if the schema was projected, map the incoming ordinal to the expected one if (fromProjectionPos != null) { pos = fromProjectionPos[i]; } switch (pos) { case 0: this.content = value != null ? FileContent.values()[(Integer) value] : FileContent.DATA; return; case 1: // always coerce to String for Serializable this.filePath = value.toString(); return; case 2: this.format = FileFormat.valueOf(value.toString()); return; case 3: this.partitionData = (PartitionData) value; return; case 4: this.recordCount = (Long) value; return; case 5: this.fileSizeInBytes = (Long) value; return; case 6: this.columnSizes = (Map<Integer, Long>) value; return; case 7: this.valueCounts = (Map<Integer, Long>) value; return; case 8: this.nullValueCounts = (Map<Integer, Long>) value; return; case 9: this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value); return; case 10: this.upperBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value); return; case 11: this.keyMetadata = ByteBuffers.toByteArray((ByteBuffer) value); return; case 12: this.splitOffsets = ArrayUtil.toLongArray((List<Long>) value); return; case 13: this.equalityIds = ArrayUtil.toIntArray((List<Integer>) value); return; default: // ignore the object, it must be from a newer version of the format } } @Override public <T> void set(int pos, T value) { put(pos, value); } @Override public Object get(int i) { int pos = i; // if the schema was projected, map the incoming ordinal to the expected one if (fromProjectionPos != null) { pos = fromProjectionPos[i]; } switch (pos) { case 0: return content.id(); case 1: return filePath; case 2: return format != null ? 
format.toString() : null; case 3: return partitionData; case 4: return recordCount; case 5: return fileSizeInBytes; case 6: return columnSizes; case 7: return valueCounts; case 8: return nullValueCounts; case 9: return lowerBounds; case 10: return upperBounds; case 11: return keyMetadata(); case 12: return splitOffsets(); case 13: return equalityFieldIds(); default: throw new UnsupportedOperationException("Unknown field ordinal: " + pos); } } @Override public <T> T get(int pos, Class<T> javaClass) { return javaClass.cast(get(pos)); } @Override public int size() { return DataFile.getType(EMPTY_STRUCT_TYPE).fields().size(); } @Override public FileContent content() { return content; } @Override public CharSequence path() { return filePath; } @Override public FileFormat format() { return format; } @Override public StructLike partition() { return partitionData; } @Override public long recordCount() { return recordCount; } @Override public long fileSizeInBytes() { return fileSizeInBytes; } @Override public Map<Integer, Long> columnSizes() { return columnSizes; } @Override public Map<Integer, Long> valueCounts() { return valueCounts; } @Override public Map<Integer, Long> nullValueCounts() { return nullValueCounts; } @Override public Map<Integer, ByteBuffer> lowerBounds() { return lowerBounds; } @Override public Map<Integer, ByteBuffer> upperBounds() { return upperBounds; } @Override public ByteBuffer keyMetadata() { return keyMetadata != null ? ByteBuffer.wrap(keyMetadata) : null; } @Override public List<Long> splitOffsets() { return ArrayUtil.toLongList(splitOffsets); } @Override public List<Integer> equalityFieldIds() { return ArrayUtil.toIntList(equalityIds); } private static <K, V> Map<K, V> copy(Map<K, V> map) { if (map != null) { Map<K, V> copy = Maps.newHashMapWithExpectedSize(map.size()); copy.putAll(map); return Collections.unmodifiableMap(copy); } return null; } @Override public String toString() { return MoreObjects.toStringHelper(this) .add("content", content.toString().toLowerCase(Locale.ROOT)) .add("file_path", filePath) .add("file_format", format) .add("partition", partitionData) .add("record_count", recordCount) .add("file_size_in_bytes", fileSizeInBytes) .add("column_sizes", columnSizes) .add("value_counts", valueCounts) .add("null_value_counts", nullValueCounts) .add("lower_bounds", lowerBounds) .add("upper_bounds", upperBounds) .add("key_metadata", keyMetadata == null ? "null" : "(redacted)") .add("split_offsets", splitOffsets == null ? "null" : splitOffsets()) .add("equality_ids", equalityIds == null ? "null" : equalityFieldIds()) .toString(); } }
1
24,158
These modifications allow BaseFile to translate into a Spark row with the specID as a column.
apache-iceberg
java
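The comment above describes exposing the partition spec id as an extra column. A small, self-contained sketch of the position-mapping idea follows; the fixed position 14 mirrors the patch, while the SPEC_ID_FIELD_ID constant and the plain-integer field ids are illustrative assumptions rather than the real Iceberg types.

import java.util.Arrays;
import java.util.List;

class ProjectionPositions {
    // Hypothetical field id for the spec id column appended after the 14 base fields.
    static final int SPEC_ID_FIELD_ID = 1000;
    static final int SPEC_ID_POSITION = 14;

    // Map each projected field id either to its position in the full schema or,
    // for the spec id, to the extra trailing position.
    static int[] mapPositions(List<Integer> projectedFieldIds, List<Integer> allFieldIds) {
        int[] fromProjectionPos = new int[projectedFieldIds.size()];
        for (int i = 0; i < projectedFieldIds.size(); i++) {
            boolean found = false;
            for (int j = 0; j < allFieldIds.size(); j++) {
                if (projectedFieldIds.get(i).equals(allFieldIds.get(j))) {
                    found = true;
                    fromProjectionPos[i] = j;
                }
            }
            if (projectedFieldIds.get(i) == SPEC_ID_FIELD_ID) {
                found = true;
                fromProjectionPos[i] = SPEC_ID_POSITION;
            }
            if (!found) {
                throw new IllegalArgumentException("Cannot find projected field id: " + projectedFieldIds.get(i));
            }
        }
        return fromProjectionPos;
    }

    public static void main(String[] args) {
        List<Integer> all = Arrays.asList(100, 101, 102);
        List<Integer> projected = Arrays.asList(101, SPEC_ID_FIELD_ID);
        System.out.println(Arrays.toString(mapPositions(projected, all))); // prints [1, 14]
    }
}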
@@ -519,7 +519,7 @@ function resolveReadPreference(parent, options) { throw new Error('No readPreference was provided or inherited.'); } - return readPreference; + return typeof readPreference === 'string' ? new ReadPreference(readPreference) : readPreference; } /**
1
'use strict'; const MongoError = require('./core').MongoError; const ReadPreference = require('./core').ReadPreference; const WriteConcern = require('./write_concern'); var shallowClone = function(obj) { var copy = {}; for (var name in obj) copy[name] = obj[name]; return copy; }; // Figure out the read preference var translateReadPreference = function(options) { var r = null; if (options.readPreference) { r = options.readPreference; } else { return options; } if (typeof r === 'string') { options.readPreference = new ReadPreference(r); } else if (r && !(r instanceof ReadPreference) && typeof r === 'object') { const mode = r.mode || r.preference; if (mode && typeof mode === 'string') { options.readPreference = new ReadPreference(mode, r.tags, { maxStalenessSeconds: r.maxStalenessSeconds }); } } else if (!(r instanceof ReadPreference)) { throw new TypeError('Invalid read preference: ' + r); } return options; }; // Set simple property var getSingleProperty = function(obj, name, value) { Object.defineProperty(obj, name, { enumerable: true, get: function() { return value; } }); }; var formatSortValue = (exports.formatSortValue = function(sortDirection) { var value = ('' + sortDirection).toLowerCase(); switch (value) { case 'ascending': case 'asc': case '1': return 1; case 'descending': case 'desc': case '-1': return -1; default: throw new Error( 'Illegal sort clause, must be of the form ' + "[['field1', '(ascending|descending)'], " + "['field2', '(ascending|descending)']]" ); } }); var formattedOrderClause = (exports.formattedOrderClause = function(sortValue) { var orderBy = {}; if (sortValue == null) return null; if (Array.isArray(sortValue)) { if (sortValue.length === 0) { return null; } for (var i = 0; i < sortValue.length; i++) { if (sortValue[i].constructor === String) { orderBy[sortValue[i]] = 1; } else { orderBy[sortValue[i][0]] = formatSortValue(sortValue[i][1]); } } } else if (sortValue != null && typeof sortValue === 'object') { orderBy = sortValue; } else if (typeof sortValue === 'string') { orderBy[sortValue] = 1; } else { throw new Error( 'Illegal sort clause, must be of the form ' + "[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]" ); } return orderBy; }); var checkCollectionName = function checkCollectionName(collectionName) { if ('string' !== typeof collectionName) { throw new MongoError('collection name must be a String'); } if (!collectionName || collectionName.indexOf('..') !== -1) { throw new MongoError('collection names cannot be empty'); } if ( collectionName.indexOf('$') !== -1 && collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null ) { throw new MongoError("collection names must not contain '$'"); } if (collectionName.match(/^\.|\.$/) != null) { throw new MongoError("collection names must not start or end with '.'"); } // Validate that we are not passing 0x00 in the collection name if (collectionName.indexOf('\x00') !== -1) { throw new MongoError('collection names cannot contain a null character'); } }; var handleCallback = function(callback, err, value1, value2) { try { if (callback == null) return; if (callback) { return value2 ? 
callback(err, value1, value2) : callback(err, value1); } } catch (err) { process.nextTick(function() { throw err; }); return false; } return true; }; /** * Wrap a Mongo error document in an Error instance * @ignore * @api private */ var toError = function(error) { if (error instanceof Error) return error; var msg = error.err || error.errmsg || error.errMessage || error; var e = MongoError.create({ message: msg, driver: true }); // Get all object keys var keys = typeof error === 'object' ? Object.keys(error) : []; for (var i = 0; i < keys.length; i++) { try { e[keys[i]] = error[keys[i]]; } catch (err) { // continue } } return e; }; /** * @ignore */ var normalizeHintField = function normalizeHintField(hint) { var finalHint = null; if (typeof hint === 'string') { finalHint = hint; } else if (Array.isArray(hint)) { finalHint = {}; hint.forEach(function(param) { finalHint[param] = 1; }); } else if (hint != null && typeof hint === 'object') { finalHint = {}; for (var name in hint) { finalHint[name] = hint[name]; } } return finalHint; }; /** * Create index name based on field spec * * @ignore * @api private */ var parseIndexOptions = function(fieldOrSpec) { var fieldHash = {}; var indexes = []; var keys; // Get all the fields accordingly if ('string' === typeof fieldOrSpec) { // 'type' indexes.push(fieldOrSpec + '_' + 1); fieldHash[fieldOrSpec] = 1; } else if (Array.isArray(fieldOrSpec)) { fieldOrSpec.forEach(function(f) { if ('string' === typeof f) { // [{location:'2d'}, 'type'] indexes.push(f + '_' + 1); fieldHash[f] = 1; } else if (Array.isArray(f)) { // [['location', '2d'],['type', 1]] indexes.push(f[0] + '_' + (f[1] || 1)); fieldHash[f[0]] = f[1] || 1; } else if (isObject(f)) { // [{location:'2d'}, {type:1}] keys = Object.keys(f); keys.forEach(function(k) { indexes.push(k + '_' + f[k]); fieldHash[k] = f[k]; }); } else { // undefined (ignore) } }); } else if (isObject(fieldOrSpec)) { // {location:'2d', type:1} keys = Object.keys(fieldOrSpec); keys.forEach(function(key) { indexes.push(key + '_' + fieldOrSpec[key]); fieldHash[key] = fieldOrSpec[key]; }); } return { name: indexes.join('_'), keys: keys, fieldHash: fieldHash }; }; var isObject = (exports.isObject = function(arg) { return '[object Object]' === Object.prototype.toString.call(arg); }); var debugOptions = function(debugFields, options) { var finaloptions = {}; debugFields.forEach(function(n) { finaloptions[n] = options[n]; }); return finaloptions; }; var decorateCommand = function(command, options, exclude) { for (var name in options) { if (exclude.indexOf(name) === -1) command[name] = options[name]; } return command; }; var mergeOptions = function(target, source) { for (var name in source) { target[name] = source[name]; } return target; }; // Merge options with translation var translateOptions = function(target, source) { var translations = { // SSL translation options sslCA: 'ca', sslCRL: 'crl', sslValidate: 'rejectUnauthorized', sslKey: 'key', sslCert: 'cert', sslPass: 'passphrase', // SocketTimeout translation options socketTimeoutMS: 'socketTimeout', connectTimeoutMS: 'connectionTimeout', // Replicaset options replicaSet: 'setName', rs_name: 'setName', secondaryAcceptableLatencyMS: 'acceptableLatency', connectWithNoPrimary: 'secondaryOnlyConnectionAllowed', // Mongos options acceptableLatencyMS: 'localThresholdMS' }; for (var name in source) { if (translations[name]) { target[translations[name]] = source[name]; } else { target[name] = source[name]; } } return target; }; var filterOptions = function(options, names) { var 
filterOptions = {}; for (var name in options) { if (names.indexOf(name) !== -1) filterOptions[name] = options[name]; } // Filtered options return filterOptions; }; // Write concern keys var writeConcernKeys = ['w', 'j', 'wtimeout', 'fsync']; // Merge the write concern options var mergeOptionsAndWriteConcern = function(targetOptions, sourceOptions, keys, mergeWriteConcern) { // Mix in any allowed options for (var i = 0; i < keys.length; i++) { if (!targetOptions[keys[i]] && sourceOptions[keys[i]] !== undefined) { targetOptions[keys[i]] = sourceOptions[keys[i]]; } } // No merging of write concern if (!mergeWriteConcern) return targetOptions; // Found no write Concern options var found = false; for (i = 0; i < writeConcernKeys.length; i++) { if (targetOptions[writeConcernKeys[i]]) { found = true; break; } } if (!found) { for (i = 0; i < writeConcernKeys.length; i++) { if (sourceOptions[writeConcernKeys[i]]) { targetOptions[writeConcernKeys[i]] = sourceOptions[writeConcernKeys[i]]; } } } return targetOptions; }; /** * Executes the given operation with provided arguments. * * This method reduces large amounts of duplication in the entire codebase by providing * a single point for determining whether callbacks or promises should be used. Additionally * it allows for a single point of entry to provide features such as implicit sessions, which * are required by the Driver Sessions specification in the event that a ClientSession is * not provided * * @param {object} topology The topology to execute this operation on * @param {function} operation The operation to execute * @param {array} args Arguments to apply the provided operation * @param {object} [options] Options that modify the behavior of the method */ const executeLegacyOperation = (topology, operation, args, options) => { if (topology == null) { throw new TypeError('This method requires a valid topology instance'); } if (!Array.isArray(args)) { throw new TypeError('This method requires an array of arguments to apply'); } options = options || {}; const Promise = topology.s.promiseLibrary; let callback = args[args.length - 1]; // The driver sessions spec mandates that we implicitly create sessions for operations // that are not explicitly provided with a session. 
let session, opOptions, owner; if (!options.skipSessions && topology.hasSessionSupport()) { opOptions = args[args.length - 2]; if (opOptions == null || opOptions.session == null) { owner = Symbol(); session = topology.startSession({ owner }); const optionsIndex = args.length - 2; args[optionsIndex] = Object.assign({}, args[optionsIndex], { session: session }); } else if (opOptions.session && opOptions.session.hasEnded) { throw new MongoError('Use of expired sessions is not permitted'); } } const makeExecuteCallback = (resolve, reject) => function executeCallback(err, result) { if (session && session.owner === owner && !options.returnsCursor) { session.endSession(() => { delete opOptions.session; if (err) return reject(err); resolve(result); }); } else { if (err) return reject(err); resolve(result); } }; // Execute using callback if (typeof callback === 'function') { callback = args.pop(); const handler = makeExecuteCallback( result => callback(null, result), err => callback(err, null) ); args.push(handler); try { return operation.apply(null, args); } catch (e) { handler(e); throw e; } } // Return a Promise if (args[args.length - 1] != null) { throw new TypeError('final argument to `executeLegacyOperation` must be a callback'); } return new Promise(function(resolve, reject) { const handler = makeExecuteCallback(resolve, reject); args[args.length - 1] = handler; try { return operation.apply(null, args); } catch (e) { handler(e); } }); }; /** * Applies retryWrites: true to a command if retryWrites is set on the command's database. * * @param {object} target The target command to which we will apply retryWrites. * @param {object} db The database from which we can inherit a retryWrites value. */ function applyRetryableWrites(target, db) { if (db && db.s.options.retryWrites) { target.retryWrites = true; } return target; } /** * Applies a write concern to a command based on well defined inheritance rules, optionally * detecting support for the write concern in the first place. * * @param {Object} target the target command we will be applying the write concern to * @param {Object} sources sources where we can inherit default write concerns from * @param {Object} [options] optional settings passed into a command for write concern overrides * @returns {Object} the (now) decorated target */ function applyWriteConcern(target, sources, options) { options = options || {}; const db = sources.db; const coll = sources.collection; if (options.session && options.session.inTransaction()) { // writeConcern is not allowed within a multi-statement transaction if (target.writeConcern) { delete target.writeConcern; } return target; } const writeConcern = WriteConcern.fromOptions(options); if (writeConcern) { return Object.assign(target, { writeConcern }); } if (coll && coll.writeConcern) { return Object.assign(target, { writeConcern: Object.assign({}, coll.writeConcern) }); } if (db && db.writeConcern) { return Object.assign(target, { writeConcern: Object.assign({}, db.writeConcern) }); } return target; } /** * Resolves a read preference based on well-defined inheritance rules. This method will not only * determine the read preference (if there is one), but will also ensure the returned value is a * properly constructed instance of `ReadPreference`. * * @param {Collection|Db|MongoClient} parent The parent of the operation on which to determine the read * preference, used for determining the inherited read preference. 
* @param {Object} options The options passed into the method, potentially containing a read preference * @returns {(ReadPreference|null)} The resolved read preference */ function resolveReadPreference(parent, options) { options = options || {}; const session = options.session; const inheritedReadPreference = parent.readPreference; let readPreference; if (options.readPreference) { readPreference = ReadPreference.fromOptions(options); } else if (session && session.inTransaction() && session.transaction.options.readPreference) { // The transaction’s read preference MUST override all other user configurable read preferences. readPreference = session.transaction.options.readPreference; } else if (inheritedReadPreference != null) { readPreference = inheritedReadPreference; } else { throw new Error('No readPreference was provided or inherited.'); } return readPreference; } /** * Checks if a given value is a Promise * * @param {*} maybePromise * @return true if the provided value is a Promise */ function isPromiseLike(maybePromise) { return maybePromise && typeof maybePromise.then === 'function'; } /** * Applies collation to a given command. * * @param {object} [command] the command on which to apply collation * @param {(Cursor|Collection)} [target] target of command * @param {object} [options] options containing collation settings */ function decorateWithCollation(command, target, options) { const topology = target.s && target.s.topology; if (!topology) { throw new TypeError('parameter "target" is missing a topology'); } const capabilities = topology.capabilities(); if (options.collation && typeof options.collation === 'object') { if (capabilities && capabilities.commandsTakeCollation) { command.collation = options.collation; } else { throw new MongoError(`Current topology does not support collation`); } } } /** * Applies a read concern to a given command. * * @param {object} command the command on which to apply the read concern * @param {Collection} coll the parent collection of the operation calling this method */ function decorateWithReadConcern(command, coll, options) { if (options && options.session && options.session.inTransaction()) { return; } let readConcern = Object.assign({}, command.readConcern || {}); if (coll.s.readConcern) { Object.assign(readConcern, coll.s.readConcern); } if (Object.keys(readConcern).length > 0) { Object.assign(command, { readConcern: readConcern }); } } const emitProcessWarning = msg => process.emitWarning(msg, 'DeprecationWarning'); const emitConsoleWarning = msg => console.error(msg); const emitDeprecationWarning = process.emitWarning ? emitProcessWarning : emitConsoleWarning; /** * Default message handler for generating deprecation warnings. * * @param {string} name function name * @param {string} option option name * @return {string} warning message * @ignore * @api private */ function defaultMsgHandler(name, option) { return `${name} option [${option}] is deprecated and will be removed in a later version.`; } /** * Deprecates a given function's options. 
* * @param {object} config configuration for deprecation * @param {string} config.name function name * @param {Array} config.deprecatedOptions options to deprecate * @param {number} config.optionsIndex index of options object in function arguments array * @param {function} [config.msgHandler] optional custom message handler to generate warnings * @param {function} fn the target function of deprecation * @return {function} modified function that warns once per deprecated option, and executes original function * @ignore * @api private */ function deprecateOptions(config, fn) { if (process.noDeprecation === true) { return fn; } const msgHandler = config.msgHandler ? config.msgHandler : defaultMsgHandler; const optionsWarned = new Set(); function deprecated() { const options = arguments[config.optionsIndex]; // ensure options is a valid, non-empty object, otherwise short-circuit if (!isObject(options) || Object.keys(options).length === 0) { return fn.apply(this, arguments); } config.deprecatedOptions.forEach(deprecatedOption => { if (options.hasOwnProperty(deprecatedOption) && !optionsWarned.has(deprecatedOption)) { optionsWarned.add(deprecatedOption); const msg = msgHandler(config.name, deprecatedOption); emitDeprecationWarning(msg); if (this && this.getLogger) { const logger = this.getLogger(); if (logger) { logger.warn(msg); } } } }); return fn.apply(this, arguments); } // These lines copied from https://github.com/nodejs/node/blob/25e5ae41688676a5fd29b2e2e7602168eee4ceb5/lib/internal/util.js#L73-L80 // The wrapper will keep the same prototype as fn to maintain prototype chain Object.setPrototypeOf(deprecated, fn); if (fn.prototype) { // Setting this (rather than using Object.setPrototype, as above) ensures // that calling the unwrapped constructor gives an instanceof the wrapped // constructor. deprecated.prototype = fn.prototype; } return deprecated; } const SUPPORTS = {}; // Test asyncIterator support try { require('./async/async_iterator'); SUPPORTS.ASYNC_ITERATOR = true; } catch (e) { SUPPORTS.ASYNC_ITERATOR = false; } class MongoDBNamespace { constructor(db, collection) { this.db = db; this.collection = collection; } toString() { return this.collection ? `${this.db}.${this.collection}` : this.db; } withCollection(collection) { return new MongoDBNamespace(this.db, collection); } static fromString(namespace) { if (!namespace) { throw new Error(`Cannot parse namespace from "${namespace}"`); } const index = namespace.indexOf('.'); return new MongoDBNamespace(namespace.substring(0, index), namespace.substring(index + 1)); } } module.exports = { filterOptions, mergeOptions, translateOptions, shallowClone, getSingleProperty, checkCollectionName, toError, formattedOrderClause, parseIndexOptions, normalizeHintField, handleCallback, decorateCommand, isObject, debugOptions, MAX_JS_INT: Number.MAX_SAFE_INTEGER + 1, mergeOptionsAndWriteConcern, translateReadPreference, executeLegacyOperation, applyRetryableWrites, applyWriteConcern, isPromiseLike, decorateWithCollation, decorateWithReadConcern, deprecateOptions, SUPPORTS, MongoDBNamespace, resolveReadPreference };
1
16,045
Is this something we've been missing this whole time?
mongodb-node-mongodb-native
js
@@ -25,6 +25,7 @@ import ( const ( defaultWaitApprovalTimeout = Duration(6 * time.Hour) defaultAnalysisQueryTimeout = Duration(30 * time.Second) + allEvents = "*" ) type GenericDeploymentSpec struct {
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "encoding/json" "fmt" "time" "github.com/pipe-cd/pipe/pkg/model" ) const ( defaultWaitApprovalTimeout = Duration(6 * time.Hour) defaultAnalysisQueryTimeout = Duration(30 * time.Second) ) type GenericDeploymentSpec struct { // Configuration used while planning deployment. Planner DeploymentPlanner `json:"planner"` // Forcibly use QuickSync or Pipeline when commit message matched the specified pattern. CommitMatcher DeploymentCommitMatcher `json:"commitMatcher"` // Pipeline for deploying progressively. Pipeline *DeploymentPipeline `json:"pipeline"` // The list of sealed secrets that should be decrypted. SealedSecrets []SealedSecretMapping `json:"sealedSecrets"` // List of directories or files where their changes will trigger the deployment. // Regular expression can be used. TriggerPaths []string `json:"triggerPaths,omitempty"` // The maximum length of time to execute deployment before giving up. // Default is 6h. Timeout Duration `json:"timeout,omitempty" default:"6h"` // List of encrypted secrets and targets that should be decoded before using. Encryption *SecretEncryption `json:"encryption"` // Additional configuration used while sending notification to external services. DeploymentNotification *DeploymentNotification `json:"notification"` } type DeploymentPlanner struct { // Disable auto-detecting to use QUICK_SYNC or PROGRESSIVE_SYNC. // Always use the speficied pipeline for all deployments. AlwaysUsePipeline bool `json:"alwaysUsePipeline"` } func (s *GenericDeploymentSpec) Validate() error { if s.Pipeline != nil { for _, stage := range s.Pipeline.Stages { if stage.AnalysisStageOptions != nil { if err := stage.AnalysisStageOptions.Validate(); err != nil { return err } } } } if e := s.Encryption; e != nil { if err := e.Validate(); err != nil { return err } } if s.DeploymentNotification != nil { for _, m := range s.DeploymentNotification.Mentions { if err := m.Validate(); err != nil { return err } } } return nil } func (s GenericDeploymentSpec) GetStage(index int32) (PipelineStage, bool) { if s.Pipeline == nil { return PipelineStage{}, false } if int(index) >= len(s.Pipeline.Stages) { return PipelineStage{}, false } return s.Pipeline.Stages[index], true } // HasStage checks if the given stage is included in the pipeline. func (s GenericDeploymentSpec) HasStage(stage model.Stage) bool { if s.Pipeline == nil { return false } for _, s := range s.Pipeline.Stages { if s.Name == stage { return true } } return false } // DeploymentCommitMatcher provides a way to decide how to deploy. type DeploymentCommitMatcher struct { // It makes sure to perform syncing if the commit message matches this regular expression. QuickSync string `json:"quickSync"` // It makes sure to perform pipeline if the commit message matches this regular expression. Pipeline string `json:"pipeline"` } // DeploymentPipeline represents the way to deploy the application. 
// The pipeline is triggered by changes in any of the following objects: // - Target PodSpec (Target can be Deployment, DaemonSet, StatefulSet) // - ConfigMaps, Secrets that are mounted as volumes or envs in the deployment. type DeploymentPipeline struct { Stages []PipelineStage `json:"stages"` } // PipelineStage represents a single stage of a pipeline. // This is used as a generic struct for all stage type. type PipelineStage struct { Id string Name model.Stage Desc string Timeout Duration WaitStageOptions *WaitStageOptions WaitApprovalStageOptions *WaitApprovalStageOptions AnalysisStageOptions *AnalysisStageOptions K8sPrimaryRolloutStageOptions *K8sPrimaryRolloutStageOptions K8sCanaryRolloutStageOptions *K8sCanaryRolloutStageOptions K8sCanaryCleanStageOptions *K8sCanaryCleanStageOptions K8sBaselineRolloutStageOptions *K8sBaselineRolloutStageOptions K8sBaselineCleanStageOptions *K8sBaselineCleanStageOptions K8sTrafficRoutingStageOptions *K8sTrafficRoutingStageOptions TerraformSyncStageOptions *TerraformSyncStageOptions TerraformPlanStageOptions *TerraformPlanStageOptions TerraformApplyStageOptions *TerraformApplyStageOptions CloudRunSyncStageOptions *CloudRunSyncStageOptions CloudRunPromoteStageOptions *CloudRunPromoteStageOptions LambdaSyncStageOptions *LambdaSyncStageOptions LambdaCanaryRolloutStageOptions *LambdaCanaryRolloutStageOptions LambdaPromoteStageOptions *LambdaPromoteStageOptions ECSSyncStageOptions *ECSSyncStageOptions ECSCanaryRolloutStageOptions *ECSCanaryRolloutStageOptions ECSPrimaryRolloutStageOptions *ECSPrimaryRolloutStageOptions ECSCanaryCleanStageOptions *ECSCanaryCleanStageOptions ECSTrafficRoutingStageOptions *ECSTrafficRoutingStageOptions } type genericPipelineStage struct { Id string `json:"id"` Name model.Stage `json:"name"` Desc string `json:"desc,omitempty"` Timeout Duration `json:"timeout"` With json.RawMessage `json:"with"` } func (s *PipelineStage) UnmarshalJSON(data []byte) error { var err error gs := genericPipelineStage{} if err = json.Unmarshal(data, &gs); err != nil { return err } s.Id = gs.Id s.Name = gs.Name s.Desc = gs.Desc s.Timeout = gs.Timeout switch s.Name { case model.StageWait: s.WaitStageOptions = &WaitStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.WaitStageOptions) } case model.StageWaitApproval: s.WaitApprovalStageOptions = &WaitApprovalStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.WaitApprovalStageOptions) } if s.WaitApprovalStageOptions.Timeout <= 0 { s.WaitApprovalStageOptions.Timeout = defaultWaitApprovalTimeout } case model.StageAnalysis: s.AnalysisStageOptions = &AnalysisStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.AnalysisStageOptions) } for i := 0; i < len(s.AnalysisStageOptions.Metrics); i++ { if s.AnalysisStageOptions.Metrics[i].Timeout <= 0 { s.AnalysisStageOptions.Metrics[i].Timeout = defaultAnalysisQueryTimeout } } case model.StageK8sPrimaryRollout: s.K8sPrimaryRolloutStageOptions = &K8sPrimaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sPrimaryRolloutStageOptions) } case model.StageK8sCanaryRollout: s.K8sCanaryRolloutStageOptions = &K8sCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sCanaryRolloutStageOptions) } case model.StageK8sCanaryClean: s.K8sCanaryCleanStageOptions = &K8sCanaryCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sCanaryCleanStageOptions) } case model.StageK8sBaselineRollout: s.K8sBaselineRolloutStageOptions = &K8sBaselineRolloutStageOptions{} 
if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sBaselineRolloutStageOptions) } case model.StageK8sBaselineClean: s.K8sBaselineCleanStageOptions = &K8sBaselineCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sBaselineCleanStageOptions) } case model.StageK8sTrafficRouting: s.K8sTrafficRoutingStageOptions = &K8sTrafficRoutingStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.K8sTrafficRoutingStageOptions) } case model.StageTerraformSync: s.TerraformSyncStageOptions = &TerraformSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformSyncStageOptions) } case model.StageTerraformPlan: s.TerraformPlanStageOptions = &TerraformPlanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformPlanStageOptions) } case model.StageTerraformApply: s.TerraformApplyStageOptions = &TerraformApplyStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.TerraformApplyStageOptions) } case model.StageCloudRunSync: s.CloudRunSyncStageOptions = &CloudRunSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.CloudRunSyncStageOptions) } case model.StageCloudRunPromote: s.CloudRunPromoteStageOptions = &CloudRunPromoteStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.CloudRunPromoteStageOptions) } case model.StageLambdaSync: s.LambdaSyncStageOptions = &LambdaSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaSyncStageOptions) } case model.StageLambdaPromote: s.LambdaPromoteStageOptions = &LambdaPromoteStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaPromoteStageOptions) } case model.StageLambdaCanaryRollout: s.LambdaCanaryRolloutStageOptions = &LambdaCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.LambdaCanaryRolloutStageOptions) } case model.StageECSSync: s.ECSSyncStageOptions = &ECSSyncStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSSyncStageOptions) } case model.StageECSCanaryRollout: s.ECSCanaryRolloutStageOptions = &ECSCanaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSCanaryRolloutStageOptions) } case model.StageECSPrimaryRollout: s.ECSPrimaryRolloutStageOptions = &ECSPrimaryRolloutStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSPrimaryRolloutStageOptions) } case model.StageECSCanaryClean: s.ECSCanaryCleanStageOptions = &ECSCanaryCleanStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSCanaryCleanStageOptions) } case model.StageECSTrafficRouting: s.ECSTrafficRoutingStageOptions = &ECSTrafficRoutingStageOptions{} if len(gs.With) > 0 { err = json.Unmarshal(gs.With, s.ECSTrafficRoutingStageOptions) } default: err = fmt.Errorf("unsupported stage name: %s", s.Name) } return err } // WaitStageOptions contains all configurable values for a WAIT stage. type WaitStageOptions struct { Duration Duration `json:"duration"` } // WaitStageOptions contains all configurable values for a WAIT_APPROVAL stage. type WaitApprovalStageOptions struct { // The maximum length of time to wait before giving up. // Defaults to 6h. Timeout Duration `json:"timeout"` Approvers []string `json:"approvers"` } // AnalysisStageOptions contains all configurable values for a K8S_ANALYSIS stage. type AnalysisStageOptions struct { // How long the analysis process should be executed. 
Duration Duration `json:"duration"` // TODO: Consider about how to handle a pod restart // possible count of pod restarting RestartThreshold int `json:"restartThreshold"` Metrics []TemplatableAnalysisMetrics `json:"metrics"` Logs []TemplatableAnalysisLog `json:"logs"` Https []TemplatableAnalysisHTTP `json:"https"` } func (a *AnalysisStageOptions) Validate() error { if a.Duration == 0 { return fmt.Errorf("the ANALYSIS stage requires duration field") } for _, m := range a.Metrics { if m.Template.Name != "" { if err := m.Template.Validate(); err != nil { return fmt.Errorf("one of metrics configurations of ANALYSIS stage is invalid: %w", err) } continue } if err := m.AnalysisMetrics.Validate(); err != nil { return fmt.Errorf("one of metrics configurations of ANALYSIS stage is invalid: %w", err) } } for _, l := range a.Logs { if l.Template.Name != "" { if err := l.Template.Validate(); err != nil { return fmt.Errorf("one of log configurations of ANALYSIS stage is invalid: %w", err) } continue } if err := l.AnalysisLog.Validate(); err != nil { return fmt.Errorf("one of log configurations of ANALYSIS stage is invalid: %w", err) } } for _, h := range a.Https { if h.Template.Name != "" { if err := h.Template.Validate(); err != nil { return fmt.Errorf("one of http configurations of ANALYSIS stage is invalid: %w", err) } continue } if err := h.AnalysisHTTP.Validate(); err != nil { return fmt.Errorf("one of http configurations of ANALYSIS stage is invalid: %w", err) } } return nil } type AnalysisTemplateRef struct { Name string `json:"name"` AppArgs map[string]string `json:"appArgs"` } func (a *AnalysisTemplateRef) Validate() error { if a.Name == "" { return fmt.Errorf("the reference of analysis template name is empty") } return nil } // TemplatableAnalysisMetrics wraps AnalysisMetrics to allow specify template to use. type TemplatableAnalysisMetrics struct { AnalysisMetrics Template AnalysisTemplateRef `json:"template"` } // TemplatableAnalysisLog wraps AnalysisLog to allow specify template to use. type TemplatableAnalysisLog struct { AnalysisLog Template AnalysisTemplateRef `json:"template"` } // TemplatableAnalysisHTTP wraps AnalysisHTTP to allow specify template to use. type TemplatableAnalysisHTTP struct { AnalysisHTTP Template AnalysisTemplateRef `json:"template"` } type SealedSecretMapping struct { // Relative path from the application directory to sealed secret file. Path string `json:"path"` // The filename for the decrypted secret. // Empty means the same name with the sealed secret file. OutFilename string `json:"outFilename"` // The directory name where to put the decrypted secret. // Empty means the same directory with the sealed secret file. OutDir string `json:"outDir"` } type SecretEncryption struct { // List of encrypted secrets. EncryptedSecrets map[string]string `json:"encryptedSecrets"` // List of files to be decrypted before using. DecryptionTargets []string `json:"decryptionTargets"` } func (e *SecretEncryption) Validate() error { for k, v := range e.EncryptedSecrets { if k == "" { return fmt.Errorf("key field in encryptedSecrets must not be empty") } if v == "" { return fmt.Errorf("value field of %s in encryptedSecrets must not be empty", k) } } return nil } // DeploymentNotification represents the way to send to users. type DeploymentNotification struct { // List of users to be notified for each event. 
Mentions []NotificationMention `json:"mentions"` } func (n *DeploymentNotification) FindSlackAccounts(event model.NotificationEventType) []string { for _, v := range n.Mentions { if e := "EVENT_" + v.Event; e == event.String() { return v.Slack } } return []string{} } type NotificationMention struct { // The event to be notified to users. Event string `json:"event"` // List of user IDs for mentioning in Slack. // See https://api.slack.com/reference/surfaces/formatting#mentioning-users // for more information on how to check them. Slack []string `json:"slack"` // TODO: Support for email notification // The email for notification. Email []string `json:"email"` } func (n *NotificationMention) Validate() error { e := "EVENT_" + n.Event for k := range model.NotificationEventType_value { if e == k { return nil } } return fmt.Errorf("event %q is incorrect as NotificationEventType", n.Event) }
1
21,408
nit: this is a package-wide constant, so it would be better to narrow the scope, like `allEventsSign`.
pipe-cd-pipe
go
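One way to read the suggestion above is to keep the event prefix as a function-local constant instead of a package-wide one; a minimal sketch, where the name eventPrefix is illustrative and not taken from the project:

// Keep the prefix next to its only caller instead of exporting it package-wide.
func (n *DeploymentNotification) FindSlackAccounts(event model.NotificationEventType) []string {
	const eventPrefix = "EVENT_"
	for _, v := range n.Mentions {
		if eventPrefix+v.Event == event.String() {
			return v.Slack
		}
	}
	return []string{}
}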
@@ -111,6 +111,15 @@ def generate_thrift_files(thrift_files_dir, env, silent=True): LOG.error('Failed to generate viewer server files') return ret + auth_thrift = os.path.join(thrift_files_dir, 'authentication.thrift') + auth_thrift = 'authentication.thrift' + auth_cmd = ['thrift', '-r', '-I', '.', + '--gen', 'py', auth_thrift] + ret = run_cmd(auth_cmd, thrift_files_dir, env, silent=silent) + if ret: + LOG.error('Failed to generate authentication interface files') + return ret + # ------------------------------------------------------------------- def generate_documentation(doc_root, env, silent=True):
1
#!/usr/bin/env python """ CodeChecker packager script creates a package based on the given layout config. """ from __future__ import print_function import argparse import errno import json import logging import ntpath import os import shutil import sys try: import urlparse except ImportError: import urllib.parse as urlparse import tarfile import subprocess import time import shlex import platform from distutils.spawn import find_executable LOG = logging.getLogger('Packager') msg_formatter = logging.Formatter('[%(levelname)s] - %(message)s') log_handler = logging.StreamHandler() log_handler.setFormatter(msg_formatter) LOG.setLevel(logging.INFO) LOG.addHandler(log_handler) # ------------------------------------------------------------------- def run_cmd(cmd, cwd=None, env=None, silent=False): """ Run a command. """ LOG.debug(' '.join(cmd)) LOG.debug(cwd) try: stdout = subprocess.PIPE stderr = subprocess.PIPE if silent: stdout = None stderr = None proc = subprocess.Popen(cmd, cwd=cwd, stdout=stdout, stderr=stderr, env=env) proc.wait() ret = proc.returncode LOG.debug(ret) return ret except TypeError as type_error: LOG.error('Failed to run ' + ' '.join(cmd)) LOG.error(type_error) sys.exit(1) except OSError as os_error: LOG.error('Failed to run ' + ' '.join(cmd)) LOG.error(os_error) sys.exit(1) # ------------------------------------------------------------------- def build_ld_logger(ld_logger_path, env, arch=None, clean=True, silent=True): """ Build ld logger. """ LOG.info('Building ld logger ...') LOG.debug(ld_logger_path) if clean: make_cmd = ['make', '-f', 'Makefile.manual', 'clean'] ret = run_cmd(make_cmd, ld_logger_path, env, silent=silent) if ret: LOG.error('Failed to run: ' + ' '.join(make_cmd)) return ret if arch is None: make_cmd = ['make', '-f', 'Makefile.manual'] elif arch == '32': make_cmd = ['make', '-f', 'Makefile.manual', 'pack32bit'] elif arch == '64': make_cmd = ['make', '-f', 'Makefile.manual', 'pack64bit'] ret = run_cmd(make_cmd, ld_logger_path, env, silent=silent) if ret: LOG.error('Failed to run: ' + ' '.join(make_cmd)) return ret # ------------------------------------------------------------------- def generate_thrift_files(thrift_files_dir, env, silent=True): """ Generate python and javascript files from thrift IDL. """ LOG.info('Generating thrift files ...') rss_thrift = 'report_storage_server.thrift' rss_cmd = ['thrift', '-r', '-I', '.', '--gen', 'py', rss_thrift] ret = run_cmd(rss_cmd, thrift_files_dir, env, silent=silent) if ret: LOG.error('Failed to generate storage server files') return ret rvs_thrift = 'report_viewer_server.thrift' rvs_cmd = ['thrift', '-r', '-I', '.', '--gen', 'py', '--gen', 'js:jquery', rvs_thrift] ret = run_cmd(rvs_cmd, thrift_files_dir, env, silent=silent) if ret: LOG.error('Failed to generate viewer server files') return ret # ------------------------------------------------------------------- def generate_documentation(doc_root, env, silent=True): """ Generate user guide and other documentation. """ LOG.info('Generating documentation ...') doc_gen_cmd = ['doxygen', 'Doxyfile.in'] LOG.debug(doc_gen_cmd) ret = run_cmd(doc_gen_cmd, doc_root, env, silent=silent) if ret: LOG.error('Failed to generate documentation') return ret # ------------------------------------------------------------------- def create_folder_layout(path, layout): """ Create package directory layout. 
""" package_root = layout['root'] if os.path.exists(path): LOG.info('Removing previous package') if os.path.exists(package_root): shutil.rmtree(package_root) else: os.makedirs(path) LOG.info('Creating package layout') LOG.debug(layout) os.makedirs(package_root) for key, folder in layout.items(): if key != 'root': try: directory = os.path.join(package_root, folder) os.makedirs(directory) except OSError as os_err: if os_err.errno != errno.EEXIST: LOG.warning(directory) LOG.warning(os_err.strerror) sys.exit() # ------------------------------------------------------------------- def copy_tree(src, dst): """ Copy file tree. """ if not os.path.exists(dst): os.makedirs(dst) for item in os.listdir(src): source = os.path.join(src, item) destination = os.path.join(dst, item) if os.path.isdir(source): copy_tree(source, destination) else: delta = os.stat(src).st_mtime - os.stat(dst).st_mtime if not os.path.exists(destination) or delta > 0: shutil.copy2(source, destination) # ------------------------------------------------------------------- def handle_external_file(dep, clean, env, verbose): """ Download (and if needed, extract) files from the given url. Currently supports handling of files with the following extensions: .tar.gz, .js, .css """ supported_exts = { 'compressed': ['.tar.gz'], 'uncompressed': ['.js', '.css'] } source_package = dep['source_package'] directory = dep['directory'] if clean and os.path.exists(directory): LOG.debug('Removing directory ' + directory) shutil.rmtree(directory) else: if os.path.exists(directory): return os.makedirs(directory) download_cmd = [] download_cmd.extend(shlex.split(source_package['download_cmd'])) file_url = source_package['url'] download_cmd.append(file_url) option = source_package['option'] download_cmd.append(option) file_name = source_package['name'] download_cmd.append(file_name) LOG.info('Downloading ...') if run_cmd(download_cmd, directory, env, verbose): LOG.error('Failed to get dependency') sys.exit(1) url_data = urlparse.urlparse(file_url) head, file_name = ntpath.split(url_data.path) head, file_ext = os.path.splitext(file_name) if file_ext == '.gz' and head.endswith('.tar'): file_ext = '.tar.gz' if file_ext in supported_exts['compressed']: if file_ext == '.tar.gz': file_name = os.path.join(directory, file_name) with tarfile.open(file_name) as tar: tar.extractall(directory) os.remove(file_name) else: LOG.error('Unsupported file type') elif file_ext in supported_exts['uncompressed']: pass else: LOG.error('Unsupported file type') # ------------------------------------------------------------------- def handle_external_repository(dep, clean, env, verbose): """ Download external repository. 
""" repository = dep['repository'] if repository['type'] == 'git': directory = dep['directory'] if clean and os.path.exists(directory): LOG.debug('Removing directory ' + directory) shutil.rmtree(directory) else: if os.path.exists(directory): return git_cmd = ['git', 'clone', '--depth', '1', '--single-branch'] git_tag = repository.get('git_tag') if git_tag: git_cmd.append('-b') git_cmd.append(git_tag) git_cmd.append(repository.get('url')) git_cmd.append(directory) dir_name, tail = ntpath.split(directory) LOG.info('Downloading ...') if run_cmd(git_cmd, dir_name, env=env, silent=verbose): LOG.error('Failed to get dependency') sys.exit(1) else: LOG.error('Unsupported repository type') # ------------------------------------------------------------------- def handle_ext_source_dep(dep, clean, env, verbose): """ Handle external project dependencies.""" LOG.info('Checking source: ' + dep['name']) if dep.get('source_package') is None and dep.get('repository') is None: LOG.error('Missing download for source dependency: ' + dep['name']) sys.exit(1) if dep.get('source_package'): handle_external_file(dep, clean, env, verbose) if dep.get('repository'): handle_external_repository(dep, clean, env, verbose) LOG.info('Done.') # ------------------------------------------------------------------- def compress_to_tar(source_folder, target_folder, compress): """ Compress folder to tar.gz file. """ source = source_folder.rstrip('//') target = target_folder.rstrip('//') if source == target: # the folder which should be compressed is # the same as the target folder return False target = os.path.join(target_folder, compress) t = tarfile.open(target, mode='w:gz') head, tail = os.path.split(source) for root, dirs, files in os.walk(source_folder): for f in files: cfile = os.path.join(root, f) rename = cfile.replace(head, '') LOG.debug('Compressing: %s' % rename) t.add(cfile, arcname=rename) t.close() return True # ------------------------------------------------------------------- def get_ext_package_data(deps, dep_name): """ Search for a dependency in the list. """ for dep in deps: if dep['name'] == dep_name: return dep # ------------------------------------------------------------------- def build_package(repository_root, build_package_config, env=None): """ Package can be integrated easier to build systems if required. """ verbose = build_package_config.get('verbose_log') if verbose: LOG.setLevel(logging.DEBUG) LOG.debug(env) LOG.debug(build_package_config) LOG.debug('Using build config') for val in build_package_config.items(): LOG.debug(val) with open(build_package_config['package_layout_config'], 'r') as pckg_layout_cfg: package_layout_content = pckg_layout_cfg.read() LOG.debug(package_layout_content) layout = json.loads(package_layout_content) LOG.debug(layout) package_layout = layout['static'] output_dir = build_package_config['output_dir'] package_root = os.path.join(output_dir, 'CodeChecker') package_layout['root'] = package_root # Get external dependencies. 
ext_deps_dir = os.path.join(repository_root, 'external-source-deps') ext_deps_config = os.path.join(ext_deps_dir, 'ext_source_deps_config.json') LOG.debug(ext_deps_config) with open(ext_deps_config, 'r') as ext_cfg: ext_dep_cfg = ext_cfg.read() ext_deps = json.loads(ext_dep_cfg) clean = build_package_config['clean'] for dep in ext_deps: dep['directory'] = os.path.join(repository_root, dep['directory']) handle_ext_source_dep(dep, clean, env, verbose) external_dependencies = {dep['name']: dep for dep in ext_deps} LOG.info('Getting external dependencies done.') # Create package folder layout. create_folder_layout(output_dir, package_layout) # Check scan-build-py (intercept). LOG.info('Checking source: llvm scan-build-py (intercept)') intercept_build_executable = find_executable('intercept-build') if intercept_build_executable is not None: LOG.info('Available') else: if platform.system() == 'Darwin': LOG.error('Not exists, scan-build-py (intercept) ' 'is mandatory on OS X!') sys.exit(1) # Build ld logger because intercept is not available. if platform.system() == 'Linux': LOG.warning('Not exists, build ld logger') ld_logger_path = build_package_config['ld_logger_path'] if ld_logger_path: ld_logger_build = os.path.join(ld_logger_path, 'build') ld_logger32 = build_package_config.get('ld_logger_32') ld_logger64 = build_package_config.get('ld_logger_64') rebuild = build_package_config.get('rebuild_ld_logger') or clean arch = None if ld_logger32 == ld_logger64: # Build both versions. pass elif ld_logger32: arch = '32' elif ld_logger64: arch = '64' if build_ld_logger(ld_logger_path, env, arch, rebuild, verbose): LOG.error('Failed to build ld logger') sys.exit() # Copy ld logger files. target = os.path.join(package_root, package_layout['ld_logger']) copy_tree(ld_logger_build, target) curr_dir = os.getcwd() os.chdir(os.path.join(package_root, package_layout['bin'])) logger_symlink = os.path.join('../', package_layout['ld_logger'], 'bin', 'ldlogger') os.symlink(logger_symlink, 'ldlogger') os.chdir(curr_dir) else: LOG.info('Skipping ld logger from package') # Generate gen files with thrift. thrift_files_dir = os.path.join(repository_root, 'thrift_api') generated_py_files = os.path.join(thrift_files_dir, 'gen-py') generated_js_files = os.path.join(thrift_files_dir, 'gen-js') # Cleanup already generated files. if os.path.exists(generated_py_files): shutil.rmtree(generated_py_files) if os.path.exists(generated_js_files): shutil.rmtree(generated_js_files) generate_thrift_files(thrift_files_dir, env, verbose) target = os.path.join(package_root, package_layout['codechecker_gen']) copy_tree(generated_py_files, target) target = os.path.join(package_root, package_layout['web_client']) copy_tree(generated_js_files, target) # The cmd_line client. cmdline_client_files = os.path.join(repository_root, 'viewer_clients', 'cmdline_client') target = os.path.join(package_root, package_layout['cmdline_client']) copy_tree(cmdline_client_files, target) # Generate documentation. generate_documentation(repository_root, env, verbose) source = os.path.join(repository_root, 'gen-docs', 'html') target = os.path.join(package_root, package_layout['docs']) copy_tree(source, target) source = os.path.join(repository_root, 'docs', 'checker_docs') target = os.path.join(package_root, package_layout['checker_md_docs']) copy_tree(source, target) # Thift js. 
thrift_dep = external_dependencies['thrift'] thrift_root = os.path.join(repository_root, thrift_dep.get('directory')) thift_js_files = os.path.join(thrift_root, 'lib', 'js', 'src') target = os.path.join(package_root, package_layout['js_thrift']) copy_tree(thift_js_files, target) # CodeMirror. codemirror_dep = external_dependencies['codemirror'] codemirror_root = os.path.join(repository_root, codemirror_dep.get('directory')) target = os.path.join(package_root, package_layout['web_client_codemirror']) copy_tree(codemirror_root, target) # HighlightJs. highlightjs_dep = external_dependencies['highlightjs'] highlightjs_root = os.path.join(repository_root, highlightjs_dep.get('directory')) target = os.path.join(package_root, package_layout['web_client_highlightjs']) copy_tree(highlightjs_root, target) # HighlightJs_css. highlightjs_css_dep = external_dependencies['highlightjs_css'] highlightjs_css_root = os.path.join(repository_root, highlightjs_css_dep.get('directory')) target = os.path.join(package_root, package_layout['web_client_highlightjs']) target = os.path.join(target, 'css') copy_tree(highlightjs_css_root, target) # Dojo. dojo_dep = external_dependencies['dojotoolkit'] file_url = dojo_dep['source_package']['url'] url_data = urlparse.urlparse(file_url) head, file_name = ntpath.split(url_data.path) head, tail = file_name.split('.tar.gz') dojo_root = os.path.join(repository_root, dojo_dep.get('directory')) dojo_root = os.path.join(dojo_root, head) target = os.path.join(package_root, package_layout['web_client_dojo']) copy_tree(dojo_root, target) # Marked. marked_dep = external_dependencies['marked'] marked_root = os.path.join(repository_root, marked_dep.get('directory')) target = os.path.join(package_root, package_layout['web_client_marked']) shutil.copy(os.path.join(marked_root, 'marked.min.js'), target) # JsPlumb. jsplumb_dep = external_dependencies['jsplumb'] jsplumb_root = os.path.join(repository_root, jsplumb_dep.get('directory')) target = os.path.join(package_root, package_layout['web_client_jsplumb']) jsplumb = os.path.join(jsplumb_root, 'dist', 'js', 'jquery.jsPlumb-1.7.6-min.js') shutil.copy(jsplumb, target) # Add jQuery for JsPlumb. target = os.path.join(target, 'external') if not os.path.exists(target): os.mkdir(target) jquery = os.path.join(jsplumb_root, 'external', 'jquery-1.9.0-min.js') shutil.copy(jquery, target) # config files LOG.debug('Copy config files') source = os.path.join(repository_root, 'config') target = os.path.join(package_root, package_layout['config']) copy_tree(source, target) version_file = os.path.join(target, 'version.json') LOG.debug('Extending version file: ' + version_file) with open(version_file) as v_file: version_json_data = json.load(v_file) git_hash = '' try: git_hash_cmd = ['git', 'rev-parse', 'HEAD'] git_hash = subprocess.check_output(git_hash_cmd, cwd=repository_root) git_hash = str(git_hash.rstrip()) except subprocess.CalledProcessError as cperr: LOG.error('Failed to get last commit hash.') LOG.error(str(cperr)) except OSError as oerr: LOG.error('Failed to run command:' + ' '.join(git_hash_cmd)) LOG.error(str(oerr)) sys.exit(1) version_json_data['git_hash'] = git_hash time_now = time.strftime("%Y-%m-%dT%H:%M") version_json_data['package_build_date'] = time_now # Rewrite version config file with the extended data. with open(version_file, 'w') as v_file: v_file.write(json.dumps(version_json_data, sort_keys=True, indent=4)) # CodeChecker web client. 
LOG.debug('Copy web client files') source = os.path.join(repository_root, 'viewer_clients', 'web-client') target = os.path.join(package_root, package_layout['www']) copy_tree(source, target) # CodeChecker main scripts. LOG.debug('Copy main codechecker files') source = os.path.join(repository_root, 'codechecker', 'CodeChecker.py') target = os.path.join(package_root, package_layout['cc_bin']) shutil.copy2(source, target) source = os.path.join(repository_root, 'codechecker', 'CodeChecker') target = os.path.join(package_root, package_layout['bin']) shutil.copy2(source, target) # CodeChecker modules. LOG.debug('Copy codechecker modules') source = os.path.join(repository_root, 'codechecker_lib') target = os.path.join(package_root, package_layout['codechecker_lib']) copy_tree(source, target) # CodeChecker db model. LOG.debug('Copy codechecker database model') source = os.path.join(repository_root, 'db_model') target = os.path.join(package_root, package_layout['codechecker_db_model']) copy_tree(source, target) # CodeChecker db migrate. LOG.debug('Copy codechecker database migration') source = os.path.join(repository_root, 'db_migrate') target = os.path.join(package_root, package_layout['codechecker_db_migrate']) copy_tree(source, target) # CodeChecker storage server. LOG.debug('Copy codechecker storage server') source = os.path.join(repository_root, 'storage_server') target = os.path.join(package_root, package_layout['storage_server_modules']) copy_tree(source, target) # CodeChecker viewer server. LOG.debug('Copy codechecker viewer server') source = os.path.join(repository_root, 'viewer_server') target = os.path.join(package_root, package_layout['viewer_server_modules']) copy_tree(source, target) # License. license_file = os.path.join(repository_root, 'LICENSE.TXT') target = os.path.join(package_root) shutil.copy(license_file, target) compress = build_package_config.get('compress') if compress: LOG.info('Compressing package ...') compress_to_tar(package_root, output_dir, compress) LOG.info('Creating package finished successfully.') # ------------------------------------------------------------------- def main(): """ Main script. 
""" repository_root = os.path.dirname(os.path.realpath(__file__)) default_package_layout = os.path.join(repository_root, "config", "package_layout.json") description = '''CodeChecker packager script''' parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=description) parser.add_argument("-l", action="store", dest="package_layout_config", default=default_package_layout, help="Package layout configuration file.") parser.add_argument("-o", "--output", required=True, action="store", dest="output_dir") parser.add_argument("--clean", action="store_true", dest='clean', help='Clean external dependencies') default_logger_dir = os.path.join(repository_root, 'external-source-deps', 'build-logger') logger_group = parser.add_argument_group('ld-logger') logger_group.add_argument("--ld-logger", action="store", dest="ld_logger_path", default=default_logger_dir, help="Ld logger source path.") logger_group.add_argument('--32', action='store_true', dest="ld_logger_32", help='Build for 32bit architecture.') logger_group.add_argument('--64', action='store_true', dest="ld_logger_64", help='Build for 64bit architecture.') logger_group.add_argument('--rebuild', action='store_true', dest='rebuild_ld_logger', help='Clean and rebuild logger.') parser.add_argument("--compress", action="store", dest="compress", default=False, help="Compress package to tar.gz") parser.add_argument("-v", action="store_true", dest="verbose_log", help='Set log level to higher verbosity.') args = vars(parser.parse_args()) build_package_config = {k: args[k] for k in args if args[k] is not None} build_package(repository_root, build_package_config) if __name__ == "__main__": main()
1
6,139
There seems to be some repetition. Does a local function make this code shorter overall?
Ericsson-codechecker
c
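A rough sketch of the local-function idea raised in the comment above, reusing the script's run_cmd and LOG; the helper name gen is made up, and the return value differs slightly (0 instead of None on success):

def generate_thrift_files(thrift_files_dir, env, silent=True):
    """ Generate python and javascript files from thrift IDL. """

    def gen(thrift_file, extra_gen, err_msg):
        # Build and run one thrift invocation, logging err_msg on failure.
        cmd = ['thrift', '-r', '-I', '.', '--gen', 'py'] + extra_gen + [thrift_file]
        ret = run_cmd(cmd, thrift_files_dir, env, silent=silent)
        if ret:
            LOG.error(err_msg)
        return ret

    return (gen('report_storage_server.thrift', [],
                'Failed to generate storage server files') or
            gen('report_viewer_server.thrift', ['--gen', 'js:jquery'],
                'Failed to generate viewer server files') or
            gen('authentication.thrift', [],
                'Failed to generate authentication interface files'))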
@@ -298,6 +298,7 @@ function diffElementNodes( } if (dom == null) { + isHydrating = false; if (newVNode.type === null) { return document.createTextNode(newProps); }
1
import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { Component } from '../component'; import { Fragment } from '../create-element'; import { diffChildren } from './children'; import { diffProps } from './props'; import { assign, removeNode } from '../util'; import options from '../options'; /** * Diff two virtual nodes and apply proper changes to the DOM * @param {import('../internal').PreactElement} parentDom The parent of the DOM element * @param {import('../internal').VNode} newVNode The new virtual node * @param {import('../internal').VNode} oldVNode The old virtual node * @param {object} globalContext The current context object. Modified by getChildContext * @param {boolean} isSvg Whether or not this element is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} commitQueue List of components * which have callbacks to invoke in commitRoot * @param {Element | Text} oldDom The current attached DOM * element any new dom elements should be placed around. Likely `null` on first * render (except when hydrating). Can be a sibling DOM element when diffing * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. * @param {boolean} [isHydrating] Whether or not we are in hydration */ export function diff( parentDom, newVNode, oldVNode, globalContext, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ) { let tmp, newType = newVNode.type; // When passing through createElement it assigns the object // constructor as undefined. This to prevent JSON-injection. if (newVNode.constructor !== undefined) return null; if ((tmp = options._diff)) tmp(newVNode); try { outer: if (typeof newType == 'function') { let c, isNew, oldProps, oldState, snapshot, clearProcessingException; let newProps = newVNode.props; // Necessary for createContext api. Setting this property will pass // the context value as `this.context` just for this component. tmp = newType.contextType; let provider = tmp && globalContext[tmp._id]; let componentContext = tmp ? provider ? 
provider.props.value : tmp._defaultValue : globalContext; // Get component and set it to `c` if (oldVNode._component) { c = newVNode._component = oldVNode._component; clearProcessingException = c._processingException = c._pendingError; } else { // Instantiate the new component if ('prototype' in newType && newType.prototype.render) { newVNode._component = c = new newType(newProps, componentContext); // eslint-disable-line new-cap } else { newVNode._component = c = new Component(newProps, componentContext); c.constructor = newType; c.render = doRender; } if (provider) provider.sub(c); c.props = newProps; if (!c.state) c.state = {}; c.context = componentContext; c._globalContext = globalContext; isNew = c._dirty = true; c._renderCallbacks = []; } // Invoke getDerivedStateFromProps if (c._nextState == null) { c._nextState = c.state; } if (newType.getDerivedStateFromProps != null) { if (c._nextState == c.state) { c._nextState = assign({}, c._nextState); } assign( c._nextState, newType.getDerivedStateFromProps(newProps, c._nextState) ); } oldProps = c.props; oldState = c.state; // Invoke pre-render lifecycle methods if (isNew) { if ( newType.getDerivedStateFromProps == null && c.componentWillMount != null ) { c.componentWillMount(); } if (c.componentDidMount != null) { c._renderCallbacks.push(c.componentDidMount); } } else { if ( newType.getDerivedStateFromProps == null && newProps !== oldProps && c.componentWillReceiveProps != null ) { c.componentWillReceiveProps(newProps, componentContext); } if ( !c._force && c.shouldComponentUpdate != null && c.shouldComponentUpdate(newProps, c._nextState, componentContext) === false ) { c.props = newProps; c.state = c._nextState; c._dirty = false; c._vnode = newVNode; newVNode._dom = oldVNode._dom; newVNode._children = oldVNode._children; if (c._renderCallbacks.length) { commitQueue.push(c); } for (tmp = 0; tmp < newVNode._children.length; tmp++) { if (newVNode._children[tmp]) { newVNode._children[tmp]._parent = newVNode; } } break outer; } if (c.componentWillUpdate != null) { c.componentWillUpdate(newProps, c._nextState, componentContext); } if (c.componentDidUpdate != null) { c._renderCallbacks.push(() => { c.componentDidUpdate(oldProps, oldState, snapshot); }); } } c.context = componentContext; c.props = newProps; c.state = c._nextState; if ((tmp = options._render)) tmp(newVNode); c._dirty = false; c._vnode = newVNode; c._parentDom = parentDom; tmp = c.render(c.props, c.state, c.context); let isTopLevelFragment = tmp != null && tmp.type == Fragment && tmp.key == null; newVNode._children = isTopLevelFragment ? tmp.props.children : Array.isArray(tmp) ? 
tmp : [tmp]; if (c.getChildContext != null) { globalContext = assign(assign({}, globalContext), c.getChildContext()); } if (!isNew && c.getSnapshotBeforeUpdate != null) { snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState); } diffChildren( parentDom, newVNode, oldVNode, globalContext, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ); c.base = newVNode._dom; if (c._renderCallbacks.length) { commitQueue.push(c); } if (clearProcessingException) { c._pendingError = c._processingException = null; } c._force = false; } else { newVNode._dom = diffElementNodes( oldVNode._dom, newVNode, oldVNode, globalContext, isSvg, excessDomChildren, commitQueue, isHydrating ); } if ((tmp = options.diffed)) tmp(newVNode); } catch (e) { options._catchError(e, newVNode, oldVNode); } return newVNode._dom; } /** * @param {Array<import('../internal').Component>} commitQueue List of components * which have callbacks to invoke in commitRoot * @param {import('../internal').VNode} root */ export function commitRoot(commitQueue, root) { if (options._commit) options._commit(root, commitQueue); commitQueue.some(c => { try { commitQueue = c._renderCallbacks; c._renderCallbacks = []; commitQueue.some(cb => { cb.call(c); }); } catch (e) { options._catchError(e, c._vnode); } }); } /** * Diff two virtual nodes representing DOM element * @param {import('../internal').PreactElement} dom The DOM element representing * the virtual nodes being diffed * @param {import('../internal').VNode} newVNode The new virtual node * @param {import('../internal').VNode} oldVNode The old virtual node * @param {object} globalContext The current context object * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {*} excessDomChildren * @param {Array<import('../internal').Component>} commitQueue List of components * which have callbacks to invoke in commitRoot * @param {boolean} isHydrating Whether or not we are in hydration * @returns {import('../internal').PreactElement} */ function diffElementNodes( dom, newVNode, oldVNode, globalContext, isSvg, excessDomChildren, commitQueue, isHydrating ) { let i; let oldProps = oldVNode.props; let newProps = newVNode.props; // Tracks entering and exiting SVG namespace when descending through the tree. isSvg = newVNode.type === 'svg' || isSvg; if (excessDomChildren != null) { for (i = 0; i < excessDomChildren.length; i++) { const child = excessDomChildren[i]; // if newVNode matches an element in excessDomChildren or the `dom` // argument matches an element in excessDomChildren, remove it from // excessDomChildren so it isn't later removed in diffChildren if ( child != null && ((newVNode.type === null ? child.nodeType === 3 : child.localName === newVNode.type) || dom == child) ) { dom = child; excessDomChildren[i] = null; break; } } } if (dom == null) { if (newVNode.type === null) { return document.createTextNode(newProps); } dom = isSvg ? 
document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement( newVNode.type, newProps.is && { is: newProps.is } ); // we created a new parent, so none of the previously attached children can be reused: excessDomChildren = null; } if (newVNode.type === null) { if (oldProps !== newProps && dom.data != newProps) { dom.data = newProps; } } else if (newVNode !== oldVNode) { if (excessDomChildren != null) { excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes); } oldProps = oldVNode.props || EMPTY_OBJ; let oldHtml = oldProps.dangerouslySetInnerHTML; let newHtml = newProps.dangerouslySetInnerHTML; // During hydration, props are not diffed at all (including dangerouslySetInnerHTML) // @TODO we should warn in debug mode when props don't match here. if (!isHydrating) { if (oldProps === EMPTY_OBJ) { oldProps = {}; for (let i = 0; i < dom.attributes.length; i++) { oldProps[dom.attributes[i].name] = dom.attributes[i].value; } } if (newHtml || oldHtml) { // Avoid re-applying the same '__html' if it did not changed between re-render if (!newHtml || !oldHtml || newHtml.__html != oldHtml.__html) { dom.innerHTML = (newHtml && newHtml.__html) || ''; } } } diffProps(dom, newProps, oldProps, isSvg, isHydrating); newVNode._children = newVNode.props.children; // If the new vnode didn't have dangerouslySetInnerHTML, diff its children if (!newHtml) { diffChildren( dom, newVNode, oldVNode, globalContext, newVNode.type === 'foreignObject' ? false : isSvg, excessDomChildren, commitQueue, EMPTY_OBJ, isHydrating ); } // (as above, don't diff props during hydration) if (!isHydrating) { if ( 'value' in newProps && newProps.value !== undefined && newProps.value !== dom.value ) { dom.value = newProps.value == null ? '' : newProps.value; } if ( 'checked' in newProps && newProps.checked !== undefined && newProps.checked !== dom.checked ) { dom.checked = newProps.checked; } } } return dom; } /** * Invoke or update a ref, depending on whether it is a function or object ref. * @param {object|function} ref * @param {any} value * @param {import('../internal').VNode} vnode */ export function applyRef(ref, value, vnode) { try { if (typeof ref == 'function') ref(value); else ref.current = value; } catch (e) { options._catchError(e, vnode); } } /** * Unmount a virtual node from the tree and apply DOM changes * @param {import('../internal').VNode} vnode The virtual node to unmount * @param {import('../internal').VNode} parentVNode The parent of the VNode that * initiated the unmount * @param {boolean} [skipRemove] Flag that indicates that a parent node of the * current element is already detached from the DOM. */ export function unmount(vnode, parentVNode, skipRemove) { let r; if (options.unmount) options.unmount(vnode); if ((r = vnode.ref)) { if (!r.current || r.current === vnode._dom) applyRef(r, null, parentVNode); } let dom; if (!skipRemove && typeof vnode.type != 'function') { skipRemove = (dom = vnode._dom) != null; } // Must be set to `undefined` to properly clean up `_nextDom` // for which `null` is a valid value. See comment in `create-element.js` vnode._dom = vnode._nextDom = undefined; if ((r = vnode._component) != null) { if (r.componentWillUnmount) { try { r.componentWillUnmount(); } catch (e) { options._catchError(e, parentVNode); } } r.base = r._parentDom = null; } if ((r = vnode._children)) { for (let i = 0; i < r.length; i++) { if (r[i]) unmount(r[i], parentVNode, skipRemove); } } if (dom != null) removeNode(dom); } /** The `.render()` method for a PFC backing instance. 
*/ function doRender(props, state, context) { return this.constructor(props, context); }
1
15,531
Might be cheaper to reuse the `null` assignment on line 313 and set `isHydrating` to `null` instead, WDYT?
preactjs-preact
js
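A minimal sketch of the alternative floated in the comment above; it assumes `isHydrating` is only ever tested for truthiness downstream, so a falsy `null` behaves the same as `false`:

// inside diffElementNodes(), in the `dom == null` branch:
// we created a new parent, so none of the previously attached children can
// be reused; `null` is falsy, so the same assignment can double as
// "hydration is over" instead of a separate `isHydrating = false;`.
excessDomChildren = isHydrating = null;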
@@ -22,7 +22,6 @@ import java.util.List; @AutoValue public abstract class TestCaseView { - public abstract String clientMethodName(); public abstract InitCodeView initCode();
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.viewmodel.testing; import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType; import com.google.api.codegen.viewmodel.ClientMethodType; import com.google.api.codegen.viewmodel.InitCodeView; import com.google.auto.value.AutoValue; import java.util.List; @AutoValue public abstract class TestCaseView { public abstract String clientMethodName(); public abstract InitCodeView initCode(); public abstract ClientMethodType clientMethodType(); public abstract MockGrpcResponseView mockResponse(); public abstract List<ClientTestAssertView> asserts(); public abstract String requestTypeName(); public abstract String responseTypeName(); public abstract List<PageStreamingResponseView> pageStreamingResponseViews(); public abstract String name(); public abstract String nameWithException(); public abstract String serviceConstructorName(); public abstract String mockServiceVarName(); public abstract boolean hasRequestParameters(); public abstract boolean hasReturnValue(); public abstract GrpcStreamingType grpcStreamingType(); public abstract String mockGrpcStubTypeName(); public abstract String createStubFunctionName(); public abstract String grpcStubCallString(); public static Builder newBuilder() { return new AutoValue_TestCaseView.Builder(); } @AutoValue.Builder public abstract static class Builder { public abstract Builder clientMethodName(String val); public abstract Builder name(String val); public abstract Builder nameWithException(String val); public abstract Builder serviceConstructorName(String val); public abstract Builder mockServiceVarName(String val); public abstract Builder initCode(InitCodeView val); public abstract Builder clientMethodType(ClientMethodType val); public abstract Builder mockResponse(MockGrpcResponseView val); public abstract Builder asserts(List<ClientTestAssertView> val); public abstract Builder requestTypeName(String val); public abstract Builder responseTypeName(String val); public abstract Builder pageStreamingResponseViews(List<PageStreamingResponseView> val); public abstract Builder hasRequestParameters(boolean val); public abstract Builder hasReturnValue(boolean val); public abstract Builder grpcStreamingType(GrpcStreamingType val); public abstract Builder mockGrpcStubTypeName(String val); public abstract Builder createStubFunctionName(String val); public abstract Builder grpcStubCallString(String val); public abstract TestCaseView build(); } }
1
21,194
Revert this blank line
googleapis-gapic-generator
java
@@ -50,8 +50,8 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; - System.setProperty("solr.hdfs.blockcache.blocksperbank", "512"); - System.setProperty("tests.hdfs.numdatanodes", "1"); + System.clearProperty("solr.hdfs.blockcache.blocksperbank"); + System.clearProperty("tests.hdfs.numdatanodes"); } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.cloud; import com.carrotsearch.randomizedtesting.annotations.Nightly; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.TimeUnits; import org.apache.solr.cloud.hdfs.HdfsTestUtil; import org.apache.solr.util.BadHdfsThreadsFilter; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @Slow @Nightly @ThreadLeakFilters(defaultFilters = true, filters = { BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) @TimeoutSuite(millis = TimeUnits.HOUR) public class MoveReplicaHDFSTest extends MoveReplicaTest { private static MiniDFSCluster dfsCluster; @BeforeClass public static void setupClass() throws Exception { System.setProperty("solr.hdfs.blockcache.blocksperbank", "512"); System.setProperty("tests.hdfs.numdatanodes", "1"); dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); } @AfterClass public static void teardownClass() throws Exception { try { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; System.setProperty("solr.hdfs.blockcache.blocksperbank", "512"); System.setProperty("tests.hdfs.numdatanodes", "1"); } } @Override protected String getConfigSet() { return "cloud-hdfs"; } @Test public void testNormalMove() throws Exception { inPlaceMove = false; test(); } @Test //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018 //commented 9-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018 //commented 23-AUG-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018 // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018 //commented 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018 //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018 public void testNormalFailedMove() throws Exception { inPlaceMove = false; testFailedMove(); } @Test @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-12080") // added 03-Oct-2018 public void testFailedMove() throws Exception { super.testFailedMove(); } }
1
28,756
This was introduced recently for the Hadoop 3 upgrade. It is only a copy/paste error, but it is definitely causing some of the new test failures.
apache-lucene-solr
java
@@ -2,6 +2,7 @@ class ProposalsController < ApplicationController include TokenAuth skip_before_action :authenticate_user!, only: [:approve] + before_action :check_disabled_client # TODO use Policy for all actions before_action ->{authorize proposal}, only: [:show, :cancel, :cancel_form, :history] before_action :needs_token_on_get, only: :approve
1
class ProposalsController < ApplicationController include TokenAuth skip_before_action :authenticate_user!, only: [:approve] # TODO use Policy for all actions before_action ->{authorize proposal}, only: [:show, :cancel, :cancel_form, :history] before_action :needs_token_on_get, only: :approve before_action :validate_access, only: :approve helper_method :display_status add_template_helper ProposalsHelper rescue_from Pundit::NotAuthorizedError, with: :auth_errors def show @proposal = proposal.decorate end def index @CLOSED_PROPOSAL_LIMIT = 10 @pending_data = listing.pending @pending_review_data = listing.pending_review @approved_data = listing.approved.alter_query{ |rel| rel.limit(@CLOSED_PROPOSAL_LIMIT) } @cancelled_data = listing.cancelled end def archive @proposals_data = listing.closed end def cancel_form @proposal = proposal.decorate end def cancel if params[:reason_input].present? comments = "Request cancelled with comments: " + params[:reason_input] proposal.cancel! proposal.comments.create!(comment_text: comments, user: current_user) flash[:success] = "Your request has been cancelled" redirect_to proposal_path(proposal) Dispatcher.new.deliver_cancellation_emails(proposal, params[:reason_input]) else redirect_to( cancel_form_proposal_path(params[:id]), alert: "A reason for cancellation is required. Please indicate why this request needs to be cancelled." ) end end def approve approval = proposal.existing_approval_for(current_user) approval.update_attributes!(completer: current_user) approval.approve! flash[:success] = "You have approved #{proposal.public_id}." redirect_to proposal end def query query_listing = listing @proposals_data = query_listing.query @text = params[:text] @start_date = query_listing.start_date @end_date = query_listing.end_date end def history @container = Query::Proposal::Versions.container(proposal) @container.set_state_from_params(params) end protected def proposal @cached_proposal ||= Proposal.find(params[:id]) end def auth_errors(exception) if ['cancel','cancel_form'].include?(params[:action]) redirect_to proposal_path, alert: exception.message else super end end def listing Query::Proposal::Listing.new(current_user, params) end end
1
15,959
I think we should only need to do this for `:approve`, since we are using `authenticate_user!` for all other actions and that already checks for a disabled client.
18F-C2
rb
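Restricting the filter as the comment above proposes would look roughly like this, assuming the `check_disabled_client` filter added by the patch:

# :approve is the only action that skips authenticate_user!, so it is the
# only one that still needs an explicit disabled-client check.
before_action :check_disabled_client, only: [:approve]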
@@ -56,9 +56,10 @@ class DefaultBucketViewTest(BaseWebTest, unittest.TestCase): self.app.get(self.collection_url, headers=self.headers) def test_querystring_parameters_are_taken_into_account(self): - self.app.get(self.collection_url + '/records?_since=invalid', - headers=self.headers, - status=400) + resp = self.app.get(self.collection_url + '/records?_since=invalid', + headers=self.headers, + status=400) + self.assertIn('Content-Length', resp.headers) def test_option_is_possible_without_authentication_for_default(self): headers = 'authorization,content-type'
1
from six import text_type from uuid import UUID from cliquet.utils import hmac_digest from .support import (BaseWebTest, unittest, get_user_headers, MINIMALIST_RECORD) class DefaultBucketViewTest(BaseWebTest, unittest.TestCase): bucket_url = '/buckets/default' collection_url = '/buckets/default/collections/tasks' def test_default_bucket_exists_and_has_user_id(self): bucket = self.app.get(self.bucket_url, headers=self.headers) result = bucket.json settings = self.app.app.registry.settings hmac_secret = settings['cliquet.userid_hmac_secret'] bucket_id = hmac_digest(hmac_secret, self.principal)[:32] self.assertEqual(result['data']['id'], text_type(UUID(bucket_id))) self.assertEqual(result['permissions']['write'], [self.principal]) def test_default_bucket_collections_are_automatically_created(self): self.app.get(self.collection_url, headers=self.headers, status=200) def test_adding_a_task_for_bob_doesnt_add_it_for_alice(self): record = MINIMALIST_RECORD.copy() resp = self.app.post_json(self.collection_url + '/records', record, headers=get_user_headers('bob')) record_id = self.collection_url + '/records/' + resp.json['data']['id'] resp = self.app.get(record_id, headers=get_user_headers('alice'), status=404) def test_unauthenticated_bucket_access_raises_json_401(self): resp = self.app.get(self.bucket_url, status=401) self.assertEquals(resp.json['message'], 'Please authenticate yourself to use this endpoint.') def test_bucket_id_is_an_uuid_with_dashes(self): bucket = self.app.get(self.bucket_url, headers=self.headers) bucket_id = bucket.json['data']['id'] self.assertIn('-', bucket_id) try: UUID(bucket_id) except ValueError: self.fail('bucket_id: %s is not a valid UUID.' % bucket_id) def test_second_call_on_default_bucket_doesnt_raise_a_412(self): self.app.get(self.bucket_url, headers=self.headers) self.app.get(self.bucket_url, headers=self.headers) def test_second_call_on_default_bucket_collection_doesnt_raise_a_412(self): self.app.get(self.collection_url, headers=self.headers) self.app.get(self.collection_url, headers=self.headers) def test_querystring_parameters_are_taken_into_account(self): self.app.get(self.collection_url + '/records?_since=invalid', headers=self.headers, status=400) def test_option_is_possible_without_authentication_for_default(self): headers = 'authorization,content-type' self.app.options(self.collection_url + '/records', headers={ 'Origin': 'http://localhost:8000', 'Access-Control-Request-Method': 'GET', 'Access-Control-Request-Headers': headers}) def test_cors_headers_are_provided_on_errors(self): resp = self.app.post_json(self.collection_url + '/records', MINIMALIST_RECORD, headers=self.headers) current = resp.json['data']['last_modified'] headers = self.headers.copy() headers.update({ 'Origin': 'http://localhost:8000', 'If-None-Match': ('"%s"' % current).encode('utf-8') }) resp = self.app.get(self.collection_url + '/records', headers=headers, status=304) self.assertIn('Access-Control-Allow-Origin', resp.headers) def test_bucket_id_starting_with_default_can_still_be_created(self): # We need to create the bucket first since it is not the default bucket resp = self.app.put_json( self.bucket_url.replace('default', 'default-1234'), {"data": {}}, headers=self.headers, status=201) bucket_id = resp.json['data']['id'] self.assertEquals(bucket_id, 'default-1234') # We can then create the collection collection_url = '/buckets/default-1234/collections/default' self.app.put_json( collection_url, {"data": {}}, headers=self.headers, status=201) resp = 
self.app.get('/buckets/default-1234/collections', headers=self.headers) self.assertEquals(resp.json['data'][0]['id'], 'default')
1
8,016
I wonder if we should create new tests for header checks; here for instance we're mixing querystring and headers. Thoughts?
Kinto-kinto
py
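A possible dedicated header test along the lines the comment above suggests, leaving the original querystring test untouched; the test name is made up:

def test_400_responses_carry_a_content_length_header(self):
    resp = self.app.get(self.collection_url + '/records?_since=invalid',
                        headers=self.headers,
                        status=400)
    self.assertIn('Content-Length', resp.headers)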
@@ -42,7 +42,7 @@ module Bolt path = File.join(libexec, 'custom_facts.rb') file = { 'name' => 'custom_facts.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin' } - Bolt::Task.new(name: 'custom_facts', files: [file], metadata: metadata) + Bolt::Task.new(name: 'apply_helpers::custom_facts', files: [file], metadata: metadata) end end
1
# frozen_string_literal: true require 'base64' require 'concurrent' require 'find' require 'json' require 'logging' require 'minitar' require 'open3' require 'bolt/task' require 'bolt/util/puppet_log_level' module Bolt class Applicator def initialize(inventory, executor, modulepath, plugin_dirs, pdb_client, hiera_config, max_compiles) @inventory = inventory @executor = executor @modulepath = modulepath @plugin_dirs = plugin_dirs @pdb_client = pdb_client @hiera_config = hiera_config ? validate_hiera_config(hiera_config) : nil @pool = Concurrent::ThreadPoolExecutor.new(max_threads: max_compiles) @logger = Logging.logger[self] @plugin_tarball = Concurrent::Delay.new do build_plugin_tarball do |mod| search_dirs = [] search_dirs << mod.plugins if mod.plugins? search_dirs << mod.pluginfacts if mod.pluginfacts? search_dirs << mod.files if mod.files? search_dirs end end end private def libexec @libexec ||= File.join(Gem::Specification.find_by_name('bolt').gem_dir, 'libexec') end def custom_facts_task @custom_facts_task ||= begin path = File.join(libexec, 'custom_facts.rb') file = { 'name' => 'custom_facts.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin' } Bolt::Task.new(name: 'custom_facts', files: [file], metadata: metadata) end end def catalog_apply_task @catalog_apply_task ||= begin path = File.join(libexec, 'apply_catalog.rb') file = { 'name' => 'apply_catalog.rb', 'path' => path } metadata = { 'supports_noop' => true, 'input_method' => 'stdin' } Bolt::Task.new(name: 'apply_catalog', files: [file], metadata: metadata) end end def compile(target, ast, plan_vars) trusted = Puppet::Context::TrustedInformation.new('local', target.host, {}) catalog_input = { code_ast: ast, modulepath: @modulepath, pdb_config: @pdb_client.config.to_hash, hiera_config: @hiera_config, target: { name: target.host, facts: @inventory.facts(target), variables: @inventory.vars(target).merge(plan_vars), trusted: trusted.to_h }, inventory: @inventory.data_hash } bolt_catalog_exe = File.join(libexec, 'bolt_catalog') old_path = ENV['PATH'] ENV['PATH'] = "#{RbConfig::CONFIG['bindir']}#{File::PATH_SEPARATOR}#{old_path}" out, err, stat = Open3.capture3('ruby', bolt_catalog_exe, 'compile', stdin_data: catalog_input.to_json) ENV['PATH'] = old_path # stderr may contain formatted logs from Puppet's logger or other errors. # Print them in order, but handle them separately. Anything not a formatted log is assumed # to be an error message. logs = err.lines.map do |l| begin JSON.parse(l) rescue StandardError l end end logs.each do |log| if log.is_a?(String) @logger.error(log.chomp) else log.map { |k, v| [k.to_sym, v] }.each do |level, msg| bolt_level = Bolt::Util::PuppetLogLevel::MAPPING[level] @logger.send(bolt_level, "#{target.name}: #{msg.chomp}") end end end raise(ApplyError, target.name) unless stat.success? 
JSON.parse(out) end def validate_hiera_config(hiera_config) if File.exist?(File.path(hiera_config)) data = File.open(File.path(hiera_config), "r:UTF-8") { |f| YAML.safe_load(f.read) } unless data['version'] == 5 raise Bolt::ParseError, "Hiera v5 is required, found v#{data['version'] || 3} in #{hiera_config}" end hiera_config end end def provide_puppet_missing_errors(result) error_hash = result.error_hash exit_code = error_hash['details']['exit_code'] if error_hash && error_hash['details'] # If we get exit code 126 or 127 back, it means the shebang command wasn't found; Puppet isn't present if [126, 127].include?(exit_code) Result.new(result.target, error: { 'msg' => "Puppet is not installed on the target, please install it to enable 'apply'", 'kind' => 'bolt/apply-error' }) elsif exit_code == 1 && (error_hash['msg'] =~ /Could not find executable 'ruby.exe'/ || error_hash['msg'] =~ /The term 'ruby.exe' is not recognized as the name of a cmdlet/) # Windows does not have Ruby present Result.new(result.target, error: { 'msg' => "Puppet is not installed on the target in $env:ProgramFiles, please install it to enable 'apply'", 'kind' => 'bolt/apply-error' }) elsif exit_code == 1 && error_hash['msg'] =~ /cannot load such file -- puppet \(LoadError\)/ # Windows uses a Ruby that doesn't have Puppet installed # TODO: fix so we don't find other Rubies, or point to a known issues URL for more info Result.new(result.target, error: { 'msg' => 'Found a Ruby without Puppet present, please install Puppet ' \ "or remove Ruby from $env:Path to enable 'apply'", 'kind' => 'bolt/apply-error' }) else result end end def identify_resource_failures(result) if result.ok? && result.value['status'] == 'failed' resources = result.value['resource_statuses'] failed = resources.select { |_, r| r['failed'] }.flat_map do |key, resource| resource['events'].select { |e| e['status'] == 'failure' }.map do |event| "\n #{key}: #{event['message']}" end end result.value['_error'] = { 'msg' => "Resources failed to apply for #{result.target.name}#{failed.join}", 'kind' => 'bolt/resource-failure' } end result end def apply(args, apply_body, scope) raise(ArgumentError, 'apply requires a TargetSpec') if args.empty? type0 = Puppet.lookup(:pal_script_compiler).type('TargetSpec') Puppet::Pal.assert_type(type0, args[0], 'apply targets') @executor.report_function_call('apply') options = {} if args.count > 1 type1 = Puppet.lookup(:pal_script_compiler).type('Hash[String, Data]') Puppet::Pal.assert_type(type1, args[1], 'apply options') options = args[1] end # collect plan vars and merge them over target vars plan_vars = scope.to_hash %w[trusted server_facts facts].each { |k| plan_vars.delete(k) } targets = @inventory.get_targets(args[0]) ast = Puppet::Pops::Serialization::ToDataConverter.convert(apply_body, rich_data: true, symbol_to_string: true) notify = proc { |_| nil } r = @executor.log_action('apply catalog', targets) do futures = targets.map do |target| Concurrent::Future.execute(executor: @pool) do @executor.with_node_logging("Compiling manifest block", [target]) do compile(target, ast, plan_vars) end end end result_promises = targets.zip(futures).flat_map do |target, future| @executor.queue_execute([target]) do |transport, batch| @executor.with_node_logging("Applying manifest block", batch) do arguments = { 'catalog' => future.value, 'plugins' => plugins, '_noop' => options['_noop'] } raise future.reason if future.rejected? 
result = transport.batch_task(batch, catalog_apply_task, arguments, options, &notify) result = provide_puppet_missing_errors(result) identify_resource_failures(result) end end end @executor.await_results(result_promises) end if !r.ok && !options['_catch_errors'] raise Bolt::ApplyFailure, r end r end def plugins @plugin_tarball.value || raise(Bolt::Error.new("Failed to pack module plugins: #{@plugin_tarball.reason}", 'bolt/plugin-error')) end def build_plugin_tarball start_time = Time.now sio = StringIO.new output = Minitar::Output.new(Zlib::GzipWriter.new(sio)) Puppet.lookup(:current_environment).override_with(modulepath: @plugin_dirs).modules.each do |mod| search_dirs = yield mod parent = Pathname.new(mod.path).parent files = Find.find(*search_dirs).select { |file| File.file?(file) } files.each do |file| tar_path = Pathname.new(file).relative_path_from(parent) @logger.debug("Packing plugin #{file} to #{tar_path}") stat = File.stat(file) content = File.binread(file) output.tar.add_file_simple( tar_path.to_s, data: content, size: content.size, mode: stat.mode & 0o777, mtime: stat.mtime ) end end duration = Time.now - start_time @logger.debug("Packed plugins in #{duration * 1000} ms") output.close Base64.encode64(sio.string) ensure output&.close end end end
1
9,566
We should be able to add sensitive parameter support by hard-coding the parameters, the same way you put them into the metadata in apply_helpers.
puppetlabs-bolt
rb
@@ -0,0 +1,11 @@ +<?php + +declare(strict_types=1); + +namespace Shopsys\ShopBundle\Model\Order\Item; + +use Shopsys\FrameworkBundle\Model\Order\Item\OrderItemFactory as BaseOrderItemFactory; + +class OrderItemFactory extends BaseOrderItemFactory +{ +}
1
1
18,706
Why do you think that it is necessary to create this class?
shopsys-shopsys
php
@@ -18,8 +18,8 @@ type Hash struct { // // See http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html for more information. func ComputeHashes(r io.ReadSeeker) Hash { - r.Seek(0, 0) // Read the whole stream - defer r.Seek(0, 0) // Rewind stream at end + start, _ := r.Seek(0, 1) // Read the whole stream + defer r.Seek(start, 0) // Rewind stream at end buf := make([]byte, bufsize) hashes := [][]byte{}
1
package glacier

import (
	"crypto/sha256"
	"io"
)

const bufsize = 1024 * 1024

// Hash contains information about the tree-hash and linear hash of a
// Glacier payload. This structure is generated by ComputeHashes().
type Hash struct {
	TreeHash   []byte
	LinearHash []byte
}

// ComputeHashes computes the tree-hash and linear hash of a seekable reader r.
//
// See http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html for more information.
func ComputeHashes(r io.ReadSeeker) Hash {
	r.Seek(0, 0)       // Read the whole stream
	defer r.Seek(0, 0) // Rewind stream at end

	buf := make([]byte, bufsize)
	hashes := [][]byte{}
	hsh := sha256.New()

	for {
		// Build leaf nodes in 1MB chunks
		n, err := io.ReadAtLeast(r, buf, bufsize)
		if n == 0 {
			break
		}

		tmpHash := sha256.Sum256(buf[:n])
		hashes = append(hashes, tmpHash[:])
		hsh.Write(buf[:n]) // Track linear hash while we're at it

		if err != nil {
			break // This is the last chunk
		}
	}

	return Hash{
		LinearHash: hsh.Sum(nil),
		TreeHash:   ComputeTreeHash(hashes),
	}
}

// ComputeTreeHash builds a tree hash root node given a slice of hashes.
// The Glacier tree hash is derived from SHA256 hashes of 1MB chunks of the data.
//
// See http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html for more information.
func ComputeTreeHash(hashes [][]byte) []byte {
	if hashes == nil || len(hashes) == 0 {
		return nil
	}

	for len(hashes) > 1 {
		tmpHashes := [][]byte{}
		for i := 0; i < len(hashes); i += 2 {
			if i+1 <= len(hashes)-1 {
				tmpHash := append(append([]byte{}, hashes[i]...), hashes[i+1]...)
				tmpSum := sha256.Sum256(tmpHash)
				tmpHashes = append(tmpHashes, tmpSum[:])
			} else {
				tmpHashes = append(tmpHashes, hashes[i])
			}
		}
		hashes = tmpHashes
	}

	return hashes[0]
}
1
9,086
replacing the `1` with `io.SeekCurrent` may be good here
aws-aws-sdk-go
go
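For readers unfamiliar with the constants the reviewer mentions: io.SeekStart, io.SeekCurrent, and io.SeekEnd are the standard library's named values for the whence argument of Seek, equal to 0, 1, and 2 respectively. A minimal sketch of the suggestion, using a hypothetical helper (rememberOffset is not part of the SDK) to show the named constants in place of the magic numbers from the patch above:

package glacier

import "io"

// rememberOffset records the reader's current position and returns a
// closure that rewinds to it, using named whence constants instead of
// the bare 1 and 0 used in the patch.
func rememberOffset(r io.ReadSeeker) (restore func() error, err error) {
	// io.SeekCurrent (== 1): seek 0 bytes from the current position,
	// which reports where we are without moving the reader.
	start, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}
	// io.SeekStart (== 0): the closure rewinds to the remembered offset.
	return func() error {
		_, err := r.Seek(start, io.SeekStart)
		return err
	}, nil
}

In ComputeHashes this would replace the r.Seek(0, 1) / defer r.Seek(start, 0) pair with the equivalent named-constant calls.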
@@ -0,0 +1,15 @@ +package main + +import ( + "time" + + "gopkg.in/square/go-jose.v2" +) + +type JWKSSource interface { + // FetchJWKS returns the key set and modified time. + FetchKeySet() (*jose.JSONWebKeySet, time.Time, bool) + + // Close closes the source. + Close() error +}
1
1
12,047
nit: perhaps this file would be better named `jwks_source.go`?
spiffe-spire
go
@@ -153,6 +153,9 @@ public class Constants { // Overridable plugin load properties public static final String AZ_PLUGIN_LOAD_OVERRIDE_PROPS = "azkaban.plugin.load.override.props"; + // File containing param override configs + public static final String PARAM_OVERRIDE_FILE = "param_override.properties"; + // Azkaban event reporter constants public static class EventReporterConstants {
1
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban; import java.time.Duration; /** * Constants used in configuration files or shared among classes. * * <p>Conventions: * * <p>Internal constants to be put in the {@link Constants} class * * <p>Configuration keys to be put in the {@link ConfigurationKeys} class * * <p>Flow level properties keys to be put in the {@link FlowProperties} class * * <p>Job level Properties keys to be put in the {@link JobProperties} class * * <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g. * azkaban.job.some_key</p> */ public class Constants { // Azkaban Flow Versions public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0; public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0; // Flow 2.0 file suffix public static final String PROJECT_FILE_SUFFIX = ".project"; public static final String FLOW_FILE_SUFFIX = ".flow"; // Flow 2.0 node type public static final String NODE_TYPE = "type"; public static final String FLOW_NODE_TYPE = "flow"; // Flow 2.0 flow and job path delimiter public static final String PATH_DELIMITER = ":"; // Job properties override suffix public static final String JOB_OVERRIDE_SUFFIX = ".jor"; // Key for the root node of the DAG in runtime properties public static final String ROOT_NODE_IDENTIFIER = "ROOT"; // Names and paths of various file names to configure Azkaban public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port"; public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app"; public static final String AZKABAN_CONTAINER_CONTEXT_KEY = "flow_container"; // Internal username used to perform SLA action public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla"; // Memory check retry interval when OOM in ms public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1; // Max number of memory check retry public static final int MEMORY_CHECK_RETRY_LIMIT = 720; public static final int DEFAULT_PORT_NUMBER = 8081; public static final int DEFAULT_SSL_PORT_NUMBER = 8443; public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20; // Configures the form limits for the web application public static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024; // One Schedule's default End Time: 01/01/2050, 00:00:00, UTC public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L; // Default flow trigger max wait time public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10); public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1); public static final int DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = 20; // The flow exec id for a flow trigger instance which hasn't started a flow yet public static final int UNASSIGNED_EXEC_ID = -1; // The flow 
exec id for a flow trigger instance unable to trigger a flow yet public static final int FAILED_EXEC_ID = -2; // Default locked flow error message public static final String DEFAULT_LOCKED_FLOW_ERROR_MESSAGE = "Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient" + " flow. Please refer to the Dr. Elephant report for this flow for more information."; // Default maximum number of concurrent runs for a single flow public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30; // How often executors will poll new executions in Poll Dispatch model public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000; // Executors can use cpu load calculated from this period to take/skip polling turns public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60; // Default value to feature enable setting. To be backward compatible, this value === FALSE public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false; // Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to push result into DB every N finished ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20; // Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to pull result from DB every N new ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50; // Use Polling Service to sync the ramp status cross EXEC Server. public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false; // How often executors will poll ramp status in Poll Dispatch model public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10; // Username to be sent to UserManager when OAuth is in use, and real username is not available: public static final String OAUTH_USERNAME_PLACEHOLDER = "<OAuth>"; // Used by UserManager for password validation (to tell apart real passwords from auth codes). 
// Empirically, passwords are shorter than this, and ACs are longer: public static final int OAUTH_MIN_AUTHCODE_LENGTH = 80; // Used (or should be used) wherever a string representation of UTF_8 charset is needed: public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString(); // Specifies the source(adhoc, scheduled, event) from where flow execution is triggered public static final String EXECUTION_SOURCE_ADHOC = "adhoc"; public static final String EXECUTION_SOURCE_SCHEDULED = "schedule"; public static final String EXECUTION_SOURCE_EVENT = "event"; public static final String CONTENT_TYPE_TEXT_PLAIN = "text/plain"; public static final String CHARACTER_ENCODING_UTF_8 = "utf-8"; // Use in-memory keystore public static final String USE_IN_MEMORY_KEYSTORE = "use.in-memory.keystore"; // AZ_HOME in containerized execution public static final String AZ_HOME = "AZ_HOME"; // Flow restart action on EXECUTION_STOPPED public static final String RESTART_FLOW = "Restart Flow"; // Overridable plugin load properties public static final String AZ_PLUGIN_LOAD_OVERRIDE_PROPS = "azkaban.plugin.load.override.props"; // Azkaban event reporter constants public static class EventReporterConstants { public static final String FLOW_NAME = "flowName"; public static final String AZ_HOST = "azkabanHost"; public static final String AZ_WEBSERVER = "azkabanWebserver"; public static final String PROJECT_NAME = "projectName"; public static final String SUBMIT_USER = "submitUser"; public static final String START_TIME = "startTime"; public static final String END_TIME = "endTime"; public static final String FLOW_STATUS = "flowStatus"; public static final String EXECUTION_ID = "executionId"; public static final String SUBMIT_TIME = "submitTime"; public static final String FLOW_VERSION = "flowVersion"; public static final String FAILED_JOB_ID = "failedJobId"; public static final String MODIFIED_BY = "modifiedBy"; public static final String FLOW_KILL_DURATION = "flowKillDuration"; public static final String FLOW_PAUSE_DURATION = "flowPauseDuration"; public static final String FLOW_PREPARATION_DURATION = "flowPreparationDuration"; public static final String SLA_OPTIONS = "slaOptions"; public static final String VERSION_SET = "versionSet"; public static final String EXECUTOR_TYPE = "executorType"; public static final String PROJECT_FILE_UPLOAD_USER = "projectFileUploadUser"; public static final String PROJECT_FILE_UPLOADER_IP_ADDR = "projectFileUploaderIpAddr"; public static final String PROJECT_FILE_NAME = "projectFileName"; public static final String PROJECT_FILE_UPLOAD_TIME = "projectFileUploadTime"; public static final String JOB_ID = "jobId"; public static final String JOB_TYPE = "jobType"; public static final String VERSION = "version"; public static final String JOB_PROXY_USER = "jobProxyUser"; public static final String ATTEMPT_ID = "attemptId"; public static final String JOB_KILL_DURATION = "jobKillDuration"; public static final String QUEUE_DURATION = "queueDuration"; public static final String FAILURE_MESSAGE = "failureMessage"; public static final String JOB_STATUS = "jobStatus"; public static final String EFFECTIVE_USERS = "effectiveUsers"; public static final String CPU_UTILIZED = "cpuUtilized"; public static final String MEMORY_UTILIZED_IN_BYTES = "memoryUtilizedInBytes"; } public static class ConfigurationKeys { public static final String AZKABAN_CLUSTER_NAME = "azkaban.cluster.name"; public static final String AZKABAN_CLUSTER_ENV = "azkaban.cluster.env"; public static final String 
AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties"; // Property to enable appropriate dispatch model public static final String AZKABAN_EXECUTION_DISPATCH_METHOD = "azkaban.execution.dispatch.method"; // Configures Azkaban to use new polling model for dispatching public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms"; public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled"; public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE = "azkaban.polling_criteria.flow_threads_available"; public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB = "azkaban.polling_criteria.min_free_memory_gb"; public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT = "azkaban.polling_criteria.max_cpu_utilization_pct"; public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = "azkaban.polling_criteria.cpu_load_period_sec"; // Configures properties for Azkaban executor health check public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min"; public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount"; public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email"; // Configures Azkaban Flow Version in project YAML file public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version"; // These properties are configurable through azkaban.properties public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename"; // External URL template of a given topic, specified in the list defined above //Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url"; // Designates one of the external link topics to correspond to an execution analyzer //Deprecated, replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic"; //Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label"; // Defines a list of external links, each referred to as a topic // external links defined here will be translated into buttons and rendered in the Flow Execution page public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS = "azkaban.server.external.analyzer.topics"; // Defines timeout in milliseconds for azkaban to validate external links // If this config is missing, azkaban will use default 3000 milliseconds as timeout. // If validation fails, buttons is disabled in Flow Execution page. 
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TIMEOUT_MS = "azkaban.server.external.analyzer.timeout.ms"; // Designates one of the external link topics to correspond to an execution analyzer public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL = "azkaban.server" + ".external.analyzer.${topic}.label"; // External URL template of a given topic, specified in the list defined above public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL = "azkaban.server" + ".external.analyzer.${topic}.url"; // Designates one of the external link topics to correspond to a job log viewer public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label"; /* * Hadoop/Spark user job link. * Example: * a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id} * b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id} * c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs * */ public static final String HADOOP_CLUSTER_URL = "azkaban.server.external.hadoop_cluster_url"; public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url"; public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url"; public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url"; // Configures the Kafka appender for logging user jobs, specified for the exec server public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList"; public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic"; // Represent the class name of azkaban metrics reporter. public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name"; // Represent the metrics server URL. public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url"; public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled"; public static final String MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = "azkaban.metrics" + ".min_age_for_classifying_a_flow_aged_minutes"; // User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users. // enduser -> myazkabanhost:443 -> proxy -> localhost:8081 // when this parameters set then these parameters are used to generate email links. // if these parameters are not set then jetty.hostname, and jetty.port(if ssl configured jetty.ssl.port) are used. public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname"; public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port"; public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port"; // Hostname for the host, if not specified, canonical hostname will be used public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname"; // List of users we prevent azkaban from running flows as. 
(ie: root, azkaban) public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users"; // Path name of execute-as-user executable public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib"; // Name of *nix group associated with the process running Azkaban public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name"; // Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs. // Jetty server configurations. public static final String JETTY_HEADER_BUFFER_SIZE = "jetty.headerBufferSize"; public static final String JETTY_USE_SSL = "jetty.use.ssl"; public static final String JETTY_SSL_PORT = "jetty.ssl.port"; public static final String JETTY_PORT = "jetty.port"; public static final String EXECUTOR_PORT_FILE = "executor.portfile"; // To set a fixed port for executor-server. Otherwise some available port is used. public static final String EXECUTOR_PORT = "executor.port"; public static final String EXECUTOR_SSL_PORT = "executor.ssl.port"; public static final String DEFAULT_TIMEZONE_ID = "default.timezone.id"; // Boolean config set on the Web server to prevent users from creating projects. When set to // true only admins or users with CREATEPROJECTS permission can create projects. public static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects"; // Boolean config set on the Web server to prevent users from uploading projects. When set to // true only admins or users with UPLOADPROJECTS permission can upload projects. public static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects"; // Max flow running time in mins, server will kill flows running longer than this setting. // if not set or <= 0, then there's no restriction on running time. 
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes"; // Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures) public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries"; public static final String AZKABAN_DEPENDENCY_DOWNLOAD_THREADPOOL_SIZE = "azkaban.dependency.download.threadpool.size"; public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type"; public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir"; public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path"; // This really should be azkaban.storage.hdfs.project_root.uri public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri"; public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri"; public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal"; public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path"; public static final String PROJECT_TEMP_DIR = "project.temp.dir"; // Event reporting properties public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM = "azkaban.event.reporting.class"; public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled"; // Comma separated list of properties to propagate from flow to Event reporter metadata public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS = "azkaban.event.reporting.kafka.brokers"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC = "azkaban.event.reporting.kafka.topic"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL = "azkaban.event.reporting.kafka.schema.registry.url"; /* * The max number of artifacts retained per project. * Accepted Values: * - 0 : Save all artifacts. No clean up is done on storage. * - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage * * Note: Having an unacceptable value results in an exception and the service would REFUSE * to start. * * Example: * a) azkaban.storage.artifact.max.retention=all * implies save all artifacts * b) azkaban.storage.artifact.max.retention=3 * implies save latest 3 versions saved in storage. **/ public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention"; // enable quartz scheduler and flow trigger if true. 
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz"; public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential"; public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential"; public static final String SECURITY_USER_GROUP = "azkaban.security.user.group"; public static final String CSR_KEYSTORE_LOCATION = "azkaban.csr.keystore.location"; // dir to keep dependency plugins public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir"; public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors"; public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow"; // list of whitelisted flows, with specific max number of concurrent runs. Format: // <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number> public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST = "azkaban.concurrent.runs.oneflow.whitelist"; public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS = "azkaban.activeexecutor.refresh.milisecinterval"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW = "azkaban.activeexecutor.refresh.flowinterval"; public static final String EXECUTORINFO_REFRESH_MAX_THREADS = "azkaban.executorinfo.refresh.maxThreads"; public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors"; public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters"; public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX = "azkaban.executorselector.comparator."; public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled"; public static final String QUEUE_PROCESSOR_WAIT_IN_MS = "azkaban.queue.processor.wait.in.ms"; public static final String SESSION_TIME_TO_LIVE = "session.time.to.live"; // allowed max number of sessions per user per IP public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session" + ".max_number_per_ip_per_user"; // allowed max size of shared project dir (percentage of partition size), e.g 0.8 public static final String PROJECT_CACHE_SIZE_PERCENTAGE = "azkaban.project_cache_size_percentage_of_disk"; public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE = "azkaban.project_cache_throttle_percentage"; // how many older versions of project files are kept in DB before deleting them public static final String PROJECT_VERSION_RETENTION = "project.version.retention"; // number of rows to be displayed on the executions page. public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size"; // locked flow error message. Parameters passed in are the flow name and project name. public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE = "azkaban.locked.flow.error.message"; // flow ramp related setting keys // Default value to feature enable setting. To be backward compatible, this value === FALSE public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled"; // Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. 
// This setting value controls to push result into DB every N finished ramped workflows public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max"; // Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to pull result from DB every N new ramped workflows public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max"; // A Polling Service can be applied to determine the ramp status synchronization interval. public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled"; public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval"; public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max"; public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min"; public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms"; public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS = "execution.logs.cleanup.interval.seconds"; public static final String EXECUTION_LOGS_CLEANUP_RECORD_LIMIT = "execution.logs.cleanup.record.limit"; // Oauth2.0 configuration keys. If missing, no OAuth will be attempted, and the old // username/password{+2FA} prompt will be given for interactive login: public static final String OAUTH_PROVIDER_URI_KEY = "oauth.provider_uri"; // where to send user for OAuth flow, e.g.: // oauth.provider_uri=https://login.microsoftonline.com/tenant-id/oauth2/v2.0/authorize\ // ?client_id=client_id\ // &response_type=code\ // &scope=openid\ // &response_mode=form_post\ // &state={state}\ // &redirect_uri={redirect_uri} // Strings {state} and {redirect_uri}, if present verbatim in the property value, will be // substituted at runtime with (URL-encoded) navigation target and OAuth responce handler URIs, // respectively. See handleOauth() in LoginAbstractServlet.java for details. public static final String OAUTH_REDIRECT_URI_KEY = "oauth.redirect_uri"; // how OAuth calls us back, e.g.: // oauth.redirect_uri=http://localhost:8081/?action=oauth_callback // By default job props always win over flow override props. // If this flag is set to true, then override props override also override existing job props. 
public static final String AZKABAN_EXECUTOR_RUNTIME_PROPS_OVERRIDE_EAGER = "azkaban.executor.runtimeProps.override.eager"; // Executor client TLS properties public static final String EXECUTOR_CLIENT_TLS_ENABLED = "azkaban.executor.client.tls.enabled"; public static final String EXECUTOR_CLIENT_TRUSTSTORE_PATH = "azkaban.executor.client.truststore"; public static final String EXECUTOR_CLIENT_TRUSTSTORE_PASSWORD = "azkaban.executor.client.trustpassword"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED = "azkaban.executor.reverse.proxy.enabled"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME = "azkaban.executor.reverse.proxy.hostname"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_PORT = "azkaban.executor.reverse.proxy.port"; // Job callback public static final String AZKABAN_EXECUTOR_JOBCALLBACK_ENABLED = "azkaban.executor.jobcallback.enabled"; } public static class FlowProperties { // Basic properties of flows as set by the executor server public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname"; public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid"; public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser"; public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid"; public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion"; } public static class JobProperties { // Job property that enables/disables using Kafka logging of user job logs public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable"; /* * this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available. * EXTRA_HCAT_CLUSTERS has the following format: * other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port" * Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster. * The uris(hcat servers) in a "cluster" ensures HA is provided. **/ public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters"; /* * the settings to be defined by user indicating if there are hcat locations other than the * default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are * supported, use comma to separate the values, values are case insensitive. **/ // Use EXTRA_HCAT_CLUSTERS instead @Deprecated public static final String EXTRA_HCAT_LOCATION = "other_hcat_location"; // If true, AZ will fetches the jobs' certificate from remote Certificate Authority. public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl"; // If true, AZ will fetch OAuth token from credential provider public static final String ENABLE_OAUTH = "azkaban.enable.oauth"; // Job properties that indicate maximum memory size public static final String JOB_MAX_XMS = "job.max.Xms"; public static final String MAX_XMS_DEFAULT = "1G"; public static final String JOB_MAX_XMX = "job.max.Xmx"; public static final String MAX_XMX_DEFAULT = "2G"; // The hadoop user the job should run under. If not specified, it will default to submit user. 
public static final String USER_TO_PROXY = "user.to.proxy"; /** * Format string for Log4j's EnhancedPatternLayout */ public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout"; } public static class JobCallbackProperties { public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout"; public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout"; public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout"; public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout"; public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size"; } public static class FlowTriggerProps { // Flow trigger props public static final String SCHEDULE_TYPE = "type"; public static final String CRON_SCHEDULE_TYPE = "cron"; public static final String SCHEDULE_VALUE = "value"; public static final String DEP_NAME = "name"; // Flow trigger dependency run time props public static final String START_TIME = "startTime"; public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId"; } public static class PluginManager { public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes"; public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies"; // need jars.to.include property, will be loaded with user property public static final String CONFFILE = "plugin.properties"; // not exposed to users public static final String SYSCONFFILE = "private.properties"; // common properties for multiple plugins public static final String COMMONCONFFILE = "common.properties"; // common private properties for multiple plugins public static final String COMMONSYSCONFFILE = "commonprivate.properties"; // mapping for the jobType to default proxy user public static final String DEFAULT_PROXY_USERS_FILE = "default-proxy-users.properties"; // allowed jobType classes for default proxy user public static final String DEFAULT_PROXY_USERS_JOBTYPE_CLASSES = "default.proxyusers.jobtype" + ".classes"; // users not allowed as default proxy user public static final String DEFAULT_PROXY_USERS_FILTER = "default.proxyusers.filter"; } public static class ContainerizedDispatchManagerProperties { public static final String AZKABAN_CONTAINERIZED_PREFIX = "azkaban.containerized."; public static final String CONTAINERIZED_IMPL_TYPE = AZKABAN_CONTAINERIZED_PREFIX + "impl.type"; public static final String CONTAINERIZED_EXECUTION_BATCH_ENABLED = AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.enabled"; public static final String CONTAINERIZED_EXECUTION_BATCH_SIZE = AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.size"; public static final String CONTAINERIZED_EXECUTION_PROCESSING_THREAD_POOL_SIZE = AZKABAN_CONTAINERIZED_PREFIX + "execution.processing.thread.pool.size"; public static final String CONTAINERIZED_CREATION_RATE_LIMIT = AZKABAN_CONTAINERIZED_PREFIX + "creation.rate.limit"; public static final String CONTAINERIZED_RAMPUP = AZKABAN_CONTAINERIZED_PREFIX + "rampup"; public static final String CONTAINERIZED_JOBTYPE_ALLOWLIST = AZKABAN_CONTAINERIZED_PREFIX + "jobtype.allowlist"; public static final String CONTAINERIZED_PROXY_USER_DENYLIST = AZKABAN_CONTAINERIZED_PREFIX + "proxy.user.denylist"; // Kubernetes related properties public static final String AZKABAN_KUBERNETES_PREFIX = "azkaban.kubernetes."; public static final String KUBERNETES_NAMESPACE = AZKABAN_KUBERNETES_PREFIX + "namespace"; public static final String KUBERNETES_KUBE_CONFIG_PATH = 
AZKABAN_KUBERNETES_PREFIX + "kube.config.path"; // Kubernetes pod related properties public static final String KUBERNETES_POD_PREFIX = AZKABAN_KUBERNETES_PREFIX + "pod."; public static final String KUBERNETES_POD_NAME_PREFIX = KUBERNETES_POD_PREFIX + "name.prefix"; public static final String KUBERNETES_POD_AZKABAN_BASE_IMAGE_NAME = AZKABAN_KUBERNETES_PREFIX + "azkaban-base.image.name"; public static final String KUBERNETES_POD_AZKABAN_CONFIG_IMAGE_NAME = AZKABAN_KUBERNETES_PREFIX + "azkaban-config.image.name"; // Kubernetes flow container related properties public static final String KUBERNETES_FLOW_CONTAINER_PREFIX = AZKABAN_KUBERNETES_PREFIX + "flow.container."; public static final String KUBERNETES_FLOW_CONTAINER_NAME = KUBERNETES_FLOW_CONTAINER_PREFIX + ".name"; public static final String KUBERNETES_FLOW_CONTAINER_CPU_LIMIT_MULTIPLIER = KUBERNETES_FLOW_CONTAINER_PREFIX + "cpu.limit.multiplier"; public static final String KUBERNETES_FLOW_CONTAINER_MAX_ALLOWED_CPU = KUBERNETES_FLOW_CONTAINER_PREFIX + "max.allowed.cpu"; public static final String KUBERNETES_FLOW_CONTAINER_CPU_REQUEST = KUBERNETES_FLOW_CONTAINER_PREFIX + "cpu.request"; public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT_MULTIPLIER = KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.limit.multiplier"; public static final String KUBERNETES_FLOW_CONTAINER_MAX_ALLOWED_MEMORY = KUBERNETES_FLOW_CONTAINER_PREFIX + "max.allowed.memory"; public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST = KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.request"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_NAME = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.name"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_VOLUME = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.volume"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_MOUNTPATH = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.mountpath"; public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "init.jobtypes.mount.path"; public static final String KUBERNETES_MOUNT_PATH_FOR_JOBTYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "jobtypes.mount.path"; public static final String KUBERNETES_POD_TEMPLATE_PATH = KUBERNETES_POD_PREFIX + "template.path"; public static final String KUBERNETES_DEPENDENCY_TYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencyTypes"; public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_DEPENDENCIES = KUBERNETES_FLOW_CONTAINER_PREFIX + "init.dependencies.mount.path"; public static final String KUBERNETES_MOUNT_PATH_FOR_DEPENDENCIES = KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencies.mount.path"; // Kubernetes service related properties public static final String KUBERNETES_SERVICE_PREFIX = AZKABAN_KUBERNETES_PREFIX + "service."; public static final String KUBERNETES_SERVICE_REQUIRED = KUBERNETES_SERVICE_PREFIX + "required"; public static final String KUBERNETES_SERVICE_NAME_PREFIX = KUBERNETES_SERVICE_PREFIX + "name.prefix"; public static final String KUBERNETES_SERVICE_PORT = KUBERNETES_SERVICE_PREFIX + "port"; public static final String KUBERNETES_SERVICE_CREATION_TIMEOUT_MS = KUBERNETES_SERVICE_PREFIX + "creation.timeout.ms"; // Kubernetes Watch related properties public static final String KUBERNETES_WATCH_PREFIX = AZKABAN_KUBERNETES_PREFIX + "watch."; public static final String KUBERNETES_WATCH_ENABLED = KUBERNETES_WATCH_PREFIX + "enabled"; public static final String KUBERNETES_WATCH_EVENT_CACHE_MAX_ENTRIES = KUBERNETES_WATCH_PREFIX + "cache.max.entries"; // Periodicity of 
lookup and cleanup of stale executions. public static final String CONTAINERIZED_STALE_EXECUTION_CLEANUP_INTERVAL_MIN = AZKABAN_CONTAINERIZED_PREFIX + "stale.execution.cleanup.interval.min"; public static final String ENV_VERSION_SET_ID = "VERSION_SET_ID"; public static final String ENV_FLOW_EXECUTION_ID = "FLOW_EXECUTION_ID"; public static final String ENV_JAVA_ENABLE_DEBUG = "JAVA_ENABLE_DEBUG"; public static final String ENV_ENABLE_DEV_POD = "ENABLE_DEV_POD"; public static final String ENV_CPU_REQUEST = "CPU_REQUEST"; public static final String ENV_MEMORY_REQUEST = "MEMORY_REQUEST"; } public static class ImageMgmtConstants { public static final String IMAGE_TYPE = "imageType"; public static final String IMAGE_VERSION = "imageVersion"; public static final String VERSION_STATE = "versionState"; public static final String ID_KEY = "id"; public static final String IMAGE_RAMPUP_PLAN = "imageRampupPlan"; } public static class FlowParameters { // Constants for Flow parameters public static final String FLOW_PARAM_VERSION_SET_ID = "azkaban.version-set.id"; // Constant to enable java remote debug for Flow Container public static final String FLOW_PARAM_JAVA_ENABLE_DEBUG = "java.enable.debug"; // Constant to enable pod for developer testing public static final String FLOW_PARAM_ENABLE_DEV_POD = "enable.dev.pod"; // Constant to disable pod cleanup through the kubernetes watch public static final String FLOW_PARAM_DISABLE_POD_CLEANUP = "disable.pod.cleanup"; // Constant to dispatch execution to Containerization public static final String FLOW_PARAM_DISPATCH_EXECUTION_TO_CONTAINER = "dispatch.execution.to.container"; // Constant for cpu request for flow container public static final String FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST = "flow.container.cpu.request"; // Constant for memory request for flow container public static final String FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST = "flow.container.memory.request"; public static final String FLOW_PARAM_POD_ENV_VAR = "pod.env.var."; // Constant to allow test version to be passed as flow parameter. Passing test version will be // allowed for Azkaban ADMIN role only public static final String FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION = "allow.image.test.version"; public static final String FLOW_PARAM_ALLOW_RESTART_ON_EXECUTION_STOPPED = "allow.restart.on.execution.stopped"; } }
1
22,697
It would be helpful to also specify the intended priority for the properties within this file.
azkaban-azkaban
java
@@ -23,6 +23,8 @@ import ( "github.com/GoogleCloudPlatform/compute-image-tools/go/osinfo" ) +type RunFunc func(*exec.Cmd) ([]byte, error) + var ( // AptExists indicates whether apt is installed. AptExists bool
1
/*
Copyright 2017 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
*/

// Package packages provides package management functions for Windows and Linux
// systems.
package packages

import (
	"fmt"
	"os"
	"os/exec"

	"github.com/GoogleCloudPlatform/compute-image-tools/go/osinfo"
)

var (
	// AptExists indicates whether apt is installed.
	AptExists bool
	// YumExists indicates whether yum is installed.
	YumExists bool
	// ZypperExists indicates whether zypper is installed.
	ZypperExists bool
	// GemExists indicates whether gem is installed.
	GemExists bool
	// PipExists indicates whether pip is installed.
	PipExists bool
	// GooGetExists indicates whether googet is installed.
	GooGetExists bool

	noarch = osinfo.Architecture("noarch")
)

// Packages is a selection of packages based on their manager.
type Packages struct {
	Yum    []PkgInfo    `json:"yum,omitempty"`
	Rpm    []PkgInfo    `json:"rpm,omitempty"`
	Apt    []PkgInfo    `json:"apt,omitempty"`
	Deb    []PkgInfo    `json:"deb,omitempty"`
	Zypper []PkgInfo    `json:"zypper,omitempty"`
	Gem    []PkgInfo    `json:"gem,omitempty"`
	Pip    []PkgInfo    `json:"pip,omitempty"`
	GooGet []PkgInfo    `json:"googet,omitempty"`
	WUA    []WUAPackage `json:"wua,omitempty"`
	QFE    []QFEPackage `json:"qfe,omitempty"`
}

// PkgInfo describes a package.
type PkgInfo struct {
	Name, Arch, Version string
}

// WUAPackage describes a Windows Update Agent package.
type WUAPackage struct {
	Title          string
	Description    string
	Categories     []string
	CategoryIDs    []string
	KBArticleIDs   []string
	SupportURL     string
	UpdateID       string
	RevisionNumber int32
}

// QFEPackage describes a Windows Quick Fix Engineering package.
type QFEPackage struct {
	Caption, Description, HotFixID, InstalledOn string
}

func run(cmd *exec.Cmd) ([]byte, error) {
	fmt.Printf("Running %q with args %q\n", cmd.Path, cmd.Args[1:])
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("error running %q with args %q: %v, stdout: %s", cmd.Path, cmd.Args, err, out)
	}
	return out, nil
}

func exists(name string) bool {
	if _, err := os.Stat(name); os.IsNotExist(err) {
		return false
	}
	return true
}
1
7,988
Make this private and update all the public functions so they don't take this argument. The variable you set below should also be private; then, in the tests, instead of passing the variable into the function, just update the variable. We don't want to expose the testing implementation in the public API if it can be avoided.
GoogleCloudPlatform-compute-image-tools
go
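A minimal sketch of the layout the reviewer is asking for, assuming a package-private runner variable; the names runFunc, runner, and InstallAptPackages are illustrative only, not the package's actual API:

package packages

import (
	"fmt"
	"os/exec"
)

// runFunc stays unexported so the testing hook never leaks into the public API.
type runFunc func(*exec.Cmd) ([]byte, error)

// runner is the package-private hook; tests assign a stub to it instead of
// passing a run function through every public call.
var runner runFunc = run

func run(cmd *exec.Cmd) ([]byte, error) {
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("error running %q with args %q: %v, stdout: %s", cmd.Path, cmd.Args, err, out)
	}
	return out, nil
}

// InstallAptPackages shows a public entry point that no longer takes a run
// function as an argument; it always goes through the package-level runner.
func InstallAptPackages(pkgs []string) error {
	args := append([]string{"install", "-y"}, pkgs...)
	_, err := runner(exec.Command("apt-get", args...))
	return err
}

A test would then stub the behaviour with something like runner = func(cmd *exec.Cmd) ([]byte, error) { return []byte("ok"), nil } before calling the public function.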
@@ -1956,7 +1956,7 @@ SDDkwd__(EXE_DIAGNOSTIC_EVENTS, "OFF"), DDkwd__(HIVE_DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_UTF8), DD_____(HIVE_DEFAULT_SCHEMA, "HIVE"), DD_____(HIVE_FILE_CHARSET, ""), - DD_____(HIVE_FILE_NAME, "/hive/tpcds/customer/customer.dat" ), + DD_____(HIVE_FILE_NAME, "/user/trafodion/hive/tpcds/customer/customer.dat" ), DD_____(HIVE_HDFS_STATS_LOG_FILE, ""), DDui___(HIVE_INSERT_ERROR_MODE, "1"), DDint__(HIVE_LIB_HDFS_PORT_OVERRIDE, "-1"),
1
/* -*-C++-*- // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ ***************************************************************************** * * File: NADefaults.cpp * Description: Implementation for the defaults table class, NADefaults. * * Created: 7/11/96 * Language: C++ * * * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include "NADefaults.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef NA_HAS_SEARCH_H #include <search.h> // use the bsearch binary search routine of the C RTL #else #include <unistd.h> // on OSS, bsearch comes from unistd.h #endif #include "nsk/nskport.h" #if !defined(NDEBUG) #endif #include "CliDefs.h" #include "CmpContext.h" #include "CmpErrors.h" #include "ComObjectName.h" #include "ComRtUtils.h" #include "ComSchemaName.h" #include "ex_error.h" #include "DefaultConstants.h" #include "DefaultValidator.h" #include "NAClusterInfo.h" #include "parser.h" #include "sql_id.h" #include "SQLCLIdev.h" #include "Sqlcomp.h" #include "StmtCompilationMode.h" #include "OptimizerSimulator.h" #include "CmpSeabaseDDL.h" #include "Globals.h" #include "QCache.h" #include "SqlParserGlobals.h" // MUST be last #include! #include "seabed/ms.h" #include "seabed/fs.h" #define NADHEAP CTXTHEAP #define ERRWARN(msg) ToErrorOrWarning(msg, errOrWarn) #define ERRWARNLOOP(msg) ToErrorOrWarning(msg, errOrWarnLOOP) #define ENUM_RANGE_CHECK(e) (e >= 0 && (size_t)e < numDefaultAttributes()) #define ATTR_RANGE_CHECK ENUM_RANGE_CHECK(attrEnum) #ifndef NDEBUG #define ATTR_RANGE_ASSERT CMPASSERT(ATTR_RANGE_CHECK) #else #define ATTR_RANGE_ASSERT #endif // ------------------------------------------------------------------------- // This table contains defaults used in SQLARK. // To add a default, put it in sqlcomp/DefaultConstants.h and in this table. // // The #define declares the domain (allowed range of values) of the attr-value; // typically it is Int1 or UI1 (signed or unsigned integral, >=1) // to prevent division-by-zero errors in the calling code. // // The first column is the internal enum value from sqlcomp/DefaultConstants.h. // The second column is the default value as a string. // // The DDxxxx macro identifies the domain of the attribute // (the range and properties of the possible values). // // XDDxxxx does the same *and* externalizes the attribute // (makes it visible to SHOWCONTROL; *you* need to tell Pubs to document it). // // SDDxxxx does the same and externalizes the attribute to HP support personnel // (makes it visible to HPDM when support is logged on; *you* need to tell Pubs // to document it in the support manual. 
You can set the // SHOWCONTROL_SUPPORT_ATTRS CQD to ON to see all the externalized and // support-level CQDs). // // For instance, DDflt0 allows any nonnegative floating-point number, while // DDflte allows any positive float (the e stands for epsilon, that tiniest // scintilla >0 in classical calculus, and something like +1E-38 on a Pentium). // DDui allows only nonnegative integral values (ui=unsigned int), // DDui1 allows only ints > 0, DDui2 only nonzero multiples of 2, etc. // // DDkwd validates keywords. Each attribute that is DDkwd has its own subset // of acceptable tokens -- the default behavior is that the attr is bivalent // (ON/OFF or TRUE/FALSE or ENABLE/DISABLE). If you want different keywords, // see enum DefaultToken in DefaultConstants.h, and NADefaults::token() below. // // Other DD's validate percentages, and Ansi names. Certainly more could be // defined, for more restrictive ranges or other criteria. // ************************************************************************* // NOTE: You must keep the entire list in alphabetical order, // or else the lookup will not work!!!!!!! Use only CAPITAL LETTERS!!!!!!!!! // ************************************************************************* // NOTE 2: If you choose to "hide" the default default value by setting it to // "ENABLE" or "SYSTEM" or "", your code must handle this possibility. // // See OptPhysRelExpr.cpp's handling of PARALLEL_NUM_ESPS, // an unsigned positive int which also accepts the keyword setting of "SYSTEM". // See ImplRule.cpp's use of INSERT_VSBB, a keyword attr which allows "SYSTEM". // // A simple way to handle ON/OFF keywords that you want to hide the default for: // Take OPTIMIZER_PRUNING as an example. Right now, it appears below with // default "OFF", and opt.cpp does // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) == DF_OFF); // To hide the default default, // you would enter it below as "SYSTEM", and opt.cpp would do // DisablePruning = (NADEFAULT(OPTIMIZER_PRUNING) != DF_ON); // (i.e., DF_OFF and DF_SYSTEM would be treated identically, as desired). // ************************************************************************* // NOTE 3: The user is always allowed to say // CONTROL QUERY DEFAULT attrname 'SYSTEM'; -- or 'ENABLE' or '' // What this means is that the current setting for that attribute // reverts to its default-default value. This default-default value // may or may not be "SYSTEM"; this is completely orthogonal/irrelevant // to the CQD usage. // // One gotcha: 'ENABLE' is a synonym for 'SYSTEM', *EXCEPT* when the // SYSTEM default (the default-default) is "DISABLE". // In this case, 'ENABLE' is a synonym for 'ON' // (the opposite of the synonyms DISABLE/OFF). // ************************************************************************* // NOTE 4: After modifying this static table in any way, INCLUDING A CODE MERGE, // for a quick sanity check, run w:/toolbin/checkNAD. // For a complete consistency check, compile this file, link arkcmp, and // runregr TEST050. 
// ************************************************************************* struct DefaultDefault { enum DefaultConstants attrEnum; const char *attrName; const char *value; const DefaultValidator *validator; UInt32 flags; }; #define DD(name,value,validator) { name, "" # name "", value, validator } #define FDD(name,value,validator,flags) { name, "" # name "", value, validator, flags } #define XDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_EXTERNALIZED) #define SDD(name,value,validator) FDD(name,value,validator,DEFAULT_IS_FOR_SUPPORT) #define DDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD) #define XDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_EXTERNALIZED) #define SDDS(name,value,validator) FDD(name,value,validator,DEFAULT_IS_SSD | DEFAULT_IS_FOR_SUPPORT) #define DD_____(name,value) DD(name,value,&validateUnknown) #define XDD_____(name,value) XDD(name,value,&validateUnknown) #define SDD_____(name,value) SDD(name,value,&validateUnknown) #define DDS_____(name,value) DDS(name,value,&validateUnknown) #define XDDS_____(name,value) XDDS(name,value,&validateUnknown) #define DDansi_(name,value) DD(name,value,&validateAnsiName) #define XDDansi_(name,value) XDD(name,value,&validateAnsiName) #define DDcoll_(name,value) DD(name,value,&validateCollList) #define DDint__(name,value) DD(name,value,&validateInt) #define SDDint__(name,value) SDD(name,value,&validateInt) #define XDDint__(name,value) XDD(name,value,&validateInt) #define DDSint__(name,value) DDS(name,value,&validateInt) #define XDDSint__(name,value) XDDS(name,value,&validateInt) #define XDDintN2(name,value) XDD(name,value,&validateIntNeg2) #define DDintN1__(name,value) DD(name,value,&validateIntNeg1) #define DDpct__(name,value) DD(name,value,&validatePct) #define XDDpct__(name,value) XDD(name,value,&validatePct) #define SDDpct__(name,value) SDD(name,value,&validatePct) #define DDpct1_50(name,value) DD(name,value,&validatePct1_t50) #define DD0_10485760(name,value) DD(name,value,&validate0_10485760) #define DD0_255(name,value) DD(name,value,&validate0_255) #define DD0_200000(name,value) DD(name,value,&validate0_200000) #define XDD0_200000(name,value) XDD(name,value,&validate0_200000) #define DD1_200000(name,value) DD(name,value,&validate1_200000) #define XDDui30_32000(name,value) XDD(name,value,&validate30_32000) #define DDui30_246(name,value) DD(name,value,&validate30_246) #define DDui50_4194303(name,value) DD(name,value,&validate50_4194303) #define DD1_24(name,value) DD(name,value,&validate1_24) #define XDD1_1024(name,value) XDD(name,value,&validate1_1024) #define DD1_1024(name,value) DD(name,value,&validate1_1024) #define DD18_128(name,value) DD(name,value,&validate18_128) #define DD1_128(name,value) DD(name,value,&validate1_128) #define DDui___(name,value) DD(name,value,&validateUI) #define XDDui___(name,value) XDD(name,value,&validateUI) #define SDDui___(name,value) SDD(name,value,&validateUI) #define DDui1__(name,value) DD(name,value,&validateUI1) #define XDDui1__(name,value) XDD(name,value,&validateUI1) #define SDDui1__(name,value) SDD(name,value,&validateUI1) #define DDui2__(name,value) DD(name,value,&validateUI2) #define XDDui2__(name,value) XDD(name,value,&validateUI2) #define DDui8__(name,value) DD(name,value,&validateUI8) #define DDui512(name,value) DD(name,value,&validateUI512) #define DDui0_5(name,value) DD(name,value,&validateUIntFrom0To5) #define XDDui0_5(name,value) XDD(name,value,&validateUIntFrom0To5) #define DDui1_6(name,value) 
DD(name,value,&validateUIntFrom1To6) #define DDui1_10(name,value) DD(name,value,&validateUIntFrom1To10) #define DDui2_10(name,value) DD(name,value,&validateUIntFrom2To10) #define DDui1500_4000(name,value) DD(name,value,&validateUIntFrom1500To4000) #define DDipcBu(name,value) DD(name,value,&validateIPCBuf) #define XDDipcBu(name,value) XDD(name,value,&validateIPCBuf) #define DDflt__(name,value) DD(name,value,&validateFlt) #define XDDflt__(name,value) XDD(name,value,&validateFlt) #define SDDflt__(name,value) SDD(name,value,&validateFlt) #define DDflt0_(name,value) DD(name,value,&validateFlt0) #define XDDflt0_(name,value) XDD(name,value,&validateFlt0) #define SDDflt0_(name,value) SDD(name,value,&validateFlt0) #define DDflte_(name,value) DD(name,value,&validateFltE) #define XDDflte_(name,value) XDD(name,value,&validateFltE) #define SDDflte_(name,value) SDD(name,value,&validateFltE) #define DDflt1_(name,value) DD(name,value,&validateFlt1) #define XDDflt1_(name,value) XDD(name,value,&validateFlt1) #define DDflt_0_1(name,value) DD(name,value,&validateFlt_0_1) #define XDDflt_0_1(name,value) XDD(name,value,&validateFlt_0_1) #define DDkwd__(name,value) DD(name,value,&validateKwd) #define XDDkwd__(name,value) XDD(name,value,&validateKwd) #define SDDkwd__(name,value) SDD(name,value,&validateKwd) #define DDSkwd__(name,value) DDS(name,value,&validateKwd) #define SDDSkwd__(name,value) SDDS(name,value,&validateKwd) #define DD1_4096(name,value) DD(name,value,&validate1_4096) #define DD0_18(name,value) DD(name,value,&validate0_18) #define DD0_64(name,value) DD(name,value,&validate0_64) #define DD16_64(name,value) DD(name,value,&validate16_64) #define DDalis_(name,value) DD(name,value,&validateAnsiList) #define XDDalis_(name,value) XDD(name,value,&validateAnsiList) #define XDDpos__(name,value) XDD(name,value,&validatePOSTableSizes) #define SDDpos__(name,value) SDD(name,value,&validatePOSTableSizes) #define DDpos__(name,value) DD(name,value,&validatePOSTableSizes) #define DDtp___(name,value) DD(name,value,&validateTraceStr) #define DDosch_(name,value) DD(name,value,&validateOverrideSchema) #define SDDosch_(name,value) SDD(name,value,&validateOverrideSchema) #define DDpsch_(name,value) DD(name,value,&validatePublicSchema) #define SDDpsch_(name,value) SDD(name,value,&validatePublicSchema) #define DDrlis_(name,value) DD(name,value,&validateRoleNameList) #define XDDrlis_(name,value) XDD(name,value,&validateRoleNameList) #define DDrver_(name,value) DD(name,value,&validateReplIoVersion) #define XDDMVA__(name,value) XDD(name,value,&validateMVAge) #define DDusht_(name,value) DD(name,value,&validate_uint16) const DefaultValidator validateUnknown; const DefaultValidator validateAnsiName(CASE_SENSITIVE_ANSI); // e.g. 
'c.s.tbl'
ValidateCollationList validateCollList(TRUE/*mp-format*/); // list collations
const ValidateInt validateInt; // allows neg, zero, pos ints
const ValidateIntNeg1 validateIntNeg1;// allows -1 to +infinity ints
const ValidateIntNeg1 validateIntNeg2;// allows -1 to +infinity ints
const ValidatePercent validatePct; // allows zero to 100 (integral %age)
const ValidateNumericRange validatePct1_t50(VALID_UINT, 1, (float)50);// allows 1 to 50 (integral %age)
const Validate_0_10485760 validate0_10485760; // allows zero to 10Meg (integer)
const Validate_0_255 validate0_255; // allows zero to 255 (integer)
const Validate_0_200000 validate0_200000; // allows zero to 200000 (integer)
const Validate_1_200000 validate1_200000; // allows 1 to 200000 (integer)
const Validate_30_32000 validate30_32000; // allows 30 to 32000
const Validate_30_246 validate30_246; // allows 30 to 246
const Validate_50_4194303 validate50_4194303; // allows 50 to 4194303 (integer)
const Validate_1_24 validate1_24; // allows 1 to 24 (integer)
const ValidateUInt validateUI; // allows zero and pos
const ValidateUInt1 validateUI1; // allows pos only (>= 1)
const ValidateUInt2 validateUI2(2); // allows pos multiples of 2 only
const ValidateUInt2 validateUI8(8); // pos multiples of 8 only
const ValidateUInt2 validateUI512(512); // pos multiples of 512 only
const ValidateUIntFrom0To5 validateUIntFrom0To5; // integer from 0 to 5
const ValidateUIntFrom1500To4000 validateUIntFrom1500To4000; // integer from 1500 to 4000
const ValidateUIntFrom1To6 validateUIntFrom1To6; // integer from 1 to 6
const ValidateUIntFrom1To10 validateUIntFrom1To10; // integer from 1 to 10
const ValidateUIntFrom2To10 validateUIntFrom2To10; // integer from 2 to 10
const ValidateIPCBuf validateIPCBuf; // for IPC message buffers (DP2 msgs)
const ValidateFlt validateFlt; // allows neg, zero, pos (all nums)
const ValidateFltMin0 validateFlt0; // allows zero and pos
const ValidateFltMinEpsilon validateFltE; // allows pos only (>= epsilon > 0)
const ValidateFltMin1 validateFlt1; // allows pos only (>= 1)
const ValidateSelectivity ValidateSelectivity; // allows 0 to 1 (float)
const ValidateFlt_0_1 validateFlt_0_1; // allows 0 to 1 (float)
const ValidateKeyword validateKwd; // allows relevant keywords only
const Validate_1_4096 validate1_4096; // allows 1 to 4096 (integer) which is max character size supported.
const Validate_0_18 validate0_18; // allows 0 to 18 (integer) because 18 is max precision supported.
const Validate_1_1024 validate1_1024; // allows 1 to 1024 (integer).
const Validate_0_64 validate0_64; // allows 0 to 64 (integer)
const Validate_16_64 validate16_64; // allows 16 to 64 (integer)
const Validate_18_128 validate18_128; // allows 18 to 128 (integer).
const Validate_1_128 validate1_128; // allows 1 to 128 (integer).
// allows ':' separated list of three part ANSI names
const ValidateAnsiList validateAnsiList;
// allows ',' separated list of role names
const ValidateRoleNameList validateRoleNameList;
const ValidatePOSTableSizes validatePOSTableSizes;
const ValidateTraceStr validateTraceStr;
const ValidateOverrideSchema validateOverrideSchema; // check OverrideSchema format
const ValidatePublicSchema validatePublicSchema;
// This high value should be the same as the default value of REPLICATE_IO_VERSION
const ValidateReplIoVersion validateReplIoVersion(11,17);
const ValidateMVAge validateMVAge;
const Validate_uint16 validate_uint16;

// See the NOTEs above for how to maintain this list!
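// (Illustration only, not an entry in the table: with the macros above, a line such as
//      DDkwd__(OPTIMIZER_PRUNING, "OFF"),
//  expands via DDkwd__ -> DD(name,value,&validateKwd) -> { name, ""#name"", value, validator }
//  into the aggregate initializer
//      { OPTIMIZER_PRUNING, "OPTIMIZER_PRUNING", "OFF", &validateKwd },
//  with the flags member left zero-initialized.  The XDD/SDD/DDS/FDD variants instead fill
//  in DEFAULT_IS_EXTERNALIZED, DEFAULT_IS_FOR_SUPPORT and/or DEFAULT_IS_SSD in flags.)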
THREAD_P DefaultDefault defaultDefaults[] = { DDflt0_(ACCEPTABLE_INPUTESTLOGPROP_ERROR, "0.5"), SDDint__(AFFINITY_VALUE, "-2"), // controls the ESP allocation per core. DDkwd__(AGGRESSIVE_ESP_ALLOCATION_PER_CORE, "OFF"), SDDkwd__(ALLOW_AUDIT_ATTRIBUTE_CHANGE, "FALSE"), // Used to control if row sampling will use the sample operator in SQL/MX or the // this should be used for testing only. DML should not be executed on // non-audited tables DDkwd__(ALLOW_DML_ON_NONAUDITED_TABLE, "OFF"), // DP2_EXECUTOR_POSITION_SAMPLE method in DP2. // Valid values are ON, OFF and SYSTEM // ON => choose DP2_ROW_SAMPLING over row sampling in EID, if sampling % is less than 50. // OFF => choose EID row sampling over DP2 row sampling regardless of sampling % // SYSTEM => update stats will choose DP row sampling if sampling % is less than 5. SDDkwd__(ALLOW_DP2_ROW_SAMPLING, "SYSTEM"), DDkwd__(ALLOW_FIRSTN_IN_SUBQUERIES, "TRUE"), // ON/OFF flag to invoke ghost objects from non-licensed process (non-super.super user) who can not use parserflags DDkwd__(ALLOW_GHOST_OBJECTS, "OFF"), // This default, if set to ON, will allow Translate nodes (to/from UCS2) // to be automatically inserted by the Binder if some children of an // ItemExpr are declared as UCS2 and some are declared as ISO88591. DDkwd__(ALLOW_IMPLICIT_CHAR_CASTING, "ON"), // this default, if set to ON, will allow certain incompatible // assignment, like string to int. The assignment will be done by // implicitely CASTing one operand to another as long as CAST between // the two is supported. See binder for details. DDkwd__(ALLOW_INCOMPATIBLE_ASSIGNMENT, "ON"), // this default, if set to ON, will allow certain incompatible // comparisons, like string to int. The comparison will be done by // implicitely CASTing one operand to another as long as CAST between // the two is supported. See binder for details. DDkwd__(ALLOW_INCOMPATIBLE_COMPARISON, "ON"), // this default, if set to ON, will allow certain incompatible // comparisons. This includes incompatible comparisons, assignments, // conversions, UNION, arith, string and case stmts. // See binder(BindItemExpr.cpp, SynthType.cpp) for details. DDkwd__(ALLOW_INCOMPATIBLE_OPERATIONS, "ON"), // if set to 2, the replicateNonKeyVEGPred() mdamkey method // will try to use inputs to filter out VEG elements that are not // local to the associated table to minimize predicate replication. // It is defaulted to 0 (off), as there is some concern that this algoritm // might produce to few replications, which could lead to incorrect results. // Setting the Value to 1 will try a simpler optimization DDui___(ALLOW_INPUT_PRED_REPLICATION_REDUCTION,"0"), // if set to ON, then isolation level (read committed, etc) could be // specified in a regular CREATE VIEW (not a create MV) statement. DDkwd__(ALLOW_ISOLATION_LEVEL_IN_CREATE_VIEW, "ON"), // if set to ON, then we allow subqueries of degree > 1 in the // select list. DDkwd__(ALLOW_MULTIDEGREE_SUBQ_IN_SELECTLIST, "SYSTEM"), // by default, a primary key or unique constraint must be non-nullable. // This default, if set, allows them to be nullable. // The default value is OFF. DDkwd__(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, "OFF"), // if set to ON, then ORDER BY could be // specified in a regular CREATE VIEW (not a create MV) statement. 
DDkwd__(ALLOW_ORDER_BY_IN_CREATE_VIEW, "ON"), DDkwd__(ALLOW_ORDER_BY_IN_SUBQUERIES, "ON"), // rand() function in sql is disabled unless this CQD is turned on DDkwd__(ALLOW_RAND_FUNCTION, "ON"), DDkwd__(ALLOW_RANGE_PARTITIONING, "TRUE"), DDkwd__(ALLOW_RENAME_OF_MVF_OR_SUBQ, "OFF"), DDkwd__(ALLOW_RISKY_UPDATE_WITH_NO_ROLLBACK, "OFF"), DDkwd__(ALLOW_SUBQ_IN_SET, "SYSTEM"), DDkwd__(ALLOW_UNEXTERNALIZED_MAINTAIN_OPTIONS, "OFF"), DDSkwd__(ALTPRI_ESP, ""), DDSkwd__(ALTPRI_MASTER, ""), DDS_____(AQR_ENTRIES, ""), DDkwd__(AQR_WNR, "ON"), DDkwd__(AQR_WNR_DELETE_NO_ROWCOUNT, "OFF"), DDkwd__(AQR_WNR_EXPLAIN_INSERT, "OFF"), DDkwd__(AQR_WNR_INSERT_CLEANUP, "OFF"), DDkwd__(AQR_WNR_LOCK_INSERT_TARGET, "OFF"), DDkwd__(ARKCMP_FAKE_HW, "OFF"), DDkwd__(ASG_FEATURE, "ON"), // Set ASM cache DDkwd__(ASM_ALLOWED, "ON"), // Precompute statistics in ASM DDkwd__(ASM_PRECOMPUTE, "OFF"), DDkwd__(ASYMMETRIC_JOIN_TRANSFORMATION, "MAXIMUM"), DDkwd__(ATTEMPT_ASYNCHRONOUS_ACCESS, "ON"), DDkwd__(ATTEMPT_ESP_PARALLELISM, "ON"), DDkwd__(ATTEMPT_REVERSE_SYNCHRONOUS_ORDER, "ON"), // Online Populate Index uses AuditImage for index tables only. // By setting this CQD to ON, one can generate AuditImage for // tables also. DDkwd__(AUDIT_IMAGE_FOR_TABLES, "OFF"), DDkwd__(AUTOMATIC_RECOMPILATION, "OFF"), DDkwd__(AUTO_QUERY_RETRY, "SYSTEM"), XDDkwd__(AUTO_QUERY_RETRY_WARNINGS, "OFF"), DDkwd__(BASE_NUM_PAS_ON_ACTIVE_PARTS, "OFF"), // see comments in DefaultConstants.h DDkwd__(BIGNUM_IO, "SYSTEM"), DDint__(BLOCK_ENCRYPTION_MODE, "0"), XDDkwd__(BLOCK_TO_PREVENT_HALLOWEEN, "ON"), DDflte_(BMO_CITIZENSHIP_FACTOR, "1."), DDui1__(BMO_MEMORY_SIZE, "204800"), // percentage of physical main memory availabe for BMO. // This value is only used by HJ and HGB to come up with // an initial estimate for the number of clusters to allocate. // It does NOT by any means determine the amount of memory // used by a BMO. The memory usage depends on the amount of // memory available during execution and the amount of input // data. DDflte_(BMO_MEMORY_USAGE_PERCENT, "5."), // When on, then try to bulk move nullable and variable length column values. 
DDkwd__(BULK_MOVE_NULL_VARCHAR, "ON"), //Temporary fix to bypass volatile schema name checking for non-table objects - ALM Case#4764 DDkwd__(BYPASS_CHECK_FOR_VOLATILE_SCHEMA_NAME, "OFF"), DDkwd__(CACHE_HISTOGRAMS, "ON"), DDkwd__(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS, "OFF"), DD0_200000(CACHE_HISTOGRAMS_IN_KB, "32768"), DDkwd__(CACHE_HISTOGRAMS_MONITOR_HIST_DETAIL, "OFF"), DDkwd__(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL, "OFF"), DD_____(CACHE_HISTOGRAMS_MONITOR_OUTPUT_FILE, ""), DD_____(CACHE_HISTOGRAMS_TRACE_OUTPUT_FILE, ""), DDkwd__(CALL_EMBEDDED_ARKCMP, "OFF"), DDui___(CANCEL_MINIMUM_BLOCKING_INTERVAL, "60"), DDkwd__(CASCADED_GROUPBY_TRANSFORMATION, "ON"), XDDansi_(CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(CAT_ALLOW_NEW_FEATUREX, "OFF"), // Control whether authorization caches immutable users DDkwd__(CAT_AUTHORIZATION_CACHE_IMMUTABLE_USERS, "ON"), DDkwd__(CAT_CREATE_SCHEMA_LABELS_ON_ALL_SEGMENTS, "ON"), DDkwd__(CAT_DEFAULT_COMPRESSION, "NONE"), // Metadata table distribution schemes // OFF - Place all metadata tables on one single disk // LOCAL_NODE - Distribute metadata tables across disks on local segment // where first schema in the catalog is created // ON - Distribute metadata tables across disks in local segment // and visible remote segments SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), //SDDkwd__(CAT_DISTRIBUTE_METADATA, "ON"), // This disables Query Invalidation processing in catman when set to "OFF" SDDkwd__(CAT_ENABLE_QUERY_INVALIDATION, "ON"), // Throw an error if a column is part of the store by clause and // is not defined as NOT NULL return an error DDkwd__(CAT_ERROR_ON_NOTNULL_STOREBY, "ON"), DDui1__(CAT_FS_TIMEOUT, "9000"), // Used to make ignore "already exists" error in Create and // "does not exist" error in Drop. DDkwd__(CAT_IGNORE_ALREADY_EXISTS_ERROR, "OFF"), DDkwd__(CAT_IGNORE_DOES_NOT_EXIST_ERROR, "OFF"), // Used to make catman test134 predictable DDkwd__(CAT_IGNORE_EMPTY_CATALOGS, "OFF"), // Catalog Manager internal support for REPLICATE AUTHORIZATION DDkwd__(CAT_IGNORE_REPL_AUTHIDS_ERROR, "OFF"), // This enables the DB Limits functionality. If set to OFF, then blocksize // is restricted to 4096 and clustering key size is limited to 255 bytes. // DB Limits checking is turned off on NT since NT's DP2 does not support // large blocks or keys. DDkwd__(CAT_LARGE_BLOCKS_LARGE_KEYS, "ON"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's clustering key length is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_KEYSIZE, "1"), // If DB Limits is enabled, then increase the default blocksize to 32K // on NSK if the object's row size is larger than this value. DDui1__(CAT_LARGE_BLOCKS_MAX_ROWSIZE, "1"), // Controls how pathnames for routines/procedures/SPJs are interpreted DDkwd__(CAT_LIBRARY_PATH_RELATIVE, "OFF"), DDkwd__(CAT_MORE_SCHEMA_PRIVS, "ON"), DDkwd__(CAT_OVERRIDE_CREATE_DISABLE, "OFF"), // This forces an rcb to be created with a different version number // A "0" means to take the current mxv version DDui___(CAT_RCB_VERSION, "0"), // Controls creation of column privileges for object-level privileges DDkwd__(CAT_REDUNDANT_COLUMN_PRIVS, "ON"), // If schema owner is object owner is ON, then the default owner for objects is the // schema owner. DDkwd__(CAT_SCHEMA_OWNER_IS_OBJECT_OWNER, "OFF"), DDkwd__(CAT_TEST_BOOL, "OFF"), DDint__(CAT_TEST_POINT, "0"), DD_____(CAT_TEST_STRING, "NONE"), // CMP_ERR_LOG_FILE indicates where to save a log for certain errors. 
DD_____(CMP_ERR_LOG_FILE, "tdm_arkcmp_errors.log"), DDkwd__(COLLECT_REORG_STATS, "ON"), DDint__(COMPILER_IDLE_TIMEOUT, "1800"), // To match with set session defaults value // tracking compilers specific defaults DDint__(COMPILER_TRACKING_INTERVAL, "0"), DD_____(COMPILER_TRACKING_LOGFILE, "NONE"), DDkwd__(COMPILER_TRACKING_LOGTABLE, "OFF"), DDkwd__(COMPILE_TIME_MONITOR, "OFF"), DD_____(COMPILE_TIME_MONITOR_LOG_ALLTIME_ONLY, "OFF"), DD_____(COMPILE_TIME_MONITOR_OUTPUT_FILE, "NONE"), // complexity threshold beyond which a // MultiJoin query is considered too complex DDflt0_(COMPLEX_MJ_QUERY_THRESHOLD, "1000000"), // Switch between new aligned internal format and exploded format DDkwd__(COMPRESSED_INTERNAL_FORMAT, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO, "SYSTEM"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BMO_AFFINITY, "ON"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_BULK_MOVE, "ON"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO, "0.30"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_EXPLAIN, "OFF"), DDui1__(COMPRESSED_INTERNAL_FORMAT_MIN_ROW_SIZE, "32"), DDkwd__(COMPRESSED_INTERNAL_FORMAT_ROOT_DOES_CONVERSION, "OFF"), DDflt0_(COMPRESSED_INTERNAL_FORMAT_ROW_SIZE_ADJ, "0.90"), XDDkwd__(COMPRESSION_TYPE, "NONE"), // These are switches and variables to use for compiler debugging DDkwd__(COMP_BOOL_1, "OFF"), DDkwd__(COMP_BOOL_10, "OFF"), DDkwd__(COMP_BOOL_100, "OFF"), DDkwd__(COMP_BOOL_101, "OFF"), DDkwd__(COMP_BOOL_102, "OFF"), DDkwd__(COMP_BOOL_103, "OFF"), DDkwd__(COMP_BOOL_104, "OFF"), DDkwd__(COMP_BOOL_105, "OFF"), DDkwd__(COMP_BOOL_106, "OFF"), DDkwd__(COMP_BOOL_107, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_108, "ON"), // Being used for testing default predicate synthesis in cardinality estimation DDkwd__(COMP_BOOL_109, "OFF"), DDkwd__(COMP_BOOL_11, "OFF"), DDkwd__(COMP_BOOL_110, "OFF"), DDkwd__(COMP_BOOL_111, "OFF"), DDkwd__(COMP_BOOL_112, "OFF"), DDkwd__(COMP_BOOL_113, "OFF"), DDkwd__(COMP_BOOL_114, "OFF"), DDkwd__(COMP_BOOL_115, "OFF"), DDkwd__(COMP_BOOL_116, "OFF"), DDkwd__(COMP_BOOL_117, "OFF"), DDkwd__(COMP_BOOL_118, "OFF"), // soln 10-100508-0135 - allow undo of fix. 
DDkwd__(COMP_BOOL_119, "OFF"), DDkwd__(COMP_BOOL_12, "OFF"), DDkwd__(COMP_BOOL_120, "OFF"), DDkwd__(COMP_BOOL_121, "OFF"), DDkwd__(COMP_BOOL_122, "ON"), // Solution 10-081203-7708 fix DDkwd__(COMP_BOOL_123, "OFF"), DDkwd__(COMP_BOOL_124, "OFF"), DDkwd__(COMP_BOOL_125, "ON"), DDkwd__(COMP_BOOL_126, "OFF"), DDkwd__(COMP_BOOL_127, "ON"), DDkwd__(COMP_BOOL_128, "ON"), DDkwd__(COMP_BOOL_129, "ON"), DDkwd__(COMP_BOOL_13, "OFF"), DDkwd__(COMP_BOOL_130, "ON"), DDkwd__(COMP_BOOL_131, "OFF"), DDkwd__(COMP_BOOL_132, "OFF"), DDkwd__(COMP_BOOL_133, "OFF"), DDkwd__(COMP_BOOL_134, "ON"), DDkwd__(COMP_BOOL_135, "ON"), DDkwd__(COMP_BOOL_136, "OFF"), DDkwd__(COMP_BOOL_137, "OFF"), // ON enables logging of RewriteJoinPred DDkwd__(COMP_BOOL_138, "OFF"), // ON disables tryToRewriteJoinPredicate DDkwd__(COMP_BOOL_139, "OFF"), DDkwd__(COMP_BOOL_14, "ON"), DDkwd__(COMP_BOOL_140, "ON"), DDkwd__(COMP_BOOL_141, "ON"), // Used for testing MC UEC adjustment for uplifting join cardinality DDkwd__(COMP_BOOL_142, "ON"), // Used for turning on Compile Time Statistics caching DDkwd__(COMP_BOOL_143, "OFF"), DDkwd__(COMP_BOOL_144, "OFF"), // only Key columns usage as a part of materialization of disjuncts is controlled by the CQD DDkwd__(COMP_BOOL_145, "ON"), // Used for selectivity adjustment for MC Joins DDkwd__(COMP_BOOL_146, "OFF"), DDkwd__(COMP_BOOL_147, "OFF"), DDkwd__(COMP_BOOL_148, "ON"), // Used for GroupBy Cardinality Enhancement for complex expressions DDkwd__(COMP_BOOL_149, "ON"), // Used for testing multi-col uniqueness cardinality enhancement DDkwd__(COMP_BOOL_15, "OFF"), DDkwd__(COMP_BOOL_150, "OFF"), DDkwd__(COMP_BOOL_151, "OFF"), DDkwd__(COMP_BOOL_152, "OFF"), DDkwd__(COMP_BOOL_153, "ON"), // skew buster: ON == use round robin, else Co-located. DDkwd__(COMP_BOOL_154, "OFF"), DDkwd__(COMP_BOOL_155, "OFF"), DDkwd__(COMP_BOOL_156, "ON"), // Used by RTS to turn on RTS Stats collection for ROOT operators DDkwd__(COMP_BOOL_157, "OFF"), DDkwd__(COMP_BOOL_158, "OFF"), DDkwd__(COMP_BOOL_159, "OFF"), DDkwd__(COMP_BOOL_16, "OFF"), DDkwd__(COMP_BOOL_160, "OFF"), DDkwd__(COMP_BOOL_161, "OFF"), DDkwd__(COMP_BOOL_162, "ON"), // transform NOT EXISTS subquery using anti_semijoin instead of Join-Agg DDkwd__(COMP_BOOL_163, "OFF"), DDkwd__(COMP_BOOL_164, "OFF"), DDkwd__(COMP_BOOL_165, "ON"), // set to 'ON' in M5 for SQ DDkwd__(COMP_BOOL_166, "OFF"), // ON --> turn off fix for 10-100310-8659. DDkwd__(COMP_BOOL_167, "OFF"), DDkwd__(COMP_BOOL_168, "ON"), DDkwd__(COMP_BOOL_169, "OFF"), DDkwd__(COMP_BOOL_17, "ON"), DDkwd__(COMP_BOOL_170, "ON"), DDkwd__(COMP_BOOL_171, "OFF"), DDkwd__(COMP_BOOL_172, "OFF"), DDkwd__(COMP_BOOL_173, "OFF"), // fix: make odbc params nullable DDkwd__(COMP_BOOL_174, "ON"), // internal usage: merge stmt DDkwd__(COMP_BOOL_175, "OFF"), // internal usage: merge stmt DDkwd__(COMP_BOOL_176, "OFF"), DDkwd__(COMP_BOOL_177, "OFF"), DDkwd__(COMP_BOOL_178, "OFF"), DDkwd__(COMP_BOOL_179, "OFF"), DDkwd__(COMP_BOOL_18, "OFF"), DDkwd__(COMP_BOOL_180, "OFF"), DDkwd__(COMP_BOOL_181, "OFF"), DDkwd__(COMP_BOOL_182, "OFF"), // internal usage DDkwd__(COMP_BOOL_183, "OFF"), DDkwd__(COMP_BOOL_184, "ON"), // ON => use min probe size for mdam. Using min probe size of 1 or 2 currently has a bug so this is not the default. 
OFF => use default probe size of 100 DDkwd__(COMP_BOOL_185, "ON"), //Fix, allows extract(year from current_date) to be treated as a userinput DDkwd__(COMP_BOOL_186, "OFF"), DDkwd__(COMP_BOOL_187, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_188, "OFF"), DDkwd__(COMP_BOOL_189, "OFF"), // reserved for internal usage DDkwd__(COMP_BOOL_19, "OFF"), DDkwd__(COMP_BOOL_190, "OFF"), DDkwd__(COMP_BOOL_191, "OFF"), // Temp for UDF metadata switch DDkwd__(COMP_BOOL_192, "OFF"), DDkwd__(COMP_BOOL_193, "OFF"), DDkwd__(COMP_BOOL_194, "OFF"), DDkwd__(COMP_BOOL_195, "OFF"), // used to enable unexternalized get statistics options. DDkwd__(COMP_BOOL_196, "OFF"), DDkwd__(COMP_BOOL_197, "OFF"), DDkwd__(COMP_BOOL_198, "OFF"), DDkwd__(COMP_BOOL_199, "ON"), DDkwd__(COMP_BOOL_2, "OFF"), DDkwd__(COMP_BOOL_20, "OFF"), // ON -> disable ability of stmt to be canceled. DDkwd__(COMP_BOOL_200, "OFF"), DDkwd__(COMP_BOOL_201, "OFF"), DDkwd__(COMP_BOOL_202, "ON"),// For SQ: // ON: excluding fixup cost // for EXCHANGE for // anti-surf logic; // OFF: do include. // Change to ON in M5 DDkwd__(COMP_BOOL_203, "OFF"), DDkwd__(COMP_BOOL_205, "OFF"), // enable reorg on metadata DDkwd__(COMP_BOOL_206, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_207, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_208, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_209, "OFF"), // Internal Usage DDkwd__(COMP_BOOL_21, "OFF"), DDkwd__(COMP_BOOL_210, "ON"), DDkwd__(COMP_BOOL_211, "ON"), // controls removing constants from group expression DDkwd__(COMP_BOOL_215, "OFF"), DDkwd__(COMP_BOOL_217, "OFF"), DDkwd__(COMP_BOOL_219, "OFF"), // for InMem obj defn DDkwd__(COMP_BOOL_22, "ON"), DDkwd__(COMP_BOOL_220, "OFF"), // UserLoad fastpath opt DDkwd__(COMP_BOOL_221, "OFF"), // unnests a subquery even when there is no explicit correlation DDkwd__(COMP_BOOL_222, "ON"), // R2.5 BR features enabled DDkwd__(COMP_BOOL_223, "OFF"), // enable undocumented options // bulk replicate features DDkwd__(COMP_BOOL_224, "OFF"), // enable undocumented // bulk replicate features DDkwd__(COMP_BOOL_225, "ON"), // enable optimized esps allocation DDkwd__(COMP_BOOL_226, "OFF"), // ON enables UNLOAD feature // for disk label stats. 
DDkwd__(COMP_BOOL_23, "ON"), DDkwd__(COMP_BOOL_24, "OFF"), // AS enhancement to adjust maxDoP DDkwd__(COMP_BOOL_25, "OFF"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_26, "OFF"), DDkwd__(COMP_BOOL_27, "OFF"), DDkwd__(COMP_BOOL_28, "OFF"), DDkwd__(COMP_BOOL_29, "OFF"), DDkwd__(COMP_BOOL_3, "OFF"), DDkwd__(COMP_BOOL_30, "ON"), DDkwd__(COMP_BOOL_31, "OFF"), DDkwd__(COMP_BOOL_32, "OFF"), DDkwd__(COMP_BOOL_33, "OFF"), DDkwd__(COMP_BOOL_34, "OFF"), DDkwd__(COMP_BOOL_35, "OFF"), DDkwd__(COMP_BOOL_36, "OFF"), DDkwd__(COMP_BOOL_37, "OFF"), DDkwd__(COMP_BOOL_38, "OFF"), DDkwd__(COMP_BOOL_39, "OFF"), DDkwd__(COMP_BOOL_4, "OFF"), DDkwd__(COMP_BOOL_40, "ON"), DDkwd__(COMP_BOOL_41, "OFF"), DDkwd__(COMP_BOOL_42, "ON"), DDkwd__(COMP_BOOL_43, "OFF"), DDkwd__(COMP_BOOL_44, "OFF"), DDkwd__(COMP_BOOL_45, "ON"), DDkwd__(COMP_BOOL_46, "OFF"), DDkwd__(COMP_BOOL_47, "ON"), DDkwd__(COMP_BOOL_48, "ON"), // Turned "Off" because of Regression failure DDkwd__(COMP_BOOL_49, "OFF"), DDkwd__(COMP_BOOL_5, "ON"), DDkwd__(COMP_BOOL_50, "OFF"), DDkwd__(COMP_BOOL_51, "OFF"), DDkwd__(COMP_BOOL_52, "OFF"), DDkwd__(COMP_BOOL_53, "ON"), //Turned "ON" for OCB Cost DDkwd__(COMP_BOOL_54, "OFF"), DDkwd__(COMP_BOOL_55, "OFF"), DDkwd__(COMP_BOOL_56, "OFF"), DDkwd__(COMP_BOOL_57, "ON"), DDkwd__(COMP_BOOL_58, "OFF"), DDkwd__(COMP_BOOL_59, "OFF"), DDkwd__(COMP_BOOL_6, "OFF"), // comp_bool_60 is used in costing of an exchange operator. This is // used in deciding to use Nodemap decoupling and other exchange // costing logic. DDkwd__(COMP_BOOL_60, "ON"), DDkwd__(COMP_BOOL_61, "OFF"), DDkwd__(COMP_BOOL_62, "OFF"), DDkwd__(COMP_BOOL_63, "OFF"), DDkwd__(COMP_BOOL_64, "OFF"), DDkwd__(COMP_BOOL_65, "OFF"), DDkwd__(COMP_BOOL_66, "OFF"), DDkwd__(COMP_BOOL_67, "ON"), // Being used in Cardinality Estimation DDkwd__(COMP_BOOL_68, "ON"), DDkwd__(COMP_BOOL_69, "OFF"), DDkwd__(COMP_BOOL_7, "OFF"), DDkwd__(COMP_BOOL_70, "ON"), DDkwd__(COMP_BOOL_71, "OFF"), DDkwd__(COMP_BOOL_72, "OFF"), DDkwd__(COMP_BOOL_73, "OFF"), DDkwd__(COMP_BOOL_74, "ON"), DDkwd__(COMP_BOOL_75, "ON"), DDkwd__(COMP_BOOL_76, "ON"), DDkwd__(COMP_BOOL_77, "OFF"), DDkwd__(COMP_BOOL_78, "OFF"), DDkwd__(COMP_BOOL_79, "ON"), DDkwd__(COMP_BOOL_8, "OFF"), DDkwd__(COMP_BOOL_80, "OFF"), DDkwd__(COMP_BOOL_81, "OFF"), DDkwd__(COMP_BOOL_82, "OFF"), DDkwd__(COMP_BOOL_83, "ON"), DDkwd__(COMP_BOOL_84, "OFF"), DDkwd__(COMP_BOOL_85, "OFF"), DDkwd__(COMP_BOOL_86, "OFF"), DDkwd__(COMP_BOOL_87, "OFF"), DDkwd__(COMP_BOOL_88, "OFF"), DDkwd__(COMP_BOOL_89, "OFF"), DDkwd__(COMP_BOOL_9, "OFF"), DDkwd__(COMP_BOOL_90, "ON"), DDkwd__(COMP_BOOL_91, "OFF"), DDkwd__(COMP_BOOL_92, "OFF"), // used by generator. DDkwd__(COMP_BOOL_93, "ON"), // turn on pushdown for IUDs involving MVs. 
Default is off DDkwd__(COMP_BOOL_94, "OFF"), DDkwd__(COMP_BOOL_95, "OFF"), DDkwd__(COMP_BOOL_96, "OFF"), DDkwd__(COMP_BOOL_97, "OFF"), DDkwd__(COMP_BOOL_98, "ON"), DDkwd__(COMP_BOOL_99, "OFF"), DDflt0_(COMP_FLOAT_0, "0.002"), DDflt0_(COMP_FLOAT_1, "0.00002"), DDflt0_(COMP_FLOAT_2, "0"), DDflt0_(COMP_FLOAT_3, "0.01"), DDflt0_(COMP_FLOAT_4, "1.1"), DDflt__(COMP_FLOAT_5, "0.01"), // For Split Top cost adjustments : 0.25 DDflt__(COMP_FLOAT_6, "0.67"), // used to set the fudge factor which // is used to estimate cardinality of an // aggregate function in an equi-join expression DDflt__(COMP_FLOAT_7, "1.5"), DDflt__(COMP_FLOAT_8, "0.8"), // min expected #groups when HGB under right side of NLJ DDflt__(COMP_FLOAT_9, "1002.0"), DDint__(COMP_INT_0, "5000"), DDint__(COMP_INT_1, "0"), DDint__(COMP_INT_10, "3"), DDint__(COMP_INT_11, "-1"), DDint__(COMP_INT_12, "0"), DDint__(COMP_INT_13, "0"), DDint__(COMP_INT_14, "0"), DDint__(COMP_INT_15, "7"), DDint__(COMP_INT_16, "1000000"), DDint__(COMP_INT_17, "1000000"), DDint__(COMP_INT_18, "1"), DDint__(COMP_INT_19, "2"), DDint__(COMP_INT_2, "1"), DDint__(COMP_INT_20, "4"), DDint__(COMP_INT_21, "0"), DDint__(COMP_INT_22, "0"), // used to control old parser based INLIST transformation // 0 ==> OFF, positive value implies ON and has the effect of implicitly shutting down much of OR_PRED transformations // this cqd has been retained as a fallback in case OR_PRED has bugs. DDint__(COMP_INT_23, "22"), DDint__(COMP_INT_24, "1000000000"), DDint__(COMP_INT_25, "0"), DDint__(COMP_INT_26, "1"), DDint__(COMP_INT_27, "0"), DDint__(COMP_INT_28, "0"), DDint__(COMP_INT_29, "0"), DDint__(COMP_INT_3, "5"), DDint__(COMP_INT_30, "5"), DDint__(COMP_INT_31, "5"), DDint__(COMP_INT_32, "100"), DDint__(COMP_INT_33, "0"), DDint__(COMP_INT_34, "10000"), // lower bound: 10000 DDint__(COMP_INT_35, "500000"), // upper bound: 200000 DDint__(COMP_INT_36, "128"), // Bounds for producer for OCB DDint__(COMP_INT_37, "0"), DDint__(COMP_INT_38, "0"), // test master's abend DDint__(COMP_INT_39, "0"), // test esp's abend DDint__(COMP_INT_4, "400"), DDint__(COMP_INT_40, "10"), // this defines the percentage of selectivity after applying equality predicates on single column histograms // beyond which the optimizer should use MC stats DDint__(COMP_INT_41, "0"), DDint__(COMP_INT_42, "0"), DDint__(COMP_INT_43, "3"), // this is only for testing purposes. Once HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION is set to ON by default, the value of this CQD should be adjusted DDint__(COMP_INT_44, "1000000"), // frequency threshold above which // a boundary value will be inclded // in the frequentValueList (stats) DDint__(COMP_INT_45, "300"), DDint__(COMP_INT_46, "10"), DDint__(COMP_INT_47, "0"), DDint__(COMP_INT_48, "32"), // # trips thru scheduler task list before eval of CPU time limit. DDint__(COMP_INT_49, "0"), DDint__(COMP_INT_5, "0"), DDint__(COMP_INT_50, "0"), DDint__(COMP_INT_51, "0"), DDint__(COMP_INT_52, "0"), DDint__(COMP_INT_53, "0"), DDint__(COMP_INT_54, "0"), DDint__(COMP_INT_55, "0"), DDint__(COMP_INT_56, "0"), DDint__(COMP_INT_57, "0"), DDint__(COMP_INT_58, "0"), DDint__(COMP_INT_59, "0"), DDint__(COMP_INT_6, "400"), // comp_int_60 is used in costing of an exchnage operator. It is // used to indicate buffer size of a DP2 exchange when sending // messages down. 
DDint__(COMP_INT_60, "4"), DDint__(COMP_INT_61, "0"), // Exchange operator default value DDint__(COMP_INT_62, "10000"), DDint__(COMP_INT_63, "10000"), // SG Insert issue DDint__(COMP_INT_64, "0"), DDint__(COMP_INT_65, "0"), DDint__(COMP_INT_66, "0"), // to change #buffers per flushed cluster DDint__(COMP_INT_67, "8"), // to test #outer-buffers per a batch DDint__(COMP_INT_68, "0"), DDint__(COMP_INT_69, "0"), DDint__(COMP_INT_7, "10000000"), DDint__(COMP_INT_70, "1000000"), DDint__(COMP_INT_71, "0"), DDint__(COMP_INT_72, "0"), // if set to 1, allows keyPredicate to be inserted without passing key col. DDint__(COMP_INT_73, "1"), // if set to 1, disables cursor_delete plan if there are no alternate indexes. DDint__(COMP_INT_74, "0"), DDint__(COMP_INT_75, "0"), DDint__(COMP_INT_76, "0"), DDint__(COMP_INT_77, "0"), DDint__(COMP_INT_78, "0"), DDint__(COMP_INT_79, "0"), // this is used temporaraly as value for parallel threshold // in case ATTEMPT_ESP_PARALLELISM is set to MAXIMUM DDint__(COMP_INT_8, "20"), DDint__(COMP_INT_80, "3"), DDint__(COMP_INT_81, "0"), DDint__(COMP_INT_82, "0"), DDint__(COMP_INT_83, "0"), // max num of retries after parl purgedata open/control call errs.Default 25. DDint__(COMP_INT_84, "25"), // delay between each paral pd error retry. Default is 2 seconds. DDint__(COMP_INT_85, "2"), DDint__(COMP_INT_86, "0"), DDint__(COMP_INT_87, "0"), DDint__(COMP_INT_88, "0"), DDint__(COMP_INT_89, "2"), DDint__(COMP_INT_9, "0"), DDint__(COMP_INT_90, "0"), DDint__(COMP_INT_91, "0"), DDint__(COMP_INT_92, "0"), DDint__(COMP_INT_93, "0"), DDint__(COMP_INT_94, "0"), DDint__(COMP_INT_95, "0"), DDint__(COMP_INT_96, "0"), DDint__(COMP_INT_97, "0"), DDint__(COMP_INT_98, "512"), DDint__(COMP_INT_99, "10"), DD_____(COMP_STRING_1, "NONE"), DD_____(COMP_STRING_2, ""), DD_____(COMP_STRING_3, ""), DD_____(COMP_STRING_4, ""), DD_____(COMP_STRING_5, ""), DD_____(COMP_STRING_6, ""), // Configured_memory_for defaults are all measured in KB DDui___(CONFIGURED_MEMORY_FOR_BASE, "16384"), DDui___(CONFIGURED_MEMORY_FOR_DAM, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MINIMUM_HASH, "20480"), DDui___(CONFIGURED_MEMORY_FOR_MXESP, "8192"), DDkwd__(CONSTANT_FOLDING, "OFF"), DDkwd__(COSTING_SHORTCUT_GROUPBY_FIX, "ON"), DDflt0_(COST_PROBE_DENSITY_THRESHOLD, ".25"), // As of 3/23/98 the tupp desc. length is 12 bytes. Change when executor // changes. DDflt0_(COST_TUPP_DESC_LENGTH_IN_KB, "0.01171875"), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_OVERHEAD, "10."), DDflt0_(CPUCOST_COMPARE_COMPLEX_DATA_TYPE_PER_BYTE, ".1"), // Same as CPUCOST_PREDICATE_COMPARISON // Change HH_OP_PROBE_HASH_TABLE when you change this value: DDflt0_(CPUCOST_COMPARE_SIMPLE_DATA_TYPE, ".200"), // no cost overhead assumed: DDflt0_(CPUCOST_COPY_ROW_OVERHEAD, "0."), // change CPUCOST_HASH_PER_KEY when changing this value DDflt0_(CPUCOST_COPY_ROW_PER_BYTE, ".0007"), DDflt0_(CPUCOST_COPY_SIMPLE_DATA_TYPE, ".005"), // This is a per data request overhead cost paid by the cpu DDflt0_(CPUCOST_DATARQST_OVHD, ".01"), DDflt0_(CPUCOST_DM_GET, ".001"), DDflt0_(CPUCOST_DM_UPDATE, ".001"), DDflt0_(CPUCOST_ENCODE_PER_BYTE, ".002"), DDflt0_(CPUCOST_ESP_INITIALIZATION, "10"), // The previous observation had calculated the number of seconds to // aggregate incorrectly. 
Now: // Number of seconds to scan 100,000 rows @ 208 bytes: 4 // Number of seconds to scan 100,000 rows @ 208 bytes and aggregate // 15 aggregates: 17 // Thus, number of seconds per aggregate = (17-4)/15 = 0.866667 // CPUCOST_PER_ROW = 1.13333/(0.00005*100,000) = 0.1733 // previous observation // It takes 13.96 seconds to aggregate 99,999 rows using // 15 expressions, thus at 0.00005 et_cpu, we have that // the cost to eval an arith op is: // 6.14 / (0.00005 * 99,9999 * 15) = 0.0819 DDflt0_(CPUCOST_EVAL_ARITH_OP, ".0305"), DDflt0_(CPUCOST_EVAL_FUNC_DEFAULT, "10."), DDflt0_(CPUCOST_EVAL_LOGICAL_OP, "1."), DDflt0_(CPUCOST_EVAL_SIMPLE_PREDICATE, "1."), DDflt0_(CPUCOST_EXCHANGE_COST_PER_BYTE, ".002"), DDflt0_(CPUCOST_EXCHANGE_COST_PER_ROW, ".002"), DDflt0_(CPUCOST_EXCHANGE_INTERNODE_COST_PER_BYTE, ".008"), DDflt0_(CPUCOST_EXCHANGE_MAPPING_FUNCTION, ".01"), // was 0.1, but now 0.011 // XDDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".011"), // Set the additional cost of copying a byte to message buffer for // remote node to be the same as for inter node, 0.01 // Also change it to be internalized DDflt0_(CPUCOST_EXCHANGE_REMOTENODE_COST_PER_BYTE, ".01"), DDflt0_(CPUCOST_EXCHANGE_SPLIT_FUNCTION, ".01"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // History: // Before 01/06/98: 0.005 DDflt0_(CPUCOST_HASH_PER_BYTE, ".057325"), // Assume // CPUCOST_HASH_PER_KEY = 4 * CPUCOST_HASH_PER_BYTE // From observation: // For a case when all the hash table fits into memory: // 01/05/98: 42,105 rows inserted per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to insert one row = 1/42105 = 0.00002375 // thd. of instructions per row inserted = 1/42105/0.00005 = 0.4750 // The cost is distributed as follows: // CPUCOST_HASH_PER_KEY + CPUCOST_HASH_PER_BYTE*4 + // HH_OP_INSERT_ROW_TO_CHAIN + CPUCOST_COPY_ROW_PER_BYTE * 4 // = 0.4750 // Thus we have: // 2* CPUCOST_HASH_PER_KEY + 0.01 + 0.0016*4 = 0.4750 // -> CPUCOST_HASH_PER_KEY = 0.4586/2 = 0.2293 // History: // Before 01/06/98: 0.02 // Change // CPUCOST_HASH_PER_BYTE // when changing this value DDflt0_(CPUCOST_HASH_PER_KEY, "1.29"), DDflt0_(CPUCOST_LIKE_COMPARE_OVERHEAD, "10."), DDflt0_(CPUCOST_LIKE_COMPARE_PER_BYTE, ".1"), DDflt0_(CPUCOST_LOCK_ROW, ".01"), DDflt0_(CPUCOST_NJ_TUPLST_FF, "10."), // Observation (A971125_1): // CPU time to scan 100,000 rows with no exe pred: 10 // CPU time to scan 100,000 rows with an exe pred like // nonkeycol < K: 11 // CPU time spend in every row: 1/100,000 = .00001 // Thus, at 0.00005 th. inst. per sec we have: 0.00001/0.00005 = // 0.2 thousand inst. to evaluate every row: // // Predicate comparison is very expensive right now (10/08/97) // (cost it that it takes like 1000 instruction for one comparison) // 10/08/97: 1. // Change // CPUCOST_COMPARE_SIMPLE_DATA_TYPE // when you change this value: // History // Before 04/30/98: .2 DDflt0_(CPUCOST_PREDICATE_COMPARISON, ".08"), // Cost of copying the data from disk to the DP2 Cache: DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_KB, "2.5"), DDflt0_(CPUCOST_SCAN_DSK_TO_DP2_PER_SEEK, "0.0"), // The communication between DP2 and ExeInDp2 requires to encode // and decode the key. DDflt0_(CPUCOST_SCAN_KEY_LENGTH, "0."), // The communication between DP2 and ExeInDp2 is complex and // ever changing. 
The following factor is introduced to // make the costing of scan fit observed CPU time for the scan: DDflt0_(CPUCOST_SCAN_OVH_PER_KB, "0.984215"), DDflt0_(CPUCOST_SCAN_OVH_PER_ROW, "0.0"), // It takes about 1/3 of a second to open a table, thus with a // 0.00005 ff for cpu elapsed time we get: // 1/3/0.00005 = 7000 thousands instructions // CPUCOST_SUBSET_OPEN lumps together all the overhead needed // to set-up the access to each partition. Thus it is a blocking // cost, nothing can overlap with it. DDflt0_(CPUCOST_SUBSET_OPEN, "7000"), DDflt0_(CPUCOST_SUBSET_OPEN_AFTER_FIRST, "1250"), DDflt0_(CPUCOST_TUPLE_REFERENCE, ".001"), DDui___(CREATE_DEFINITION_SCHEMA_VERSION, "0"), DDkwd__(CREATE_EXTERNAL_USER_NAME_INDEX, "OFF"), DDkwd__(CREATE_FOR_NO_RDF_REPLICATE, "OFF"), DDkwd__(CREATE_METADATA_TABLE, "OFF"), DDkwd__(CREATE_OBJECTS_IN_METADATA_ONLY, "OFF"), DDkwd__(CROSS_PRODUCT_CONTROL, "ON"), // CQDs for Common Subexpressions (CSEs) // cache queries containing temp tables for common subexpressions DDkwd__(CSE_CACHE_TEMP_QUERIES, "OFF"), // "cleanup obsolete volatile tables" command cleans up Hive temp tables DDkwd__(CSE_CLEANUP_HIVE_TABLES, "OFF"), // don't temp if all consumers have preds on n key columns DDui___(CSE_COMMON_KEY_PRED_CONTROL, "1"), // emit warnings that help diagnose why CSEs are not shared DDkwd__(CSE_DEBUG_WARNINGS, "OFF"), // create a CommonSubExpr node for CTEs defined in WITH clauses (OFF/ON) DDkwd__(CSE_FOR_WITH, "OFF"), // use Hive tables as temp tables DDkwd__(CSE_HIVE_TEMP_TABLE, "ON"), // don't temp if avg consumer has preds on more than n percent of key cols DDflt0_(CSE_PCT_KEY_COL_PRED_CONTROL, "49.9"), // print debugging info on stdout DDkwd__(CSE_PRINT_DEBUG_INFO, "OFF"), // limit temp table size (based on max. card and regular card) DDflt0_(CSE_TEMP_TABLE_MAX_MAX_SIZE, "1E12"), DDflt0_(CSE_TEMP_TABLE_MAX_SIZE, "1E9"), // implement CommonSubExpr as a temp table (OFF/SYSTEM/ON) DDkwd__(CSE_USE_TEMP, "SYSTEM"), SDDui___(CYCLIC_ESP_PLACEMENT, "1"), // if this one is "ON" it overwrites optimizer heuristics 4 & 5 as "ON" // if it's "OFF" then the defaults of the two heuristics will be used DDkwd__(DATA_FLOW_OPTIMIZATION, "ON"), // DDL Default location support DD_____(DDL_DEFAULT_LOCATIONS, ""), DDkwd__(DDL_EXPLAIN, "OFF"), DDkwd__(DDL_TRANSACTIONS, "ON"), // We ignore this setting for the first (SYSTEM_DEFAULTS) table open+read. DDkwd__(DEFAULTS_TABLE_ACCESS_WARNINGS, "OFF"), SDDkwd__(DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), XDDui1__(DEFAULT_DEGREE_OF_PARALLELISM, "2"), SDDkwd__(DEFAULT_SCHEMA_ACCESS_ONLY, "OFF"), SDDkwd__(DEFAULT_SCHEMA_NAMETYPE, "SYSTEM"), // These DEF_xxx values of "" get filled in by updateSystemParameters(). 
#define def_DEF_CHUNK_SIZE 5000000.0 #define str_DEF_CHUNK_SIZE "5000000.0" // DDui2__(DEF_CHUNK_SIZE, str_DEF_CHUNK_SIZE), DD_____(DEF_CPU_ARCHITECTURE, ""), DDui1__(DEF_DISCS_ON_CLUSTER, ""), DDui1__(DEF_INSTRUCTIONS_SECOND, ""), DDui___(DEF_LOCAL_CLUSTER_NUMBER, ""), DDui___(DEF_LOCAL_SMP_NODE_NUMBER, ""), //DEF_MAX_HISTORY_ROWS made external RV 06/21/01 CR 10-010425-2440 XDDui1__(DEF_MAX_HISTORY_ROWS, "1024"), DDui___(DEF_NUM_BM_CHUNKS, ""), DDui1__(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS, ""), DDui1__(DEF_NUM_SMP_CPUS, ""), DDui2__(DEF_PAGE_SIZE, ""), DDui1__(DEF_PHYSICAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_TOTAL_MEMORY_AVAILABLE, ""), DDui1__(DEF_VIRTUAL_MEMORY_AVAILABLE, ""), DDkwd__(DESTROY_ORDER_AFTER_REPARTITIONING, "OFF"), // detailed executor statistics DDkwd__(DETAILED_STATISTICS, "OPERATOR"), DDkwd__(DIMENSIONAL_QUERY_OPTIMIZATION, "OFF"), DDkwd__(DISABLE_BUFFERED_INSERTS, "OFF"), DDkwd__(DISABLE_READ_ONLY, "OFF"), DD_____(DISPLAY_DATA_FLOW_GRAPH, "OFF"), XDDkwd__(DISPLAY_DIVISION_BY_COLUMNS, "OFF"), // opens are distributed among all partitions instead of just root. // 0: no distribution, only use root. // -1: max distribution, all partitions // <number>: num of partitions per segment DDint__(DISTRIBUTE_OPENS, "-1"), // temp. disable dop reduction logic DDflt0_(DOP_REDUCTION_ROWCOUNT_THRESHOLD, "0.0"), DDkwd__(DO_MINIMAL_RENAME, "OFF"), // if set, then space needed for executor structures at runtime is // optimized such that the allocation starts with a low number and then // is allocated on a need basis. This means that we may have to allocate // more smaller chunks if much space is needed. But it helps in the case // where many plans are being used and each one only takes a small amount // of space. This optimization especially helps in case of Dp2 fragments // as there is only a finite amount of space available there. Once that // limit is reached, and a new plan is shipped, it means that an existing // eid plan from dp2 memory need to be swapped out and then refixed up. // By reducing space utilization, we end up with more eid sessions in // use inside of dp2. DDkwd__(DO_RUNTIME_EID_SPACE_COMPUTATION, "OFF"), DDkwd__(DO_RUNTIME_SPACE_OPTIMIZATION, "OFF"), DDui2__(DP2_BLOCK_HEADER_SIZE, "96"), // DP2 Cache defaults as of 06/08/98. DDui1__(DP2_CACHE_1024_BLOCKS, "152"), DDui1__(DP2_CACHE_16K_BLOCKS, "1024"), DDui1__(DP2_CACHE_2048_BLOCKS, "150"), DDui1__(DP2_CACHE_32K_BLOCKS, "512"), DDui1__(DP2_CACHE_4096_BLOCKS, "4096"), DDui1__(DP2_CACHE_512_BLOCKS, "152"), DDui1__(DP2_CACHE_8K_BLOCKS, "2048"), // The cache size is about 2000 pages @ 4k each page DDui1__(DP2_CACHE_SIZE_IN_KB, "8000"), // Exchange Costing // 6/12/98. // End of buffer header is 32 bytes or .0313 KB. // Each Exchange->DP2 request is 48 bytes or .0469 KB. DDflte_(DP2_END_OF_BUFFER_HEADER_SIZE, ".0313"), DDflte_(DP2_EXCHANGE_REQUEST_SIZE, ".0469"), DDpct__(DP2_FRACTION_SEEK_FROM_RANDOM_TO_INORDER, "25"), DDui2__(DP2_MAX_READ_PER_ACCESS_IN_KB, "256"), // The buffer size, as of 10/07/97 is 32K DDui2__(DP2_MESSAGE_BUFFER_SIZE, "56"), // Exchange Costing // 6/12/98. 
// Message header for Exchange->DP2 is 18 bytes or .0176 KB DDflte_(DP2_MESSAGE_HEADER_SIZE, ".0176"), DDui2__(DP2_MESSAGE_HEADER_SIZE_BYTES, "18"), DDui1__(DP2_MINIMUM_FILE_SIZE_FOR_SEEK_IN_BLOCKS, "256"), DDint__(DP2_PRIORITY, "-1001"), DDint__(DP2_PRIORITY_DELTA, "-1001"), DDui1__(DP2_SEQ_READS_WITHOUT_SEEKS, "100"), DDkwd__(DYNAMIC_HISTOGRAM_COMPRESSION, "ON"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_DOWN, "1024"), DDui2__(DYN_PA_QUEUE_RESIZE_INIT_UP, "1024"), DDui2__(DYN_QUEUE_RESIZE_FACTOR, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_DOWN, "4"), DDui2__(DYN_QUEUE_RESIZE_INIT_UP, "4"), DDui1__(DYN_QUEUE_RESIZE_LIMIT, "9"), DDkwd__(EID_SPACE_USAGE_OPT, "OFF"), // For both of these CQDs see executor/ExDp2Trace.h for values. DDint__(EID_TRACE_STATES, "0"), DDtp___(EID_TRACE_STR, ""), DDkwd__(ELIMINATE_REDUNDANT_JOINS, "ON"), DDkwd__(ENABLE_DP2_XNS, "OFF"), DDSint__(ESP_ASSIGN_DEPTH, "0"), DDSint__(ESP_FIXUP_PRIORITY_DELTA, "0"), DDint__(ESP_IDLE_TIMEOUT, "1800"), // To match with set session defaults value DDkwd__(ESP_MULTI_FRAGMENTS, "ON"), DDkwd__(ESP_MULTI_FRAGMENT_QUOTAS, "ON"), DDui1500_4000(ESP_MULTI_FRAGMENT_QUOTA_VM, "4000"), DDui1_6(ESP_NUM_FRAGMENTS, "3"), DDui1_6(ESP_NUM_FRAGMENTS_WITH_QUOTAS, "6"), DDkwd__(ESP_ON_AGGREGATION_NODES_ONLY, "OFF"), DDSint__(ESP_PRIORITY, "0"), DDSint__(ESP_PRIORITY_DELTA, "0"), // Disable hints - if SYSTEM, enable on SSD, and disable only on HDD DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_HASH, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_CMP_HINTS_OVERFLOW_SORT, "SYSTEM"), DDkwd__(EXE_BMO_DISABLE_OVERFLOW, "OFF"), DDui___(EXE_BMO_MIN_SIZE_BEFORE_PRESSURE_CHECK_IN_MB, "50"), DDkwd__(EXE_BMO_SET_BUFFERED_WRITES, "OFF"), SDDkwd__(EXE_DIAGNOSTIC_EVENTS, "OFF"), DDui1__(EXE_HGB_INITIAL_HT_SIZE, "262144"), // == hash buffer DDflt__(EXE_HJ_MIN_NUM_CLUSTERS, "4"), DDkwd__(EXE_LOG_RETRY_IPC, "OFF"), // Total size of memory (in MB) available to BMOs (e.g., 1200 MB) SDDui___(EXE_MEMORY_AVAILABLE_IN_MB, "1200"), SDDui___(EXE_MEMORY_FOR_PARTIALHGB_IN_MB, "100"), SDDui___(EXE_MEMORY_FOR_PROBE_CACHE_IN_MB, "100"), // lower-bound memory limit for BMOs/nbmos (in MB) DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN, "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_PA , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_PROBE_CACHE , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE , "10"), DDui___(EXE_MEMORY_LIMIT_LOWER_BOUND_SORT , "10"), // total memory limit per CPU per query in MB DDpct1_50(EXE_MEMORY_LIMIT_NONBMOS_PERCENT, "15"), XDDui___(EXE_MEMORY_LIMIT_PER_CPU, "0"), // Memory not available for BMOs in master fragment in mxosrvr // (mostly due to QIO). 
DDui___(EXE_MEMORY_RESERVED_FOR_MXOSRVR_IN_MB,"544"), // Override the memory quota system; set limit per each and every BMO SDDflt__(EXE_MEM_LIMIT_PER_BMO_IN_MB, "0"), DDui1__(EXE_NUM_CONCURRENT_SCRATCH_IOS, "4"), // DDkwd__(EXE_PARALLEL_DDL, "ON"), DDui___(EXE_PA_DP2_STATIC_AFFINITY, "1"), DDkwd__(EXE_SINGLE_BMO_QUOTA, "ON"), // The following 3 are only for testing overflow; zero value means: ignore DDui___(EXE_TEST_FORCE_CLUSTER_SPLIT_AFTER_MB, "0"), DDui___(EXE_TEST_FORCE_HASH_LOOP_AFTER_NUM_BUFFERS, "0"), DDui___(EXE_TEST_HASH_FORCE_OVERFLOW_EVERY, "0"), DDkwd__(EXE_UTIL_RWRS, "OFF"), DDkwd__(EXPAND_DP2_SHORT_ROWS, "ON"), XDDint__(EXPLAIN_DESCRIPTION_COLUMN_SIZE, "-1"), DDkwd__(EXPLAIN_DETAIL_COST_FOR_CALIBRATION, "FALSE"), DDkwd__(EXPLAIN_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(EXPLAIN_IN_RMS, "ON"), DDui___(EXPLAIN_OUTPUT_ROW_SIZE, "80"), DDui1__(EXPLAIN_ROOT_INPUT_VARS_MAX, "2000"), // maximum number of inputs that we can tolerate to // explain information for inputVars expression // this is needed to avoid stack overflow DDkwd__(EXPLAIN_SPACE_OPT, "ON"), DDkwd__(EXPLAIN_STRATEGIZER_PARAMETERS, "OFF"), DDflte_(EX_OP_ALLOCATE_ATP, ".02"), // Calibration // 01/23/98: 50. // Original: .1 DDflte_(EX_OP_ALLOCATE_BUFFER, "50."), DDflte_(EX_OP_ALLOCATE_BUFFER_POOL, ".1"), DDflte_(EX_OP_ALLOCATE_TUPLE, ".05"), // copy_atp affects the costing of NJ // History: // 08/21/98: 0.02, The previous change affected more than one operrator // 08/13/98: 1.0 // 01/08/98: 0.02 DDflte_(EX_OP_COPY_ATP, "1.1335"), DDflte_(EX_OP_DEQUEUE, ".02"), DDflte_(EX_OP_ENQUEUE, ".02"), DDkwd__(FAKE_VOLUME_ASSIGNMENTS, "OFF"), DDui1__(FAKE_VOLUME_NUM_VOLUMES, "24"), DDkwd__(FAST_DELETE, "OFF"), DDkwd__(FAST_DP2_SUBSET_OPT, "ON"), // upper and lower limit (2,10) must be in sync with error values in //ExFastTransport.cpp DDkwd__(FAST_EXTRACT_DIAGS, "OFF"), DDui2_10(FAST_EXTRACT_IO_BUFFERS, "6"), DDui___(FAST_EXTRACT_IO_TIMEOUT_SEC, "60"), DDkwd__(FAST_REPLYDATA_MOVE, "ON"), SDDkwd__(FFDC_DIALOUTS_FOR_MXCMP, "OFF"), DDkwd__(FIND_COMMON_SUBEXPRS_IN_OR, "ON"), DDui___(FLOAT_ESP_RANDOM_NUM_SEED, "0"), DDkwd__(FORCE_BUSHY_CQS, "ON"), DDkwd__(FORCE_PARALLEL_CREATE_INDEX, "OFF"), DDkwd__(FORCE_PARALLEL_INSERT_SELECT, "OFF"), DDkwd__(FORCE_PASS_ONE, "OFF"), DDkwd__(FORCE_PASS_TWO, "ON"), // Control if plan fragments need to be compressed // DDui___(FRAG_COMPRESSION_THRESHOLD, "16"), // Controls FSO Tests for debug // DDui___(FSO_RUN_TESTS, "0"), // Controls use of Simple File Scan Optimizer // IF 0 - Use original "Complex" File Scan Optimizer. // (in case simple causes problems) // IF 1 - Use logic to determine FSO to use. (default) // IF 2 - Use logic to determine FSO to use, but also use new // executor predicate costing. // IF >2 - Always use new "Simple" File Scan Optimizer. // (not recommended) // DDui___(FSO_TO_USE, "1"), // Disallow/Allow full outer joins in MultiJoin framework DDkwd__(FULL_OUTER_JOINS_SPOIL_JBB, "OFF"), DDkwd__(GA_PROP_INDEXES_ARITY_1, "ON"), // this default value is filled in // NADefaults::initCurrentDefaultsWithDefaultDefaults. The default value // is ON for static compiles and OFF for dynamic queries. 
DDkwd__(GENERATE_EXPLAIN, "ON"), DDipcBu(GEN_ALIGNED_PA_DP2_BUFFER_SIZE, "31000"), DDui1__(GEN_CBUF_BUFFER_SIZE, "30000"), DDui1__(GEN_CBUF_NUM_BUFFERS, "4"), DDui1__(GEN_CBUF_SIZE_DOWN, "8"), DDui1__(GEN_CBUF_SIZE_UP, "8"), DDui___(GEN_CS_BUFFER_SIZE, "0"), DDui___(GEN_CS_NUM_BUFFERS, "0"), DDui___(GEN_CS_SIZE_DOWN, "4"), DDui___(GEN_CS_SIZE_UP, "4"), DDkwd__(GEN_DBLIMITS_LARGER_BUFSIZE, "ON"), DDui1__(GEN_DDL_BUFFER_SIZE, "30000"), DDui1__(GEN_DDL_NUM_BUFFERS, "4"), DDui1__(GEN_DDL_SIZE_DOWN, "2"), DDui1__(GEN_DDL_SIZE_UP, "32"), DDui1__(GEN_DEL_BUFFER_SIZE, "512"), DDui1__(GEN_DEL_NUM_BUFFERS, "5"), DDui1__(GEN_DEL_SIZE_DOWN, "2"), DDui1__(GEN_DEL_SIZE_UP, "2"), DDui1__(GEN_DESC_BUFFER_SIZE, "10240"), DDui1__(GEN_DESC_NUM_BUFFERS, "4"), DDui1__(GEN_DESC_SIZE_DOWN, "2"), DDui1__(GEN_DESC_SIZE_UP, "16"), DDui1__(GEN_DP2I_BUFFER_SIZE, "10000"), DDui1__(GEN_DP2I_NUM_BUFFERS, "2"), DDui1__(GEN_DP2I_SIZE_DOWN, "32"), DDui1__(GEN_DP2I_SIZE_UP, "64"), DDui1__(GEN_DPDU_BUFFER_SIZE, "2"), DDui1__(GEN_DPDU_NUM_BUFFERS, "1"), DDui1__(GEN_DPDU_SIZE_DOWN, "2"), DDui1__(GEN_DPDU_SIZE_UP, "2"), DDui1__(GEN_DPRO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPRO_NUM_BUFFERS, "1"), DDui1__(GEN_DPRO_SIZE_DOWN, "16"), DDui1__(GEN_DPRO_SIZE_UP, "16"), DDui1__(GEN_DPSO_BUFFER_SIZE, "10240"), DDui1__(GEN_DPSO_NUM_BUFFERS, "4"), DDui1__(GEN_DPSO_SIZE_DOWN, "2048"), DDui1__(GEN_DPSO_SIZE_UP, "2048"), DDui1__(GEN_DPUO_BUFFER_SIZE, "10000"), DDui1__(GEN_DPUO_NUM_BUFFERS, "4"), DDui1__(GEN_DPUO_SIZE_DOWN, "2048"), DDui1__(GEN_DPUO_SIZE_UP, "2048"), DDui1__(GEN_DPVI_BUFFER_SIZE, "10000"), DDui1__(GEN_DPVI_NUM_BUFFERS, "2"), DDui1__(GEN_DPVI_SIZE_DOWN, "32"), DDui1__(GEN_DPVI_SIZE_UP, "64"), DDui___(GEN_EIDR_BROKEN_TREE_CHECK_INTERVAL, "128"), DDipcBu(GEN_EIDR_BUFFER_SIZE, "31000"), DDui1__(GEN_EIDR_NUM_BUFFERS, "3"), DDui1__(GEN_EIDR_SIZE_DOWN, "2"), DDui1__(GEN_EIDR_SIZE_UP, "2"), DDui___(GEN_EIDR_STATS_REPLY_INTERVAL, "3000"), DDint__(GEN_EXCHANGE_MAX_MEM_IN_KB, "4000"), DDint__(GEN_EXCHANGE_MSG_COUNT, "80"), // Fast extract settings are for UDR method invocations DDui1__(GEN_FE_BUFFER_SIZE, "31000"), DDui1__(GEN_FE_NUM_BUFFERS, "2"), DDui1__(GEN_FE_SIZE_DOWN, "4"), DDui1__(GEN_FE_SIZE_UP, "4"), DDui1__(GEN_FSRT_BUFFER_SIZE, "5120"), DDui1__(GEN_FSRT_NUM_BUFFERS, "5"), DDui1__(GEN_FSRT_SIZE_DOWN, "2"), DDui1__(GEN_FSRT_SIZE_UP, "8"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HGBY_BUFFER_SIZE, "262144"), DDui1__(GEN_HGBY_NUM_BUFFERS , "5"), DDui1__(GEN_HGBY_PARTIAL_GROUP_FLUSH_THRESHOLD, "100"), DDui___(GEN_HGBY_PARTIAL_GROUP_ROWS_PER_CLUSTER, "0"), DDui1__(GEN_HGBY_SIZE_DOWN, "2048"), DDui1__(GEN_HGBY_SIZE_UP, "2048"), // Do not alter the buffer size; it must be 56K for SCRATCH_MGMT_OPTION == 5 DDui1__(GEN_HSHJ_BUFFER_SIZE, "262144"), // Controls use of the hash join min/max optimization. 
DDkwd__(GEN_HSHJ_MIN_MAX_OPT, "OFF"), DDui1__(GEN_HSHJ_NUM_BUFFERS, "1"), DDui1__(GEN_HSHJ_SIZE_DOWN, "2048"), DDui1__(GEN_HSHJ_SIZE_UP, "2048"), DDui1__(GEN_IAR_BUFFER_SIZE, "10240"), DDui1__(GEN_IAR_NUM_BUFFERS, "1"), DDui1__(GEN_IAR_SIZE_DOWN, "2"), DDui1__(GEN_IAR_SIZE_UP, "4"), DDui1__(GEN_IMDT_BUFFER_SIZE, "2"), DDui1__(GEN_IMDT_NUM_BUFFERS, "1"), DDui1__(GEN_IMDT_SIZE_DOWN, "2"), DDui1__(GEN_IMDT_SIZE_UP, "2"), DDui1__(GEN_INS_BUFFER_SIZE, "10240"), DDui1__(GEN_INS_NUM_BUFFERS, "3"), DDui1__(GEN_INS_SIZE_DOWN, "4"), DDui1__(GEN_INS_SIZE_UP, "128"), // Controls LeanEr Expression generation DDkwd__(GEN_LEANER_EXPRESSIONS, "ON"), DDui1__(GEN_LOCK_BUFFER_SIZE, "1024"), DDui1__(GEN_LOCK_NUM_BUFFERS, "1"), DDui1__(GEN_LOCK_SIZE_DOWN, "4"), DDui1__(GEN_LOCK_SIZE_UP, "4"), DDui1__(GEN_MATR_BUFFER_SIZE, "2"), DDui1__(GEN_MATR_NUM_BUFFERS, "1"), DDui1__(GEN_MATR_SIZE_DOWN, "2"), DDui1__(GEN_MATR_SIZE_UP, "8"), DDui___(GEN_MAX_NUM_PART_DISK_ENTRIES, "3"), DDui___(GEN_MAX_NUM_PART_NODE_ENTRIES, "255"), DDui1__(GEN_MEM_PRESSURE_THRESHOLD, "10000"), DDui1__(GEN_MJ_BUFFER_SIZE, "32768"), DDui1__(GEN_MJ_NUM_BUFFERS, "1"), DDui1__(GEN_MJ_SIZE_DOWN, "2"), DDui1__(GEN_MJ_SIZE_UP, "1024"), DDui1__(GEN_ONLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_DOWN, "4"), DDui1__(GEN_ONLJ_LEFT_CHILD_QUEUE_UP, "2048"), DDui1__(GEN_ONLJ_NUM_BUFFERS, "5"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_DOWN, "2048"), DDui1__(GEN_ONLJ_RIGHT_SIDE_QUEUE_UP, "2048"), DDkwd__(GEN_ONLJ_SET_QUEUE_LEFT, "ON"), DDkwd__(GEN_ONLJ_SET_QUEUE_RIGHT, "ON"), DDui1__(GEN_ONLJ_SIZE_DOWN, "2048"), DDui1__(GEN_ONLJ_SIZE_UP, "2048"), DDui1__(GEN_PAR_LAB_OP_BUFFER_SIZE, "1024"), DDui1__(GEN_PAR_LAB_OP_NUM_BUFFERS, "1"), DDui1__(GEN_PAR_LAB_OP_SIZE_DOWN, "2"), DDui1__(GEN_PAR_LAB_OP_SIZE_UP, "4"), DDipcBu(GEN_PA_BUFFER_SIZE, "31000"), DDui1__(GEN_PA_NUM_BUFFERS, "5"), DDui1__(GEN_PA_SIZE_DOWN, "2048"), DDui1__(GEN_PA_SIZE_UP, "2048"), DDui1__(GEN_PROBE_CACHE_NUM_ENTRIES, "16384"),// number of entries DDui___(GEN_PROBE_CACHE_NUM_INNER, "0"), //0 means compiler decides DDui1__(GEN_PROBE_CACHE_SIZE_DOWN, "2048"), DDui1__(GEN_PROBE_CACHE_SIZE_UP, "2048"), DDui1__(GEN_RCRS_BUFFER_SIZE, "2"), DDui1__(GEN_RCRS_NUM_BUFFERS, "1"), DDui1__(GEN_RCRS_SIZE_DOWN, "8"), DDui1__(GEN_RCRS_SIZE_UP, "16"), DDkwd__(GEN_RESET_ACCESS_COUNTER, "OFF"), DDui1__(GEN_ROOT_BUFFER_SIZE, "2"), DDui1__(GEN_ROOT_NUM_BUFFERS, "1"), DDui1__(GEN_ROOT_SIZE_DOWN, "2"), DDui1__(GEN_ROOT_SIZE_UP, "2"), DDui1__(GEN_SAMPLE_BUFFER_SIZE, "5120"), DDui1__(GEN_SAMPLE_NUM_BUFFERS, "5"), DDui1__(GEN_SAMPLE_SIZE_DOWN, "16"), DDui1__(GEN_SAMPLE_SIZE_UP, "16"), DDui1__(GEN_SCAN_BUFFER_SIZE, "10240"), DDui1__(GEN_SCAN_NUM_BUFFERS, "10"), DDui1__(GEN_SCAN_SIZE_DOWN, "16"), DDui1__(GEN_SCAN_SIZE_UP, "32"), DDui1__(GEN_SEQFUNC_BUFFER_SIZE, "5120"), DDui1__(GEN_SEQFUNC_NUM_BUFFERS, "5"), DDui1__(GEN_SEQFUNC_SIZE_DOWN, "16"), DDui1__(GEN_SEQFUNC_SIZE_UP, "16"), DDkwd__(GEN_SEQFUNC_UNLIMITED_HISTORY, "OFF"), DDui1__(GEN_SEQ_BUFFER_SIZE, "512"), DDui1__(GEN_SEQ_NUM_BUFFERS, "5"), DDui1__(GEN_SEQ_SIZE_DOWN, "2"), DDui1__(GEN_SEQ_SIZE_UP, "2"), DDui1__(GEN_SGBY_BUFFER_SIZE, "5120"), DDui1__(GEN_SGBY_NUM_BUFFERS, "5"), DDui1__(GEN_SGBY_SIZE_DOWN, "2048"), DDui1__(GEN_SGBY_SIZE_UP, "2048"), DDui1__(GEN_SID_BUFFER_SIZE, "1024"), DDui1__(GEN_SID_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_BUFFER_SIZE, "2"), DDui1__(GEN_SNDB_NUM_BUFFERS, "4"), DDui1__(GEN_SNDB_SIZE_DOWN, "4"), DDui1__(GEN_SNDB_SIZE_UP, "128"), DDui___(GEN_SNDT_BUFFER_SIZE_DOWN, "0"), DDui___(GEN_SNDT_BUFFER_SIZE_UP, "0"), 
DDui1__(GEN_SNDT_NUM_BUFFERS, "2"), DDkwd__(GEN_SNDT_RESTRICT_SEND_BUFFERS, "ON"), DDui1__(GEN_SNDT_SIZE_DOWN, "4"), DDui1__(GEN_SNDT_SIZE_UP, "128"), DDui1__(GEN_SORT_MAX_BUFFER_SIZE, "5242880"), DDui1__(GEN_SORT_MAX_NUM_BUFFERS, "160"), DDui___(GEN_SORT_MIN_BUFFER_SIZE, "0"), DDui1__(GEN_SORT_NUM_BUFFERS, "4"), DDui1__(GEN_SORT_SIZE_DOWN, "2"), DDui1__(GEN_SORT_SIZE_UP, "1024"), DDkwd__(GEN_SORT_TOPN, "ON"), DDui1__(GEN_SPLB_BUFFER_SIZE, "2"), DDui1__(GEN_SPLB_NUM_BUFFERS, "1"), DDui1__(GEN_SPLB_SIZE_DOWN, "2"), DDui1__(GEN_SPLB_SIZE_UP, "2"), DDui1__(GEN_SPLT_BUFFER_SIZE, "2"), DDui1__(GEN_SPLT_NUM_BUFFERS, "1"), DDui1__(GEN_SPLT_SIZE_DOWN, "2048"), DDui1__(GEN_SPLT_SIZE_UP, "2048"), DDui1__(GEN_STPR_BUFFER_SIZE, "1024"), DDui1__(GEN_STPR_NUM_BUFFERS, "3"), DDui1__(GEN_STPR_SIZE_DOWN, "2"), DDui1__(GEN_STPR_SIZE_UP, "2"), DDui1__(GEN_TFLO_BUFFER_SIZE, "5120"), DDui1__(GEN_TFLO_NUM_BUFFERS, "2"), DDui1__(GEN_TFLO_SIZE_DOWN, "8"), DDui1__(GEN_TFLO_SIZE_UP, "16"), DDui512(GEN_TIMEOUT_BUFFER_SIZE, "4096"), DDui1__(GEN_TIMEOUT_NUM_BUFFERS, "1"), DDui2__(GEN_TIMEOUT_SIZE_DOWN, "2"), DDui2__(GEN_TIMEOUT_SIZE_UP, "4"), DDui1__(GEN_TRAN_BUFFER_SIZE, "4096"), DDui1__(GEN_TRAN_NUM_BUFFERS, "1"), DDui1__(GEN_TRAN_SIZE_DOWN, "2"), DDui1__(GEN_TRAN_SIZE_UP, "4"), DDui1__(GEN_TRSP_BUFFER_SIZE, "10240"), DDui1__(GEN_TRSP_NUM_BUFFERS, "5"), DDui1__(GEN_TRSP_SIZE_DOWN, "16"), DDui1__(GEN_TRSP_SIZE_UP, "16"), DDui1__(GEN_TUPL_BUFFER_SIZE, "1024"), DDui1__(GEN_TUPL_NUM_BUFFERS, "4"), DDui1__(GEN_TUPL_SIZE_DOWN, "2048"), DDui1__(GEN_TUPL_SIZE_UP, "2048"), // GEN_UDRRS_ settings are for stored procedure result // set proxy plans DDui1__(GEN_UDRRS_BUFFER_SIZE, "31000"), DDui1__(GEN_UDRRS_NUM_BUFFERS, "2"), DDui1__(GEN_UDRRS_SIZE_DOWN, "4"), DDui1__(GEN_UDRRS_SIZE_UP, "128"), // GEN_UDR_ settings are for UDR method invocations DDui1__(GEN_UDR_BUFFER_SIZE, "31000"), DDui1__(GEN_UDR_NUM_BUFFERS, "2"), DDui1__(GEN_UDR_SIZE_DOWN, "4"), DDui1__(GEN_UDR_SIZE_UP, "4"), DDui1__(GEN_UNLJ_BUFFER_SIZE, "5120"), DDui1__(GEN_UNLJ_NUM_BUFFERS, "5"), DDui1__(GEN_UNLJ_SIZE_DOWN, "8"), DDui1__(GEN_UNLJ_SIZE_UP, "16"), DDui1__(GEN_UN_BUFFER_SIZE, "10240"), DDui1__(GEN_UN_NUM_BUFFERS, "5"), DDui1__(GEN_UN_SIZE_DOWN, "8"), DDui1__(GEN_UN_SIZE_UP, "16"), DDui1__(GEN_UPD_BUFFER_SIZE, "5120"), DDui1__(GEN_UPD_NUM_BUFFERS, "5"), DDui1__(GEN_UPD_SIZE_DOWN, "2"), DDui1__(GEN_UPD_SIZE_UP, "2"), // Used when Compressed_Internal_Format is on to reduce space in the // hash buffers (Hash Join and Hash Groupby) and sort buffers. DDkwd__(GEN_VARIABLE_LENGTH_BUFFERS, "OFF"), DDui1__(GEN_XPLN_BUFFER_SIZE, "4096"), DDui1__(GEN_XPLN_NUM_BUFFERS, "3"), DDui1__(GEN_XPLN_SIZE_DOWN, "8"), DDui1__(GEN_XPLN_SIZE_UP, "16"), // When less or equal to this CQD (5000 rows by default), a partial root // will be running in the Master. Set to 0 to disable the feature. DDint__(GROUP_BY_PARTIAL_ROOT_THRESHOLD, "5000"), DDkwd__(GROUP_BY_PUSH_TO_BOTH_SIDES_OF_JOIN, "ON"), DDkwd__(GROUP_OR_ORDER_BY_EXPR, "ON"), // HASH_JOINS ON means do HASH_JOINS XDDkwd__(HASH_JOINS, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN1, "ON"), DDkwd__(HASH_JOINS_TYPE1_PLAN2, "ON"), // HBase defaults // Some of the more important ones: // HBASE_CATALOG: Catalog of "_ROW_" and "_CELL_" schemas // HBASE_COPROCESSORS: Enable use of co-processors for aggregates. 
// need to set the coprocessor in HBase config file // HBASE_INTERFACE: JNI or JNI_TRX (transactional interface) // HBASE_MAX_COLUMN_xxx_LENGTH: Max length of some // string columns in the "_ROW_" and "_CELL_" schemas // HBASE_SQL_IUD_SEMANTICS: Off: Don't check for existing rows for insert/update DDkwd__(HBASE_ASYNC_DROP_TABLE, "OFF"), DDkwd__(HBASE_ASYNC_OPERATIONS, "ON"), // HBASE_CACHE_BLOCKS, ON => cache every scan, OFF => cache no scan // SYSTEM => cache scans which take less than 1 RS block cache mem. DDui___(HBASE_BLOCK_SIZE, "65536"), DDkwd__(HBASE_CACHE_BLOCKS, "SYSTEM"), DD_____(HBASE_CATALOG, "HBASE"), DDkwd__(HBASE_CHECK_AND_UPDEL_OPT, "ON"), DDkwd__(HBASE_COMPRESSION_OPTION, ""), DDkwd__(HBASE_COPROCESSORS, "ON"), DDkwd__(HBASE_CREATE_OLD_MD_FOR_UPGRADE_TESTING, "OFF"), DDkwd__(HBASE_DATA_BLOCK_ENCODING_OPTION, ""), // If set to 'OFF' we get a stub cost of 1 for delete operations. // We can remove this once the delete costing code has broader // exposure. DDkwd__(HBASE_DELETE_COSTING, "ON"), DDflt0_(HBASE_DOP_PARALLEL_SCANNER, "0."), DDkwd__(HBASE_FILTER_PREDS, "OFF"), DDkwd__(HBASE_HASH2_PARTITIONING, "ON"), DDui___(HBASE_INDEX_LEVEL, "0"), DDui___(HBASE_MAX_COLUMN_INFO_LENGTH, "10000"), DDui___(HBASE_MAX_COLUMN_NAME_LENGTH, "100"), DDui___(HBASE_MAX_COLUMN_VAL_LENGTH, "1000"), DDui___(HBASE_MAX_ESPS, "9999"), DDui___(HBASE_MAX_NUM_SEARCH_KEYS, "512"), DDui1__(HBASE_MIN_BYTES_PER_ESP_PARTITION, "67108864"), DDkwd__(HBASE_NATIVE_IUD, "ON"), DDui1__(HBASE_NUM_CACHE_ROWS_MAX, "1024"), DDui1__(HBASE_NUM_CACHE_ROWS_MIN, "100"), DDkwd__(HBASE_RANGE_PARTITIONING, "ON"), DDkwd__(HBASE_RANGE_PARTITIONING_MC_SPLIT, "ON"), DDkwd__(HBASE_RANGE_PARTITIONING_PARTIAL_COLS,"ON"), DDui___(HBASE_REGION_SERVER_MAX_HEAP_SIZE, "1024"), // in units of MB DDkwd__(HBASE_ROWSET_VSBB_OPT, "ON"), DDusht_(HBASE_ROWSET_VSBB_SIZE, "1024"), DDflt0_(HBASE_SALTED_TABLE_MAX_FILE_SIZE, "0"), DDkwd__(HBASE_SALTED_TABLE_SET_SPLIT_POLICY, "ON"), DD_____(HBASE_SCHEMA, "HBASE"), DDkwd__(HBASE_SERIALIZATION, "ON"), DD_____(HBASE_SERVER, ""), DDkwd__(HBASE_SMALL_SCANNER, "OFF"), DDkwd__(HBASE_SQL_IUD_SEMANTICS, "ON"), DDkwd__(HBASE_STATS_PARTITIONING, "ON"), DDkwd__(HBASE_TRANSFORM_UPDATE_TO_DELETE_INSERT, "OFF"), // If set to 'OFF' we get a stub cost of 1 for update operations. // We can remove this once the delete costing code has broader // exposure. This is 'OFF' at the moment because the update code // is only partially written. DDkwd__(HBASE_UPDATE_COSTING, "OFF"), DDkwd__(HBASE_UPDEL_CURSOR_OPT, "ON"), DDui___(HBASE_USE_FAKED_REGIONS, "0"), DD_____(HBASE_ZOOKEEPER_PORT, ""), DDui1__(HDFS_IO_BUFFERSIZE, "65536"), DDui___(HDFS_IO_BUFFERSIZE_BYTES, "0"), // The value 0 denotes RangeTail = max record length of table. DDui___(HDFS_IO_RANGE_TAIL, "0"), DDkwd__(HDFS_PREFETCH, "ON"), DDkwd__(HDFS_READ_CONTINUE_ON_ERROR, "OFF"), DDui1__(HDFS_REPLICATION, "1"), DDkwd__(HDFS_USE_CURSOR_MULTI, "OFF"), DDkwd__(HGB_BITMUX, "OFF"), DDflt0_(HGB_CPUCOST_INITIALIZE, "1."), DDflt0_(HGB_DP2_MEMORY_LIMIT, "10000."), DDflte_(HGB_GROUPING_FACTOR_FOR_SPILLED_CLUSTERS, ".5"), DDflte_(HGB_MAX_TABLE_SIZE_FOR_CLUSTERS, "4E5"), DDflte_(HGB_MEMORY_AVAILABLE_FOR_CLUSTERS, "10"), DDflte_(HH_OP_ALLOCATE_BUCKET_ARRAY, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTER, ".1"), DDflte_(HH_OP_ALLOCATE_CLUSTERDB, ".1"), DDflte_(HH_OP_ALLOCATE_HASH_TABLE, ".05"), DDflt1_(HH_OP_HASHED_ROW_OVERHEAD, "8."), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. 
// change // CPUCOST_HASH_PER_KEY // when changing this value // It takes around 2 seconds to insert 100,000 rows into the chain: // @ 0.00005 secs per k instr: // k instr= 2/0.00005/100000 = 0.4 // History: // Before 03/11/98: 0.4 // Initially: 0.01 DDflte_(HH_OP_INSERT_ROW_TO_CHAIN, "0.51"), // From observation: // 03/11/98: probing the hash table is very inexpensive, // thus reduce this to almost zero. // 01/05/98: 15,433 rows probed per second @ 0.00005 seconds // per thousand of instructions, give: // seconds to probe one row = 1/15,433 = 0.000064796 // This time includes: time to position and to compare. Thus // subtract the time to compare to arrive to the proper number: // thd. of instructions per row inserted = // 1/15,433/0.00005 - CPUCOST_COMPARE_SIMPLE_DATA_TYPE = // 1.2959 - 0.2 = 1.0959 // History: // Before 03/11/98: 1.0959 // Before 01/05/98: 0.01 DDflt0_(HH_OP_PROBE_HASH_TABLE, "0.011"), DDflt0_(HH_OP_READ_HASH_BUFFER, "0."), DDflt0_(HH_OP_WRITE_HASH_BUFFER, "0."), // Added 10/16/02 DDkwd__(HIDE_INDEXES, "NONE"), DDansi_(HISTOGRAMS_SCHEMA, ""), // ------------------------------------------------------------------------- // Histogram fudge factors // ------------------------------------------------------------------------- //HIST_BASE_REDUCTION and HIST_PREFETCH externalized 08/21/01 CR 10-010713-3895 DDkwd__(HIST_ASSUME_INDEPENDENT_REDUCTION, "ON"), XDDkwd__(HIST_AUTO_GENERATION_OF_SAMPLE, "OFF"), DDkwd__(HIST_BASE_REDUCTION, "ON"), DDflt0_(HIST_BASE_REDUCTION_FUDGE_FACTOR, "0.1"), DDflt0_(HIST_CONSTANT_ALPHA, "0.5"), DDflt_0_1(HIST_DEFAULT_BASE_SEL_FOR_LIKE_WILDCARD, "0.50"), DDui1__(HIST_DEFAULT_NUMBER_OF_INTERVALS, "50"), DDui1__(HIST_DEFAULT_SAMPLE_MAX, "1000000"), DDui1__(HIST_DEFAULT_SAMPLE_MIN, "10000"), DDflt_0_1(HIST_DEFAULT_SAMPLE_RATIO, "0.01"), DDflte_(HIST_DEFAULT_SEL_FOR_BOOLEAN, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_IS_NULL, "0.01"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_EQUAL, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_JOIN_RANGE, "0.3333"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_NO_WILDCARD,"1.0"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_LIKE_WILDCARD, "0.10"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_EQUAL, "0.01"), DDflt_0_1(HIST_DEFAULT_SEL_FOR_PRED_RANGE, "0.3333"), // control the amount of data in each partition of the persistent sample tble. DDflt1_(HIST_FETCHCOUNT_SCRATCH_VOL_THRESHOLD, "10240000"), DDkwd__(HIST_FREQ_VALS_NULL_FIX, "ON"), DDkwd__(HIST_INCLUDE_SKEW_FOR_NON_INNER_JOIN, "ON"), DDkwd__(HIST_INTERMEDIATE_REDUCTION, "OFF"), DDflt0_(HIST_INTERMEDIATE_REDUCTION_FUDGE_FACTOR, "0.25"), DDflt_0_1(HIST_JOIN_CARD_LOWBOUND, "1.0"), DDui1__(HIST_LOW_UEC_THRESHOLD, "55"), DDui1__(HIST_MAX_NUMBER_OF_INTERVALS, "10000"), DDkwd__(HIST_MC_STATS_NEEDED, "ON"), DDkwd__(HIST_MERGE_FREQ_VALS_FIX, "ON"), // Histogram min/max optimization: when the predicate is of form // T.A = MIN/MAX(S.B), replace the histogram(T.A) with // single_int_histogram(MIN/MAX(S.B)). Do this only when // there is no local predicate on S and there exists a frequent // value that is equals to MIN/MAX(S.B). DDkwd__(HIST_MIN_MAX_OPTIMIZATION, "ON"), // This CQD is used to control the number of missing stats warnings // that should be generated. // 0 ? Display no warnings. // 1 ? Display only missing single column stats warnings. These include 6008 and 6011 // 2 ? Display all single column missing stats warnings and // multi-column missing stats warnings for Scans only. // 3 ? 
Display all missing single column stats warnings and missing // multi-column stats warnings for Scans and Join operators only.. // 4 ? Display all missing single column stats and missing multi-column // stats warnings for all operators including Scans, Joins and GroupBys. // The CQD also does not have an impact on the auto update stats behavior. The stats will // still be automatically generated even if the warnings have been suppressed. // USTAT_AUTO_MISSING_STATS_LEVEL. // Default behavior is to generate all warnings XDDui___(HIST_MISSING_STATS_WARNING_LEVEL, "4"), DDflt1_(HIST_NO_STATS_ROWCOUNT, "100"), DDflt1_(HIST_NO_STATS_UEC, "2"), DDflt1_(HIST_NO_STATS_UEC_CHAR1, "10"), DDui1__(HIST_NUM_ADDITIONAL_DAYS_TO_EXTRAPOLATE, "4"), DDintN1__(HIST_ON_DEMAND_STATS_SIZE, "0"), DDui___(HIST_OPTIMISTIC_CARD_OPTIMIZATION, "1"), XDDkwd__(HIST_PREFETCH, "ON"), XDDkwd__(HIST_REMOVE_TRAILING_BLANKS, "ON"), // should remove after verifying code is solid DDansi_(HIST_ROOT_NODE, ""), XDDflt1_(HIST_ROWCOUNT_REQUIRING_STATS, "500"), DDflt0_(HIST_SAME_TABLE_PRED_REDUCTION, "0.0"), DD_____(HIST_SCRATCH_VOL, ""), // control the amount of data in each partition of the sample tble. DDflt1_(HIST_SCRATCH_VOL_THRESHOLD, "10240000"), DDflt_0_1(HIST_SKEW_COST_ADJUSTMENT, "0.2"), DDkwd__(HIST_SKIP_MC_FOR_NONKEY_JOIN_COLUMNS, "OFF"), DDui___(HIST_TUPLE_FREQVAL_LIST_THRESHOLD, "40"), DDkwd__(HIST_USE_HIGH_FREQUENCY_INFO, "ON"), XDDkwd__(HIST_USE_SAMPLE_FOR_CARDINALITY_ESTIMATION , "ON"), // CQDs for Trafodion on Hive // Main ones to use: // HIVE_MAX_STRING_LENGTH_IN_BYTES: Hive "string" data type gets converted // into a VARCHAR with this length // HIVE_MIN_BYTES_PER_ESP_PARTITION: Make one ESP for this many bytes // HIVE_NUM_ESPS_PER_DATANODE: Equivalent of MAX_ESPS_PER_CPU_PER_OP // Note that this is really per SeaQuest node DD_____(HIVE_CATALOG, ""), DDkwd__(HIVE_DATA_MOD_CHECK, "ON"), DDkwd__(HIVE_DEFAULT_CHARSET, (char *)SQLCHARSETSTRING_UTF8), DD_____(HIVE_DEFAULT_SCHEMA, "HIVE"), DD_____(HIVE_FILE_CHARSET, ""), DD_____(HIVE_FILE_NAME, "/hive/tpcds/customer/customer.dat" ), DD_____(HIVE_HDFS_STATS_LOG_FILE, ""), DDui___(HIVE_INSERT_ERROR_MODE, "1"), DDint__(HIVE_LIB_HDFS_PORT_OVERRIDE, "-1"), DDint__(HIVE_LOCALITY_BALANCE_LEVEL, "0"), DDui___(HIVE_MAX_ESPS, "9999"), DDui___(HIVE_MAX_STRING_LENGTH_IN_BYTES, "32000"), DDkwd__(HIVE_METADATA_JAVA_ACCESS, "ON"), DDint__(HIVE_METADATA_REFRESH_INTERVAL, "0"), DDflt0_(HIVE_MIN_BYTES_PER_ESP_PARTITION, "67108864"), DDkwd__(HIVE_NO_REGISTER_OBJECTS, "OFF"), DDui___(HIVE_NUM_ESPS_PER_DATANODE, "2"), DDpct__(HIVE_NUM_ESPS_ROUND_DEVIATION, "34"), DDint__(HIVE_SCAN_SPECIAL_MODE, "0"), DDkwd__(HIVE_SORT_HDFS_HOSTS, "ON"), DDkwd__(HIVE_USE_EXT_TABLE_ATTRS, "ON"), DD_____(HIVE_USE_FAKE_SQ_NODE_NAMES, "" ), DDkwd__(HIVE_USE_FAKE_TABLE_DESC, "OFF"), DDkwd__(HIVE_USE_HASH2_AS_PARTFUNCION, "ON"), DDkwd__(HIVE_VIEWS, "ON"), // ------------------------------------------------------------------------- DDui2__(HJ_BUFFER_SIZE, "32"), DDflt0_(HJ_CPUCOST_INITIALIZE, "1."), DDui1__(HJ_INITIAL_BUCKETS_PER_CLUSTER, "4."), DDkwd__(HJ_NEW_MCSB_PLAN, "OFF"), DDint__(HJ_SCAN_TO_NJ_PROBE_SPEED_RATIO, "2000"), DDkwd__(HJ_TYPE, "HYBRID"), DD_____(HP_ROUTINES_SCHEMA, "NEO.HP_ROUTINES"), // Must be in form <cat>.<sch> DDkwd__(HQC_CONVDOIT_DISABLE_NUMERIC_CHECK, "OFF"), DDkwd__(HQC_LOG, "OFF"), DD_____(HQC_LOG_FILE, ""), DDui1_10(HQC_MAX_VALUES_PER_KEY, "5"), DDkwd__(HYBRID_QUERY_CACHE, "ON"), DDkwd__(IF_LOCKED, "WAIT"), // ignore_duplicate_keys is no more valid. 
It is still // here as a dummy for compatibility with existing scripts. DDkwd__(IGNORE_DUPLICATE_KEYS, "SYSTEM"), // in mode_special_1, duplicate rows are ignored if inserting a row in the // base table which has a user-defined primary key. If this default is set // to OFF in mode_special_1, then duplicate rows are not ignored. // // If not in mode_special_1, and this default is ON, then duplicate rows // are ignored. DDkwd__(IGNORE_DUPLICATE_ROWS, "SYSTEM"), DDkwd__(IMPLICIT_DATETIME_INTERVAL_HOSTVAR_CONVERSION, "FALSE"), DDkwd__(IMPLICIT_HOSTVAR_CONVERSION, "FALSE"), // threshold for the number of rows inserted into a volatile/temp // table which will cause an automatic update stats. // -1 indicates do not upd stats. 0 indicates always upd stats. DDint__(IMPLICIT_UPD_STATS_THRESHOLD, "-1"), //"10000"), DDkwd__(INCORPORATE_SKEW_IN_COSTING, "ON"), DDkwd__(INDEX_ELIMINATION_LEVEL, "AGGRESSIVE"), DDui1__(INDEX_ELIMINATION_THRESHOLD, "50"), DDkwd__(INDEX_HINT_WARNINGS, "ON"), SDDkwd__(INFER_CHARSET, "OFF"), // UDF initial row cost CQDs DDui___(INITIAL_UDF_CPU_COST, "100"), DDui___(INITIAL_UDF_IO_COST, "1"), DDui___(INITIAL_UDF_MSG_COST, "2"), DDkwd__(INPUT_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), // SQLCHARSETSTRING_UTF8 XDDkwd__(INSERT_VSBB, "SYSTEM"), //10-040621-7139-begin //This CQD will allow the user to force the compiler to //choose an interactive access path, i.e., prefer an access path with //an index in it. If no such path is found, whichever access path is //available is chosen. DDkwd__(INTERACTIVE_ACCESS, "OFF"), //10-040621-7139-end DDkwd__(IN_MEMORY_OBJECT_DEFN, "OFF"), DDflte_(IO_SEEKS_INORDER_FACTOR, "0.10"), // History: // 3/11/99 Changed to zero because in large tables the read-ahead // seems negligible (and/or hard to simulate) // Before 3/11/99: 0.58 DDflt0_(IO_TRANSFER_COST_PREFETCH_MISSES_FRACTION, "0."), XDDkwd__(ISOLATION_LEVEL, "READ_COMMITTED"), XDDkwd__(ISOLATION_LEVEL_FOR_UPDATES, "NONE"), SDDkwd__(ISO_MAPPING, (char *)SQLCHARSETSTRING_ISO88591), DDkwd__(IS_DB_TRANSPORTER, "OFF"), DDkwd__(IS_SQLCI, "FALSE"), DDkwd__(IUD_NONAUDITED_INDEX_MAINT, "OFF"), DDkwd__(JDBC_PROCESS, "FALSE"), // Force the join order given by the user XDDkwd__(JOIN_ORDER_BY_USER, "OFF"), DDkwd__(KEYLESS_NESTED_JOINS, "OFF"), XDDkwd__(LAST0_MODE, "OFF"), DDansi_(LDAP_USERNAME, ""), // Disallow/Allow left joins in MultiJoin framework DDkwd__(LEFT_JOINS_SPOIL_JBB, "OFF"), DDkwd__(LIMIT_HBASE_SCAN_DOP, "OFF"), // if this default is set to ON, then the max precision of a numeric // expression (arithmetic, aggregate) is limited to MAX_NUMERIC_PRECISION // (= 18). If this is set to OFF, the default value, then the max precision // is computed based on the operands and the operation which could make the // result a software datatype (BIGNUM). Software datatypes give better // precision but degraded performance. SDDkwd__(LIMIT_MAX_NUMERIC_PRECISION, "SYSTEM"), // Size in bytes used to perform garbage collection of the lob data file. // Default size is 5 GB. Change to adjust disk usage. DDint__(LOB_GC_LIMIT_SIZE, "5000"), DDint__(LOB_HDFS_PORT, "0"), DD_____(LOB_HDFS_SERVER, "default"), // Size of memory in megabytes used to perform I/O to the lob data file. // Default size is 512 MB. Change to adjust memory usage. DDint__(LOB_MAX_CHUNK_MEM_SIZE, "512"), // default size is 10 G (10000 M) DDint__(LOB_MAX_SIZE, "10000"), // (unused) default size is 32000. Change this to extract more data into memory. DDui___(LOB_OUTPUT_SIZE, "32000"), DD_____(LOB_STORAGE_FILE_DIR, "/lobs"), // storage types defined in exp/ExpLOBenum.h.
// Default is hdfs_file (value = 1) DDint__(LOB_STORAGE_TYPE, "2"), //New default size for buffer size for local node DDui2__(LOCAL_MESSAGE_BUFFER_SIZE, "50"), DDansi_(MAINTAIN_CATALOG, "NEO"), // Set the maintain control table timeout to 5 minutes DDint__(MAINTAIN_CONTROL_TABLE_TIMEOUT, "30000"), DDint__(MAINTAIN_REORG_PRIORITY, "-1"), DDint__(MAINTAIN_REORG_PRIORITY_DELTA, "0"), DDint__(MAINTAIN_REORG_RATE, "40"), DDint__(MAINTAIN_REORG_SLACK, "0"), DDint__(MAINTAIN_UPD_STATS_SAMPLE, "-1"), DDkwd__(MARIAQUEST_PROCESS, "OFF"), DDSint__(MASTER_PRIORITY, "0"), DDSint__(MASTER_PRIORITY_DELTA, "0"), DDint__(MATCH_CONSTANTS_OF_EQUALITY_PREDICATES, "2"), DDui1__(MAX_ACCESS_NODES_PER_ESP, "1024"), // this is the default length of a param which is typed as a VARCHAR. DDui2__(MAX_CHAR_PARAM_DEFAULT_SIZE, "32"), DDint__(MAX_DEPTH_TO_CHECK_FOR_CYCLIC_PLAN, "1"), // default value of maximum dp2 groups for a hash-groupby DDui1__(MAX_DP2_HASHBY_GROUPS, "1000"), // // The max number of ESPs per cpu for a given operator. // i.e. this number times the number of available CPUs is "max pipelines". // // On Linux, "CPU" means cores. // DDflt__(MAX_ESPS_PER_CPU_PER_OP, "0.5"), DDui1__(MAX_EXPRS_USED_FOR_CONST_FOLDING, "1000"), // used in hash groupby costing in esp/master DDui1__(MAX_HEADER_ENTREIS_PER_HASH_TABLE, "250000"), DDui1__(MAX_LONG_VARCHAR_DEFAULT_SIZE, "2000"), DDui1__(MAX_LONG_WVARCHAR_DEFAULT_SIZE, "2000"), DD18_128(MAX_NUMERIC_PRECISION_ALLOWED, "128"), // The max number of vertical partitions for optimization to be done under // a VPJoin. DDui___(MAX_NUM_VERT_PARTS_FOR_OPT, "20"), DDui1__(MAX_ROWS_LOCKED_FOR_STABLE_ACCESS, "1"), // The max number of skewed values detected - skew buster DDui1__(MAX_SKEW_VALUES_DETECTED, "10000"), // multi-column skew inner table broadcast threashold in bytes (=1 MB) DDui___(MC_SKEW_INNER_BROADCAST_THRESHOLD, "1000000"), // multi-column skew sensitivity threshold // // For new MCSB (that is, we utilize MC skews directly), // apply the MC skew buster when // frequency of MC skews > MC_SKEW_SENSITIVITY_THRESHOLD / count_of_cpus // // For old MCSB (that is, we guess MC skews from SC skews), // apply the MC skew buster when // SFa,b... * countOfPipeline > MC_SKEW_SENSITIVITY_THRESHOLD // SFa,b ... is the skew factor for multi column a,b,... // XDDflt__(MC_SKEW_SENSITIVITY_THRESHOLD, "0.1"), DDui___(MDAM_APPLY_RESTRICTION_CHECK, "2"), DDflt0_(MDAM_CPUCOST_NET_OVH, "2000."), // The cost that takes to build the mdam network per predicate: // (we assume that the cost to build the mdam network is a linear function // of the key predicates) DDflt0_(MDAM_CPUCOST_NET_PER_PRED, ".5"), // controls the max. number of seek positions under which MDAM will be // allowed. Set it to 0 turns off the feature. XDDui___(MDAM_NO_STATS_POSITIONS_THRESHOLD, "10"), // MDAM_SCAN_METHOD ON means MDAM is enabled, // OFF means MDAM is disabled. MDAM is enabled by default // externalized 06/21/01 RV // mdam off on open source at this point XDDkwd__(MDAM_SCAN_METHOD, "ON"), DDflt0_(MDAM_SELECTION_DEFAULT, "0.5"), DDflt0_(MDAM_TOTAL_UEC_CHECK_MIN_RC_THRESHOLD, "10000"), DDflt0_(MDAM_TOTAL_UEC_CHECK_UEC_THRESHOLD, "0.2"), DDkwd__(MDAM_TRACING, "OFF"), // controls the max. number of probes at which MDAM under NJ plan will be // generated. Set it to 0 turns off the feature. XDDui___(MDAM_UNDER_NJ_PROBES_THRESHOLD, "0"), // controls the amount of penalty for CPU resource required that is // beyond the value specified by MDOP_CPUS_SOFT_LIMIT. 
The number of extra CPUs // actually allocated is computed as the original value divided by the CQD. // If the CQD is set to 1 (default), then there is no penalty. DDflt1_(MDOP_CPUS_PENALTY, "70"), // specify the limit beyond which the number of CPUs will be capped. DDui1__(MDOP_CPUS_SOFT_LIMIT, "64"), // controls the amount of penalty for CPU resource per memory unit // required that is beyond the value specified by MDOP_CPUS_SOFT_LIMIT. // The number of extra CPUs actually allocated is computed as the // original value divided by the CQD. DDflt1_(MDOP_MEMORY_PENALTY, "70"), // CQD to test/enforce heap memory upper limits // values are in KB DDui___(MEMORY_LIMIT_CMPCTXT_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_CMPSTMT_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_HISTCACHE_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_NATABLECACHE_UPPER_KB, "0"), DDui___(MEMORY_LIMIT_QCACHE_UPPER_KB, "0"), // SQL/MX Compiler/Optimizer Memory Monitor. DDkwd__(MEMORY_MONITOR, "OFF"), DDui1__(MEMORY_MONITOR_AFTER_TASKS, "30000"), DDkwd__(MEMORY_MONITOR_IN_DETAIL, "OFF"), DD_____(MEMORY_MONITOR_LOGFILE, "NONE"), DDkwd__(MEMORY_MONITOR_LOG_INSTANTLY, "OFF"), DDui1__(MEMORY_MONITOR_TASK_INTERVAL, "5000"), // Hash join currently uses 20 MB before it overflows, use this // as the limit DDui1__(MEMORY_UNITS_SIZE, "20480"), // amount of memory available per CPU for any query SDDflte_(MEMORY_UNIT_ESP, "300"), DDflt1_(MEMORY_USAGE_NICE_CONTEXT_FACTOR, "1"), DDflt1_(MEMORY_USAGE_OPT_PASS_FACTOR, "1.5"), DDui1__(MEMORY_USAGE_SAFETY_NET, "500"), // MERGE_JOINS ON means do MERGE_JOINS XDDkwd__(MERGE_JOINS, "ON"), DDkwd__(MERGE_JOIN_ACCEPT_MULTIPLE_NJ_PROBES, "ON"), DDkwd__(MERGE_JOIN_CONTROL, "OFF"), DDkwd__(MERGE_JOIN_WITH_POSSIBLE_DEADLOCK, "OFF"), // controls if merge/upsert is supported on a table with a unique index DDkwd__(MERGE_WITH_UNIQUE_INDEX, "ON"), SDDui___(METADATA_CACHE_SIZE, "20"), DDkwd__(METADATA_STABLE_ACCESS, "OFF"), //------------------------------------------------------------------- // Minimum ESP parallelism. If the user does not specify this value // (default value 0 does not change) then the number of segments // (totalNumCPUs/16, where totalNumCPUs=gpClusterInfo->numOfSMPs()) // will be used as the value of minimum ESP parallelism. If the user sets // this value, it should be an integer between 1 and totalNumCPUs.
In // this case actual value of minimum ESP parallelism will be // min(CDQ value, MDOP), where MDOP (maximum degree of parallelism) // is defined by adaptive segmentation //------------------------------------------------------------------- DDui___(MINIMUM_ESP_PARALLELISM, "0"), DDui1__(MIN_LONG_VARCHAR_DEFAULT_SIZE, "1"), DDui1__(MIN_LONG_WVARCHAR_DEFAULT_SIZE, "1"), DDkwd__(MIN_MAX_OPTIMIZATION, "ON"), DDpct__(MJ_BMO_QUOTA_PERCENT, "0"), DDflt0_(MJ_CPUCOST_ALLOCATE_LIST, ".05"), DDflt0_(MJ_CPUCOST_CLEAR_LIST, ".01"), DDflt0_(MJ_CPUCOST_GET_NEXT_ROW_FROM_LIST, ".01"), // calibrated 01/16/98: // 01/13/98 40000., this did not work with small tables // Before 01/13/98: 0.5 DDflt0_(MJ_CPUCOST_INITIALIZE, "1."), // Before 03/12/98: 0.4 // Before 01/13/98: 0.01 DDflt0_(MJ_CPUCOST_INSERT_ROW_TO_LIST, ".0001"), DDflt0_(MJ_CPUCOST_REWIND_LIST, ".01"), DDflte_(MJ_LIST_NODE_SIZE, ".01"), DDkwd__(MJ_OVERFLOW, "ON"), DDkwd__(MODE_SEABASE, "ON"), DDkwd__(MODE_SEAHIVE, "ON"), SDDkwd__(MODE_SPECIAL_1, "OFF"), DDkwd__(MODE_SPECIAL_4, "OFF"), DDflt0_(MSCF_CONCURRENCY_IO, "0.10"), DDflt0_(MSCF_CONCURRENCY_MSG, "0.10"), // Tests suggest that RELEASE is about 2.5 times faster than DEBUG // RELEASE is always faster than DEBUG code so this default must be // at least one. DDflt1_(MSCF_DEBUG_TO_RELEASE_MULTIPLIER, "2.5"), // MSCF_ET_CPU units are seconds/thousand of CPU instructions // History: // Before 02/01/99, the speed was calibrated for debug, now its is for // release: 0.00005 DDflte_(MSCF_ET_CPU, "0.000014"), // was 0.00002 12/2k // MSCF_ET_IO_TRANSFER units are seconds/Kb // History // Changed to '0.000455' to reflect new calibration data // Before 03/11/99 "0.000283" DDflte_(MSCF_ET_IO_TRANSFER, "0.00002"), // Assume time to transfer a KB of local message is 5 times // faster than the time to transfer a KB from disk // Units of MSCF_ET_LOCAL_MSG_TRANSFER are seconds/Kb DDflte_(MSCF_ET_LOCAL_MSG_TRANSFER, "0.000046"), // $$$ This should be removed. It is only used by preliminary costing // for the materialize operator, which should not be using it. DDflte_(MSCF_ET_NM_PAGE_FAULTS, "1"), // "?" used? // : for calibration on 04/08/2004 // Seek time will be derived from disk type. // MSCF_ET_NUM_IO_SEEKS units are seconds DDflte_(MSCF_ET_NUM_IO_SEEKS, "0.0038"), // Assume sending a local message takes 1000 cpu instructions DDflte_(MSCF_ET_NUM_LOCAL_MSGS, "0.000125"), // Assume sending a remote message takes 10000 cpu instructions // DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.00125"), // Change the number of instructions to encode a remote message to be // the same as the local message DDflte_(MSCF_ET_NUM_REMOTE_MSGS, "0.000125"), // Assume 1MB/second transfer rate for transferring remote message bytes // (Based on 10 Megabit/second Ethernet transfer rate) // MSCF_ET_REMOTE_MSG_TRANSFER units are kb/Sec // DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.001"), // the remote msg are 10% more costly than the local transfer // but also may depend on the physical link, so externalize it DDflte_(MSCF_ET_REMOTE_MSG_TRANSFER, "0.00005"), // ------------------------------------------------------------------------- // Factors used for estimating overlappability of I/O and messaging used // in the calculation for overlapped addition // Assume 50% overlap for now. 
// ------------------------------------------------------------------------- DDflte_(MSCF_OV_IO, "0.5"), DDflte_(MSCF_OV_IO_TRANSFER, "0.5"), DDflte_(MSCF_OV_LOCAL_MSG_TRANSFER, "0.5"), DDflte_(MSCF_OV_MSG, "0.5"), DDflte_(MSCF_OV_NUM_IO_SEEKS, "0.5"), DDflte_(MSCF_OV_NUM_LOCAL_MSGS, "0.5"), DDflte_(MSCF_OV_NUM_REMOTE_MSGS, "0.5"), DDflte_(MSCF_OV_REMOTE_MSG_TRANSFER, "0.5"), DDui___(MSCF_SYS_DISKS, "16"), // "?" used? DDui___(MSCF_SYS_MEMORY_PER_CPU, "1"), // "?" used? DDui___(MSCF_SYS_TEMP_SPACE_PER_DISK, "50"), // "?" used? DDkwd__(MTD_GENERATE_CC_PREDS, "ON"), DDint__(MTD_MDAM_NJ_UEC_THRESHOLD, "100"), // Allow for the setting of the row count in a long running operation XDDui1__(MULTI_COMMIT_SIZE, "10000"), // try the join order specified in the queries, this will cause the // enumeration of the initial join order specified by the user // among the join orders enumerated // ** This is currently OFF by default ** DDkwd__(MULTI_JOIN_CONSIDER_INITIAL_JOIN_ORDER, "OFF"), // used in JBBSubsetAnalysis::isAStarPattern for finding lowest cost // outer subtree for NJ into fact table. DDflt0_(MULTI_JOIN_PROBE_HASH_TABLE, "0.000001"), // threshold above which a query is considered complex // this only applies to queries that can be rewritten // as Multi Joins DDint__(MULTI_JOIN_QUERY_COMPLEXITY_THRESHOLD, "5120"), // threshold above which a query is considered to do // a lot of work his only applies to queries that can be // rewritten as Multi Joins DDflt__(MULTI_JOIN_QUERY_WORK_THRESHOLD, "0"), SDDint__(MULTI_JOIN_THRESHOLD, "3"), DDint__(MULTI_PASS_JOIN_ELIM_LIMIT, "5"), DDflt0_(MU_CPUCOST_INITIALIZE, ".05"), DDui___(MU_INITIAL_BUFFER_COUNT, "5."), DDflte_(MU_INITIAL_BUFFER_SIZE, "1033.7891"), //-------------------------------------------------------------------------- //++ MV XDDkwd__(MVGROUP_AUTOMATIC_CREATION, "ON"), DDkwd__(MVQR_ALL_JBBS_IN_QD, "OFF"), #ifdef NDEBUG DDkwd__(MVQR_ENABLE_LOGGING, "OFF"), // No logging by default for release #else DDkwd__(MVQR_ENABLE_LOGGING, "ON"), #endif DD_____(MVQR_FILENAME_PREFIX, "/usr/tandem/sqlmx/log"), DDkwd__(MVQR_LOG_QUERY_DESCRIPTORS, "OFF"), DDint__(MVQR_MAX_EXPR_DEPTH, "20"), DDint__(MVQR_MAX_EXPR_SIZE, "100"), DDint__(MVQR_MAX_MV_JOIN_SIZE, "10"), DDkwd__(MVQR_PARAMETERIZE_EQ_PRED, "ON"), DDkwd__(MVQR_PRIVATE_QMS_INIT, "SMD"), DDansi_(MVQR_PUBLISH_TABLE_LOCATION, ""), DDkwd__(MVQR_PUBLISH_TO, "BOTH"), DDansi_(MVQR_REWRITE_CANDIDATES, ""), XDDkwd__(MVQR_REWRITE_ENABLED_OPTION, "OFF"), // @ZX -- change to ON later XDDui0_5(MVQR_REWRITE_LEVEL, "0"), XDDkwd__(MVQR_REWRITE_SINGLE_TABLE_QUERIES, "ON"), DDkwd__(MVQR_USE_EXTRA_HUB_TABLES, "ON"), DDkwd__(MVQR_USE_RI_FOR_EXTRA_HUB_TABLES, "OFF"), DD_____(MVQR_WORKLOAD_ANALYSIS_MV_NAME, ""), XDDMVA__(MV_AGE, "0 MINUTES"), XDDkwd__(MV_ALLOW_SELECT_SYSTEM_ADDED_COLUMNS, "OFF"), DDkwd__(MV_AS_ROW_TRIGGER, "OFF"), DDkwd__(MV_AUTOMATIC_LOGGABLE_COLUMN_MAINTENANCE, "ON"), DDkwd__(MV_DUMP_DEBUG_INFO, "OFF"), DDkwd__(MV_ENABLE_INTERNAL_REFRESH_SHOWPLAN, "OFF"), DDui___(MV_LOG_CLEANUP_SAFETY_FACTOR, "200"), DDui___(MV_LOG_CLEANUP_USE_MULTI_COMMIT, "1"), SDDkwd__(MV_LOG_PUSH_DOWN_DP2_DELETE, "OFF"), // push down mv logging tp dp2 for delete SDDkwd__(MV_LOG_PUSH_DOWN_DP2_INSERT, "OFF"), // push down mv logging tp dp2 for insert SDDkwd__(MV_LOG_PUSH_DOWN_DP2_UPDATE, "ON"), // push down mv logging tp dp2 for update SDDui___(MV_REFRESH_MAX_PARALLELISM, "0"), DDui___(MV_REFRESH_MAX_PIPELINING, "0"), DDint__(MV_REFRESH_MDELTA_MAX_DELTAS_THRESHOLD, "31"), DDint__(MV_REFRESH_MDELTA_MAX_JOIN_SIZE_FOR_SINGLE_PHASE, "3"), 
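  // -------------------------------------------------------------------------
  // Illustrative note (not from the original calibration data): assuming the
  // fixed per-message cost and the per-KB transfer cost above simply add, and
  // ignoring the MSCF_OV_* overlap factors, shipping a 10 KB local message
  // would be estimated at roughly
  //   MSCF_ET_NUM_LOCAL_MSGS + 10 * MSCF_ET_LOCAL_MSG_TRANSFER
  //   = 0.000125 + 10 * 0.000046 = 0.000585 seconds.
  // The same arithmetic with the MSCF_ET_NUM_REMOTE_MSGS and
  // MSCF_ET_REMOTE_MSG_TRANSFER defaults gives
  //   0.000125 + 10 * 0.00005 = 0.000625 seconds for a remote message.
  // -------------------------------------------------------------------------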
DDint__(MV_REFRESH_MDELTA_MIN_JOIN_SIZE_FOR_SINGLE_PRODUCT_PHASE, "8"), DDint__(MV_REFRESH_MDELTA_PHASE_SIZE_FOR_MID_RANGE, "6"), DDkwd__(MV_TRACE_INCONSISTENCY, "OFF"), DDSint__(MXCMP_PRIORITY, "0"), DDSint__(MXCMP_PRIORITY_DELTA, "0"), DDkwd__(NAMETYPE, "ANSI"), DDkwd__(NAR_DEPOBJ_ENABLE, "ON"), DDkwd__(NAR_DEPOBJ_ENABLE2, "ON"), // NATIONAL_CHARSET reuses the "kwd" logic here, w/o having to add any // DF_ token constants (this can be considered either clever or kludgy coding). DDkwd__(NATIONAL_CHARSET, (char *)SQLCHARSETSTRING_UNICODE), // These CQDs are reserved for NCM. These are mostly used for // internal testing, turning on/off features for debugging, and for tuning. // In normal situations, these will not be externalized in keeping // with the very few CQDs philosophy of NCM. // These are applicable only in conjunction with SIMPLE_COST_MODEL 'on'. DDflt__(NCM_CACHE_SIZE_IN_BLOCKS, "52"), DDflt__(NCM_COSTLIMIT_FACTOR, "0.05"), //change to 0.05 DDint__(NCM_ESP_FIXUP_WEIGHT, "300"), DDkwd__(NCM_ESP_STARTUP_FIX, "ON"), DDflt__(NCM_EXCH_MERGE_FACTOR, "0.10"), // change to 0.10 DDkwd__(NCM_EXCH_NDCS_FIX, "ON"), // change to ON DDkwd__(NCM_HBASE_COSTING, "ON"), // change to ON DDkwd__(NCM_HGB_OVERFLOW_COSTING, "ON"), DDkwd__(NCM_HJ_OVERFLOW_COSTING, "ON"), DDflt__(NCM_IND_JOIN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_JOIN_SELECTIVITY, "1.0"), DDflt__(NCM_IND_SCAN_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_IND_SCAN_SELECTIVITY, "1.0"), DDflt__(NCM_MAP_CPU_FACTOR, "4.0"), DDflt__(NCM_MAP_MSG_FACTOR, "4.0"), DDflt__(NCM_MAP_RANDIO_FACTOR, "4.0"), DDflt__(NCM_MAP_SEQIO_FACTOR, "4.0"), DDflt__(NCM_MDAM_COST_ADJ_FACTOR, "1.0"), DDflt__(NCM_MJ_TO_HJ_FACTOR, "0.6"), DDflt__(NCM_NJ_PC_THRESHOLD, "1.0"), DDflt0_(NCM_NJ_PROBES_MAXCARD_FACTOR, "10000"), DDkwd__(NCM_NJ_SEQIO_FIX, "ON"), // change to ON DDint__(NCM_NUM_SORT_RUNS, "4"), DDflt__(NCM_OLTP_ET_THRESHOLD, "60.0"), DDflt__(NCM_PAR_ADJ_FACTOR, "0.10"), DDkwd__(NCM_PAR_GRPBY_ADJ, "ON"), DDkwd__(NCM_PRINT_ROWSIZE, "OFF"), DDflt__(NCM_RAND_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_RAND_IO_WEIGHT, "3258"), DDflt__(NCM_SEQ_IO_ROWSIZE_FACTOR, "0"), DDflt__(NCM_SEQ_IO_WEIGHT, "543"), DDflt__(NCM_SERIAL_NJ_FACTOR, "2"), DDflt__(NCM_SGB_TO_HGB_FACTOR, "0.8"), DDkwd__(NCM_SKEW_COST_ADJ_FOR_PROBES, "OFF"), DDkwd__(NCM_SORT_OVERFLOW_COSTING, "ON"), DDflt__(NCM_TUPLES_ROWSIZE_FACTOR, "0.5"), DDflt__(NCM_UDR_NANOSEC_FACTOR, "0.01"), DDkwd__(NCM_USE_HBASE_REGIONS, "ON"), // NESTED_JOINS ON means do NESTED_JOINS XDDkwd__(NESTED_JOINS, "ON"), // max. number of ESPs that will deal with skews for OCR // 0 means to turn off the feature DDintN1__(NESTED_JOINS_ANTISKEW_ESPS , "16"), DDkwd__(NESTED_JOINS_CHECK_LEADING_KEY_SKEW, "OFF"), DDkwd__(NESTED_JOINS_FULL_INNER_KEY, "OFF"), DDkwd__(NESTED_JOINS_KEYLESS_INNERJOINS, "ON"), DDui1__(NESTED_JOINS_LEADING_KEY_SKEW_THRESHOLD, "15"), DDkwd__(NESTED_JOINS_NO_NSQUARE_OPENS, "ON"), DDkwd__(NESTED_JOINS_OCR_GROUPING, "OFF"), // 128X32 being the default threshold for OCR. // 128 partitions per table and 32 ESPs per NJ operator SDDint__(NESTED_JOINS_OCR_MAXOPEN_THRESHOLD, "4096"), // PLAN0 is solely controlled by OCR. If this CQD is off, then // PLAN0 is off unconditionally. This CQD is used by OCR unit test. DDkwd__(NESTED_JOINS_PLAN0, "ON"), // try the explicit sort plan when plan2 produces a non-sort plan DDkwd__(NESTED_JOINS_PLAN3_TRY_SORT, "ON"), // Enable caching for eligible nested joins - see NestedJoin::preCodeGen. 
DDkwd__(NESTED_JOIN_CACHE, "ON"), // Enable pulling up of predicates into probe cache DDkwd__(NESTED_JOIN_CACHE_PREDS, "ON"), // Nested Join Heuristic DDkwd__(NESTED_JOIN_CONTROL, "ON"), // Allow nested join for cross products DDkwd__(NESTED_JOIN_FOR_CROSS_PRODUCTS, "ON"), DDkwd__(NEW_MDAM, "ON"), DDkwd__(NEW_OPT_DRIVER, "ON"), // Ansi name of the next DEFAULTS table to read in. // Contains blanks, or the name of a DEFAULTS table to read values from next, // after reading all values from this DEFAULTS table. The name may contain // format strings of '%d' and '%u', which are replaced with the domain name // and user name, respectively, of the current user. The name may begin with // '$', in which it is replaced by its value as a SYSTEM environment variable. // This value in turn may contain '%d' and '%u' formats. When these // replacements are complete, the resulting name is qualified by the current // default catalog and schema, if necessary, and the resulting three-part ANSI // table's default values are read in. This table may contain another // NEXT_DEFAULTS_TABLE value, and different default CATALOG and // SCHEMA values to qualify the resulting table name, and so on, allowing a // chain of tables to be read; combined with the format and environment // variable replacements, this allows per-domain, per-system, and per-user // customization of SQL/MX default values. DDansi_(NEXT_DEFAULTS_TABLE, ""), DDui1__(NEXT_VALUE_FOR_BUFFER_SIZE, "10240"), DDui1__(NEXT_VALUE_FOR_NUM_BUFFERS, "3"), DDui1__(NEXT_VALUE_FOR_SIZE_DOWN, "4"), DDui1__(NEXT_VALUE_FOR_SIZE_UP, "2048"), DDflt0_(NJ_CPUCOST_INITIALIZE, ".1"), DDflt0_(NJ_CPUCOST_PASS_ROW, ".02"), DDflte_(NJ_INC_AFTERLIMIT, "0.0055"), DDflte_(NJ_INC_MOVEROWS, "0.0015"), DDflte_(NJ_INC_UPTOLIMIT, "0.0225"), DDui___(NJ_INITIAL_BUFFER_COUNT, "5"), DDui1__(NJ_INITIAL_BUFFER_SIZE, "5"), DDui1__(NJ_MAX_SEEK_DISTANCE, "5000"), // UDF costing CQDs for processing a steady state row DDui___(NORMAL_UDF_CPU_COST, "100"), DDui___(NORMAL_UDF_IO_COST, "0"), DDui___(NORMAL_UDF_MSG_COST, "2"), XDDui30_32000(NOT_ATOMIC_FAILURE_LIMIT, "32000"), //NOT IN ANSI NULL semantics rule DDkwd__(NOT_IN_ANSI_NULL_SEMANTICS, "ON"), //NOT IN optimization DDkwd__(NOT_IN_OPTIMIZATION, "ON"), //NOT IN outer column optimization DDkwd__(NOT_IN_OUTER_OPTIMIZATION, "ON"), // NOT IN skew buster optimization DDkwd__(NOT_IN_SKEW_BUSTER_OPTIMIZATION, "ON"), DDkwd__(NOT_NULL_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(NOWAITED_FIXUP_MESSAGE_TO_DP2, "OFF"), // NSK DEBUG defaults DDansi_(NSK_DBG, "OFF"), DDansi_(NSK_DBG_COMPILE_INSTANCE, "USER"), DDkwd__(NSK_DBG_GENERIC, "OFF"), DDansi_(NSK_DBG_LOG_FILE, ""), DDkwd__(NSK_DBG_MJRULES_TRACKING, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_INPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CHAR_OUTPUT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONSTRAINT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT, "OFF"), DDkwd__(NSK_DBG_PRINT_CONTEXT_POINTER, "OFF"), DDkwd__(NSK_DBG_PRINT_COST, "OFF"), DDkwd__(NSK_DBG_PRINT_COST_LIMIT, "OFF"), DDkwd__(NSK_DBG_PRINT_INDEX_ELIMINATION, "OFF"), DDkwd__(NSK_DBG_PRINT_ITEM_EXPR, "OFF"), DDkwd__(NSK_DBG_PRINT_LOG_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_PHYS_PROP, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK, "OFF"), DDkwd__(NSK_DBG_PRINT_TASK_STACK, "OFF"), DDkwd__(NSK_DBG_QUERY_LOGGING_ONLY, "OFF"), DDansi_(NSK_DBG_QUERY_PREFIX, ""), DDkwd__(NSK_DBG_SHOW_PASS1_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PASS2_PLAN, "OFF"), DDkwd__(NSK_DBG_SHOW_PLAN_LOG, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_ANALYSIS, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_BINDING, "OFF"), 
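  // -------------------------------------------------------------------------
  // Illustrative example for NEXT_DEFAULTS_TABLE above (the names used here
  // are hypothetical, not part of the product): if the attribute is set to
  // '$MYDEFS' and the SYSTEM environment variable MYDEFS contains
  // 'DEFAULTS_%d_%u', then for domain 'PROD' and user 'SQLUSER' the name
  // resolves to 'DEFAULTS_PROD_SQLUSER'; that name is then qualified with the
  // current default catalog and schema before its default values are read in,
  // and may itself name yet another NEXT_DEFAULTS_TABLE in a chain.
  // -------------------------------------------------------------------------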
DDkwd__(NSK_DBG_SHOW_TREE_AFTER_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_NORMALIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PARSING, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_PRE_CODEGEN, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_SEMANTIC_QUERY_OPTIMIZATION, "OFF"), DDkwd__(NSK_DBG_SHOW_TREE_AFTER_TRANSFORMATION, "OFF"), DDkwd__(NSK_DBG_STRATEGIZER, "OFF"), DDflt0_(NUMBER_OF_PARTITIONS_DEVIATION, "0.25"), DDui1__(NUMBER_OF_ROWS_PARALLEL_THRESHOLD, "5000"), DDui1__(NUMBER_OF_USERS, "1"), DDui1__(NUM_OF_BLOCKS_PER_ACCESS, "SYSTEM"), DDflt0_(NUM_OF_PARTS_DEVIATION_TYPE2_JOINS, "SYSTEM"), DDkwd__(NVCI_PROCESS, "FALSE"), DDflt0_(OCB_COST_ADJSTFCTR, "0.996"), DDui___(OCR_FOR_SIDETREE_INSERT, "1"), DDkwd__(ODBC_METADATA_PROCESS, "FALSE"), DDkwd__(ODBC_PROCESS, "FALSE"), DDflte_(OHJ_BMO_REUSE_SORTED_BMOFACTOR_LIMIT, "3.0"), DDflte_(OHJ_BMO_REUSE_SORTED_UECRATIO_UPPERLIMIT, "0.7"), DDflte_(OHJ_BMO_REUSE_UNSORTED_UECRATIO_UPPERLIMIT, "0.01"), DDflte_(OHJ_VBMOLIMIT, "5.0"), DDui1__(OLAP_BUFFER_SIZE, "262144"), // Do not alter (goes to DP2) DDkwd__(OLAP_CAN_INVERSE_ORDER, "ON"), DDui1__(OLAP_MAX_FIXED_WINDOW_EXTRA_BUFFERS, "2"), DDui1__(OLAP_MAX_FIXED_WINDOW_FRAME, "50000"), DDui1__(OLAP_MAX_NUMBER_OF_BUFFERS, "100000"), DDui___(OLAP_MAX_ROWS_IN_OLAP_BUFFER, "0"), //aplies for fixed window-- number of additional oplap buffers //to allocate on top of the minumum numbers DDkwd__(OLD_HASH2_GROUPING, "FALSE"), DDkwd__(OLT_QUERY_OPT, "ON"), DDkwd__(OLT_QUERY_OPT_LEAN, "OFF"), // ----------------------------------------------------------------------- // Optimizer pruning heuristics. // ----------------------------------------------------------------------- DDkwd__(OPH_EXITHJCRCONTCHILOOP, "ON"), DDkwd__(OPH_EXITMJCRCONTCHILOOP, "ON"), DDkwd__(OPH_EXITNJCRCONTCHILOOP, "OFF"), DDkwd__(OPH_PRUNE_WHEN_COST_LIMIT_EXCEEDED, "OFF"), DDflt__(OPH_PRUNING_COMPLEXITY_THRESHOLD, "10.0"), DDflt__(OPH_PRUNING_PASS2_COST_LIMIT, "-1.0"), DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_CANDIDATES, "OFF"), DDkwd__(OPH_REDUCE_COST_LIMIT_FROM_PASS1_SOLUTION, "ON"), DDkwd__(OPH_REUSE_FAILED_PLAN, "ON"), DDkwd__(OPH_REUSE_OPERATOR_COST, "OFF"), DDkwd__(OPH_SKIP_OGT_FOR_SHARED_GC_FAILED_CL, "OFF"), DDkwd__(OPH_USE_CACHED_ELAPSED_TIME, "ON"), DDkwd__(OPH_USE_CANDIDATE_PLANS, "OFF"), DDkwd__(OPH_USE_COMPARE_COST_THRESHOLD, "ON"), DDkwd__(OPH_USE_CONSERVATIVE_COST_LIMIT, "OFF"), DDkwd__(OPH_USE_ENFORCER_PLAN_PROMOTION, "OFF"), DDkwd__(OPH_USE_FAILED_PLAN_COST, "ON"), DDkwd__(OPH_USE_NICE_CONTEXT, "OFF"), DDkwd__(OPH_USE_ORDERED_MJ_PRED, "OFF"), DDkwd__(OPH_USE_PWS_FLAG_FOR_CONTEXT, "OFF"), XDDui___(OPI_ERROR73_RETRIES, "10"), DDflt__(OPTIMIZATION_BUDGET_FACTOR, "5000"), DDkwd__(OPTIMIZATION_GOAL, "LASTROW"), XDDkwd__(OPTIMIZATION_LEVEL, "3"), DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_1, "50"), DDpct__(OPTIMIZATION_LEVEL_1_CONSTANT_2, "0"), DDui1__(OPTIMIZATION_LEVEL_1_IMMUNITY_LIMIT, "5000"), DDui1__(OPTIMIZATION_LEVEL_1_MJENUM_LIMIT, "20"), DDui1__(OPTIMIZATION_LEVEL_1_SAFETY_NET, "30000"), DDflt__(OPTIMIZATION_LEVEL_1_SAFETY_NET_MULTIPLE, "3.0"), DDui1__(OPTIMIZATION_LEVEL_1_THRESHOLD, "1000"), DDui1__(OPTIMIZATION_TASKS_LIMIT, "2000000000"), DDui1__(OPTIMIZATION_TASK_CAP, "30000"), // Optimizer Graceful Termination: // 1=> randomProbabilistic pruning // > 1 pruning based on potential DDui1__(OPTIMIZER_GRACEFUL_TERMINATION, "2"), DDkwd__(OPTIMIZER_HEURISTIC_1, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_2, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_3, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_4, "OFF"), DDkwd__(OPTIMIZER_HEURISTIC_5, "OFF"), // Tells the 
compiler to print costing information DDkwd__(OPTIMIZER_PRINT_COST, "OFF"), // Tells the compiler to issue a warning with its internal counters DDkwd__(OPTIMIZER_PRINT_INTERNAL_COUNTERS, "OFF"), // Pruning is OFF because of bugs, turn to ON when bugs are fixed // (03/03/98) SDDkwd__(OPTIMIZER_PRUNING, "ON"), DDkwd__(OPTIMIZER_PRUNING_FIX_1, "ON"), //change to ON DDkwd__(OPTIMIZER_SYNTH_FUNC_DEPENDENCIES, "ON"), //OPTS_PUSH_DOWN_DAM made external RV 06/21/01 CR 10-010425-2440 DDui___(OPTS_PUSH_DOWN_DAM, "0"), DDkwd__(ORDERED_HASH_JOIN_CONTROL, "ON"), SDDkwd__(OR_OPTIMIZATION, "ON"), DDkwd__(OR_PRED_ADD_BLOCK_TO_IN_LIST, "ON"), DDkwd__(OR_PRED_KEEP_CAST_VC_UCS2, "ON"), // controls the jump table method of evaluating an or pred. in a scan node // 0 => feature is OFF, positive integer denotes max OR pred that will be // processed through a jump table. DDint__(OR_PRED_TO_JUMPTABLE, "2000"), // controls semijoin method of evaluating an or pred. // 0 => feature is OFF, positive number means if pred do not cover key cols // and jump table is not available, then the transformation is done if // inlist is larger than this value. DDint__(OR_PRED_TO_SEMIJOIN, "100"), // Ratio of tablesize (without application of any preds)to probes below // which semijoin trans. is favoured. DDflt0_(OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO, "0.001"), // Minimum table size beyond which semijoin trans. is considered DDint__(OR_PRED_TO_SEMIJOIN_TABLE_MIN_SIZE, "10000"), // The Optimizer Simulator (OSIM) CQDs DDkwd__(OSIM_USE_POS, "OFF"), DDint__(OSIM_USE_POS_DISK_SIZE_GB, "0"), DD_____(OSIM_USE_POS_NODE_NAMES, ""), DDui2__(OS_MESSAGE_BUFFER_SIZE, "32"), // if set to "ansi", datetime output is in ansi format. Currently only // used in special_1 mode if the caller needs datetime value in // ansi format (like, during upd stats). DDansi_(OUTPUT_DATE_FORMAT, ""), // Overflow mode for scratch files DDkwd__(OVERFLOW_MODE, "DISK"), // Sequence generator override identity values DDkwd__(OVERRIDE_GENERATED_IDENTITY_VALUES, "OFF"), // allow users to specify a source schema to be // replaced by a target schema SDDosch_(OVERRIDE_SCHEMA, ""), // Allows users to specify their own SYSKEY value. In other words // the system does not generate one for them. // Prior to this CQD, pm_regenerate_syskey_for_insert was being used // to preserve the syskey. Carrying over these comments from // pm_regenerate_syskey_for_insert // For audited target partition, PM does the copy in multiple transactions // In each transaction PM does a insert/select from the source to the target // partition. The clustering key values from the last row of a transaction // is used as begin key value for the next transaction. If the table // has a syskey then it gets regenerated and last row contains the new // value for the syskey. This obviously causes us to start at a different // place then we intended to start from. The following default when set // to off forces the engine to not regenerate syskey. DDkwd__(OVERRIDE_SYSKEY, "OFF"), DDui___(PARALLEL_ESP_NODEMASK, "0"), // by default all parallelism heuristics are switched ON. DDkwd__(PARALLEL_HEURISTIC_1, "ON"), DDkwd__(PARALLEL_HEURISTIC_2, "ON"), DDkwd__(PARALLEL_HEURISTIC_3, "ON"), DDkwd__(PARALLEL_HEURISTIC_4, "ON"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs. XDDui1__(PARALLEL_NUM_ESPS, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel ddl // operations. 
DDui1__(PARALLEL_NUM_ESPS_DDL, "SYSTEM"), // If PARALLEL_NUM_ESPS is "SYSTEM", // optimizer will compute the number of ESPs to be used for parallel purgedata // operation. DDui1__(PARALLEL_NUM_ESPS_PD, "SYSTEM"), // is partial sort applicable; if so adjust sort cost accordingly DDflt0_(PARTIAL_SORT_ADJST_FCTR, "1"), DDint__(PARTITIONING_SCHEME_SHARING, "1"), // The optimal number of partition access nodes for a process. // NOTE: Setting this to anything other than 1 will cause problems // with Cascades plan stealing! Don't do it unless you have to! DDui1__(PARTITION_ACCESS_NODES_PER_ESP, "1"), DD_____(PCODE_DEBUG_LOGDIR, "" ), // Pathname of log directory for PCode work DDint__(PCODE_EXPR_CACHE_CMP_ONLY, "0" ), // PCode Expr Cache compare-only mode DDint__(PCODE_EXPR_CACHE_DEBUG, "0" ), // PCode Expr Cache debug (set to 1 to enable dbg logging) DDint__(PCODE_EXPR_CACHE_ENABLED, "1" ), // PCode Expr Cache Enabled (set to 0 to disable the cache) DD0_10485760(PCODE_EXPR_CACHE_SIZE,"2000000"), // PCode Expr Cache Max Size // Maximum number of PCODE Branch Instructions in an Expr // for which we will attempt PCODE optimizations. // NOTE: Default value reduced to 12000 for Trafodion to avoid stack // overflow in PCODE optimization where recursion is used. DDint__(PCODE_MAX_OPT_BRANCH_CNT, "12000"), // Maximum number of PCODE Instructions in an Expr // for which we will attempt PCODE optimizations. DDint__(PCODE_MAX_OPT_INST_CNT, "50000"), DDint__(PCODE_NE_DBG_LEVEL, "-1"), // Native Expression Debug Level DDint__(PCODE_NE_ENABLED, "1" ), // Native Expressions Enabled DDkwd__(PCODE_NE_IN_SHOWPLAN, "ON"), // Native Expression in Showplan output // This PCODE_NE_LOG_PATH cqd is now obsolete. Use PCODE_DEBUG_LOGDIR instead. // Would delete the following line except that would also mean deleting the // corresponding line in DefaultConstants.h which would change the values for // the following definitions in the same enum. DD_____(PCODE_NE_LOG_PATH, "" ), // Pathname of log file for Native Expression work - OBSOLETE DDint__(PCODE_OPT_FLAGS, "60"), DDkwd__(PCODE_OPT_LEVEL, "MAXIMUM"), DDint__(PHY_MEM_CONTINGENCY_MB, "3072"), DDkwd__(PLAN_STEALING, "ON"), DDui50_4194303(PM_OFFLINE_TRANSACTION_GRANULARITY, "5000"), DDui50_4194303(PM_ONLINE_TRANSACTION_GRANULARITY, "400"), // Not in use anymore. OVERRIDE_SYSKEY is used instead. 
DDkwd__(PM_REGENERATE_SYSKEY_FOR_INSERT, "ON"), // Partition OVerlay Support (POS) options SDDkwd__(POS, "DISK_POOL"), XDDpos__(POS_ABSOLUTE_MAX_TABLE_SIZE, ""), DDkwd__(POS_ALLOW_NON_PK_TABLES, "OFF"), DDui___(POS_CPUS_PER_SEGMENT, "16"), // default to 300 GB DDui___(POS_DEFAULT_LARGEST_DISK_SIZE_GB, "300"), // default to 72GB DDui___(POS_DEFAULT_SMALLEST_DISK_SIZE_GB, "72"), DD_____(POS_DISKS_IN_SEGMENT, ""), DD_____(POS_DISK_POOL, "0"), DD_____(POS_FILE_OPTIONS, ""), DD_____(POS_LOCATIONS, ""), DDkwd__(POS_MAP_HASH_TO_HASH2, "ON"), DDpos__(POS_MAX_EXTENTS, ""), SDDui___(POS_NUM_DISK_POOLS, "0"), DDui___(POS_NUM_OF_PARTNS, "SYSTEM"), SDDint__(POS_NUM_OF_TEMP_TABLE_PARTNS, "SYSTEM"), SDDpos__(POS_PRI_EXT_SIZE, "25"), DDkwd__(POS_RAISE_ERROR, "OFF"), SDDpos__(POS_SEC_EXT_SIZE, ""), SDDpos__(POS_TABLE_SIZE, ""), SDDpct__(POS_TEMP_TABLE_FREESPACE_THRESHOLD_PERCENT, "0"), DD_____(POS_TEMP_TABLE_LOCATIONS, ""), SDDpos__(POS_TEMP_TABLE_SIZE, ""), DDkwd__(POS_TEST_MODE, "OFF"), DDui___(POS_TEST_NUM_NODES, "0"), DDui___(POS_TEST_NUM_VOLUMES_PER_NODE, "0"), // Use info from right child to require order on left child of NJ //PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN made external RV 06/21/01 CR 10-010425-2440 DDkwd__(PREFERRED_PROBING_ORDER_FOR_NESTED_JOIN, "OFF"), DD0_18(PRESERVE_MIN_SCALE, "0"), DDkwd__(PRIMARY_KEY_CONSTRAINT_DROPPABLE_OPTION, "OFF"), DDkwd__(PSHOLD_CLOSE_ON_ROLLBACK, "OFF"), DDkwd__(PSHOLD_UPDATE_BEFORE_FETCH, "OFF"), SDDpsch_(PUBLIC_SCHEMA_NAME, ""), XDDrlis_(PUBLISHING_ROLES, ""), DDkwd__(PURGEDATA_WITH_OFFLINE_TABLE, "OFF"), // Query Invalidation - Debug/Regression test CQDs -- DO NOT externalize these DD_____(QI_PATH, "" ), // Specifies cat.sch.object path for object to have cache entries removed DD0_255(QI_PRIV, "0"), // Note: 0 disables the Debug Mechanism. Set non-zero to kick out cache entries. // Then set back to 0 *before* setting to a non-zero value again. // Do the query analysis phase DDkwd__(QUERY_ANALYSIS, "ON"), // query_cache max should be 200 MB. Set it 0 to turn off query cache //XDD0_200000(QUERY_CACHE, "0"), XDD0_200000(QUERY_CACHE, "16384"), // the initial average plan size (in kbytes) to use for configuring the // number of hash buckets to use for mxcmp's hash table of cached plans DD1_200000(QUERY_CACHE_AVERAGE_PLAN_SIZE, "30"), // literals longer than this are not parameterized DDui___(QUERY_CACHE_MAX_CHAR_LEN, "32000"), // a query with more than QUERY_CACHE_MAX_EXPRS ExprNodes is not cacheable DDint__(QUERY_CACHE_MAX_EXPRS, "1000"), // the largest number of cache entries that an unusually large cache // entry is allowed to displace from mxcmp's cache of query plans DD0_200000(QUERY_CACHE_MAX_VICTIMS, "10"), DDkwd__(QUERY_CACHE_MPALIAS, "OFF"), DD0_255(QUERY_CACHE_REQUIRED_PREFIX_KEYS, "255"), DDkwd__(QUERY_CACHE_RUNTIME, "ON"), SDDflt0_(QUERY_CACHE_SELECTIVITY_TOLERANCE, "0"), // query cache statement pinning is off by default DDkwd__(QUERY_CACHE_STATEMENT_PINNING, "OFF"), DDkwd__(QUERY_CACHE_STATISTICS, "OFF"), DD_____(QUERY_CACHE_STATISTICS_FILE, "qcachsts"), DDkwd__(QUERY_CACHE_TABLENAME, "OFF"), DDkwd__(QUERY_CACHE_USE_CONVDOIT_FOR_BACKPATCH, "ON"), // Limit CPU time a query can use in master or any ESP. Unit is seconds. XDDint__(QUERY_LIMIT_SQL_PROCESS_CPU, "0"), // Extra debugging info for QUERY_LIMIT feature. DDkwd__(QUERY_LIMIT_SQL_PROCESS_CPU_DEBUG, "OFF"), // How many iterations in scheduler subtask list before evaluating limits. DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_DP2_FREQ, "16"), // For X-prod HJ: (# of rows joined * LIMIT) before preempt. 
DDint__(QUERY_LIMIT_SQL_PROCESS_CPU_XPROD, "10000"), // controls various expr optimizations based on bit flags. // see enum QueryOptimizationOptions in DefaultConstants.h DDint__(QUERY_OPTIMIZATION_OPTIONS, "3"), DDkwd__(QUERY_STRATEGIZER, "ON"), DDflt0_(QUERY_STRATEGIZER_2N_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_EXHAUSTIVE_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N2_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N3_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N4_COMPLEXITY_FACTOR, "1"), DDflt0_(QUERY_STRATEGIZER_N_COMPLEXITY_FACTOR, "1"), DDkwd__(QUERY_TEMPLATE_CACHE, "ON"), DDkwd__(QUERY_TEXT_CACHE, "SYSTEM"), DDkwd__(R2_HALLOWEEN_SUPPORT, "OFF"), DDkwd__(RANGESPEC_TRANSFORMATION, "ON"), // RangeSpec Transformation CQD. // To be ANSI compliant you would have to set this default to 'FALSE' DDkwd__(READONLY_CURSOR, "TRUE"), // ReadTableDef compares transactional identifiers during endTransaction() processing DDkwd__(READTABLEDEF_TRANSACTION_ASSERT, "OFF"), DDkwd__(READTABLEDEF_TRANSACTION_ENABLE_WARNINGS, "OFF"), DDint__(READTABLEDEF_TRANSACTION_TESTPOINT, "0"), DDflt0_(READ_AHEAD_MAX_BLOCKS, "16.0"), // OFF means Ansi/NIST setting, ON is more similar to the SQL/MP behavior DDkwd__(RECOMPILATION_WARNINGS, "OFF"), // CLI caller to redrive CTAS(create table as) for child query monitoring DDkwd__(REDRIVE_CTAS, "OFF"), // The group by reduction for pushing a partial group by past the // right side of the TSJ must be at least this much. If 0.0, then // pushing it will always be tried. DDflt0_(REDUCTION_TO_PUSH_GB_PAST_TSJ, "0.0000000001"), // This is the code base for the calibration machine. It must be either // "DEBUG" or "RELEASE" // History: // Before 02/01/99: DEBUG DDkwd__(REFERENCE_CODE, "RELEASE"), // This is the frequency of the representative CPU of the base calibration // cluster. // REFERENCE_CPU_FREQUENCY units are MhZ DDflte_(REFERENCE_CPU_FREQUENCY, "199."), // This is the seek time of the representative disk of the base // calibration cluster. // REFERENCE_IO_SEEK_TIME units are seconds DDflte_(REFERENCE_IO_SEEK_TIME, "0.0038"), // This is the sequential transfer rate for the representative // disk of the base calibration cluster. // REFERENCE_IO_SEQ_READ_RATE units are Mb/Sec DDflte_(REFERENCE_IO_SEQ_READ_RATE, "50.0"), // This is the transfer rate for the fast speed connection of // nodes in the base calibration cluster. // REFERENCE_MSG_LOCAL_RATE units are Mb/Sec DDflte_(REFERENCE_MSG_LOCAL_RATE, "10."), // This is the timeper local msg for the fast speed connection of // nodes in the base calibration cluster. // REFERENCE_MSG_LOCAL_TIME units are seconds DDflte_(REFERENCE_MSG_LOCAL_TIME, "0.000125"), // This is the transfer rate for the connection among clusters // in the base calibration cluster (this only applies to NSK) // REFERENCE_MSG_REMOTE_RATE units are Mb/Sec DDflte_(REFERENCE_MSG_REMOTE_RATE, "1."), // This is the time per remote msg for the fast speed connection of // nodes in the base calibration cluster. 
// REFERENCE_MSG_REMOTE_TIME units are seconds DDflte_(REFERENCE_MSG_REMOTE_TIME, "0.00125"), DDkwd__(REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT, "SYSTEM"), DDkwd__(REMOTE_ESP_ALLOCATION, "SYSTEM"), DDkwd__(REORG_IF_NEEDED, "OFF"), DDkwd__(REORG_VERIFY, "OFF"), DDrlis_(REPLICATE_ALLOW_ROLES, ""), // Determines the compression type to be used with DDL when replicating DDkwd__(REPLICATE_COMPRESSION_TYPE, "SYSTEM"), // Determines if DISK POOL setting should be passed with DDL when replicating DDkwd__(REPLICATE_DISK_POOL, "ON"), // Display a BDR-internally-generated command before executing it DDkwd__(REPLICATE_DISPLAY_INTERNAL_CMD, "OFF"), // Executing commands generated internally by BDR DDkwd__(REPLICATE_EXEC_INTERNAL_CMD, "OFF"), // VERSION of the message from the source system to maintain compatibility // This version should be same as REPL_IO_VERSION_CURR in executor/ExeReplInterface.h // Make changes accordingly in validataorReplIoVersion validator DDrver_(REPLICATE_IO_VERSION, "17"), DDansi_(REPLICATE_MANAGEABILITY_CATALOG, "MANAGEABILITY"), // max num of retries after replicate server(mxbdrdrc) returns an error DDui___(REPLICATE_NUM_RETRIES, "0"), DDansi_(REPLICATE_TEST_TARGET_CATALOG, ""), DDansi_(REPLICATE_TEST_TARGET_MANAGEABILITY_CATALOG, ""), DDkwd__(REPLICATE_WARNINGS, "OFF"), DDkwd__(RETURN_AVG_STREAM_WAIT, "OFF"), DDkwd__(REUSE_BASIC_COST, "ON"), // if set, tables are not closed at the end of a query. This allows // the same open to be reused for the next query which accesses that // table. // If the table is shared opened by multiple openers from the same // process, then the share count is decremented until it reaches 1. // At that time, the last open is preserved so it could be reused. // Tables are closed if user id changes. DDkwd__(REUSE_OPENS, "ON"), // multiplicative factor used to inflate cost of risky operators. // = 1.0 means do not demand an insurance premium from risky operators. // = 1.2 means demand a 20% insurance premium that cost of risky operators // must overcome before they will be chosen over less-risky operators. 
DDflt0_(RISK_PREMIUM_MJ, "1.15"), XDDflt0_(RISK_PREMIUM_NJ, "1.0"), XDDflt0_(RISK_PREMIUM_SERIAL, "1.0"), XDDui___(RISK_PREMIUM_SERIAL_SCALEBACK_MAXCARD_THRESHOLD, "10000"), DDflt0_(ROBUST_HJ_TO_NJ_FUDGE_FACTOR, "0.0"), DDflt0_(ROBUST_PAR_GRPBY_EXCHANGE_FCTR, "0.25"), DDflt0_(ROBUST_PAR_GRPBY_LEAF_FCTR, "0.25"), // external master CQD that sets the following internal CQDs // robust_query_optimization // MINIMUM SYSTEM HIGH MAXIMUM // risk_premium_NJ 1.0 system 2.5 5.0 // risk_premium_SERIAL 1.0 system 1.5 2.0 // partitioning_scheme_sharing 0 system 2 2 // robust_hj_to_nj_fudge_factor 0.0 system 3.0 1.0 // robust_sortgroupby 0 system 2 2 // risk_premium_MJ 1.0 system 1.5 2.0 // see optimizer/ControlDB.cpp ControlDB::doRobustQueryOptimizationCQDs // for the actual cqds that set these values XDDkwd__(ROBUST_QUERY_OPTIMIZATION, "SYSTEM"), // 0: allow sort group by in all // 1: disallow sort group by from partial grpByRoot if no order requirement // 2: disallow sort group by from partial grpByRoot // 3: disallow sort group by in ESP DDint__(ROBUST_SORTGROUPBY, "1"), SDDui___(ROUNDING_MODE, "0"), DDui___(ROUTINE_CACHE_SIZE, "20"), // UDF default Uec DDui___(ROUTINE_DEFAULT_UEC, "1"), DDkwd__(ROUTINE_JOINS_SPOIL_JBB, "OFF"), DDkwd__(ROWSET_ROW_COUNT, "OFF"), DDint__(SAP_KEY_NJ_TABLE_SIZE_THRESHOLD, "10000000"), DDkwd__(SAP_PA_DP2_AFFINITY_FOR_INSERTS, "ON"), DDkwd__(SAP_PREFER_KEY_NESTED_JOIN, "OFF"), DDint__(SAP_TUPLELIST_SIZE_THRESHOLD, "5000"), XDDkwd__(SAVE_DROPPED_TABLE_DDL, "OFF"), XDDansi_(SCHEMA, "SEABASE"), //specify a ':'-separated list of full path names where scratch files //should reside. Ensure each specified directory exists on each node and //the Trafodion user has permission to access them. DD_____(SCRATCH_DIRS, ""), DDkwd__(SCRATCH_DISK_LOGGING, "OFF"), SDDpct__(SCRATCH_FREESPACE_THRESHOLD_PERCENT, "1"), DDui___(SCRATCH_IO_BLOCKSIZE_SORT_MAX, "5242880"), //On LINUX, writev and readv calls are used to perform //scratch file IO. This CQD sets the vector size to use //in writev and readv calls. Overall IO size is affected //by this cqd. Other cqds related to //IO size are: COMP_INT_67, GEN_HGBY_BUFFER_SIZE, //GEN_HSHJ_BUFFER_SIZE, OLAP_BUFFER_SIZE, //EXE_HGB_INITIAL_HT_SIZE. The vector size is a no-op on other //platforms. DDui___(SCRATCH_IO_VECTOR_SIZE_HASH, "8"), DDui___(SCRATCH_IO_VECTOR_SIZE_SORT, "1"), DDui___(SCRATCH_MAX_OPENS_HASH, "1"), DDui___(SCRATCH_MAX_OPENS_SORT, "1"), DDui___(SCRATCH_MGMT_OPTION, "11"), DDkwd__(SCRATCH_PREALLOCATE_EXTENTS, "OFF"), DD_____(SEABASE_CATALOG, TRAFODION_SYSCAT_LIT), DDkwd__(SEABASE_VOLATILE_TABLES, "ON"), // SeaMonster messaging -- the default can be ON, OFF, or SYSTEM. // When the default is SYSTEM we take the setting from the env var // SQ_SEAMONSTER which will have a value of 0 or 1.
DDkwd__(SEAMONSTER, "SYSTEM"), SDDkwd__(SEMIJOIN_TO_INNERJOIN_TRANSFORMATION, "SYSTEM"), // Disallow/Allow semi and anti-semi joins in MultiJoin framework DDkwd__(SEMI_JOINS_SPOIL_JBB, "OFF"), DDkwd__(SEQUENTIAL_BLOCKSPLIT, "SYSTEM"), DDansi_(SESSION_ID, ""), DDkwd__(SESSION_IN_USE, "OFF"), DDansi_(SESSION_USERNAME, ""), DDflt0_(SGB_CPUCOST_INITIALIZE, ".05"), DDui___(SGB_INITIAL_BUFFER_COUNT, "5."), DDui1__(SGB_INITIAL_BUFFER_SIZE, "5."), DDkwd__(SHAREOPENS_ON_REFCOUNT, "ON"), DDkwd__(SHARE_TEMPLATE_CACHED_PLANS, "ON"), DDui___(SHORT_OPTIMIZATION_PASS_THRESHOLD, "12"), SDDkwd__(SHOWCONTROL_SHOW_ALL, "OFF"), SDDkwd__(SHOWCONTROL_SHOW_SUPPORT, "OFF"), DDkwd__(SHOWDDL_DISPLAY_FORMAT, "EXTERNAL"), DDkwd__(SHOWDDL_DISPLAY_PRIVILEGE_GRANTS, "SYSTEM"), DDint__(SHOWDDL_FOR_REPLICATE, "0"), DDkwd__(SHOWLABEL_LOCKMODE, "OFF"), DDkwd__(SHOWWARN_OPT, "ON"), DDkwd__(SHOW_MEMO_STATS, "OFF"), DDkwd__(SIMPLE_COST_MODEL, "ON"), XDDkwd__(SKEW_EXPLAIN, "ON"), XDDflt__(SKEW_ROWCOUNT_THRESHOLD, "1000000"), // Column row count // threshold below // which skew // buster is disabled. XDDflt__(SKEW_SENSITIVITY_THRESHOLD, "0.1"), DDkwd__(SKIP_METADATA_VIEWS, "OFF"), DDkwd__(SKIP_TRANSLATE_SYSCAT_DEFSCH_NAMES, "ON"), DDkwd__(SKIP_UNAVAILABLE_PARTITION, "OFF"), DDkwd__(SKIP_VCC, "OFF"), DDui0_5(SOFT_REQ_HASH_TYPE, "2"), DDkwd__(SORT_ALGO, "QS"), // Calibration // 01/23/98: 10000 // Original: 10. DDflt0_(SORT_CPUCOST_INITIALIZE, "10000."), DDui1__(SORT_EX_BUFFER_SIZE, "5."), DDkwd__(SORT_INTERMEDIATE_SCRATCH_CLEANUP, "ON"), DDui1__(SORT_IO_BUFFER_SIZE, "128."), DD1_200000(SORT_MAX_HEAP_SIZE_MB, "800"), DDkwd__(SORT_MEMORY_QUOTA_SYSTEM, "ON"), DD1_128(SORT_MERGE_BUFFER_UNIT_56KB, "1"), // Calibration // 04/06/2005: 1.5 DDflte_(SORT_QS_FACTOR, "1.5"), //Maximum records after which sort would switch over to //iterative heap sort. Most often in partial sort, we may want //do a quick sort or similar to avoid larger in-memory sort //setup. DDint__(SORT_REC_THRESHOLD, "1000"), // Calibration DDflte_(SORT_RS_FACTOR, "3.55"), // Calibration // 04/06/2005: 2.1 DDflte_(SORT_RW_FACTOR, "2.1"), DDflte_(SORT_TREE_NODE_SIZE, ".012"), DDkwd__(SQLMX_REGRESS, "OFF"), DDkwd__(SQLMX_SHOWDDL_SUPPRESS_ROW_FORMAT, "OFF"), DDansi_(SQLMX_UTIL_EXPLAIN_PLAN, "OFF"), SDDkwd__(SQLMX_UTIL_ONLINE_POPINDEX, "ON"), SDDui___(SSD_BMO_MAX_MEM_THRESHOLD_IN_MB, "1200"), // BertBert VV // Timeout for a streaming cursor to return to the fetch(), even if no // rows to return. The cursor is NOT closed, it just gives control to // the user again. // "0" means no timeout, just check instead. // "negative" means never timeout. // "positive" means the number of centiseconds to wait before timing out. XDDint__(STREAM_TIMEOUT, "-1"), XDDkwd__(SUBQUERY_UNNESTING, "ON"), DDkwd__(SUBQUERY_UNNESTING_P2, "ON"), DDkwd__(SUBSTRING_TRANSFORMATION, "OFF"), DDui___(SYNCDEPTH, "1"), XDDkwd__(TABLELOCK, "SYSTEM"), // This is the code base for the end user calibration cluster. // It must be either "DEBUG" or "RELEASE" #ifdef NDEBUG DDkwd__(TARGET_CODE, "RELEASE"), #else DDkwd__(TARGET_CODE, "DEBUG"), #endif // This is the frequency of the representative CPU of the end user // cluster. // TARGET_CPU_FREQUENCY units are MhZ. DDflte_(TARGET_CPU_FREQUENCY, "199."), // This is the seek time of the representative disk of the end user // cluster. // TARGET_IO_SEEK_TIME units are seconds DDflte_(TARGET_IO_SEEK_TIME, "0.0038"), // This is the sequential transfer rate for the representative // disk of the end user cluster. 
// TARGET_IO_SEQ_READ_RATE units are Mb/Sec DDflte_(TARGET_IO_SEQ_READ_RATE, "50.0"), // This is the transfer rate for the fast speed connection of // nodes in the end user cluster. // TARGET_MSG_LOCAL_RATE units are Mb/Sec DDflte_(TARGET_MSG_LOCAL_RATE, "10."), // This is the per msg time for the fast speed connection of // nodes in the end user cluster. // TARGET_MSG_LOCAL_TIME are seconds DDflte_(TARGET_MSG_LOCAL_TIME, "0.000125"), // This is the transfer rate for the connection among clusters // in the end user cluster (this only applies to NSK) // TARGET_MSG_REMOTE_RATE units are Mb/Sec DDflte_(TARGET_MSG_REMOTE_RATE, "1."), // This is the per msg time for the the connection among clusters // nodes in the end user cluster. // TARGET_MSG_REMOTE_TIME are seconds DDflte_(TARGET_MSG_REMOTE_TIME, "0.00125"), DD_____(TEMPORARY_TABLE_HASH_PARTITIONS, "" ), DDkwd__(TERMINAL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), DDint__(TEST_PASS_ONE_ASSERT_TASK_NUMBER, "-1"), DDint__(TEST_PASS_TWO_ASSERT_TASK_NUMBER, "-1"), XDDintN2(TIMEOUT, "6000"), DDflt0_(TMUDF_CARDINALITY_FACTOR, "1"), DDflt0_(TMUDF_LEAF_CARDINALITY, "1"), DDkwd__(TOTAL_RESOURCE_COSTING, "ON"), DDint__(TRAF_ALIGNED_FORMAT_ADD_COL_METHOD, "2"), DDkwd__(TRAF_ALIGNED_ROW_FORMAT, "ON"), DDkwd__(TRAF_ALLOW_ESP_COLOCATION, "OFF"), DDkwd__(TRAF_ALLOW_RESERVED_COLNAMES, "OFF"), DDkwd__(TRAF_ALLOW_SELF_REF_CONSTR, "ON"), DDkwd__(TRAF_ALTER_COL_ATTRS, "ON"), DDkwd__(TRAF_AUTO_CREATE_SCHEMA, "OFF"), DDkwd__(TRAF_BLOB_AS_VARCHAR, "ON"), //set to OFF to enable Lobs support DDkwd__(TRAF_BOOLEAN_IO, "OFF"), DDkwd__(TRAF_BOOTSTRAP_MD_MODE, "OFF"), DDkwd__(TRAF_CLOB_AS_VARCHAR, "ON"), //set to OFF to enable Lobs support DDkwd__(TRAF_COL_LENGTH_IS_CHAR, "ON"), DDkwd__(TRAF_CREATE_SIGNED_NUMERIC_LITERAL, "ON"), DDansi_(TRAF_CREATE_TABLE_WITH_UID, ""), DDkwd__(TRAF_CREATE_TINYINT_LITERAL, "ON"), DDkwd__(TRAF_DEFAULT_COL_CHARSET, (char *)SQLCHARSETSTRING_ISO88591), DDkwd__(TRAF_ENABLE_ORC_FORMAT, "OFF"), DDkwd__(TRAF_HBASE_MAPPED_TABLES, "ON"), DDkwd__(TRAF_HBASE_MAPPED_TABLES_IUD, "OFF"), DDkwd__(TRAF_INDEX_ALIGNED_ROW_FORMAT, "ON"), DDkwd__(TRAF_INDEX_CREATE_OPT, "OFF"), DDkwd__(TRAF_LARGEINT_UNSIGNED_IO, "OFF"), DDkwd__(TRAF_LOAD_ALLOW_RISKY_INDEX_MAINTENANCE, "OFF"), DDkwd__(TRAF_LOAD_CONTINUE_ON_ERROR, "OFF"), DD_____(TRAF_LOAD_ERROR_COUNT_ID, "" ), DD_____(TRAF_LOAD_ERROR_COUNT_TABLE, "ERRORCOUNTER" ), DD_____(TRAF_LOAD_ERROR_LOGGING_LOCATION, "/bulkload/logs" ), DDint__(TRAF_LOAD_FLUSH_SIZE_IN_KB, "1024"), DDkwd__(TRAF_LOAD_FORCE_CIF, "ON"), DDkwd__(TRAF_LOAD_LOG_ERROR_ROWS, "OFF"), DDint__(TRAF_LOAD_MAX_ERROR_ROWS, "0"), DDint__(TRAF_LOAD_MAX_HFILE_SIZE, "10240"), // in MB -->10GB by default DDkwd__(TRAF_LOAD_PREP_ADJUST_PART_FUNC, "ON"), DDkwd__(TRAF_LOAD_PREP_CLEANUP, "ON"), DDkwd__(TRAF_LOAD_PREP_KEEP_HFILES, "OFF"), DDkwd__(TRAF_LOAD_PREP_PHASE_ONLY, "OFF"), DDkwd__(TRAF_LOAD_PREP_SKIP_DUPLICATES , "OFF"), //need add code to check if folder exists or not. if not issue an error and ask //user to create it DD_____(TRAF_LOAD_PREP_TMP_LOCATION, "/bulkload/" ), DDkwd__(TRAF_LOAD_TAKE_SNAPSHOT , "OFF"), DDkwd__(TRAF_LOAD_USE_FOR_INDEXES, "ON"), DDkwd__(TRAF_LOAD_USE_FOR_STATS, "OFF"), // max size in bytes of a char or varchar column. 
Set to 16M DDui___(TRAF_MAX_CHARACTER_COL_LENGTH, MAX_CHAR_COL_LENGTH_IN_BYTES_STR), DDkwd__(TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE, "OFF"), DDkwd__(TRAF_MULTI_COL_FAM, "ON"), DDkwd__(TRAF_NO_CONSTR_VALIDATION, "OFF"), DDkwd__(TRAF_NO_DTM_XN, "OFF"), DDint__(TRAF_NUM_HBASE_VERSIONS, "0"), DDint__(TRAF_NUM_OF_SALT_PARTNS, "-1"), DDkwd__(TRAF_READ_OBJECT_DESC, "OFF"), DDkwd__(TRAF_RELOAD_NATABLE_CACHE, "OFF"), DD_____(TRAF_SAMPLE_TABLE_LOCATION, "/sample/"), DDint__(TRAF_SEQUENCE_CACHE_SIZE, "-1"), DDkwd__(TRAF_SIMILARITY_CHECK, "ROOT"), DDkwd__(TRAF_STORE_OBJECT_DESC, "OFF"), DDkwd__(TRAF_STRING_AUTO_TRUNCATE, "OFF"), DDkwd__(TRAF_STRING_AUTO_TRUNCATE_WARNING, "OFF"), //TRAF_TABLE_SNAPSHOT_SCAN CQD can be set to : //NONE--> Snapshot scan is disabled and regular scan is used , //SUFFIX --> Snapshot scan enabled for the bulk unload (bulk unload // behavior id not changed) //LATEST --> enabled for the scan independently from bulk unload // the latest snapshot is used if it exists DDkwd__(TRAF_TABLE_SNAPSHOT_SCAN, "NONE"), DD_____(TRAF_TABLE_SNAPSHOT_SCAN_SNAP_SUFFIX, "SNAP"), //when the estimated table size is below the threshold (in MBs) //defined by TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD //regular scan instead of snapshot scan //does not apply to bulk unload which maintains the old behavior DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD, "1000"), //timeout before we give up when trying to create the snapshot scanner DDint__(TRAF_TABLE_SNAPSHOT_SCAN_TIMEOUT, "6000"), //location for temporary links and files produced by snapshot scan DD_____(TRAF_TABLE_SNAPSHOT_SCAN_TMP_LOCATION, "/bulkload/"), DDkwd__(TRAF_TINYINT_INPUT_PARAMS, "OFF"), DDkwd__(TRAF_TINYINT_RETURN_VALUES, "OFF"), DDkwd__(TRAF_TINYINT_SPJ_SUPPORT, "OFF"), DDkwd__(TRAF_TINYINT_SUPPORT, "ON"), // DTM Transaction Type: MVCC, SSCC XDDkwd__(TRAF_TRANS_TYPE, "MVCC"), DDkwd__(TRAF_UNLOAD_BYPASS_LIBHDFS, "ON"), DD_____(TRAF_UNLOAD_DEF_DELIMITER, "|" ), DD_____(TRAF_UNLOAD_DEF_RECORD_SEPARATOR, "\n" ), DDint__(TRAF_UNLOAD_HDFS_COMPRESS, "0"), DDkwd__(TRAF_UNLOAD_SKIP_WRITING_TO_FILES, "OFF"), DDkwd__(TRAF_UPSERT_ADJUST_PARAMS, "OFF"), DDkwd__(TRAF_UPSERT_MODE, "MERGE"), DDkwd__(TRAF_UPSERT_TO_EFF_TREE, "ON"), DDint__(TRAF_UPSERT_WB_SIZE, "2097152"), DDkwd__(TRAF_UPSERT_WRITE_TO_WAL, "OFF"), DDkwd__(TRAF_USE_REGION_XN, "OFF"), DDkwd__(TRAF_USE_RWRS_FOR_MD_INSERT, "ON"), DDkwd__(TRANSLATE_ERROR, "ON"), DDkwd__(TRANSLATE_ERROR_UNICODE_TO_UNICODE, "ON"), DDkwd__(TRY_DP2_REPARTITION_ALWAYS, "OFF"), SDDkwd__(TRY_PASS_ONE_IF_PASS_TWO_FAILS, "OFF"), // Disallow/Allow TSJs in MultiJoin framework DDkwd__(TSJS_SPOIL_JBB, "OFF"), // type a CASE expression or ValueIdUnion as varchar if its leaves // are of type CHAR of unequal length DDkwd__(TYPE_UNIONED_CHAR_AS_VARCHAR, "ON"), // UDF scalar indicating maximum number of rows out for each row in. DDui___(UDF_FANOUT, "1"), // Must be in form <cat>.<sch>. Delimited catalog names not allowed. DD_____(UDF_METADATA_SCHEMA, "TRAFODION.\"_UDF_\""), DDkwd__(UDF_SUBQ_IN_AGGS_AND_GBYS, "SYSTEM"), XDDui___(UDR_DEBUG_FLAGS, "0"), // see sqludr/sqludr.h for values SDD_____(UDR_JAVA_OPTIONS, "OFF"), DD_____(UDR_JAVA_OPTION_DELIMITERS, " "), XDDui___(UDR_JVM_DEBUG_PORT, "0"), XDDui___(UDR_JVM_DEBUG_TIMEOUT, "0"), DDkwd__(UNAVAILABLE_PARTITION, "STOP"), // "?" used? 
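  // Illustrative read path for the entries above (a sketch, not code lifted
  // from this file; call sites elsewhere in the compiler follow this general
  // pattern):
  //   NADefaults &defs  = ActiveSchemaDB()->getDefaults();
  //   Lng32 maxHFileMB  = defs.getAsLong(TRAF_LOAD_MAX_HFILE_SIZE);            // numeric CQD
  //   NABoolean useWAL  = (defs.getToken(TRAF_UPSERT_WRITE_TO_WAL) == DF_ON);  // keyword CQD
  // Keyword (DDkwd__) attributes are read back as DefaultToken values, while
  // numeric ones go through getAsLong()/getAsDouble(), defined later in this file.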
DDkwd__(UNC_PROCESS, "OFF"), SDDkwd__(UNIQUE_HASH_JOINS, "SYSTEM"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE, "1000"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_SIZE_PER_INSTANCE, "100"), SDDui___(UNIQUE_HASH_JOIN_MAX_INNER_TABLES, "2"), DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_DOWN, "31000"), DDui___(UNOPTIMIZED_ESP_BUFFER_SIZE_UP, "31000"), DDui1__(UPDATED_BYTES_PER_ESP, "400000"), DDkwd__(UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY,"ON"), DDkwd__(UPD_ABORT_ON_ERROR, "OFF"), XDDkwd__(UPD_ORDERED, "ON"), DDkwd__(UPD_PARTIAL_ON_ERROR, "OFF"), DDkwd__(UPD_SAVEPOINT_ON_ERROR, "ON"), DDkwd__(USER_EXPERIENCE_LEVEL, "BEGINNER"), // ------------------------------------------------------------------------ // This default will use a new type of an ASSERT, CCMPASSERT as a CMPASSERT // when ON, else use that as a DCMPASSERT. Changed this default to OFF // just before the final build for R2 07/23/2004 RV // ------------------------------------------------------------------------- DDkwd__(USE_CCMPASSERT_AS_CMPASSERT, "OFF"), DDkwd__(USE_DENSE_BUFFERS, "ON"), // Use Hive tables as source for traf ustat and popindex DDkwd__(USE_HIVE_SOURCE, ""), // Use large queues on RHS of Flow/Nested Join when appropriate DDkwd__(USE_LARGE_QUEUES, "ON"), DDkwd__(USE_MAINTAIN_CONTROL_TABLE, "OFF"), DDkwd__(USE_OLD_DT_CONSTRUCTOR, "OFF"), // Adaptive segmentation, use operator max to determine degree of parallelism DDui___(USE_OPERATOR_MAX_FOR_DOP, "1"), // Specify the number of partitions before invoking parallel label operations DDui1__(USE_PARALLEL_FOR_NUM_PARTITIONS, "32"), DDkwd__(USTAT_ADD_SALTED_KEY_PREFIXES_FOR_MC, "ON"), // When ON, generate MCs for primary key prefixes as well as full key // of salted table when ON EVERY KEY or ON EVERY COLUMN is specified. DDkwd__(USTAT_ATTEMPT_ESP_PARALLELISM, "ON"), // for reading column values DDui___(USTAT_AUTOMATION_INTERVAL, "0"), XDDflt0_(USTAT_AUTO_CV_SAMPLE_SLOPE, "0.5"), // CV multiplier for sampling %. DDkwd__(USTAT_AUTO_EMPTYHIST_TWO_TRANS, "OFF"), // When ON empty hist insert will be 2 trans. DDkwd__(USTAT_AUTO_FOR_VOLATILE_TABLES, "OFF"), // Toggle for vol tbl histogram usage DDui___(USTAT_AUTO_MAX_HIST_AGE, "0"), // Age of oldest unused histogram - only applies when automation is on. DDui1__(USTAT_AUTO_MC_MAX_WIDTH, "10"), // The max columns in an MC histogram for automation. DDui___(USTAT_AUTO_MISSING_STATS_LEVEL, "4"), // Similar to HIST_MISSING_STATS_WARNING_LEVEL, but controls // if automation inserts missing stats to HISTOGRAMS table. // 0 - insert no stats, // 1 - insert single col hists, // 2 - insert all single col hists and MC hists for scans, // 3 - insert all single col hists and MC stats for scans and joins. // 4 - insert all single col hists and MC stats for scans, joins, and groupbys. XDDui___(USTAT_AUTO_PRIORITY, "150"), // Priority of ustats under USAS. DDui1__(USTAT_AUTO_READTIME_UPDATE_INTERVAL, "86400"), // Seconds between updates of READ_COUNT. // Should be > CACHE_HISTOGRAMS_REFRESH_INTERVAL. 
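  // Illustrative example (not from the original source) of the automation
  // levels described above: a site that wants automation to insert only
  // single-column histograms and no MC stats would issue
  //   CONTROL QUERY DEFAULT USTAT_AUTO_MISSING_STATS_LEVEL '1';
  // (or store the equivalent row in its DEFAULTS table) instead of relying
  // on the shipped level of '4'.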
DDkwd__(USTAT_CHECK_HIST_ACCURACY, "OFF"), DDui1__(USTAT_CLUSTER_SAMPLE_BLOCKS, "1"), DDkwd__(USTAT_COLLECT_FILE_STATS, "ON"), // do we collect file stats DDkwd__(USTAT_COLLECT_MC_SKEW_VALUES, "OFF"), DDkwd__(USTAT_COMPACT_VARCHARS, "OFF"), // If on, internal sort does not pad out varchars DD_____(USTAT_CQDS_ALLOWED_FOR_SPAWNED_COMPILERS, ""), // list of CQDs that can be pushed to seconday compilers // CQDs are delimited by "," DDkwd__(USTAT_DEBUG_FORCE_FETCHCOUNT, "OFF"), DD_____(USTAT_DEBUG_TEST, ""), DDflte_(USTAT_DSHMAX, "50.0"), DDkwd__(USTAT_ESTIMATE_HBASE_ROW_COUNT, "ON"), DDkwd__(USTAT_FETCHCOUNT_ACTIVE, "OFF"), DDkwd__(USTAT_FORCE_MOM_ESTIMATOR, "OFF"), DDkwd__(USTAT_FORCE_TEMP, "OFF"), DDflt0_(USTAT_FREQ_SIZE_PERCENT, "0.5"), // >100 effectively disables DDflt0_(USTAT_GAP_PERCENT, "10.0"), DDflt0_(USTAT_GAP_SIZE_MULTIPLIER, "1.5"), DDui___(USTAT_HBASE_SAMPLE_RETURN_INTERVAL, "10000000"), // Avoid scanner timeout by including on average at // least one row per this many when sampling within HBase. DDflt0_(USTAT_INCREMENTAL_FALSE_PROBABILITY, "0.01"), DDkwd__(USTAT_INCREMENTAL_UPDATE_STATISTICS, "ON"), DDkwd__(USTAT_INSERT_TO_NONAUDITED_TABLE, "OFF"), // Used internally to overcome problem in which insert // to the non-audited sample table must be done on same // process it was created on. This CQD is NOT externalized. DDkwd__(USTAT_INTERNAL_SORT, "HYBRID"), DDkwd__(USTAT_IS_IGNORE_UEC_FOR_MC, "OFF"), // if MCIS is ON, use IS to compute SC stats DDflt_0_1(USTAT_IS_MEMORY_FRACTION, "0.6"), DDflt0_(USTAT_IUS_INTERVAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_INTERVAL_UEC_CHANGE_THRESHOLD, "0.05"), DDui1_6(USTAT_IUS_MAX_NUM_HASH_FUNCS, "5"), // the max disk space IUS CBFs can use is // MINOF(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, // TtotalSpace * USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE) DDui___(USTAT_IUS_MAX_PERSISTENT_DATA_IN_MB, "50000"), // 50GB DDflt0_(USTAT_IUS_MAX_PERSISTENT_DATA_IN_PERCENTAGE, "0.20"), // 20% of the total DDui1_6(USTAT_IUS_MAX_TRANSACTION_DURATION, "5"), // in minutes DDkwd__(USTAT_IUS_NO_BLOCK, "OFF"), DDansi_(USTAT_IUS_PERSISTENT_CBF_PATH, "SYSTEM"), DDflt0_(USTAT_IUS_TOTAL_ROWCOUNT_CHANGE_THRESHOLD, "0.05"), DDflt0_(USTAT_IUS_TOTAL_UEC_CHANGE_THRESHOLD, "0.05"), DDkwd__(USTAT_IUS_USE_PERIODIC_SAMPLING, "OFF"), DDkwd__(USTAT_JIT_LOGGING, "OFF"), DDkwd__(USTAT_LOCK_HIST_TABLES, "OFF"), DD_____(USTAT_LOG, "ULOG"), DDui30_246(USTAT_MAX_CHAR_BOUNDARY_LEN, "30"), // Values can be 30-246. DDui___(USTAT_MAX_CHAR_COL_LENGTH_IN_BYTES, "256"), // When computing UECs, char cols are limited to this many bytes DDflt0_ (USTAT_MAX_CHAR_DATASIZE_FOR_IS, "1000"), // max data size in MB for char type to use XDDui___(USTAT_MAX_READ_AGE_IN_MIN, "5760"), DDui___(USTAT_MAX_SAMPLE_AGE, "365"), // For R2.5 set to a year so user created samples won't be removed. // internal sort without checking UEC. DDflt0_(USTAT_MIN_CHAR_UEC_FOR_IS, "0.2"), // minimum UEC for char type to use internal sort DDflt0_(USTAT_MIN_DEC_BIN_UEC_FOR_IS, "0.0"), // minimum UEC for binary types to use internal sort DDflt0_(USTAT_MIN_ESTIMATE_FOR_ROWCOUNT, "10000000"), DDui1__(USTAT_MIN_ROWCOUNT_FOR_CTS_SAMPLE, "10000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_LOW_SAMPLE, "1000000"), XDDui1__(USTAT_MIN_ROWCOUNT_FOR_SAMPLE, "10000"), DDflt0_(USTAT_MODIFY_DEFAULT_UEC, "0.05"), DDflt0_(USTAT_NAHEAP_ESTIMATED_MAX, "1.3"), // estimated max memory allocation (in GB) feasible with NAHEAP. 
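  // Worked example (illustrative) for the USTAT_IUS_MAX_PERSISTENT_DATA_*
  // pair above: with the shipped defaults of 50000 MB and 0.20, a volume
  // with roughly 100 GB (~102400 MB) of total space caps IUS persistent CBF
  // data at MINOF(50000, 0.20 * 102400) = 20480 MB, whereas on a 1 TB volume
  // the flat 50000 MB (50 GB) ceiling becomes the binding limit.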
XDDui1__(USTAT_NECESSARY_SAMPLE_MAX, "5000000"), // Maximum sample size with NECESSARY DDui1__(USTAT_NUM_MC_GROUPS_FOR_KEYS, "10"), XDDpct__(USTAT_OBSOLETE_PERCENT_ROWCOUNT, "15"), DDkwd__(USTAT_PROCESS_GAPS, "ON"), DD0_255(USTAT_RETRY_DELAY, "100"), DD0_255(USTAT_RETRY_LIMIT, "3"), DD0_255(USTAT_RETRY_NEC_COLS_LIMIT, "3"), // by default, use retry for AddNecessaryColumns DDui1__(USTAT_RETRY_SECURITY_COUNT, "120"), DDpct__(USTAT_SAMPLE_PERCENT_DIFF, "10"), DDansi_(USTAT_SAMPLE_TABLE_NAME, " "), DDansi_(USTAT_SAMPLE_TABLE_NAME_CREATE, " "), DDkwd__(USTAT_SHOW_MC_INTERVAL_INFO, "OFF"), DDkwd__(USTAT_SHOW_MFV_INFO, "OFF"), DDflte_(USTAT_UEC_HI_RATIO, "0.5"), DDflte_(USTAT_UEC_LOW_RATIO, "0.1"), DDkwd__(USTAT_USE_BACKING_SAMPLE, "OFF"), DDkwd__(USTAT_USE_BULK_LOAD, "OFF"), DDkwd__(USTAT_USE_GROUPING_FOR_SAMPLING, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_LOOP, "ON"), DDkwd__(USTAT_USE_INTERNAL_SORT_FOR_MC_NEW_HIST, "OFF"), // TEMP FOR TESTING -- SHOULD REMOVE DDkwd__(USTAT_USE_IS_WHEN_NO_STATS, "ON"), // use IS when no histograms exist for the column DDkwd__(USTAT_USE_SIDETREE_INSERT, "ON"), DDkwd__(USTAT_USE_SLIDING_SAMPLE_RATIO, "ON"), // Trend sampling rate down w/increasing table size, going // flat at 1%. XDDflt1_(USTAT_YOULL_LIKELY_BE_SORRY, "100000000"), // guard against unintentional long-running UPDATE STATS DDkwd__(VALIDATE_RFORK_REDEF_TS, "OFF"), DDkwd__(VALIDATE_VIEWS_AT_OPEN_TIME, "OFF"), //this is the default length of a param which is typed as a VARCHAR. DD1_4096(VARCHAR_PARAM_DEFAULT_SIZE, "255"), // allows pcodes for varchars DDkwd__(VARCHAR_PCODE, "ON"), DDansi_(VOLATILE_CATALOG, ""), DDkwd__(VOLATILE_SCHEMA_IN_USE, "OFF"), // if this is set to ON or SYSTEM, then find a suitable key among all the // columns of a volatile table. // If this is set to OFF, and there is no user specified primary key or // store by clause, then make the first column of the volatile table // to be the clustering key. DDkwd__(VOLATILE_TABLE_FIND_SUITABLE_KEY, "SYSTEM"), // if this is set, and there is no user specified primary key or // store by clause, then make the first column of the volatile table // to be the clustering key. // Default is ON. DDkwd__(VOLATILE_TABLE_FIRST_COL_IS_CLUSTERING_KEY, "ON"), DDkwd__(VSBB_TEST_MODE, "OFF"), XDDkwd__(WMS_CHILD_QUERY_MONITORING, "OFF"), XDDkwd__(WMS_QUERY_MONITORING, "OFF"), // amount of work we are willing to assign per CPU for any query // not running at full system parallelism SDDflte_(WORK_UNIT_ESP, "0.08"), SDDflte_(WORK_UNIT_ESP_DATA_COPY_COST, "0.001"), // ZIG_ZAG_TREES ON means do ZIG_ZAG_TREES // $$$ OFF for beta DDkwd__(ZIG_ZAG_TREES, "SYSTEM"), DDkwd__(ZIG_ZAG_TREES_CONTROL, "OFF") }; // // NOTE: The defDefIx_ array is an array of integers that map // 'enum' values to defaultDefaults[] entries. // The defDefIx_ array could probably be made global static // since all threads should map the same 'enum' values to the // same defaultDefaults[] entries. Such as change is being // left to a future round of optimizations. 
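// For example (illustrative; the actual slot number is arbitrary): if the
// alphabetically ordered defaultDefaults[] array happens to hold the CATALOG
// entry at slot 123, then initCurrentDefaultsWithDefaultDefaults() records
// defDefIx_[CATALOG] = 123, so the accessors below (getAttrName(),
// getDefaultDefaultValue(), validator(), getFlags()) become O(1) array
// lookups keyed by the enum value rather than searches by attribute name.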
// static THREAD_P size_t defDefIx_[__NUM_DEFAULT_ATTRIBUTES]; inline static const char *getAttrName(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].attrName; } inline static const char *getDefaultDefaultValue(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].value; } inline static const DefaultValidator *validator(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].validator; } inline static UInt32 getFlags(Int32 attrEnum) { return defaultDefaults[defDefIx_[attrEnum]].flags; } inline static NABoolean isFlagOn(Int32 attrEnum, NADefaultFlags flagbit) { #pragma nowarn(1506) // warning elimination return defaultDefaults[defDefIx_[attrEnum]].flags & (UInt32)flagbit; #pragma warn(1506) // warning elimination } inline static void setFlagOn(Int32 attrEnum, NADefaultFlags flagbit) { defaultDefaults[defDefIx_[attrEnum]].flags |= (UInt32)flagbit; } static NABoolean isSynonymOfRESET(NAString &value) { return (value == "RESET"); } static NABoolean isSynonymOfSYSTEM(Int32 attrEnum, NAString &value) { if (value == "") return TRUE; if (value == "SYSTEM") return !isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM); if (value == "ENABLE"){ value = "ON"; return FALSE; } else if (value == "DISABLE"){ value = "OFF"; return FALSE; } // if (getDefaultDefaultValue(attrEnum) != NAString("DISABLE")) // cast reqd!! // return TRUE; // else // value = "ON"; return FALSE; } // Helper class used for holding and restoring CQDs class NADefaults::HeldDefaults { public: HeldDefaults(void); ~HeldDefaults(void); // CMPASSERT's on stack overflow void pushDefault(const char * value); // returns null if nothing to pop char * popDefault(void); private: enum { STACK_SIZE = 3 }; int stackPointer_; char * stackValue_[STACK_SIZE]; }; // Methods for helper class HeldDefaults NADefaults::HeldDefaults::HeldDefaults(void) : stackPointer_(0) { for (int i = 0; i < STACK_SIZE; i++) stackValue_[i] = NULL; } NADefaults::HeldDefaults::~HeldDefaults(void) { for (int i = 0; i < STACK_SIZE; i++) { if (stackValue_[i]) { NADELETEBASIC(stackValue_[i], NADHEAP); } } } // CMPASSERT's on stack overflow void NADefaults::HeldDefaults::pushDefault(const char * value) { CMPASSERT(stackPointer_ < STACK_SIZE); stackValue_[stackPointer_] = new NADHEAP char[strlen(value) + 1]; strcpy(stackValue_[stackPointer_],value); stackPointer_++; } // returns null if nothing to pop char * NADefaults::HeldDefaults::popDefault(void) { char * result = 0; if (stackPointer_ > 0) { stackPointer_--; result = stackValue_[stackPointer_]; stackValue_[stackPointer_] = NULL; } return result; } size_t NADefaults::numDefaultAttributes() { return (size_t)__NUM_DEFAULT_ATTRIBUTES; } // Returns current defaults in alphabetic order (for SHOWCONTROL listing). const char *NADefaults::getCurrentDefaultsAttrNameAndValue( size_t ix, const char* &name, const char* &value, NABoolean userDefaultsOnly) { if (ix < numDefaultAttributes()) { NABoolean get = FALSE; if (userDefaultsOnly) { // if this default was entered by user, return it. 
get = userDefault(defaultDefaults[ix].attrEnum); } else { // display the control if // - it is externalized or // - it is for support only and a CQD is set to show those, or // - a CQD is set to show all the controls get = (defaultDefaults[ix].flags & DEFAULT_IS_EXTERNALIZED) || // bit-AND ((defaultDefaults[ix].flags & DEFAULT_IS_FOR_SUPPORT) && (getToken(SHOWCONTROL_SHOW_SUPPORT) == DF_ON)) || (getToken(SHOWCONTROL_SHOW_ALL) == DF_ON); } if (get) { name = defaultDefaults[ix].attrName; value = currentDefaults_[defaultDefaults[ix].attrEnum]; return name; } } return name = value = NULL; } // ----------------------------------------------------------------------- // convert the default defaults into a table organized by enum values // ----------------------------------------------------------------------- void NADefaults::initCurrentDefaultsWithDefaultDefaults() { deleteMe(); const size_t numAttrs = numDefaultAttributes(); if (numAttrs != sizeof(defaultDefaults) / sizeof(DefaultDefault)) return; CMPASSERT_STRING (numAttrs == sizeof(defaultDefaults) / sizeof(DefaultDefault), "Check sqlcomp/DefaultConstants.h for a gap in enum DefaultConstants or sqlcomp/nadefaults.cpp for duplicate entries in array defaultDefaults[]."); SqlParser_NADefaults_Glob = SqlParser_NADefaults_ = new NADHEAP SqlParser_NADefaults(); provenances_ = new NADHEAP char [numAttrs]; // enum fits in 2 bits flags_ = new NADHEAP char [numAttrs]; resetToDefaults_ = new NADHEAP char * [numAttrs]; currentDefaults_ = new NADHEAP const char * [numAttrs]; currentFloats_ = new NADHEAP float * [numAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numAttrs]; currentState_ = INIT_DEFAULT_DEFAULTS; heldDefaults_ = new NADHEAP HeldDefaults * [numAttrs]; // reset all entries size_t i = 0; for (i = 0; i < numAttrs; i++) { provenances_[i] = currentState_; flags_[i] = 0; defDefIx_[i] = 0; } memset( resetToDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentDefaults_, 0, sizeof(char *) * numAttrs ); memset( currentFloats_, 0, sizeof(float *) * numAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numAttrs ); memset( heldDefaults_, 0, sizeof(HeldDefaults *) * numAttrs ); #ifndef NDEBUG // This env-var turns on consistency checking of default-defaults and // other static info. The env-var does not get passed from sqlci to arkdev // until *AFTER* the initialization code runs, so you must do a static // arkcmp compile to do this checking. TEST050 does this, in fact. 
NABoolean nadval = !!getenv("NADEFAULTS_VALIDATE"); #endif // for each entry of the (alphabetically sorted) default defaults // table, enter the default default into the current default table // which is sorted by enum values NAString prevAttrName; for (i = 0; i < numAttrs; i++) { // the enum must be less than the max (if this assert fails // you might have made the range of constants in the enum // non-contiguous by assigning hard-coded numbers to some entries) CMPASSERT(ENUM_RANGE_CHECK(defaultDefaults[i].attrEnum)); // can't have the same enum value twice in defaultDefaults CMPASSERT(currentDefaults_[defaultDefaults[i].attrEnum] == NULL); // set currentDefaults_[enum] to the static string, // leaving the "allocated from heap" flag as FALSE char * value = new NADHEAP char[strlen(defaultDefaults[i].value) + 1]; strcpy(value,defaultDefaults[i].value); // trim trailing spaces (except UDR_JAVA_OPTION_DELIMITERS, since // trailing space is allowed for it) if (defaultDefaults[i].attrEnum != UDR_JAVA_OPTION_DELIMITERS) { Lng32 len = strlen(value); while ((len > 0) && (value[len-1] == ' ')) { value[len-1] = 0; len--; } } currentDefaults_[defaultDefaults[i].attrEnum] = value; // set up our backlink which maps [enum] to its defaultDefaults entry defDefIx_[defaultDefaults[i].attrEnum] = i; // attrs must be in ascending sorted order. If not, error out. if (prevAttrName > defaultDefaults[i].attrName) { SqlParser_NADefaults_ = NULL; return; } prevAttrName = defaultDefaults[i].attrName; // validate initial default default values CMPASSERT(defaultDefaults[i].validator); if (! defaultDefaults[i].validator->validate( defaultDefaults[i].value, this, defaultDefaults[i].attrEnum, +1/*warning*/)) { SqlParser_NADefaults_ = NULL; cerr << "\nERROR: " << defaultDefaults[i].attrName << " has invalid value" << defaultDefaults[i].value << endl; return; } // LCOV_EXCL_START // for debugging only #ifndef NDEBUG if (nadval) { // additional sanity checking we want to do occasionally NAString v; // ensure the static table really is in alphabetic order CMPASSERT(i == 0 || strcmp(defaultDefaults[i-1].attrName, defaultDefaults[i].attrName) < 0); // ensure these names are fit and trim and in canonical form v = defaultDefaults[i].attrName; TrimNAStringSpace(v); v.toUpper(); CMPASSERT(v == defaultDefaults[i].attrName); // validate initial default default values CMPASSERT(defaultDefaults[i].validator); defaultDefaults[i].validator->validate( defaultDefaults[i].value, this, defaultDefaults[i].attrEnum, +1/*warning*/); // ensure these values are fit and trim and in canonical form v = defaultDefaults[i].value; TrimNAStringSpace(v); defaultDefaults[i].validator->applyUpper(v); CMPASSERT(v == defaultDefaults[i].value); // alert the programmer if (isSynonymOfSYSTEM(defaultDefaults[i].attrEnum, v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has SYSTEM default (" << v << ");\n\t read NOTE 2 in " << __FILE__ << endl; if (isSynonymOfRESET(v)) if (v != "" || defaultDefaults[i].validator != &validateAnsiName) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has RESET default (" << v << ");\n\t this makes no sense!" 
<< endl; if (defaultDefaults[i].validator == &validateUnknown) cerr << "\nWARNING: " << defaultDefaults[i].attrName << " has a NO-OP validator" << endl; // the token keyword array must have no missing strings, // it must also be in alphabetic order, // each entry must be canonical, and // must have no embedded spaces (see token() method, space/uscore...) if (i == 0) for (size_t j = 0; j < DF_lastToken; j++) { CMPASSERT(keywords_[j]); CMPASSERT(j == 0 || strcmp(keywords_[j-1], keywords_[j]) < 0); NAString v(keywords_[j]); TrimNAStringSpace(v); v.toUpper(); // we know keywords must be caseINsens CMPASSERT(v == keywords_[j]); CMPASSERT(v.first(' ') == NA_NPOS); } } // if env-var #endif // NDEBUG // LCOV_EXCL_STOP } // for i // set the default value for GENERATE_EXPLAIN depending on whether // this is a static compile or a dynamic compile. if (CmpCommon::context()->GetMode() == STMT_STATIC) { currentDefaults_[GENERATE_EXPLAIN] = "ON"; currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "ON"; currentDefaults_[DETAILED_STATISTICS] = "MEASURE"; } else { currentDefaults_[GENERATE_EXPLAIN] = "OFF"; currentDefaults_[DO_RUNTIME_EID_SPACE_COMPUTATION] = "OFF"; currentDefaults_[DETAILED_STATISTICS] = "OPERATOR"; } // set the default value of hive_catalog to the hive_system_catalog currentDefaults_[HIVE_CATALOG] = HIVE_SYSTEM_CATALOG; // set the default value of hbase_catalog to the hbase_system_catalog currentDefaults_[HBASE_CATALOG] = HBASE_SYSTEM_CATALOG; currentDefaults_[SEABASE_CATALOG] = TRAFODION_SYSCAT_LIT; // Test for TM_USE_SSCC from ms.env. // Only a setting of TM_USE_SSCC set to 1 will change the value to SSCC. // Otherwise, the default will remain at MVCC. char * ev = getenv("TM_USE_SSCC"); Lng32 useValue = 0; if (ev) { useValue = (Lng32)str_atoi(ev, str_len(ev)); if (useValue == 1) currentDefaults_[TRAF_TRANS_TYPE] = "SSCC"; } // Begin: Temporary workaround for SQL build regressions to pass NABoolean resetNeoDefaults = FALSE; // On SQ, the way to get an envvar from inside a un-attached process // is to use the msg_getenv_str() call and set the env inside // the SQ_PROP_ property file. In this case the property // file is $TRAF_HOME/etc/SQ_PROP_tdm_arkcmp which contains the line // "SQLMX_REGRESS=1". This file was generated by tools/setuplnxenv. // resetNeoDefaults = (msg_getenv_str("SQLMX_REGRESS") != NULL); resetNeoDefaults = (getenv("SQLMX_REGRESS") != NULL); if(resetNeoDefaults) { // turn on ALL stats during regressions run. currentDefaults_[COMP_BOOL_157] = "ON"; // turn on INTERNAL format for SHOWDDL statements currentDefaults_[SHOWDDL_DISPLAY_FORMAT] = "INTERNAL"; } // End: Temporary workaround for SQL build regressions to pass // Cache all the default keywords up front, // leaving other non-keyword token to be cached on demand. // The "keyword" that is not cached is the kludge/clever trick that // Matt puts in for NATIONAL_CHARSET. NAString tmp( NADHEAP ); for ( i = 0; i < numAttrs; i++ ) { #ifndef NDEBUG #pragma nowarn(1506) // warning elimination const DefaultValidatorType validatorType = validator(i)->getType(); #pragma warn(1506) // warning elimination #endif #pragma nowarn(1506) // warning elimination if ( validator(i)->getType() == VALID_KWD && (i != NATIONAL_CHARSET) && (i != INPUT_CHARSET) && (i != ISO_MAPPING) ) #pragma warn(1506) // warning elimination { currentTokens_[i] = new NADHEAP DefaultToken; // do not call 'token' method as it will return an error if FALSE // is to be inserted. Just directly assign DF_OFF to non-resetable defs. 
if (isNonResetableAttribute(defaultDefaults[defDefIx_[i]].attrName)) *currentTokens_[i] = DF_OFF; else #pragma nowarn(1506) // warning elimination *currentTokens_[i] = token( i, tmp ); #pragma warn(1506) // warning elimination } } if (getToken(MODE_SEABASE) == DF_ON) { currentDefaults_[CATALOG] = TRAFODION_SYSCAT_LIT; if (getToken(SEABASE_VOLATILE_TABLES) == DF_ON) { NAString sbCat = getValue(SEABASE_CATALOG); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); } } SqlParser_NADefaults_->NAMETYPE_ = getToken(NAMETYPE); SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[NATIONAL_CHARSET]); SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(currentDefaults_[ISO_MAPPING]); SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(currentDefaults_[DEFAULT_CHARSET]); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } NADefaults::NADefaults(NAMemory * h) : provenances_(NULL) , flags_(NULL) , resetToDefaults_(NULL) , currentDefaults_(NULL) , currentFloats_(NULL) , currentTokens_(NULL) , heldDefaults_(NULL) , currentState_(UNINITIALIZED) , readFromSQDefaultsTable_(FALSE) , SqlParser_NADefaults_(NULL) , catSchSetToUserID_(NULL) , heap_(h) , resetAll_(FALSE) , defFlags_(0) , tablesRead_(h) { static THREAD_P NABoolean systemParamterUpdated = FALSE; // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, if (!systemParamterUpdated && !cmpCurrentContext->isStandalone()) { updateSystemParameters(); systemParamterUpdated = TRUE; } // then copy DefaultDefaults into CurrentDefaults. initCurrentDefaultsWithDefaultDefaults(); // Set additional defaultDefaults flags: // If an attr allows ON/OFF/SYSTEM and the default-default is not SYSTEM, // then you must set this flag. Otherwise, CQD attr 'system' will revert // the value back to the default-default, which is not SYSTEM. // setFlagOn(...attr..., DEFAULT_ALLOWS_SEPARATE_SYSTEM); // // (See attESPPara in OptPhysRelExpr.cpp.) 
setFlagOn(ATTEMPT_ESP_PARALLELISM, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(HJ_TYPE, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(ZIG_ZAG_TREES, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(COMPRESSED_INTERNAL_FORMAT_BMO, DEFAULT_ALLOWS_SEPARATE_SYSTEM); setFlagOn(HBASE_SMALL_SCANNER, DEFAULT_ALLOWS_SEPARATE_SYSTEM); } NADefaults::~NADefaults() { deleteMe(); } void NADefaults::deleteMe() { if (resetToDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(resetToDefaults_[i], NADHEAP); NADELETEBASIC(resetToDefaults_, NADHEAP); } if (currentDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) if (provenances_[i] > INIT_DEFAULT_DEFAULTS) NADELETEBASIC(currentDefaults_[i], NADHEAP); NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } if (heldDefaults_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETE(heldDefaults_[i], HeldDefaults, NADHEAP); NADELETEBASIC(heldDefaults_, NADHEAP); } for (CollIndex i = tablesRead_.entries(); i--; ) tablesRead_.removeAt(i); NADELETEBASIC(provenances_, NADHEAP); NADELETEBASIC(flags_, NADHEAP); NADELETE(SqlParser_NADefaults_, SqlParser_NADefaults, NADHEAP); } // ----------------------------------------------------------------------- // Find the attribute name from its enum value in the defaults table. // ----------------------------------------------------------------------- const char *NADefaults::lookupAttrName(Int32 attrEnum, Int32 errOrWarn) { if (ATTR_RANGE_CHECK) return getAttrName(attrEnum); static THREAD_P char noSuchAttr[20]; sprintf(noSuchAttr, "**%d**", attrEnum); if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. *CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(noSuchAttr); return noSuchAttr; } // ----------------------------------------------------------------------- // Find the enum value from its string representation in the defaults table. 
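// (Illustrative walk-through, not part of the original comment: a call such
// as lookupAttrName("schema  ") trims trailing blanks and upshifts the name
// to "SCHEMA", then binary-searches the alphabetically ordered
// defaultDefaults[] table; an unrecognized name returns
// __INVALID_DEFAULT_ATTRIBUTE and, when errOrWarn is set, raises ERRWARN 2050.)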
// ----------------------------------------------------------------------- enum DefaultConstants NADefaults::lookupAttrName(const char *name, Int32 errOrWarn, Int32 *position) { NAString attrName(name); TrimNAStringSpace(attrName, FALSE, TRUE); // trim trailing blanks only attrName.toUpper(); // start with the full range of defaultDefaults size_t lo = 0; size_t hi = numDefaultAttributes(); size_t split; Int32 cresult; // perform a binary search in the ordered table defaultDefaults do { // compare the token with the middle entry in the range split = (lo + hi) / 2; cresult = attrName.compareTo(defaultDefaults[split].attrName); if (cresult < 0) { // token < split value, search first half of range hi = split; } else if (cresult > 0) { if (lo == split) // been there, done that { CMPASSERT(lo == hi-1); break; } // token > split value, search second half of range lo = split; } } while (cresult != 0 && lo < hi); if (position != 0) #pragma nowarn(1506) // warning elimination *position = split; #pragma warn(1506) // warning elimination // if the last comparison result was equal, return value at "split" if (cresult == 0) return defaultDefaults[split].attrEnum; // otherwise the string has no corresponding enum value if (errOrWarn) // $0~string0 is not the name of any DEFAULTS table attribute. *CmpCommon::diags() << DgSqlCode(ERRWARN(2050)) << DgString0(attrName); return __INVALID_DEFAULT_ATTRIBUTE; // negative } #define WIDEST_CPUARCH_VALUE 30 // also wider than any utoa_() result static void utoa_(UInt32 val, char *buf) { sprintf(buf, "%u", val); } static void itoa_(Int32 val, char *buf) { sprintf(buf, "%d", val); } static void ftoa_(float val, char *buf) { snprintf(buf, WIDEST_CPUARCH_VALUE, "%0.2f", val); } // Updates the system parameters in the defaultDefaults table. void NADefaults::updateSystemParameters(NABoolean reInit) { static const char *arrayOfSystemParameters[] = { "DEF_CPU_ARCHITECTURE", "DEF_DISCS_ON_CLUSTER", "DEF_INSTRUCTIONS_SECOND", "DEF_PAGE_SIZE", "DEF_LOCAL_CLUSTER_NUMBER", "DEF_LOCAL_SMP_NODE_NUMBER", "DEF_NUM_SMP_CPUS", "MAX_ESPS_PER_CPU_PER_OP", "DEFAULT_DEGREE_OF_PARALLELISM", "DEF_NUM_NODES_IN_ACTIVE_CLUSTERS", // this is deliberately not in the list: "DEF_CHUNK_SIZE", "DEF_NUM_BM_CHUNKS", "DEF_PHYSICAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_TOTAL_MEMORY_AVAILABLE", //returned in KB not bytes "DEF_VIRTUAL_MEMORY_AVAILABLE" , "GEN_MAX_NUM_PART_DISK_ENTRIES" , "USTAT_IUS_PERSISTENT_CBF_PATH" }; //returned in KB not bytes char valuestr[WIDEST_CPUARCH_VALUE]; // Set up global cluster information. setUpClusterInfo(CmpCommon::contextHeap()); // Extract SMP node number and cluster number where this arkcmp is running. short nodeNum = 0; Int32 clusterNum = 0; OSIM_getNodeAndClusterNumbers(nodeNum, clusterNum); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. 
if (!cmpCurrentContext->isStandalone()) { size_t numElements = sizeof(arrayOfSystemParameters) / sizeof(char *); for (size_t i = 0; i < numElements; i++) { Int32 j; // perform a lookup for the string, using a binary search lookupAttrName(arrayOfSystemParameters[i], -1, &j); CMPASSERT(j >= 0); if(reInit) NADELETEBASIC(defaultDefaults[j].value,NADHEAP); char *newValue = new (GetCliGlobals()->exCollHeap()) char[WIDEST_CPUARCH_VALUE]; newValue[0] = '\0'; defaultDefaults[j].value = newValue; switch(defaultDefaults[j].attrEnum) { case DEF_CPU_ARCHITECTURE: switch(gpClusterInfo->cpuArchitecture()) { // 123456789!1234567890@123456789 case CPU_ARCH_INTEL_80386: strcpy(newValue, "INTEL_80386"); break; case CPU_ARCH_INTEL_80486: strcpy(newValue, "INTEL_80486"); break; case CPU_ARCH_PENTIUM: strcpy(newValue, "PENTIUM"); break; case CPU_ARCH_PENTIUM_PRO: strcpy(newValue, "PENTIUM_PRO"); break; case CPU_ARCH_MIPS: strcpy(newValue, "MIPS"); break; case CPU_ARCH_ALPHA: strcpy(newValue, "ALPHA"); break; case CPU_ARCH_PPC: strcpy(newValue, "PPC"); break; default: strcpy(newValue, "UNKNOWN"); break; } if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j], FALSE); break; case DEF_DISCS_ON_CLUSTER: strcpy(newValue, "8"); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_PAGE_SIZE: utoa_(gpClusterInfo->pageSize(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_CLUSTER_NUMBER: utoa_(clusterNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_LOCAL_SMP_NODE_NUMBER: utoa_(nodeNum, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_NUM_SMP_CPUS: utoa_(gpClusterInfo->numberOfCpusPerSMP(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEFAULT_DEGREE_OF_PARALLELISM: { Lng32 x = 2; utoa_(x, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case MAX_ESPS_PER_CPU_PER_OP: { float espsPerCore = computeNumESPsPerCore(FALSE); ftoa_(espsPerCore, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case DEF_NUM_NODES_IN_ACTIVE_CLUSTERS: utoa_(((NAClusterInfoLinux*)gpClusterInfo)->numLinuxNodes(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_PHYSICAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->physicalMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_TOTAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->totalMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_VIRTUAL_MEMORY_AVAILABLE: utoa_(gpClusterInfo->virtualMemoryAvailable(), valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). 
updateCurrentDefaultsForOSIM(&defaultDefaults[j]); break; case DEF_NUM_BM_CHUNKS: { UInt32 numChunks = (UInt32) (gpClusterInfo->physicalMemoryAvailable() / def_DEF_CHUNK_SIZE / 4); utoa_(numChunks, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case DEF_INSTRUCTIONS_SECOND: { Int32 frequency, speed; frequency = gpClusterInfo->processorFrequency(); switch (gpClusterInfo->cpuArchitecture()) { case CPU_ARCH_PENTIUM_PRO: speed = (Int32) (frequency * 0.5); break; case CPU_ARCH_PENTIUM: speed = (Int32) (frequency * 0.4); break; default: speed = (Int32) (frequency * 0.3); break; } itoa_(speed, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults(). updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } break; case GEN_MAX_NUM_PART_DISK_ENTRIES: { // Make sure the gpClusterInfo points at an NAClusterLinux object. // In osim simulation mode, the pointer can point at a // NAClusterNSK object, for which the method numTSEsForPOS() is not // defined. NAClusterInfoLinux* gpLinux = dynamic_cast<NAClusterInfoLinux*>(gpClusterInfo); if ( gpLinux ) { UInt32 numTSEs = (UInt32)gpLinux->numTSEsForPOS(); utoa_(numTSEs, valuestr); strcpy(newValue, valuestr); if(reInit) ActiveSchemaDB()-> getDefaults().updateCurrentDefaultsForOSIM(&defaultDefaults[j]); } } break; case USTAT_IUS_PERSISTENT_CBF_PATH: { // set the CQD it to $HOME/cbfs const char* home = getenv("HOME"); if ( home ) { str_cat(home, "/cbfs", newValue); } } break; default: #ifndef NDEBUG cerr << "updateSystemParameters: no case for " << defaultDefaults[j].attrName << endl; #endif break; } // switch (arrayOfSystemParameters) } // for } // isStandalone } // updateSystemParameters() //============================================================================== // Get SMP node number and cluster number on which this arkcmp.exe is running. //============================================================================== void NADefaults::getNodeAndClusterNumbers(short& nodeNum, Int32& clusterNum) { SB_Phandle_Type pHandle; Int32 error = XPROCESSHANDLE_GETMINE_(&pHandle); Int32 nodeNumInt; // XPROCESSHANDLE_DECOMPOSE_ takes an integer. Int32 pin; error = XPROCESSHANDLE_DECOMPOSE_(&pHandle, &nodeNumInt, &pin, &clusterNum); nodeNum = nodeNumInt; // Store 4-byte integer back to short integer CMPASSERT(error == 0); } inline static NABoolean initializeSQLdone() { return FALSE; } // Setup for readFromSQLTable(): // #include "SQLCLIdev.h" const SQLMODULE_ID __SQL_mod_866668761818000 = { /* version */ SQLCLI_CURRENT_VERSION, /* module name */ "HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.READDEF_N29_000", /* time stamp */ 866668761818000LL, /* char set */ "ISO88591", /* name length */ 47 }; static const Int32 MAX_VALUE_LEN = 1000; // Read the SQL defaults table, to layer on further defaults. // // [1] This is designed such that it can be called multiple times // (a site-wide defaults table, then a user-specific one, e.g.) // and by default it will supersede values read/computed from earlier tables. // // [2] It can also be called *after* CQD's have been issued // (e.g. from the getCatalogAndSchema() method) // and by default it will supersede values from earlier tables // but *not* explicitly CQD-ed settings. // // This default behavior is governed by the overwrite* arguments in // various methods (see the .h file). Naturally you can override such behavior, // e.g., if you wanted to reset to an earlier state, erasing all user CQD's. 
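// Illustrative layering (a sketch of the behavior described above, not a call
// sequence copied from the source): reading a site-wide table and then a
// user-specific one with the same threshold, e.g.
//   defs.readFromSQLTable(siteWideDefaultsTable, SET_BY_CQD, errOrWarn);
//   defs.readFromSQLTable(userDefaultsTable,     SET_BY_CQD, errOrWarn);
// lets the second read supersede the first (its provenance is still
// READ_FROM_SQL_TABLE, i.e. below SET_BY_CQD) while leaving anything the user
// has already CQD-ed untouched; the table names here are hypothetical.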
// void NADefaults::readFromSQLTable(const char *tname, Provenance overwriteIfNotYet, Int32 errOrWarn) { char value[MAX_VALUE_LEN + 1]; // CMPASSERT(MAX_VALUE_LEN >= ComMAX_2_PART_EXTERNAL_UCS2_NAME_LEN_IN_NAWCHARS); // First (but only if NSK-LITE Services exist), // write system parameters (attributes DEF_*) into DefaultDefaults, // then copy DefaultDefaults into CurrentDefaults. if (!cmpCurrentContext->isStandalone()) { Lng32 initialErrCnt = CmpCommon::diags()->getNumber(); // Set this *before* doing any insert()'s ... currentState_ = READ_FROM_SQL_TABLE; Int32 loop_here=0; while (loop_here > 10) { loop_here++; if (loop_here > 1000) loop_here=100; } if (tname) { NABoolean isSQLTable = TRUE; if (*tname == ' ') { // called from NADefaults::readFromFlatFile() isSQLTable = FALSE; // -- see kludge in .h file! tname++; } char attrName[101]; // column ATTRIBUTE VARCHAR(100) UPSHIFT Int32 sqlcode; static THREAD_P struct SQLCLI_OBJ_ID __SQL_id0; FILE *flatfile = NULL; if (isSQLTable) { init_SQLCLI_OBJ_ID(&__SQL_id0, SQLCLI_CURRENT_VERSION, cursor_name, &__SQL_mod_866668761818000, "S1", 0, SQLCHARSETSTRING_ISO88591, 2); /* EXEC SQL OPEN S1; See file NADefaults.mdf for cursor declaration */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_Exec(&__SQL_id0,NULL,1,tname,NULL); } else { flatfile = fopen(tname, "r"); sqlcode = flatfile ? 0 : -ABS(arkcmpErrorFileOpenForRead); } /* EXEC SQL FETCH S1 INTO :attrName, :value; */ // Since the DEFAULTS table is PRIMARY KEY (SUBSYSTEM, ATTRIBUTE), // we'll fetch (scanning the clustering index) // CATALOG before SCHEMA; this is important if user has rows like // ('CATALOG','c1') and ('SCHEMA','c2.sn') -- // the schema setting must supersede the catalog one. // We should also put an ORDER BY into the cursor decl in the .mdf, // to handle user-created DEFAULTS tables w/o a PK. if (sqlcode >= 0) if (isSQLTable) { sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); if (sqlcode >= 0) readFromSQDefaultsTable_ = TRUE; } else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } // Ignore warnings except for end-of-data while (sqlcode >= 0 && sqlcode != +100) { NAString v(value); // skip comments, indicated by a # if (attrName[0] != '#') validateAndInsert(attrName, v, FALSE, errOrWarn, overwriteIfNotYet); /* EXEC SQL FETCH S1 INTO :attrName, :value; */ if (isSQLTable) sqlcode = SQL_EXEC_Fetch(&__SQL_id0,NULL,2,attrName,NULL,value,NULL); else { value[0] = 0; // NULL terminator if (fscanf(flatfile, " %100[A-Za-z0-9_#] ,", attrName) < 0) sqlcode = +100; else fgets((char *) value, sizeof(value), flatfile); } } if (sqlcode < 0 && errOrWarn && initializeSQLdone()) { if (ABS(sqlcode) == ABS(CLI_MODULEFILE_OPEN_ERROR) && cmpCurrentContext->isInstalling()) { // Emit no warning when (re)installing, // because obviously the module will not exist before we have // (re)arkcmp'd it! } else { // 2001 Error $0 reading table $1. Using $2 values. CollIndex n = tablesRead_.entries(); const char *errtext = n ? tablesRead_[n-1].data() : "default-default"; *CmpCommon::diags() << DgSqlCode(ERRWARN(2001)) << DgInt0(sqlcode) << DgTableName(tname) << DgString0(errtext); } } if (isSQLTable) { /* EXEC SQL CLOSE S1; */ sqlcode = SQL_EXEC_ClearDiagnostics(&__SQL_id0); sqlcode = SQL_EXEC_CloseStmt(&__SQL_id0); // The above statement should not start any transactions because // it uses read uncommitted access. 
If it ever changes, then we // would need to commit it at this time. } } // tname if (initialErrCnt < CmpCommon::diags()->getNumber() && errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2059)) << DgString0(tname ? tname : ""); } // isStandalone } // NADefaults::readFromSQLTable() void NADefaults::readFromSQLTables(Provenance overwriteIfNotYet, Int32 errOrWarn) { NABoolean cat = FALSE; NABoolean sch = FALSE; if (getToken(MODE_SEABASE) == DF_ON && !readFromSQDefaultsTable()) { // Read system defaults from configuration file. // keep this name in sync with file cli/SessionDefaults.cpp NAString confFile(getenv("TRAF_HOME")); confFile += "/etc/SQSystemDefaults.conf"; readFromFlatFile(confFile, overwriteIfNotYet, errOrWarn); tablesRead_.insert(confFile); CmpSeabaseDDL cmpSBD((NAHeap *)heap_, FALSE); Lng32 hbaseErr = 0; NAString hbaseErrStr; Lng32 errNum = cmpSBD.validateVersions(this, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &hbaseErr, &hbaseErrStr); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); // set authorization state NABoolean checkAllPrivTables = FALSE; errNum = cmpSBD.isPrivMgrMetadataInitialized(this,checkAllPrivTables); CmpCommon::context()->setAuthorizationState(errNum); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; CmpCommon::context()->hbaseErrNum() = hbaseErr; CmpCommon::context()->hbaseErrStr() = hbaseErrStr; } } currentState_ = SET_BY_CQD; // enter the next state... // Make self fully consistent, by executing deferred actions last of all getSqlParser_NADefaults(); } // NADefaults::readFromSQLTables() // This method is used by SchemaDB::initPerStatement const char * NADefaults::getValueWhileInitializing(Int32 attrEnum) { // We can't rely on our state_ because SQLC might have called CQD::bindNode() // which does a setState(SET_BY_CQD)... if (!tablesRead_.entries()) if (getProvenance(attrEnum) < SET_BY_CQD) readFromSQLTables(SET_BY_CQD); return getValue(attrEnum); } // This method is used by SchemaDB::initPerStatement *and* // by CmpCommon, CmpStatement, and SQLC/SQLCO. void NADefaults::getCatalogAndSchema(NAString &cat, NAString &sch) { cat = getValueWhileInitializing(CATALOG); sch = getValueWhileInitializing(SCHEMA); } // Should be called only privately and by DefaultValidator! Int32 NADefaults::validateFloat(const char *value, float &result, Int32 attrEnum, Int32 errOrWarn) const { Int32 n = -1; // NT's scanf("%n") is not quite correct; hence this code-around sscanf(value, "%g%n", &result, &n); if (n > 0 && value[n] == '\0') { switch (attrEnum) { case HIVE_INSERT_ERROR_MODE: { Lng32 v = str_atoi(value, str_len(value)); if (v >= 0 && v <= 3) return TRUE; } break; default: return TRUE; // a valid float } } NAString v(value); NABoolean silentIf = (errOrWarn == SilentIfSYSTEM); if (silentIf) errOrWarn = 0/*silent*/; NABoolean useSYSTEM = (token(attrEnum, v, TRUE, errOrWarn) == DF_SYSTEM); if (useSYSTEM && silentIf) // ValidateNumeric is caller return SilentIfSYSTEM; // special it-is-valid return! if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum, errOrWarn)); if (useSYSTEM) { // programmer error CMPASSERT("Numeric attr allows SYSTEM -- you need to call token() first to see if its current value is this keyword, and compute your system default value!" 
== NULL); } // ensure an out-of-range error if domainMatch or ValidateNumeric is called result = -FLT_MAX; return FALSE; // not valid } NABoolean NADefaults::insert(Int32 attrEnum, const NAString &value, Int32 errOrWarn) { // private method; callers have all already done this: ATTR_RANGE_ASSERT; assert(errOrWarn != SilentIfSYSTEM); // yeh private, but just in case // Update cache: // (Re)validate that new value is numeric. // Useful if programmer did not correctly specify the DefaultValidator for // this attr in DefaultDefaults. // if (currentFloats_[attrEnum]) { float result; if (validateFloat(value, result, attrEnum, errOrWarn)) *currentFloats_[attrEnum] = result; else return FALSE; // not a valid float } // Update cache for DefaultToken by deallocating the cached entry. if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } // If we're past the read-from-SQLTable phase, then // the first CQD of a given attr must first save the from-SQLTable value, // to which the user can RESET if desired. // if (currentState_ >= SET_BY_CQD && !resetToDefaults_[attrEnum]) { NAString currValStr(currentDefaults_[attrEnum]); Lng32 currValLen = str_len(currValStr) + 1; char *pCurrVal = new NADHEAP char[currValLen]; str_cpy_all(pCurrVal, currValStr, currValLen); resetToDefaults_[attrEnum] = pCurrVal; } char *newVal = NULL; Lng32 newValLen = str_len(value) + 1; if (provenances_[attrEnum] > INIT_DEFAULT_DEFAULTS) { Lng32 oldValLen = str_len(currentDefaults_[attrEnum]) + 1; if (oldValLen >= newValLen && oldValLen < newValLen + 100) newVal = const_cast<char*>(currentDefaults_[attrEnum]); // reuse, to reduce mem frag else NADELETEBASIC(currentDefaults_[attrEnum], NADHEAP); } if (!newVal) newVal = new NADHEAP char[newValLen]; str_cpy_all(newVal, value, newValLen); currentDefaults_[attrEnum] = newVal; // when the parser flag is on for a set-once CQD // set its provenance as INIT_DEFAULT_DEFAULTS, // so the user can set it once later if ( isSetOnceAttribute(attrEnum) && Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL) ) { provenances_[attrEnum] = INIT_DEFAULT_DEFAULTS; } else { provenances_[attrEnum] = currentState_; } return TRUE; } NADefaults::Provenance NADefaults::getProvenance(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return (Provenance)provenances_[attrEnum]; } NABoolean NADefaults::getValue(Int32 attrEnum, NAString &result) const { ATTR_RANGE_ASSERT; result = currentDefaults_[attrEnum]; return TRUE; // we always have a STRING REPRESENTATION value } NAString NADefaults::getString(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } const char * NADefaults::getValue(Int32 attrEnum) const { ATTR_RANGE_ASSERT; return currentDefaults_[attrEnum]; } NABoolean NADefaults::getFloat(Int32 attrEnum, float &result) const { ATTR_RANGE_ASSERT; if (currentFloats_[attrEnum]) { result = *currentFloats_[attrEnum]; } else if (validateFloat(currentDefaults_[attrEnum], result, attrEnum)) { currentFloats_[attrEnum] = new NADHEAP float; // cache the result *currentFloats_[attrEnum] = result; } else { return FALSE; // result is neg, from failed validateFloat() } return TRUE; } double NADefaults::getAsDouble(Int32 attrEnum) const { // No domainMatch() needed: any float or double (or int or uint) is okay; // getFloat()/validateFloat() will disallow any non-numerics. 
float flt; getFloat(attrEnum, flt); return double(flt); } Lng32 NADefaults::getAsLong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_INT, &flt)) { CMPBREAK; } return Lng32(flt); } ULng32 NADefaults::getAsULong(Int32 attrEnum) const { float flt; getFloat(attrEnum, flt); if (!domainMatch(attrEnum, VALID_UINT, &flt)) { CMPBREAK; } return (ULng32)(flt); } ULng32 NADefaults::getNumOfESPsPerNode() const { return (ULng32)MAXOF(ceil(getNumOfESPsPerNodeInFloat()), 1); } float NADefaults::getNumOfESPsPerNodeInFloat() const { double maxEspPerCpuPerOp = getAsDouble(MAX_ESPS_PER_CPU_PER_OP); CollIndex cores = ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware()) ) ? getAsLong(DEF_NUM_SMP_CPUS) : gpClusterInfo->numberOfCpusPerSMP(); return float(maxEspPerCpuPerOp * cores); } ULng32 NADefaults::getTotalNumOfESPsInCluster(NABoolean& fakeEnv) const { fakeEnv = FALSE; if (getToken(PARALLEL_NUM_ESPS, 0) != DF_SYSTEM ) { fakeEnv = TRUE; return getAsLong(PARALLEL_NUM_ESPS); } float espsPerNode = getNumOfESPsPerNodeInFloat(); CollIndex numOfNodes = gpClusterInfo->numOfSMPs(); if ( (CmpCommon::context() && CURRSTMT_OPTDEFAULTS->isFakeHardware())) { fakeEnv = TRUE; numOfNodes = getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS); } return MAXOF(ceil(espsPerNode * numOfNodes), 1); } NABoolean NADefaults::domainMatch(Int32 attrEnum, Int32 expectedType/*DefaultValidatorType*/, float *flt) const { if (validator(attrEnum)->getType() == expectedType) return TRUE; // yes, domains match // Emit error messages only if the value is actually out-of-range. // // Users (optimizer code) should REALLY be using 'unsigned long' fields // and calling getAsULong, instead of using 'long' fields to retrieve // unsigned(DDui*) attr values via getAsLong ... // // LCOV_EXCL_START // if we get here the compiler will crash if (flt) { DefaultValidator *validator = NULL; if (expectedType == VALID_INT) validator = (DefaultValidator *)&validateInt; else if (expectedType == VALID_UINT) validator = (DefaultValidator *)&validateUI; // Explicitly check for TRUE here -- // both FALSE/error and SilentIfSYSTEM are out-of-range/out-of-domain // from this method's point of view. if (validator) if (validator->validate( currentDefaults_[attrEnum], this, attrEnum, -1, flt) == TRUE) return TRUE; // domain mismatch, but value *is* in the domain range } // fall thru to emit additional failure info *CmpCommon::diags() << DgSqlCode(+2058) // emit a mismatch WARNING << DgString0(lookupAttrName(attrEnum)) << DgString1(validator(attrEnum)->getTypeText()) << DgString2(DefaultValidator::getTypeText( DefaultValidatorType(expectedType))); #ifndef NDEBUG cerr << "Warning[2058] " << lookupAttrName(attrEnum) << " " << validator(attrEnum)->getTypeText() << " " << DefaultValidator::getTypeText( DefaultValidatorType(expectedType)) << " " << (flt ? *flt : 123.45) << endl; #endif // LCOV_EXCL_STOP return FALSE; } // CONTROL QUERY DEFAULT attr RESET; // resets the single attr to the value it had right after we read all // the DEFAULTS tables, // or the value it had right before a CQD * RESET RESET. // CONTROL QUERY DEFAULT * RESET; // resets all attrs to the values they had by same criteria as above. // CONTROL QUERY DEFAULT * RESET RESET; // resets the "reset-to" values so that all current values become the // effective "reset-to"'s -- i.e, the current values can't be lost // on the next CQD * RESET; // Useful for apps that dynamically send startup settings that ought // to be preserved -- ODBC and SQLCI do this. 
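// Illustrative session (an example, not from the original source), using the
// SUBQUERY_UNNESTING keyword default defined earlier in this file:
//   CONTROL QUERY DEFAULT SUBQUERY_UNNESTING 'OFF';  -- user override
//   CONTROL QUERY DEFAULT * RESET;        -- reverts to the value in effect
//                                         --   after the DEFAULTS tables were read
//   CONTROL QUERY DEFAULT SUBQUERY_UNNESTING 'OFF';
//   CONTROL QUERY DEFAULT * RESET RESET;  -- current values become the reset-to values
//   CONTROL QUERY DEFAULT * RESET;        -- SUBQUERY_UNNESTING stays 'OFF'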
// void NADefaults::resetAll(NAString &value, NABoolean reset, Int32 errOrWarn) { size_t i, numAttrs = numDefaultAttributes(); if (reset == 1) { // CQD * RESET; (not RESET RESET) setResetAll(TRUE); for (i = 0; i < numAttrs; i++) { const char * attributeName = defaultDefaults[i].attrName; DefaultConstants attrEnum = lookupAttrName(attributeName, errOrWarn); if (isNonResetableAttribute(attributeName)) continue; validateAndInsert(attributeName, value, TRUE, errOrWarn); } // if DEFAULT_SCHEMA_NAMETYPE=USER after CQD * RESET // set SCHEMA to LDAP_USERNAME // if SCHEMA has not been specified by user if ( (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && schSetByNametype() ) { setSchemaAsLdapUser(); } setResetAll(FALSE); } else if (reset == 2) { for (i = 0; i < numAttrs; i++) { if (resetToDefaults_[i]) { // CONTROL QUERY DEFAULT * RESET RESET; -- this code cloned below // Can't reset prov, because to which? // provenances_[i] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[i], NADHEAP); resetToDefaults_[i] = NULL; } } } else { CMPASSERT(!reset); } } // Reset to default-defaults, as if readFromSQLTables() had not executed, // but setting state and provenance so no future reads will be triggered. // See StaticCompiler and Genesis 10-990204-2469 above for motivation. void NADefaults::undoReadsAndResetToDefaultDefaults() { initCurrentDefaultsWithDefaultDefaults(); } NABoolean NADefaults::isReadonlyAttribute(const char* attrName) const { if ((( stricmp(attrName, "ISO_MAPPING") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 )) && ( CmpCommon::getDefault(DISABLE_READ_ONLY) == DF_ON )) return FALSE; // for internal development and testing purposes if (( stricmp(attrName, "ISO_MAPPING") == 0 )|| ( stricmp(attrName, "NATIONAL_CHARSET") == 0 ) || ( stricmp(attrName, "VALIDATE_VIEWS_AT_OPEN_TIME") == 0 ) || ( stricmp(attrName, "USER_EXPERIENCE_LEVEL") == 0 ) || ( stricmp(attrName, "POS_DISKS_IN_SEGMENT") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_MERGEJOIN") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_SORT") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_PROBE_CACHE") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_PA") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_SEQUENCE") == 0 ) || ( stricmp(attrName, "EXE_MEMORY_LIMIT_LOWER_BOUND_EXCHANGE") == 0 ) || ( stricmp(attrName, "SORT_ALGO") == 0 ) || ( stricmp(attrName, "OVERFLOW_MODE") == 0 ) ) return TRUE; if (strlen(attrName) > 0) { DefaultConstants v = lookupAttrName(attrName, 0, 0); if ((v != __INVALID_DEFAULT_ATTRIBUTE) && (getFlags(v) & DEFAULT_IS_SSD)) return TRUE; } return FALSE; } // these defaults cannot be reset or set to FALSE through a cqd. NABoolean NADefaults::isNonResetableAttribute(const char* attrName) const { if (( stricmp(attrName, "IS_SQLCI") == 0 ) || ( stricmp(attrName, "NVCI_PROCESS") == 0 ) || ( stricmp(attrName, "SESSION_ID") == 0 ) || ( stricmp(attrName, "LDAP_USERNAME") == 0 ) || ( stricmp(attrName, "VOLATILE_SCHEMA_IN_USE") == 0 ) || ( stricmp(attrName, "SESSION_USERNAME") == 0 ) ) return TRUE; return FALSE; } // these defaults can be set only once by user. 
NABoolean NADefaults::isSetOnceAttribute(Int32 attrEnum) const { if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY || attrEnum == PUBLISHING_ROLES ) return TRUE; return FALSE; } void NADefaults::resetSessionOnlyDefaults() { NAString value; validateAndInsert("NVCI_PROCESS", value, 3, 0); } // Parameter <reset> must not be a reference (&); // see <value = ... fall thru> below. enum DefaultConstants NADefaults::validateAndInsert(const char *attrName, NAString &value, NABoolean reset, Int32 errOrWarn, Provenance overwriteIfNotYet) { NABoolean overwrite = FALSE; NABoolean isJDBC = FALSE; NABoolean isODBC = FALSE; if (ActiveSchemaDB()) { isJDBC = (CmpCommon::getDefault(JDBC_PROCESS) == DF_ON ? TRUE : FALSE); isODBC = (CmpCommon::getDefault(ODBC_PROCESS) == DF_ON ? TRUE : FALSE); } if (reset && !attrName[0]) { // CONTROL QUERY DEFAULT * RESET overwrite = currentState_ < overwriteIfNotYet; if (overwrite) resetAll(value, reset, errOrWarn); return (DefaultConstants)0; // success } // Perform a lookup for the string, using a binary search. DefaultConstants attrEnum = lookupAttrName(attrName, errOrWarn); if (attrEnum >= 0) { // valid attrName // ignore DEFAULT_SCHEMA_ACCESS_ONLY if it is in system defaults if ( attrEnum == DEFAULT_SCHEMA_ACCESS_ONLY && getState() < SET_BY_CQD ) return attrEnum; // do the following check when // this is the primary mxcmp // and INTERNAL_QUERY_FROM_EXEUTIL is not set if (!CmpCommon::context()->isSecondaryMxcmp() && !Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL)) { // This logic will catch if the set-once CQD // is set, but the ALLOW_SET_ONCE_DEFAULTS parserflags // are not set. This is absolutely necessary for security // to ensure that the correct parserflags are set. if ((isSetOnceAttribute(attrEnum)) && (!isResetAll()) && // no error msg for cqd * reset (NOT Get_SqlParser_Flags(ALLOW_SET_ONCE_DEFAULTS))) { *CmpCommon::diags() << DgSqlCode(-30042) << DgString0(attrName); return attrEnum; } // if DEFAULT_SCHEMA_ACCESS_ONLY is on, // users cannot change the following CQDs if ( getState() >= SET_BY_CQD && getToken(DEFAULT_SCHEMA_ACCESS_ONLY) == DF_ON ) { if (attrEnum == SCHEMA || attrEnum == PUBLIC_SCHEMA_NAME || attrEnum == DEFAULT_SCHEMA_NAMETYPE || attrEnum == PUBLISHING_ROLES) { if (!isResetAll()) // no error msg for cqd * reset *CmpCommon::diags() << DgSqlCode(-30043) << DgString0(attrName); return attrEnum; } } } else { // ignore LAST0_MODE cqd if we are in secondary mxcmp or if // internal_query_from_exeutil is set. This cqd is not meant // to apply in these cases if ( attrEnum == LAST0_MODE ) return attrEnum; } overwrite = getProvenance(attrEnum) < overwriteIfNotYet; // Put value into canonical form (trimmed, upcased where pertinent). // // Possibly revert to initial default default value -- see NOTE 3 up above. // Note further that ANSI names cannot revert on values of // 'SYSTEM' or 'ENABLE', as those are legal cat/sch/tbl names, // nor can they revert on '' (empty/blank), as ANSI requires us to // emit a syntax error for this. // // Possibly RESET to read-from-table value (before any CQD value). // TrimNAStringSpace(value); if (validator(attrEnum) != &validateAnsiName && !reset) { validator(attrEnum)->applyUpper(value); if (isSynonymOfSYSTEM(attrEnum, value)) value = getDefaultDefaultValue(attrEnum); else if (isSynonymOfRESET(value)) // CQD attr 'RESET'; ... 
reset = 1; } if (reset) { // CQD attr RESET; if ((isNonResetableAttribute(attrName)) && (reset != 3)) return attrEnum; if (!resetToDefaults_[attrEnum]) { if (overwrite) value = currentDefaults_[attrEnum]; // return actual val to caller if (attrEnum == ISOLATION_LEVEL) { // reset this in the global area TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVE. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //else if (attrEnum == ISOLATION_LEVEL_FOR_UPDATES) // { // // reset this in the global area // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); // } return attrEnum; } value = resetToDefaults_[attrEnum]; // fall thru, REINSERT this val } if (attrEnum == CATALOG) { if (!setCatalog(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } } } else if (attrEnum == SCHEMA) { if (!setSchema(value, errOrWarn, overwrite)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else { if (getState() == READ_FROM_SQL_TABLE) { // set the volatile catalog to be same as the catalog read from // defaults table. If there is no catalog or volatile_catalog // specified in the defaults table, then volatile catalog name // will be the default catalog in use in the session where // volatile tables are created. 
NAString cat(getValue(CATALOG)); CmpCommon::context()->sqlSession()->setVolatileCatalogName(cat); } } } else { if ( attrEnum == MAX_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MAX_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 minLength; switch (attrEnum) { case MAX_LONG_VARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_VARCHAR_DEFAULT_SIZE); break; case MAX_LONG_WVARCHAR_DEFAULT_SIZE: minLength = (Lng32)getAsULong(MIN_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMaxLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMaxLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMaxLength < minLength ) { *CmpCommon::diags() << DgSqlCode(-2030) << DgInt0((Lng32)minLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if ( attrEnum == MIN_LONG_VARCHAR_DEFAULT_SIZE || attrEnum == MIN_LONG_WVARCHAR_DEFAULT_SIZE ) { ULng32 maxLength; switch (attrEnum) { case MIN_LONG_VARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_VARCHAR_DEFAULT_SIZE); break; case MIN_LONG_WVARCHAR_DEFAULT_SIZE: maxLength = getAsULong(MAX_LONG_WVARCHAR_DEFAULT_SIZE); break; default: attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } if ( attrEnum != __INVALID_DEFAULT_ATTRIBUTE ) { UInt32 newMinLength; Int32 n = -1; sscanf(value.data(), "%u%n", &newMinLength, &n); if ( n>0 && (UInt32)n == value.length() ) { // a valid unsigned number if ( newMinLength > maxLength ) { *CmpCommon::diags() << DgSqlCode(-2029) << DgInt0((Lng32)maxLength); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } } } if (errOrWarn && (attrEnum == ROUNDING_MODE)) { if (NOT ((value.length() == 1) && ((*value.data() == '0') || (*value.data() == '1') || (*value.data() == '2')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if ( attrEnum == SCRATCH_MAX_OPENS_HASH || attrEnum == SCRATCH_MAX_OPENS_SORT ) { if (NOT ((value.length() == 1) && ((*value.data() == '1') || (*value.data() == '2') || (*value.data() == '3') || (*value.data() == '4')))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } } if (attrEnum != __INVALID_DEFAULT_ATTRIBUTE) { // We know that the MP_COLLATIONS validator emits only warnings // and always returns TRUE. On the validate-but-do-not-insert step // (CQD compilation), those warnings will be seen by the user. // On the validate-AND-insert (CQD execution), there is no need // to repeat them (besides, that causes Executor to choke on the // warnings in the diags and say 'Error fetching from TCB tree'). Int32 isValid = TRUE; if (!overwrite || currentState_ < SET_BY_CQD || validator(attrEnum) != &validateCollList) isValid = validator(attrEnum)->validate(value, this, attrEnum, errOrWarn); // if an internal reset is being done, then make it a valid attr // even if the 'validate' method above returned invalid. 
if ((!isValid) && (isNonResetableAttribute(attrName)) && (reset == 3)) { isValid = TRUE; } if (!isValid) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; else if (overwrite) { if (isValid == SilentIfSYSTEM) { // defDef value was "SYSTEM" or "" // Undo any caching from getFloat() NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); currentFloats_[attrEnum] = NULL; // Undo any caching from getToken() NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; // Now fall thru to insert the string "SYSTEM" or "" } if (!insert(attrEnum, value, errOrWarn)) attrEnum = __INVALID_DEFAULT_ATTRIBUTE; } // overwrite (i.e. insert) } } // not special val/ins for CAT, SCH, or MPLOC } // valid attrName if (attrEnum >= 0) { if (overwrite) { if ((! reset) && (currentState_ == SET_BY_CQD)) { // indicate that this attribute was set by a user CQD. setUserDefault(attrEnum, TRUE); } switch (attrEnum) { case CATALOG: case SCHEMA: break; case ISOLATION_LEVEL: { // Ansi 14.1 SR 4. See comexe/ExControlArea::addControl(). //## I now think this implementation is wrong //## because this is setting GLOBAL state //## for something that should be CONTEXT-dependent. //## Will cause us headaches later, when we //## make arkcmp be a multi-context multi-threaded server. TransMode::IsolationLevel il; getIsolationLevel(il); CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il); } break; // Solution: 10-060418-5903. Do not update MXCMP global access mode // with CQD ISOLATION_LEVEL_FOR_UPDATES as it will overwrite that // set by ISOLATION_LEVEL. The CQD ISOLATION_LEVEL_FOR_UPDATES is // always accessed directly when necessary. //case ISOLATION_LEVEL_FOR_UPDATES: //{ // TransMode::IsolationLevel il; // getIsolationLevel(il, getToken(attrEnum)); // CmpCommon::transMode()->updateAccessModeFromIsolationLevel(il, // FALSE); //} //break; case ALLOW_INCOMPATIBLE_ASSIGNMENT: case ALLOW_INCOMPATIBLE_COMPARISON: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); } break; case MODE_SPECIAL_1: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); // find_suitable_key to be turned off in this mode, unless // it has been explicitely set. 
if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } } break; case MODE_SPECIAL_4: { NAString val; if (value == "ON") val = "ON"; else val = "OFF"; insert(ALLOW_INCOMPATIBLE_OPERATIONS, val, errOrWarn); insert(ALLOW_NULLABLE_UNIQUE_KEY_CONSTRAINT, val, errOrWarn); NAString csVal; if (value == "ON") csVal = SQLCHARSETSTRING_UTF8; else csVal = ""; validateAndInsert("TRAF_DEFAULT_COL_CHARSET", csVal, FALSE, errOrWarn); NAString notVal; if (value == "ON") notVal = "OFF"; else notVal = "ON"; insert(TRAF_COL_LENGTH_IS_CHAR, notVal, errOrWarn); NAString costVal1; NAString costVal2; if (value == "ON") { costVal1 = "8.0"; costVal2 = "16.0" ; } else { costVal1 = "1.0"; costVal2 = "1.0" ; } validateAndInsert("NCM_IND_JOIN_COST_ADJ_FACTOR", costVal1, FALSE, errOrWarn); validateAndInsert("NCM_IND_SCAN_COST_ADJ_FACTOR", costVal2, FALSE, errOrWarn); if (value == "ON") Set_SqlParser_Flags(IN_MODE_SPECIAL_4); else Reset_SqlParser_Flags(IN_MODE_SPECIAL_4); } break; case MODE_SEABASE: { if (value == "ON") { if (NOT seabaseDefaultsTableRead()) { CmpSeabaseDDL cmpSBD((NAHeap *)heap_); Lng32 errNum = cmpSBD.validateVersions(this); if (errNum == 0) // seabase is initialized properly { // read from seabase defaults table cmpSBD.readAndInitDefaultsFromSeabaseDefaultsTable (overwriteIfNotYet, errOrWarn, this); } else { CmpCommon::context()->setIsUninitializedSeabase(TRUE); CmpCommon::context()->uninitializedSeabaseErrNum() = errNum; } } NAString sbCat = getValue(SEABASE_CATALOG); insert(SEABASE_VOLATILE_TABLES, "ON", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(sbCat, TRUE); insert(UPD_SAVEPOINT_ON_ERROR, "OFF", errOrWarn); } else { NAString defCat = getValue(CATALOG); insert(SEABASE_VOLATILE_TABLES, "OFF", errOrWarn); CmpCommon::context()->sqlSession()->setVolatileCatalogName(defCat); insert(UPD_SAVEPOINT_ON_ERROR, "ON", errOrWarn); } } break; case MEMORY_LIMIT_QCACHE_UPPER_KB: CURRENTQCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_HISTCACHE_UPPER_KB: CURRCONTEXT_HISTCACHE->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPSTMT_UPPER_KB: STMTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_CMPCTXT_UPPER_KB: CTXTHEAP->setUpperLimit((size_t) 1024 * atoi(value.data())); break; case MEMORY_LIMIT_NATABLECACHE_UPPER_KB: ActiveSchemaDB()->getNATableDB()->setHeapUpperLimit((size_t) 1024 * atoi(value.data())); break; case NAMETYPE: SqlParser_NADefaults_->NAMETYPE_ = token(NAMETYPE, value, TRUE); break; case NATIONAL_CHARSET: SqlParser_NADefaults_->NATIONAL_CHARSET_ = CharInfo::getCharSetEnum(value); break; case SESSION_ID: { CmpCommon::context()->sqlSession()->setSessionId(value); } break; case SESSION_USERNAME: { CmpCommon::context()->sqlSession()->setSessionUsername(value); } break; case SESSION_IN_USE: { CmpCommon::context()->sqlSession()->setSessionInUse ((getToken(attrEnum) == DF_ON)); } break; case SQLMX_REGRESS: { if (value == "ON") { insert(COMP_BOOL_157, "ON", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, "INTERNAL", errOrWarn); insert(MODE_SPECIAL_1, "OFF", errOrWarn); if (getToken(VOLATILE_TABLE_FIND_SUITABLE_KEY) == DF_SYSTEM) { insert(VOLATILE_TABLE_FIND_SUITABLE_KEY, "OFF", errOrWarn); } char * env = getenv("SQLMX_REGRESS"); if (env) CmpCommon::context()->setSqlmxRegress(atoi(env)); else CmpCommon::context()->setSqlmxRegress(1); } else { insert(COMP_BOOL_157, "OFF", errOrWarn); insert(SHOWDDL_DISPLAY_FORMAT, 
"EXTERNAL", errOrWarn); CmpCommon::context()->setSqlmxRegress(0); } } break; case VOLATILE_CATALOG: { CmpCommon::context()->sqlSession()->setVolatileCatalogName(value); } break; case VOLATILE_SCHEMA_IN_USE: { CmpCommon::context()->sqlSession()->setVolatileSchemaInUse ((getToken(attrEnum) == DF_ON)); } break; case ISO_MAPPING: { SqlParser_NADefaults_->ISO_MAPPING_ = CharInfo::getCharSetEnum(value); // Set the NAString_isoMappingCS memory cache for use by routines // ToInternalIdentifier() and ToAnsiIdentifier[2|3]() in module // w:/common/NAString[2].cpp. These routines currently cannot // access SqlParser_ISO_MAPPING directly due to the complex // build hierarchy. NAString_setIsoMapCS((SQLCHARSET_CODE) SqlParser_NADefaults_->ISO_MAPPING_); } break; case DEFAULT_CHARSET: { SqlParser_NADefaults_->DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); SqlParser_NADefaults_->ORIG_DEFAULT_CHARSET_ = CharInfo::getCharSetEnum(value); } break; case ESP_ON_AGGREGATION_NODES_ONLY: { NABoolean useAgg = (getToken(attrEnum) == DF_ON); gpClusterInfo->setUseAggregationNodesOnly(useAgg); break; } case QUERY_TEXT_CACHE: { // If public schema is in use, query text cache has to be off NAString pSchema = getValue(PUBLIC_SCHEMA_NAME); if (pSchema != "") value = "OFF"; } break; case PUBLIC_SCHEMA_NAME: { // when PUBLIC_SCHEMA is used, turn off Query Text Cache if ( (value != "") && !(getToken(QUERY_TEXT_CACHE) == DF_OFF) ) insert(QUERY_TEXT_CACHE, "OFF"); // when PUBLIC_SCHEMA is not used, reset to the default value if ( value == "" ) { NAString v(""); validateAndInsert("QUERY_TEXT_CACHE", v, TRUE); } } break; case LDAP_USERNAME: { // when the LDAP_USERNAME is set (first time by CLI) // if DEFAULT_SCHEMA_NAMETYPE is USER, set schema to LDAP_USERNAME if ( !value.isNull() && (getToken(DEFAULT_SCHEMA_NAMETYPE) == DF_USER) && !userDefault(SCHEMA) && // do not change user setting ( schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) ) // or changed by same CQD { setSchemaAsLdapUser(value); setSchByNametype(TRUE); } } break; case DEFAULT_SCHEMA_ACCESS_ONLY: { if ( value == "ON" ) { NAString schemaNameType = getValue(DEFAULT_SCHEMA_NAMETYPE); if ( schemaNameType == "USER" ) { setSchemaAsLdapUser(); } } } break; case DEFAULT_SCHEMA_NAMETYPE: { if ( userDefault(SCHEMA) ) // if SCHEMA has been changed by user, do nothing break; if ( value == "SYSTEM" ) // reset to default schema { if ( schSetByNametype() ) // only when schema was changed by this CQD { // do not change catSchSetToUserID_ flag Int32 preVal = catSchSetToUserID_; NAString v(""); validateAndInsert("SCHEMA", v, TRUE); catSchSetToUserID_ = preVal; } } if ( value == "USER" ) // set default schema to ldpa username { if ( schSetToUserID() || // only when schema was initialized to guardian id schSetByNametype() ) // or was changed by this CQD { setSchemaAsLdapUser(); setSchByNametype(TRUE); } } } break; case USTAT_IUS_PERSISTENT_CBF_PATH: { // if the CBF path is SYSTEM, set it to $HOME/cbfs if ( value == "SYSTEM" ) { const char* home = getenv("HOME"); if ( home ) { value = home; value += "/cbfs"; validateAndInsert("USTAT_IUS_PERSISTENT_CBF_PATH", value, FALSE); } } } break; case TRAF_LOAD_ERROR_LOGGING_LOCATION: { if (value.length() > 512) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } } break; case AGGRESSIVE_ESP_ALLOCATION_PER_CORE: { NABoolean useAgg = (getToken(attrEnum) == DF_ON); float numESPsPerCore = computeNumESPsPerCore(useAgg); char 
valuestr[WIDEST_CPUARCH_VALUE]; ftoa_(numESPsPerCore, valuestr); NAString val(valuestr); insert(MAX_ESPS_PER_CPU_PER_OP, val, errOrWarn); } break; // max char col length is defined in common/ComSmallDefs.h. // In special cases, it could be overridden. Internal use only or // use only under trafodion supervision. case TRAF_MAX_CHARACTER_COL_LENGTH: { NABoolean override = (getToken(TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE) == DF_ON); double d = atof(value.data()); if ((NOT override) && (NOT (d >= 0 && d <= MAX_CHAR_COL_LENGTH_IN_BYTES))) { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } } break; case TRAF_MAX_CHARACTER_COL_LENGTH_OVERRIDE: { // if override is being turned off, reset max_char_len to default value. if (value == "OFF") { NAString val; validateAndInsert("TRAF_MAX_CHARACTER_COL_LENGTH", val, TRUE); } } break; default: break; } } // code to valid overwrite (insert) if (reset && overwrite) { // CONTROL QUERY DEFAULT attr RESET; -- this code cloned above // Can't reset prov, because to which? // provenances_[attrEnum] = READ_FROM_SQL_TABLE or COMPUTED ?? NADELETEBASIC(resetToDefaults_[attrEnum], NADHEAP); resetToDefaults_[attrEnum] = NULL; } else if (!overwrite && errOrWarn && getProvenance(attrEnum) >= IMMUTABLE) { *CmpCommon::diags() << DgSqlCode(ERRWARN(2200)) << DgString0(lookupAttrName(attrEnum, errOrWarn)); } } // valid attrName return attrEnum; } // NADefaults::validateAndInsert() float NADefaults::computeNumESPsPerCore(NABoolean aggressive) { #define DEFAULT_ESPS_PER_NODE 2 // for conservation allocation #define DEFAULT_ESPS_PER_CORE 0.5 // for aggressive allocation // Make sure the gpClusterInfo points at an NAClusterLinux object. // In osim simulation mode, the pointer can point at a NAClusterNSK // object, for which the method numTSEsForPOS() is not defined. NAClusterInfoLinux* gpLinux = dynamic_cast<NAClusterInfoLinux*>(gpClusterInfo); assert(gpLinux); // cores per node Lng32 coresPerNode = gpClusterInfo->numberOfCpusPerSMP(); if ( aggressive ) { float totalMemory = gpLinux->totalMemoryAvailable(); // per Node, in KB totalMemory /= (1024*1024); // per Node, in GB totalMemory /= coresPerNode ; // per core, in GB totalMemory /= 2; // per core, 2GB per ESP return MINOF(DEFAULT_ESPS_PER_CORE, totalMemory); } else { Lng32 numESPsPerNode = DEFAULT_ESPS_PER_NODE; return (float)(numESPsPerNode)/(float)(coresPerNode); } // The following lines of code are comment out but retained for possible // future references. // // // number of POS TSE // Lng32 numTSEsPerCluster = gpLinux->numTSEsForPOS(); // // // cluster nodes // Lng32 nodesdPerCluster = gpClusterInfo->getTotalNumberOfCPUs(); // // // TSEs per node // Lng32 TSEsPerNode = numTSEsPerCluster/nodesdPerCluster; // // // // // For Linux/nt, we conservatively allocate ESPs per node as follows // // - 1 ESP per 2 cpu cores if cores are equal or less than TSEs // // - 1 ESP per TSE if number of cores is more than double the TSEs // // - 1 ESP per 2 TSEs if cores are more than TSEs but less than double the TSEs // // - 1 ESP per node. Only possible on NT or workstations // // - number of cores less than TSEs and there are 1 or 2 cpur cores per node // // - number of TSEs is less than cpu cores and there 1 or 2 TSEs per node. 
// // This case is probable if virtual nodes are used // // // TSEsPerNode is 0 for arkcmps started by the seapilot universal comsumers // // in this case we only consider cpu cores // if ( coresPerNode <= TSEsPerNode || TSEsPerNode == 0 ) // { // if (coresPerNode > 1) // numESPsPerNode = DEFAULT_ESPS_PER_NODE; // } // else if (coresPerNode > (TSEsPerNode*2)) // { // numESPsPerNode = TSEsPerNode; // } // else if (TSEsPerNode > 1) // { // numESPsPerNode = TSEsPerNode/2; // } // else // not really needed since numESPsPerNode is set to 1 from above // { // numESPsPerNode = DEFAULT_ESPS_PER_NODE; // } // // return (float)(numESPsPerNode)/(float)(coresPerNode); } enum DefaultConstants NADefaults::holdOrRestore (const char *attrName, Lng32 holdOrRestoreCQD) { DefaultConstants attrEnum = __INVALID_DEFAULT_ATTRIBUTE; if (holdOrRestoreCQD == 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } // Perform a lookup for the string, using a binary search. attrEnum = lookupAttrName(attrName, -1); if (attrEnum < 0) { *CmpCommon::diags() << DgSqlCode(-2050) << DgString0(attrName); return attrEnum; } char * value = NULL; if (holdOrRestoreCQD == 1) // hold cqd { if (currentDefaults_[attrEnum]) { value = new NADHEAP char[strlen(currentDefaults_[attrEnum]) + 1]; strcpy(value, currentDefaults_[attrEnum]); } else { value = new NADHEAP char[strlen(defaultDefaults[defDefIx_[attrEnum]].value) + 1]; strcpy(value, defaultDefaults[defDefIx_[attrEnum]].value); } if (! heldDefaults_[attrEnum]) heldDefaults_[attrEnum] = new NADHEAP HeldDefaults(); heldDefaults_[attrEnum]->pushDefault(value); } else { // restore cqd from heldDefaults_ array, if it was held. if (! heldDefaults_[attrEnum]) return attrEnum; value = heldDefaults_[attrEnum]->popDefault(); if (! value) return attrEnum; // there is an odd semantic that if currentDefaults_[attrEnum] // is null, we leave it as null, but pop a held value anyway; // this semantic was preserved when heldDefaults_ was converted // to a stack. if (currentDefaults_[attrEnum]) { // do a validateAndInsert so the caches (such as currentToken_) // get updated and so appropriate semantic actions are taken. // Note that validateAndInsert will take care of deleting the // storage currently held by currentDefaults_[attrEnum]. NAString valueS(value); validateAndInsert(lookupAttrName(attrEnum), // sad that we have to do a lookup again valueS, FALSE); } NADELETEBASIC(value, NADHEAP); } return attrEnum; } const SqlParser_NADefaults *NADefaults::getSqlParser_NADefaults() { return SqlParser_NADefaults_; } static void setCatSchErr(NAString &value, Lng32 sqlCode, Int32 errOrWarn, NABoolean catErr = FALSE) { if (!sqlCode || !errOrWarn) return; TrimNAStringSpace(value); // prettify further (neater errmsg) *CmpCommon::diags() << DgSqlCode(ERRWARN(sqlCode)) << DgCatalogName(value) << DgSchemaName(value) << DgString0(value) << DgString1(value); if (value.first('"') == NA_NPOS) { // delimited names too complicated ! NAString namepart = value; size_t dot = value.first('.'); if (dot != NA_NPOS) { namepart.remove(dot); if (!IsSqlReservedWord(namepart)) { namepart = value; namepart.remove(0, dot+1); } } if (IsSqlReservedWord(namepart)) { *CmpCommon::diags() << DgSqlCode(ERRWARN(3128)) << DgString0(namepart) << DgString1(namepart); return; } } // must determine if the defaults have been set up before parseDML is called if (IdentifyMyself::GetMyName() == I_AM_UNKNOWN){ return; // diagnostic already put into diags above. 
} // Produce additional (more informative) syntax error messages, // trying delimited-value first and then possibly regular-value-itself. Parser parser(CmpCommon::context()); Lng32 errs = CmpCommon::diags()->getNumber(DgSqlCode::ERROR_); NAString pfx(catErr ? "SET CATALOG " : "SET SCHEMA "); NAString stmt; char c = *value.data(); if (c && c != '\"') { stmt = pfx; stmt += "\""; stmt += value; stmt += "\""; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } if (errs == CmpCommon::diags()->getNumber(DgSqlCode::ERROR_)) { stmt = pfx; stmt += value; stmt += ";"; #pragma nowarn(1506) // warning elimination parser.parseDML(stmt, stmt.length(), OBJECTNAMECHARSET ); #pragma warn(1506) // warning elimination } // Change errors to warnings if errOrWarn is +1 (i.e. warning). if (errOrWarn > 0) NegateAllErrors(CmpCommon::diags()); } NABoolean NADefaults::setCatalog(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { setCatUserID(currentState_ == COMPUTED); // The input value is in external (Ansi) format. // If we are in the COMPUTED currentState_, // make the value strictly canonical, // and try non-delimited first, then delimited. // Prettify removes lead/trailing blanks, // and upcases where unquoted (for nicer errmsgs); // ComSchemaName parses/validates. // if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty() || // 0 name parts, if *any* error !nam.getCatalogNamePart().isEmpty()) { // 2 parts (cat.sch) is an error setCatSchErr(value, EXE_INVALID_CAT_NAME, errOrWarn, TRUE); return FALSE; // invalid value } else { // Get the 1 name part (the "schema" part as far as ComSchema knows...) if (overwrite) insert(CATALOG, nam.getSchemaNamePartAsAnsiString()); return TRUE; } } NABoolean NADefaults::setSchema(NAString &value, Int32 errOrWarn, NABoolean overwrite, NABoolean alreadyCanonical) { // if this is part of CQD *RESET and it was initialized with role name // do not change the following flags // to allow DEFAULT_SCHEMA_NAMETYPE to set its value if (!( schSetToUserID() && isResetAll() )) { setSchUserID(currentState_ == COMPUTED); setSchByNametype(FALSE); } if (alreadyCanonical) ; // leave it alone, for performance's sake else if (currentState_ == COMPUTED) { // ' SQL.FOO' TrimNAStringSpace(value); // 'SQL.FOO' NAString tmp(value); value = ToAnsiIdentifier(value); // nondelim ok? if (value.isNull()) value = NAString("\"") + tmp + "\""; // '"SQL.FOO"' } else PrettifySqlText(value); ComSchemaName nam(value); if (nam.getSchemaNamePart().isEmpty()) { // 0 name parts, if *any* error setCatSchErr(value, EXE_INVALID_SCH_NAME, errOrWarn); return FALSE; // invalid value } else { if (overwrite) insert(SCHEMA, nam.getSchemaNamePartAsAnsiString()); // If 2 parts, overwrite any prior catalog default if (!nam.getCatalogNamePart().isEmpty()) { if (overwrite) { insert(CATALOG, nam.getCatalogNamePartAsAnsiString()); if (currentState_ == SET_BY_CQD) { // indicate that this attribute was set by a user CQD. 
setUserDefault(CATALOG, TRUE); } } } return TRUE; } } NAString NADefaults::keyword(DefaultToken tok) { CMPASSERT(tok >= 0 && tok < DF_lastToken); return keywords_[tok]; } // Defaults Tokens // There is a set of keywords which can appear as values of Defaults entries // in the Defaults Table. We declare, for each such token, a string (the // keyword), and an enumeration value. The string values belong in an // array, DFkeywords, in sorted order. The idea is we can use binary // search in order to obtain the index of a string to the matching // entry in this sorted array. // // If we define the enumerations carefully (pay attention here!), then // that index we just found (see previous paragraph) is the enum value // of the token. // In simple words: this has to be in identical order with enum DefaultToken // in DefaultConstants.h const char *NADefaults::keywords_[DF_lastToken] = { "ACCUMULATED", "ADVANCED", "AGGRESSIVE", "ALL", "ANSI", "BEGINNER", "BOTH", "CLEAR", "DEBUG", "DISK", "DISK_POOL", "DUMP", "DUMP_MV", "EXTERNAL", "EXTERNAL_DETAILED", "FIRSTROW", "HARDWARE", "HEAP", "HIGH", "HYBRID", "IEEE", "INDEXES", "INTERNAL", "IQS", "JNI", "JNI_TRX", "KEYINDEXES", "LASTROW", "LATEST", "LEAF", "LOADNODUP", "LOCAL", "LOCAL_NODE", "LOG", "MAXIMUM", "MEASURE", "MEDIUM", "MEDIUM_LOW", "MERGE", "MINIMUM", "MMAP", "MULTI_NODE", "MVCC", "NONE", "NSK", "OFF", "ON", "OPENS_FOR_WRITE", "OPERATOR", "OPTIMAL", "ORDERED", "PERTABLE", "PRINT", "PRIVATE", "PUBLIC", "QS", "READ_COMMITTED", "READ_UNCOMMITTED", "RELEASE", "REMOTE", "REPEATABLE_READ", "REPLACE", "REPSEL", "RESOURCES", "RETURN", "ROOT", "SAMPLE", "SERIALIZABLE", "SHORTANSI", "SIMPLE", "SKIP", "SMD", "SOFTWARE", "SOURCE", "SQLMP", "SSCC", "SSD", "STOP", "SUFFIX", "SYSTEM", "TANDEM", "THRIFT", "USER", "VERTICAL", "WAIT", "WARN", "XML" }; // To call bsearch we must satisfy each of its arguments. Either // NULL comes back, or, comes back a pointer to the element which is // a true match for our key. bsearch.key is upperKey.data(). // bsearch.base is keywords_. nel is DF_lastToken. // The next argument is sizeof char*. Finally, the comparison // function can simply be the strcmp function. // // Note that this function makes heavy reliance on the idea that // the DefaultToken enumerations go up in sequence 0, 1, 2, 3... . // // We do the cast on strcmp because its signature from the header // file is: int (*)(const char *, const char *). In general, we're // doing a lot of type casting in here. 
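// Editor-added illustrative sketch (assumption, not original source): how the
// sorted keywords_ array and the DefaultToken enum line up for the lookup done
// in NADefaults::token() below.
//   const char *key = "MEDIUM";
//   char *hit = (char *) bsearch(&key, keywords_, DF_lastToken,
//                                sizeof(char *), stringCompare);
//   DefaultToken tok = (DefaultToken)(((const char **) hit) - keywords_);
// This yields DF_MEDIUM only because the DefaultToken enumerators are declared
// in exactly the same sorted order as the strings above.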
static Int32 stringCompare(const void* s1, const void* s2) { return strcmp( * (char**) s1, * (char**) s2); } DefaultToken NADefaults::token(Int32 attrEnum, NAString &value, NABoolean valueAlreadyGotten, Int32 errOrWarn) const { ATTR_RANGE_ASSERT; if (!valueAlreadyGotten) { value = getValue(attrEnum); // already trim & upper (by validateAndInsert) TrimNAStringSpace(value); // can't trust that the stored value is canonical } else { TrimNAStringSpace(value); // can't trust that input value is canonical, value.toUpper(); // so here do what validateAndInsert does } DefaultToken tok = DF_noSuchToken; if (value.isNull()) tok = DF_SYSTEM; else { if ((attrEnum == TERMINAL_CHARSET) || (attrEnum == USE_HIVE_SOURCE) || (attrEnum == HIVE_FILE_CHARSET) || (attrEnum == HBASE_DATA_BLOCK_ENCODING_OPTION) || (attrEnum == HBASE_COMPRESSION_OPTION)) return DF_USER; if ( attrEnum == NATIONAL_CHARSET || attrEnum == DEFAULT_CHARSET || attrEnum == HIVE_DEFAULT_CHARSET || attrEnum == ISO_MAPPING || attrEnum == INPUT_CHARSET || attrEnum == TRAF_DEFAULT_COL_CHARSET ) { CharInfo::CharSet cs = CharInfo::getCharSetEnum(value); Int32 err_found = 0; if ( !CharInfo::isCharSetSupported(cs) ) { err_found = 1; } else { switch( attrEnum ) { case NATIONAL_CHARSET: if (cs == CharInfo::KANJI_MP) break; //Allow (for regression test) if ((cs != CharInfo::UNICODE) && (cs != CharInfo::ISO88591)) err_found = 1; break; case DEFAULT_CHARSET: if (cs != CharInfo::ISO88591 && cs != CharInfo::UTF8 // && cs != CharInfo::SJIS ) err_found = 1; break; case HIVE_DEFAULT_CHARSET: case TRAF_DEFAULT_COL_CHARSET: if ((cs != CharInfo::UTF8) && (cs != CharInfo::ISO88591)) err_found = 1; break; case ISO_MAPPING: if (cs != CharInfo::ISO88591) err_found = 1; break; default: break; } } if ( (err_found != 0) && errOrWarn ) *CmpCommon::diags() << DgSqlCode(ERRWARN(3010)) << DgString0(value); else return DF_USER; // kludge, return any valid token } //else //else fall thru to see if value is SYSTEM // OPTIMIZATION_LEVEL if ((attrEnum == OPTIMIZATION_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MINIMUM; case '2': return DF_MEDIUM_LOW; case '3': return DF_MEDIUM; case '4': return DF_MEDIUM; case '5': return DF_MAXIMUM; } // PCODE_OPT_LEVEL if ((attrEnum == PCODE_OPT_LEVEL) && value.length() == 1) switch (*value.data()) { case '0': return DF_MINIMUM; case '1': return DF_MEDIUM; case '2': return DF_HIGH; case '3': return DF_MAXIMUM; } // HBASE_FILTER_PREDS if ((attrEnum == HBASE_FILTER_PREDS) && value.length()==1) switch (*value.data()){ case '0': return DF_OFF; case '1': return DF_MINIMUM; case '2': return DF_MEDIUM; // in the future add DF_HIGH and DF_MAXIMUM when we implement more // pushdown capabilities } if ( attrEnum == TEMPORARY_TABLE_HASH_PARTITIONS || attrEnum == MVQR_REWRITE_CANDIDATES || attrEnum == MVQR_PUBLISH_TABLE_LOCATION || attrEnum == MVQR_WORKLOAD_ANALYSIS_MV_NAME || attrEnum == HIST_SCRATCH_VOL) return DF_SYSTEM; const char *k = value.data(); char *match = (char*) bsearch( &k, keywords_, DF_lastToken, sizeof(char*), stringCompare); if (match) tok = (DefaultToken) (((const char**) match) - keywords_); else { // Check for synonyms const char *c = value; for (; *c == '0'; c++) ; // all ascii '0' ? 
if (*c == '\0') // terminating nul '\0' tok = DF_OFF; else if (value.length() <= 2) { if (value == "1" || value == "+1" || value == "-1") tok = DF_ON; } else { if ((value == "STOP_AT") || (value == "STOP AT")) tok = DF_STOP; else if (value == "READ COMMITTED") tok = DF_READ_COMMITTED; else if (value == "READ UNCOMMITTED") tok = DF_READ_UNCOMMITTED; else if (value == "REPEATABLE READ") tok = DF_REPEATABLE_READ; else if (value == "BEGINNER") tok = DF_BEGINNER; else if (value == "ADVANCED") tok = DF_ADVANCED; #define CONVERT_SYNONYM(from,to) \ else if (value == "" # from "") { \ CMPASSERT(DF_ ## from == DF_ ## to); \ tok = DF_ ## to; \ } CONVERT_SYNONYM(COMPAQ, TANDEM) CONVERT_SYNONYM(DISABLE, OFF) CONVERT_SYNONYM(ENABLE, SYSTEM) CONVERT_SYNONYM(FALSE, OFF) CONVERT_SYNONYM(FULL, MAXIMUM) CONVERT_SYNONYM(TRUE, ON) } } } NABoolean isValid = FALSE; if (tok != DF_noSuchToken) switch (attrEnum) { case DEFAULT_SCHEMA_ACCESS_ONLY: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case DEFAULT_SCHEMA_NAMETYPE: if (tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case DETAILED_STATISTICS: if (tok == DF_ALL || tok == DF_MEASURE || tok == DF_ACCUMULATED || tok == DF_OPERATOR || tok == DF_PERTABLE || tok == DF_OFF) isValid = TRUE; break; case HIDE_INDEXES: if (tok == DF_NONE || tok == DF_ALL || tok == DF_VERTICAL || tok == DF_INDEXES || tok == DF_KEYINDEXES) isValid = TRUE; break; case HIVE_USE_EXT_TABLE_ATTRS: if (tok == DF_ALL || tok == DF_OFF || tok == DF_ON ) isValid = TRUE; break; case INDEX_ELIMINATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM || tok == DF_MAXIMUM || tok == DF_AGGRESSIVE ) isValid = TRUE; break; case IF_LOCKED: if (tok == DF_RETURN || tok == DF_WAIT) isValid = TRUE; break; case INSERT_VSBB: if (tok == DF_OFF || tok == DF_LOADNODUP || tok == DF_SYSTEM || tok == DF_USER) isValid = TRUE; break; case OVERFLOW_MODE: if (tok == DF_DISK || tok == DF_SSD || tok == DF_MMAP) isValid = TRUE; break; case SORT_ALGO: if(tok == DF_HEAP || tok == DF_IQS || tok == DF_REPSEL || tok == DF_QS) isValid = TRUE; break; case QUERY_CACHE_MPALIAS: case QUERY_TEMPLATE_CACHE: case SHARE_TEMPLATE_CACHED_PLANS: case VSBB_TEST_MODE: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case QUERY_TEXT_CACHE: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM || tok == DF_SKIP) isValid = TRUE; break; case DISABLE_BUFFERED_INSERTS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case ISOLATION_LEVEL: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case ISOLATION_LEVEL_FOR_UPDATES: { TransMode::IsolationLevel iltmp; isValid = getIsolationLevel(iltmp, tok); } break; case MVGROUP_AUTOMATIC_CREATION: case MV_TRACE_INCONSISTENCY: //++ MV case MV_AS_ROW_TRIGGER: //++ MV { if(DF_ON == tok || DF_OFF == tok) { isValid = TRUE; } } break; case IUD_NONAUDITED_INDEX_MAINT: if (tok == DF_OFF || tok == DF_SYSTEM || tok == DF_WARN || tok == DF_ON) isValid = TRUE; break; case HIVE_SCAN_SPECIAL_MODE: isValid = TRUE; break; case IS_SQLCI: // for primary mxcmp that is invoked for user queries, the only valid // value for mxci_process cqd is TRUE. This cqd is set once by mxci // at startup time and cannot be changed by user. That way we know that // a request has come in from mxci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. 
This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case NVCI_PROCESS: // for primary mxcmp that is invoked for user queries, the only valid // value for nvci_process cqd is TRUE. This cqd is set once by nvci // at startup time and cannot be changed by user. That way we know that // a request has come in from nvci(trusted) process. // For secondary mxcmp's invoked for internal queries where cqd's are // sent using sendAllControls method, all values are valid. This will // ensure that if this default is not set and is sent over to secondary // mxcmp using an internal CQD statement, it doesn't return an error. if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case NAMETYPE: if (tok == DF_ANSI || tok == DF_SHORTANSI || tok == DF_NSK) isValid = TRUE; break; case OPTIMIZATION_GOAL: if (tok == DF_FIRSTROW || tok == DF_LASTROW || tok == DF_RESOURCES) isValid = TRUE; break; case USER_EXPERIENCE_LEVEL: if (tok == DF_ADVANCED || tok == DF_BEGINNER) isValid = TRUE; break; case PCODE_OPT_LEVEL: if (tok == DF_OFF) { isValid = TRUE; break; } // else fall through to the next case, all those keywords are allowed // as well case ATTEMPT_ESP_PARALLELISM: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case OPTIMIZATION_LEVEL: if (tok == DF_MINIMUM || tok == DF_MEDIUM_LOW || tok == DF_MEDIUM || tok == DF_MAXIMUM) isValid = TRUE; break; case HBASE_FILTER_PREDS: if(tok == DF_OFF || tok == DF_ON) { if (tok == DF_ON) tok = DF_MINIMUM; // to keep backward compatibility isValid= TRUE; } break; case ROBUST_QUERY_OPTIMIZATION: if (tok == DF_MINIMUM || tok == DF_SYSTEM || tok == DF_MAXIMUM || tok == DF_HIGH) isValid = TRUE; break; case REFERENCE_CODE: case TARGET_CODE: if (tok == DF_RELEASE || tok == DF_DEBUG) isValid = TRUE; break; /* case ROLLBACK_ON_ERROR: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; */ case AUTO_QUERY_RETRY: if (tok == DF_ON || tok == DF_OFF || tok == DF_SYSTEM) isValid = TRUE; break; case AUTO_QUERY_RETRY_WARNINGS: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXE_PARALLEL_DDL: if (tok == DF_OFF || tok == DF_ON || tok == DF_EXTERNAL || tok == DF_INTERNAL) isValid = TRUE; break; case UNAVAILABLE_PARTITION: if (tok == DF_SKIP || tok == DF_STOP) isValid = TRUE; break; case QUERY_CACHE_STATISTICS: // on, off are no-ops if (tok == DF_PRINT || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case QUERY_CACHE_STATEMENT_PINNING: if (tok == DF_CLEAR || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case HJ_TYPE: if (tok == DF_ORDERED || tok == DF_HYBRID || tok == DF_SYSTEM) isValid = TRUE; break; case REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT: if (tok == DF_OFF || tok == DF_ON || tok == DF_SYSTEM) isValid = TRUE; break; case POS: if (tok == DF_LOCAL_NODE || tok == DF_OFF || tok == DF_MULTI_NODE || tok == DF_DISK_POOL) isValid = TRUE; break; case USTAT_INTERNAL_SORT: if (tok == DF_ON || tok == DF_OFF || tok == DF_HYBRID) isValid = TRUE; break; case USTAT_AUTO_FOR_VOLATILE_TABLES: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case SUBQUERY_UNNESTING: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG) isValid = TRUE; break; case SUBQUERY_UNNESTING_P2: if (tok == DF_OFF || tok == DF_ON || tok == DF_INTERNAL) isValid = TRUE; break; case SORT_INTERMEDIATE_SCRATCH_CLEANUP: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; 
break; case SORT_MEMORY_QUOTA_SYSTEM: if(tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; /* If MDAM_SCAN_METHOD's value is "MAXIMUM" only, Right side of Nested Join will use the MDAM path Allowable values for MDAM_SCAN_METHOD are 'ON' | 'OFF' | 'MAXIMUM' */ case MDAM_SCAN_METHOD: if (tok == DF_ON || tok == DF_OFF || tok == DF_MAXIMUM) isValid = TRUE; break; case SHOWDDL_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_LOG) isValid = TRUE; break; case SHOWDDL_DISPLAY_PRIVILEGE_GRANTS: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case EXPLAIN_DISPLAY_FORMAT: if (tok == DF_INTERNAL || tok == DF_EXTERNAL || tok == DF_EXTERNAL_DETAILED) isValid = TRUE; break; case UPDATE_CLUSTERING_OR_UNIQUE_INDEX_KEY: if (tok == DF_ON || tok == DF_OFF || tok == DF_AGGRESSIVE) isValid = TRUE; break; case MVQR_ALL_JBBS_IN_QD: case MVQR_REWRITE_ENABLED_OPTION: case MVQR_REWRITE_SINGLE_TABLE_QUERIES: case MVQR_USE_EXTRA_HUB_TABLES: case MVQR_ENABLE_LOGGING: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case MVQR_LOG_QUERY_DESCRIPTORS: if (tok == DF_OFF || tok == DF_DUMP || tok == DF_DUMP_MV || tok == DF_LOG) isValid = TRUE; break; case MVQR_PRIVATE_QMS_INIT: if (tok == DF_SMD || tok == DF_XML || tok == DF_NONE) isValid = TRUE; break; case MVQR_PUBLISH_TO: if (tok == DF_PUBLIC || tok == DF_PRIVATE || tok == DF_BOTH || tok == DF_NONE) isValid = TRUE; break; case MVQR_WORKLOAD_ANALYSIS_MV_NAME: isValid = TRUE; break; case ELIMINATE_REDUNDANT_JOINS: if (tok == DF_OFF || tok == DF_ON || tok == DF_DEBUG || tok == DF_MINIMUM) isValid = TRUE; break; case VOLATILE_TABLE_FIND_SUITABLE_KEY: if (tok == DF_SYSTEM || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case CAT_DISTRIBUTE_METADATA: if (tok == DF_OFF || tok == DF_LOCAL_NODE || tok == DF_ON) isValid = TRUE; break; case MV_DUMP_DEBUG_INFO: if (tok == DF_OFF || tok == DF_ON) isValid = TRUE; break; case RANGESPEC_TRANSFORMATION: if (tok == DF_OFF || tok == DF_ON || tok == DF_MINIMUM) isValid = TRUE; break; case ASYMMETRIC_JOIN_TRANSFORMATION: if (tok == DF_MINIMUM || tok == DF_MAXIMUM) isValid = TRUE; break; case CAT_DEFAULT_COMPRESSION: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; case REPLICATE_DISK_POOL: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE) isValid = TRUE; break; // The DF_SAMPLE setting indicates that the persistent sample will be // updated incrementally, but not the histograms; they will be created // anew from the incrementally updated sample. 
case USTAT_INCREMENTAL_UPDATE_STATISTICS: if (tok == DF_OFF || tok == DF_SAMPLE || tok == DF_ON) isValid = TRUE; break; case REPLICATE_COMPRESSION_TYPE: if (tok == DF_NONE || tok == DF_HARDWARE || tok == DF_SOFTWARE || tok == DF_SOURCE || tok == DF_SYSTEM) isValid = TRUE; break; case REUSE_OPENS: if (tok==DF_ON || tok == DF_OFF || tok == DF_OPENS_FOR_WRITE) isValid = TRUE; break; case USE_HIVE_SOURCE: isValid = TRUE; break; case TRAF_SIMILARITY_CHECK: if (tok == DF_ROOT || tok == DF_LEAF || tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; case TRAF_TABLE_SNAPSHOT_SCAN: if (tok == DF_NONE || tok == DF_SUFFIX || tok == DF_LATEST) isValid = TRUE; break; case LOB_OUTPUT_SIZE: if (tok >=0 && tok <= 512000) isValid = TRUE; break; case LOB_MAX_CHUNK_MEM_SIZE: if (tok >=0 && tok <= 512000) isValid = TRUE; break; case LOB_GC_LIMIT_SIZE: if (tok >= 0 ) isValid=TRUE; case TRAF_TRANS_TYPE: if (tok == DF_MVCC || tok == DF_SSCC) isValid = TRUE; break; case HBASE_RANGE_PARTITIONING_PARTIAL_COLS: if (tok == DF_OFF || tok == DF_MINIMUM || tok == DF_MEDIUM || tok == DF_MAXIMUM || tok == DF_ON) isValid = TRUE; break; case TRAF_UPSERT_MODE: if (tok == DF_MERGE || tok == DF_REPLACE || tok == DF_OPTIMAL) isValid = TRUE; break; // Nothing needs to be added here for ON/OFF/SYSTEM keywords -- // instead, add to DEFAULT_ALLOWS_SEPARATE_SYSTEM code in the ctor. default: if (tok == DF_ON || tok == DF_OFF) isValid = TRUE; break; } // See "NOTE 2" way up top. if (!isValid) { if (tok == DF_SYSTEM) { isValid = isFlagOn(attrEnum, DEFAULT_ALLOWS_SEPARATE_SYSTEM); if (!isValid) { NAString tmp(getDefaultDefaultValue(attrEnum)); isValid = isSynonymOfSYSTEM(attrEnum, tmp); } } } if (!isValid) { tok = DF_noSuchToken; if (errOrWarn) *CmpCommon::diags() << DgSqlCode(ERRWARN(2055)) << DgString0(value) << DgString1(lookupAttrName(attrEnum)); } return tok; } DefaultToken NADefaults::getToken( const Int32 attrEnum, const Int32 errOrWarn ) const { // Check the cache first. if ( currentTokens_[attrEnum] != NULL ) { return *currentTokens_[attrEnum]; } // Get the token and allocate memory to store the token value. NAString tmp( NADHEAP ); currentTokens_[attrEnum] = new NADHEAP DefaultToken; *currentTokens_[attrEnum] = token( attrEnum, tmp, FALSE, errOrWarn ); return *currentTokens_[attrEnum]; } NABoolean NADefaults::getIsolationLevel(TransMode::IsolationLevel &arg, DefaultToken tok) const { NABoolean specifiedOK = TRUE; if (tok == DF_noSuchToken) tok = getToken(ISOLATION_LEVEL); switch (tok) { case DF_READ_COMMITTED: arg = TransMode::READ_COMMITTED_; break; case DF_READ_UNCOMMITTED: arg = TransMode::READ_UNCOMMITTED_; break; case DF_REPEATABLE_READ: arg = TransMode::REPEATABLE_READ_; break; case DF_SERIALIZABLE: case DF_SYSTEM: arg = TransMode::SERIALIZABLE_; break; case DF_NONE: arg = TransMode::IL_NOT_SPECIFIED_; break; default: arg = TransMode::SERIALIZABLE_; specifiedOK = FALSE; NAString value(NADHEAP); if (tok != DF_noSuchToken) value = keyword(tok); *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(value) << DgString1("ISOLATION_LEVEL"); } return specifiedOK; } // find the packed length for all the default values stored // in currentDefaults_ array. // currentDefaults_ is a fixed sized array of "char *" where each // entry is pointing to the default value for that default. // After pack, the default values are put in the buffer in // sequential order with a null terminator. 
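// Editor-added illustrative sketch (assumption, not original source): packed
// buffer layout produced by packDefaultsToBuffer() for three defaults whose
// current values are "ON", "" and "128":
//   buffer = { 'O','N','\0',  '\0',  '1','2','8','\0' }
//   packedLengthDefaults() == (2+1) + (0+1) + (3+1) == 8
// The unpack/compare code (isSameCQD, createNewDefaults) walks the buffer with
// strlen(&buffer[curPos]) + 1 per entry, matching this layout.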
Lng32 NADefaults::packedLengthDefaults() { Lng32 size = 0; const size_t numAttrs = numDefaultAttributes(); for (size_t i = 0; i < numAttrs; i++) { size += strlen(currentDefaults_[i]) + 1; } return size; } Lng32 NADefaults::packDefaultsToBuffer(char * buffer) { const size_t numAttrs = numDefaultAttributes(); Lng32 totalSize = 0; Lng32 size = 0; for (UInt32 i = 0; i < numAttrs; i++) { size = (Lng32)strlen(currentDefaults_[i]) + 1; strcpy(buffer, currentDefaults_[i]); buffer += size; totalSize += size; } return totalSize; } Lng32 NADefaults::unpackDefaultsFromBuffer(Lng32 numEntriesInBuffer, char * buffer) { return 0; } NABoolean NADefaults::isSameCQD(Lng32 numEntriesInBuffer, char * buffer, Lng32 bufLen) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // check to see if the default values in 'buffer' are the same // as those in the currentDefaults_ array. // Return TRUE if they are all the same. if (numCurrentDefaultAttrs != numEntriesInBuffer) return FALSE; if (bufLen == 0) return FALSE; Int32 curPos = 0; for (Int32 i = 0; i < numEntriesInBuffer; i++) { if (strcmp(currentDefaults_[i], &buffer[curPos]) != 0) return FALSE; curPos += strlen(&buffer[curPos]) + 1; } // everything matches. return TRUE; } Lng32 NADefaults::createNewDefaults(Lng32 numEntriesInBuffer, char * buffer) { const Lng32 numCurrentDefaultAttrs = (Lng32)numDefaultAttributes(); // save the current defaults savedCurrentDefaults_ = currentDefaults_; savedCurrentFloats_ = currentFloats_; savedCurrentTokens_ = currentTokens_; // VO, Plan Versioning Support. // // This code may execute in a downrev compiler, which knows about fewer // defaults than the compiler originally used to compile the statement. // Only copy those defaults we know about, and skip the rest. Lng32 numEntriesToCopy = _min (numEntriesInBuffer, numCurrentDefaultAttrs); // allocate a new currentDefaults_ array and make it point to // the default values in the input 'buffer'. // If the current number of default attributes are greater than the // ones in the input buffer, then populate the remaining default // entries in the currentDefaults_ array with the values from the // the savedCurrentDefaults_. currentDefaults_ = new NADHEAP const char * [numCurrentDefaultAttrs]; Int32 curPos = 0; Int32 i = 0; for (i = 0; i < numEntriesToCopy; i++) { currentDefaults_[i] = &buffer[curPos]; curPos += strlen(&buffer[curPos]) + 1; } for (i = numEntriesToCopy; i < numCurrentDefaultAttrs; i++) { currentDefaults_[i] = savedCurrentDefaults_[i]; } // allocate two empty arrays for floats and tokens. currentFloats_ = new NADHEAP float * [numCurrentDefaultAttrs]; currentTokens_ = new NADHEAP DefaultToken * [numCurrentDefaultAttrs]; memset( currentFloats_, 0, sizeof(float *) * numCurrentDefaultAttrs ); memset( currentTokens_, 0, sizeof(DefaultToken *) * numCurrentDefaultAttrs ); return 0; } Lng32 NADefaults::restoreDefaults(Lng32 numEntriesInBuffer, char * buffer) { // Deallocate the currentDefaults_ array. // The array entries are not to be deleted as they point to // entries in 'buffer' or the 'savedCurrentDefaults_'. // See NADefaults::createNewDefaults() method. 
if (currentDefaults_) { NADELETEBASIC(currentDefaults_, NADHEAP); } if (currentFloats_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentFloats_[i], NADHEAP); NADELETEBASIC(currentFloats_, NADHEAP); } if (currentTokens_) { for (size_t i = numDefaultAttributes(); i--; ) NADELETEBASIC(currentTokens_[i], NADHEAP); NADELETEBASIC(currentTokens_, NADHEAP); } // restore the saved defaults currentDefaults_ = savedCurrentDefaults_; currentFloats_ = savedCurrentFloats_; currentTokens_ = savedCurrentTokens_; return 0; } void NADefaults::updateCurrentDefaultsForOSIM(DefaultDefault * defaultDefault, NABoolean validateFloatVal) { Int32 attrEnum = defaultDefault->attrEnum; const char * defaultVal = defaultDefault->value; const char * valueStr = currentDefaults_[attrEnum]; if(valueStr) { NADELETEBASIC(valueStr,NADHEAP); } char * value = new NADHEAP char[strlen(defaultVal) + 1]; strcpy(value, defaultVal); currentDefaults_[attrEnum] = value; if ( validateFloatVal ) { float floatVal = 0; if (validateFloat(currentDefaults_[attrEnum], floatVal, attrEnum)) { if (currentFloats_[attrEnum]) { NADELETEBASIC(currentFloats_[attrEnum], NADHEAP); } currentFloats_[attrEnum] = new NADHEAP float; *currentFloats_[attrEnum] = floatVal; } } if ( currentTokens_[attrEnum] ) { NADELETEBASIC( currentTokens_[attrEnum], NADHEAP ); currentTokens_[attrEnum] = NULL; } } void NADefaults::setSchemaAsLdapUser(const NAString val) { NAString ldapUsername = val; if ( ldapUsername.isNull() ) ldapUsername = getValue(LDAP_USERNAME); if ( ldapUsername.isNull() ) return; ldapUsername.toUpper(); NAString schName = '"'; schName += ldapUsername; schName += '"'; // check schema name before insert // may get special characters from ldap ComSchemaName cSchName(schName); if ( !cSchName.getSchemaNamePart().isEmpty() && cSchName.getCatalogNamePart().isEmpty()) // should have no catalog { insert(SCHEMA, schName); } else { *CmpCommon::diags() << DgSqlCode(-2055) << DgString0(schName) << DgString1("SCHEMA"); } }
1
16,990
Do you know why a specific table name is used as a default?
apache-trafodion
cpp