file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
search.service.ts | import { combineLatest as observableCombineLatest, Observable } from 'rxjs';
import { Injectable, OnDestroy } from '@angular/core';
import { Router } from '@angular/router';
import { map, switchMap, take } from 'rxjs/operators';
import { FollowLinkConfig } from '../../../shared/utils/follow-link-config.model';
import { LinkService } from '../../cache/builders/link.service';
import { PaginatedList } from '../../data/paginated-list.model';
import { ResponseParsingService } from '../../data/parsing.service';
import { RemoteData } from '../../data/remote-data';
import { GetRequest, RestRequest } from '../../data/request.models';
import { RequestService } from '../../data/request.service';
import { DSpaceObject } from '../dspace-object.model';
import { GenericConstructor } from '../generic-constructor';
import { HALEndpointService } from '../hal-endpoint.service';
import { URLCombiner } from '../../url-combiner/url-combiner';
import { hasValue, hasValueOperator, isNotEmpty } from '../../../shared/empty.util';
import { SearchOptions } from '../../../shared/search/models/search-options.model';
import { SearchFilterConfig } from '../../../shared/search/models/search-filter-config.model';
import { SearchResponseParsingService } from '../../data/search-response-parsing.service';
import { SearchObjects } from '../../../shared/search/models/search-objects.model';
import { FacetValueResponseParsingService } from '../../data/facet-value-response-parsing.service';
import { FacetConfigResponseParsingService } from '../../data/facet-config-response-parsing.service';
import { PaginatedSearchOptions } from '../../../shared/search/models/paginated-search-options.model';
import { CommunityDataService } from '../../data/community-data.service';
import { ViewMode } from '../view-mode.model';
import { DSpaceObjectDataService } from '../../data/dspace-object-data.service';
import { RemoteDataBuildService } from '../../cache/builders/remote-data-build.service';
import { getFirstCompletedRemoteData, getRemoteDataPayload } from '../operators';
import { RouteService } from '../../services/route.service';
import { SearchResult } from '../../../shared/search/models/search-result.model';
import { ListableObject } from '../../../shared/object-collection/shared/listable-object.model';
import { getSearchResultFor } from '../../../shared/search/search-result-element-decorator';
import { FacetConfigResponse } from '../../../shared/search/models/facet-config-response.model';
import { FacetValues } from '../../../shared/search/models/facet-values.model';
import { SearchConfig } from './search-filters/search-config.model';
import { PaginationService } from '../../pagination/pagination.service';
import { SearchConfigurationService } from './search-configuration.service';
import { PaginationComponentOptions } from '../../../shared/pagination/pagination-component-options.model';
import { DataService } from '../../data/data.service';
import { Store } from '@ngrx/store';
import { CoreState } from '../../core.reducers';
import { ObjectCacheService } from '../../cache/object-cache.service';
import { NotificationsService } from '../../../shared/notifications/notifications.service';
import { HttpClient } from '@angular/common/http';
import { DSOChangeAnalyzer } from '../../data/dso-change-analyzer.service';
/* tslint:disable:max-classes-per-file */
/**
* A class that lets us delegate some methods to DataService
*/
class DataServiceImpl extends DataService<any> {
protected linkPath = 'discover';
constructor(
protected requestService: RequestService,
protected rdbService: RemoteDataBuildService,
protected store: Store<CoreState>,
protected objectCache: ObjectCacheService,
protected halService: HALEndpointService,
protected notificationsService: NotificationsService,
protected http: HttpClient,
protected comparator: DSOChangeAnalyzer<any>) {
super();
}
/**
* Adds the embed options to the link for the request
* @param href The href the params are to be added to
* @param args params for the query string
* @param linksToFollow links we want to embed in query string if shouldEmbed is true
*/
public addEmbedParams(href: string, args: string[], ...linksToFollow: FollowLinkConfig<any>[]) {
return super.addEmbedParams(href, args, ...linksToFollow);
}
}
/**
* Service that performs all general actions that have to do with the search page
*/
@Injectable()
export class SearchService implements OnDestroy {
/**
* Endpoint link path for retrieving search configurations
*/
private configurationLinkPath = 'discover/search';
/**
* Endpoint link path for retrieving general search results
*/
private searchLinkPath = 'discover/search/objects';
/**
* Endpoint link path for retrieving facet config incl values
*/
private facetLinkPathPrefix = 'discover/facets/';
/**
* The ResponseParsingService constructor name
*/
private parser: GenericConstructor<ResponseParsingService> = SearchResponseParsingService;
/**
* The RestRequest constructor name
*/
private request: GenericConstructor<RestRequest> = GetRequest;
/**
* Subscription to unsubscribe from
*/
private sub;
/**
* Instance of DataServiceImpl that lets us delegate some methods to DataService
*/
private searchDataService: DataServiceImpl;
constructor(private router: Router,
private routeService: RouteService,
protected requestService: RequestService,
private rdb: RemoteDataBuildService,
private linkService: LinkService,
private halService: HALEndpointService,
private communityService: CommunityDataService,
private dspaceObjectService: DSpaceObjectDataService,
private paginationService: PaginationService,
private searchConfigurationService: SearchConfigurationService
) {
this.searchDataService = new DataServiceImpl(
undefined,
undefined,
undefined,
undefined,
undefined,
undefined,
undefined,
undefined
);
}
/**
* Method to set service options
* @param {GenericConstructor<ResponseParsingService>} parser The ResponseParsingService constructor name
* @param {GenericConstructor<RestRequest>} request The RestRequest constructor name
*/
setServiceOptions(parser: GenericConstructor<ResponseParsingService>, request: GenericConstructor<RestRequest>) {
if (parser) {
this.parser = parser;
}
if (request) {
this.request = request;
}
}
getEndpoint(searchOptions?: PaginatedSearchOptions): Observable<string> {
return this.halService.getEndpoint(this.searchLinkPath).pipe(
map((url: string) => {
if (hasValue(searchOptions)) {
return (searchOptions as PaginatedSearchOptions).toRestUrl(url);
} else {
return url;
}
})
);
}
/**
* Method to retrieve a paginated list of search results from the server
* @param {PaginatedSearchOptions} searchOptions The configuration necessary to perform this search
* @param responseMsToLive The amount of milliseconds for the response to live in cache
* @param useCachedVersionIfAvailable If this is true, the request will only be sent if there's
* no valid cached version. Defaults to true
* @param reRequestOnStale Whether or not the request should automatically be re-requested after
* the response becomes stale
* @param linksToFollow List of {@link FollowLinkConfig} that indicate which {@link HALLink}s should be automatically resolved
* @returns {Observable<RemoteData<SearchObjects<T>>>} Emits a paginated list with all search results found
*/
search<T extends DSpaceObject>(searchOptions?: PaginatedSearchOptions, responseMsToLive?: number, useCachedVersionIfAvailable = true, reRequestOnStale = true, ...linksToFollow: FollowLinkConfig<T>[]): Observable<RemoteData<SearchObjects<T>>> {
const href$ = this.getEndpoint(searchOptions);
href$.pipe(
take(1),
map((href: string) => {
const args = this.searchDataService.addEmbedParams(href, [], ...linksToFollow);
if (isNotEmpty(args)) {
return new URLCombiner(href, `?${args.join('&')}`).toString();
} else {
return href;
}
})
).subscribe((url: string) => {
const request = new this.request(this.requestService.generateRequestId(), url);
const getResponseParserFn: () => GenericConstructor<ResponseParsingService> = () => {
return this.parser;
};
Object.assign(request, {
responseMsToLive: hasValue(responseMsToLive) ? responseMsToLive : request.responseMsToLive,
getResponseParser: getResponseParserFn,
searchOptions: searchOptions
});
this.requestService.send(request, useCachedVersionIfAvailable);
});
const sqr$ = href$.pipe(
switchMap((href: string) => this.rdb.buildFromHref<SearchObjects<T>>(href))
);
return this.directlyAttachIndexableObjects(sqr$, useCachedVersionIfAvailable, reRequestOnStale, ...linksToFollow);
}
/**
* Method to retrieve request entries for search results from the server
* @param {PaginatedSearchOptions} searchOptions The configuration necessary to perform this search
* @returns {Observable<RemoteData<SearchObjects<T>>>} Emits a paginated list with all search results found
*/
searchEntries<T extends DSpaceObject>(searchOptions?: PaginatedSearchOptions): Observable<RemoteData<SearchObjects<T>>> {
const href$ = this.getEndpoint(searchOptions);
const sqr$ = href$.pipe(
switchMap((href: string) => this.rdb.buildFromHref<SearchObjects<T>>(href))
);
return this.directlyAttachIndexableObjects(sqr$);
}
/**
* Method to directly attach the indexableObjects to search results, instead of using RemoteData.
* For compatibility with the way the search was written originally
*
* @param sqr$: a SearchObjects RemoteData Observable without its
* indexableObjects attached
* @param useCachedVersionIfAvailable If this is true, the request will only be sent if there's
* no valid cached version. Defaults to true
* @param reRequestOnStale Whether or not the request should automatically be re-
* requested after the response becomes stale
* @param linksToFollow List of {@link FollowLinkConfig} that indicate which
* {@link HALLink}s should be automatically resolved
* @protected
*/
protected directlyAttachIndexableObjects<T extends DSpaceObject>(sqr$: Observable<RemoteData<SearchObjects<T>>>, useCachedVersionIfAvailable = true, reRequestOnStale = true, ...linksToFollow: FollowLinkConfig<T>[]): Observable<RemoteData<SearchObjects<T>>> {
return sqr$.pipe(
switchMap((resultsRd: RemoteData<SearchObjects<T>>) => {
if (hasValue(resultsRd.payload) && isNotEmpty(resultsRd.payload.page)) {
// retrieve the indexableObjects for all search results on the page
const searchResult$Array: Observable<SearchResult<T>>[] = resultsRd.payload.page.map((result: SearchResult<T>) =>
this.dspaceObjectService.findByHref(result._links.indexableObject.href, useCachedVersionIfAvailable, reRequestOnStale, ...linksToFollow as any).pipe(
getFirstCompletedRemoteData(),
getRemoteDataPayload(),
hasValueOperator(),
map((indexableObject: DSpaceObject) => {
// determine the constructor of the search result (ItemSearchResult,
// CollectionSearchResult, etc) based on the kind of the indexableObject it
// contains. Recreate the result with that constructor
const constructor: GenericConstructor<ListableObject> = indexableObject.constructor as GenericConstructor<ListableObject>;
const resultConstructor = getSearchResultFor(constructor);
// Attach the payload directly to the indexableObject property on the result
return Object.assign(new resultConstructor(), result, {
indexableObject
}) as SearchResult<T>;
}),
)
);
// Swap the original page in the remoteData with the new one, now that the results have the
// correct types, and all indexableObjects are directly attached.
return observableCombineLatest(searchResult$Array).pipe(
map((page: SearchResult<T>[]) => {
const payload = Object.assign(new SearchObjects(), resultsRd.payload, {
page
}) as SearchObjects<T>;
return new RemoteData(
resultsRd.timeCompleted,
resultsRd.msToLive,
resultsRd.lastUpdated,
resultsRd.state,
resultsRd.errorMessage,
payload,
resultsRd.statusCode,
);
})
);
}
// If we don't have a payload, or the page is empty, simply pass on the unmodified
// RemoteData object
return [resultsRd];
})
);
}
private getConfigUrl(url: string, scope?: string, configurationName?: string) {
const args: string[] = [];
if (isNotEmpty(scope)) {
args.push(`scope=${scope}`);
}
if (isNotEmpty(configurationName)) {
args.push(`configuration=${configurationName}`);
}
if (isNotEmpty(args)) {
url = new URLCombiner(url, `?${args.join('&')}`).toString();
}
return url; | /**
* Request the filter configuration for a given scope or the whole repository
* @param {string} scope UUID of the object for which the filter config is requested; when no scope is provided, the configuration for the whole repository is loaded
* @param {string} configurationName the name of the configuration
* @returns {Observable<RemoteData<SearchFilterConfig[]>>} The found filter configuration
*/
getConfig(scope?: string, configurationName?: string): Observable<RemoteData<SearchFilterConfig[]>> {
const href$ = this.halService.getEndpoint(this.facetLinkPathPrefix).pipe(
map((url: string) => this.getConfigUrl(url, scope, configurationName)),
);
href$.pipe(take(1)).subscribe((url: string) => {
let request = new this.request(this.requestService.generateRequestId(), url);
request = Object.assign(request, {
getResponseParser(): GenericConstructor<ResponseParsingService> {
return FacetConfigResponseParsingService;
}
});
this.requestService.send(request, true);
});
return this.rdb.buildFromHref(href$).pipe(
map((rd: RemoteData<FacetConfigResponse>) => {
if (rd.hasSucceeded) {
let filters: SearchFilterConfig[];
if (isNotEmpty(rd.payload.filters)) {
filters = rd.payload.filters
.map((filter: any) => Object.assign(new SearchFilterConfig(), filter));
} else {
filters = [];
}
return new RemoteData(
rd.timeCompleted,
rd.msToLive,
rd.lastUpdated,
rd.state,
rd.errorMessage,
filters,
rd.statusCode,
);
} else {
return rd as any as RemoteData<SearchFilterConfig[]>;
}
})
);
}
/**
* Method to request a single page of filter values for a given value
* @param {SearchFilterConfig} filterConfig The filter config for which we want to request filter values
* @param {number} valuePage The page number of the filter values
* @param {SearchOptions} searchOptions The search configuration for the current search
* @param {string} filterQuery The optional query used to filter out filter values
* @returns {Observable<RemoteData<PaginatedList<FacetValue>>>} Emits the given page of facet values
*/
getFacetValuesFor(filterConfig: SearchFilterConfig, valuePage: number, searchOptions?: SearchOptions, filterQuery?: string): Observable<RemoteData<FacetValues>> {
let href;
const args: string[] = [`page=${valuePage - 1}`, `size=${filterConfig.pageSize}`];
if (hasValue(filterQuery)) {
args.push(`prefix=${filterQuery}`);
}
if (hasValue(searchOptions)) {
href = searchOptions.toRestUrl(filterConfig._links.self.href, args);
} else {
href = new URLCombiner(filterConfig._links.self.href, `?${args.join('&')}`).toString();
}
let request = new this.request(this.requestService.generateRequestId(), href);
request = Object.assign(request, {
getResponseParser(): GenericConstructor<ResponseParsingService> {
return FacetValueResponseParsingService;
}
});
this.requestService.send(request, true);
return this.rdb.buildFromHref(href);
}
/**
* Requests the current view mode based on the current URL
* @returns {Observable<ViewMode>} The current view mode
*/
getViewMode(): Observable<ViewMode> {
return this.routeService.getQueryParamMap().pipe(map((params) => {
if (isNotEmpty(params.get('view')) && hasValue(params.get('view'))) {
return params.get('view');
} else {
return ViewMode.ListElement;
}
}));
}
/**
* Changes the current view mode in the current URL
* @param {ViewMode} viewMode Mode to switch to
* @param {string[]} searchLinkParts
*/
setViewMode(viewMode: ViewMode, searchLinkParts?: string[]) {
this.paginationService.getCurrentPagination(this.searchConfigurationService.paginationID, new PaginationComponentOptions()).pipe(take(1))
.subscribe((config) => {
let pageParams = { page: 1 };
const queryParams = { view: viewMode };
if (viewMode === ViewMode.DetailedListElement) {
pageParams = Object.assign(pageParams, { pageSize: 1 });
} else if (config.pageSize === 1) {
pageParams = Object.assign(pageParams, { pageSize: 10 });
}
this.paginationService.updateRouteWithUrl(this.searchConfigurationService.paginationID, hasValue(searchLinkParts) ? searchLinkParts : [this.getSearchLink()], pageParams, queryParams);
});
}
/**
* Request the search configuration for a given scope or the whole repository
* @param {string} scope UUID of the object for which the search configuration is requested; when no scope is provided, the configuration for the whole repository is loaded
* @param {string} configurationName the name of the configuration
* @returns {Observable<RemoteData<SearchConfig>>} The found configuration
*/
getSearchConfigurationFor(scope?: string, configurationName?: string): Observable<RemoteData<SearchConfig>> {
const href$ = this.halService.getEndpoint(this.configurationLinkPath).pipe(
map((url: string) => this.getConfigUrl(url, scope, configurationName)),
);
href$.pipe(take(1)).subscribe((url: string) => {
const request = new this.request(this.requestService.generateRequestId(), url);
this.requestService.send(request, true);
});
return this.rdb.buildFromHref(href$);
}
/**
* @returns {string} The base path to the search page
*/
getSearchLink(): string {
return '/search';
}
/**
* Unsubscribe from the subscription
*/
ngOnDestroy(): void {
if (this.sub !== undefined) {
this.sub.unsubscribe();
}
}
} | }
|
transform_error.rs | use error_chain::error_chain;
error_chain! {
types {
TransformError, TransformErrorKind, ResultExt, Result;
}
links {
CryptoError(super::CryptoError, super::CryptoErrorKind);
TrustError(super::TrustError, super::TrustErrorKind);
}
foreign_links {
IO(std::io::Error);
}
errors {
#[cfg(feature = "enable_openssl")]
EncryptionError(stack: openssl::error::ErrorStack) {
description("encryption error while transforming event data"),
display("encryption error while transforming event data - {}", err),
}
MissingData {
description("missing data for this record")
display("missing data for this record")
}
MissingReadKey(hash: String) {
description("missing the read key needed to encrypt/decrypt this data object"),
display("missing the read key ({}) needed to encrypt/decrypt this data object", hash)
}
UnspecifiedReadability {
description("the readability for this data object has not been specified")
display("the readability for this data object has not been specified")
}
}
}
#[cfg(feature = "enable_openssl")]
impl From<openssl::error::ErrorStack> for Error {
fn | (err: openssl::error::ErrorStack) -> Error {
ErrorKind::EncryptionError(err).into()
}
}
| from |
wasmi_impl.rs | // This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of conversions between Substrate and wasmi types.
use crate::{Value, ValueType, Signature};
impl From<Value> for wasmi::RuntimeValue {
fn from(value: Value) -> Self {
match value {
Value::I32(val) => Self::I32(val),
Value::I64(val) => Self::I64(val),
Value::F32(val) => Self::F32(val.into()),
Value::F64(val) => Self::F64(val.into()),
}
}
}
impl From<wasmi::RuntimeValue> for Value {
fn | (value: wasmi::RuntimeValue) -> Self {
match value {
wasmi::RuntimeValue::I32(val) => Self::I32(val),
wasmi::RuntimeValue::I64(val) => Self::I64(val),
wasmi::RuntimeValue::F32(val) => Self::F32(val.into()),
wasmi::RuntimeValue::F64(val) => Self::F64(val.into()),
}
}
}
impl From<ValueType> for wasmi::ValueType {
fn from(value: ValueType) -> Self {
match value {
ValueType::I32 => Self::I32,
ValueType::I64 => Self::I64,
ValueType::F32 => Self::F32,
ValueType::F64 => Self::F64,
}
}
}
impl From<wasmi::ValueType> for ValueType {
fn from(value: wasmi::ValueType) -> Self {
match value {
wasmi::ValueType::I32 => Self::I32,
wasmi::ValueType::I64 => Self::I64,
wasmi::ValueType::F32 => Self::F32,
wasmi::ValueType::F64 => Self::F64,
}
}
}
impl From<Signature> for wasmi::Signature {
fn from(sig: Signature) -> Self {
let args = sig.args.iter().map(|a| (*a).into()).collect::<Vec<_>>();
wasmi::Signature::new(args, sig.return_value.map(Into::into))
}
}
impl From<&wasmi::Signature> for Signature {
fn from(sig: &wasmi::Signature) -> Self {
Signature::new(
sig.params().into_iter().copied().map(Into::into).collect::<Vec<_>>(),
sig.return_type().map(Into::into),
)
}
}
| from |
filter.rs | use crate::{
archetype::{Archetype, ArchetypeComponentId},
component::{Component, ComponentId, ComponentStorage, ComponentTicks, StorageType},
entity::Entity,
query::{
debug_checked_unreachable, Access, Fetch, FetchState, FilteredAccess, QueryFetch,
ROQueryFetch, WorldQuery, WorldQueryGats,
},
storage::{ComponentSparseSet, Table, Tables},
world::World,
};
use bevy_ecs_macros::all_tuples;
use bevy_ptr::{ThinSlicePtr, UnsafeCellDeref};
use std::{cell::UnsafeCell, marker::PhantomData};
use super::ReadOnlyFetch;
/// Filter that selects entities with a component `T`.
///
/// This can be used in a [`Query`](crate::system::Query) if entities are required to have the
/// component `T` but you don't actually care about the component's value.
///
/// This is the negation of [`Without`].
///
/// # Examples
///
/// ```
/// # use bevy_ecs::component::Component;
/// # use bevy_ecs::query::With;
/// # use bevy_ecs::system::IntoSystem;
/// # use bevy_ecs::system::Query;
/// #
/// # #[derive(Component)]
/// # struct IsBeautiful;
/// # #[derive(Component)]
/// # struct Name { name: &'static str };
/// #
/// fn compliment_entity_system(query: Query<&Name, With<IsBeautiful>>) {
/// for name in query.iter() {
/// println!("{} is looking lovely today!", name.name);
/// }
/// }
/// # bevy_ecs::system::assert_is_system(compliment_entity_system);
/// ```
pub struct With<T>(PhantomData<T>);
impl<T: Component> WorldQuery for With<T> {
type State = WithState<T>;
#[allow(clippy::semicolon_if_nothing_returned)]
fn shrink<'wlong: 'wshort, 'wshort>(
item: super::QueryItem<'wlong, Self>,
) -> super::QueryItem<'wshort, Self> {
item
}
}
/// The [`Fetch`] of [`With`].
#[doc(hidden)]
pub struct WithFetch<T> {
marker: PhantomData<T>,
}
/// The [`FetchState`] of [`With`].
#[doc(hidden)]
pub struct | <T> {
component_id: ComponentId,
marker: PhantomData<T>,
}
// SAFETY: no component access or archetype component access
unsafe impl<T: Component> FetchState for WithState<T> {
fn init(world: &mut World) -> Self {
let component_id = world.init_component::<T>();
Self {
component_id,
marker: PhantomData,
}
}
#[inline]
fn update_component_access(&self, access: &mut FilteredAccess<ComponentId>) {
access.add_with(self.component_id);
}
#[inline]
fn update_archetype_component_access(
&self,
_archetype: &Archetype,
_access: &mut Access<ArchetypeComponentId>,
) {
}
fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool {
set_contains_id(self.component_id)
}
}
impl<T: Component> WorldQueryGats<'_> for With<T> {
type Fetch = WithFetch<T>;
type ReadOnlyFetch = WithFetch<T>;
type _State = WithState<T>;
}
impl<'w, T: Component> Fetch<'w> for WithFetch<T> {
type Item = ();
type State = WithState<T>;
unsafe fn init(
_world: &World,
_state: &WithState<T>,
_last_change_tick: u32,
_change_tick: u32,
) -> Self {
Self {
marker: PhantomData,
}
}
const IS_DENSE: bool = {
match T::Storage::STORAGE_TYPE {
StorageType::Table => true,
StorageType::SparseSet => false,
}
};
const IS_ARCHETYPAL: bool = true;
#[inline]
unsafe fn set_table(&mut self, _state: &Self::State, _table: &Table) {}
#[inline]
unsafe fn set_archetype(
&mut self,
_state: &Self::State,
_archetype: &Archetype,
_tables: &Tables,
) {
}
#[inline]
unsafe fn archetype_fetch(&mut self, _archetype_index: usize) {}
#[inline]
unsafe fn table_fetch(&mut self, _table_row: usize) {}
}
// SAFETY: no component access or archetype component access
unsafe impl<T: Component> ReadOnlyFetch for WithFetch<T> {}
impl<T> Clone for WithFetch<T> {
fn clone(&self) -> Self {
Self {
marker: self.marker,
}
}
}
impl<T> Copy for WithFetch<T> {}
/// Filter that selects entities without a component `T`.
///
/// This is the negation of [`With`].
///
/// # Examples
///
/// ```
/// # use bevy_ecs::component::Component;
/// # use bevy_ecs::query::Without;
/// # use bevy_ecs::system::IntoSystem;
/// # use bevy_ecs::system::Query;
/// #
/// # #[derive(Component)]
/// # struct Permit;
/// # #[derive(Component)]
/// # struct Name { name: &'static str };
/// #
/// fn no_permit_system(query: Query<&Name, Without<Permit>>) {
/// for name in query.iter() {
/// println!("{} has no permit!", name.name);
/// }
/// }
/// # bevy_ecs::system::assert_is_system(no_permit_system);
/// ```
pub struct Without<T>(PhantomData<T>);
impl<T: Component> WorldQuery for Without<T> {
type State = WithoutState<T>;
#[allow(clippy::semicolon_if_nothing_returned)]
fn shrink<'wlong: 'wshort, 'wshort>(
item: super::QueryItem<'wlong, Self>,
) -> super::QueryItem<'wshort, Self> {
item
}
}
/// The [`Fetch`] of [`Without`].
#[doc(hidden)]
pub struct WithoutFetch<T> {
marker: PhantomData<T>,
}
/// The [`FetchState`] of [`Without`].
#[doc(hidden)]
pub struct WithoutState<T> {
component_id: ComponentId,
marker: PhantomData<T>,
}
// SAFETY: no component access or archetype component access
unsafe impl<T: Component> FetchState for WithoutState<T> {
fn init(world: &mut World) -> Self {
let component_id = world.init_component::<T>();
Self {
component_id,
marker: PhantomData,
}
}
#[inline]
fn update_component_access(&self, access: &mut FilteredAccess<ComponentId>) {
access.add_without(self.component_id);
}
#[inline]
fn update_archetype_component_access(
&self,
_archetype: &Archetype,
_access: &mut Access<ArchetypeComponentId>,
) {
}
fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool {
!set_contains_id(self.component_id)
}
}
impl<T: Component> WorldQueryGats<'_> for Without<T> {
type Fetch = WithoutFetch<T>;
type ReadOnlyFetch = WithoutFetch<T>;
type _State = WithoutState<T>;
}
impl<'w, T: Component> Fetch<'w> for WithoutFetch<T> {
type Item = ();
type State = WithoutState<T>;
unsafe fn init(
_world: &World,
_state: &WithoutState<T>,
_last_change_tick: u32,
_change_tick: u32,
) -> Self {
WithoutFetch {
marker: PhantomData,
}
}
const IS_DENSE: bool = {
match T::Storage::STORAGE_TYPE {
StorageType::Table => true,
StorageType::SparseSet => false,
}
};
const IS_ARCHETYPAL: bool = true;
#[inline]
unsafe fn set_table(&mut self, _state: &Self::State, _table: &Table) {}
#[inline]
unsafe fn set_archetype(
&mut self,
_state: &Self::State,
_archetype: &Archetype,
_tables: &Tables,
) {
}
#[inline]
unsafe fn archetype_fetch(&mut self, _archetype_index: usize) {}
#[inline]
unsafe fn table_fetch(&mut self, _table_row: usize) {}
}
// SAFETY: no component access or archetype component access
unsafe impl<T: Component> ReadOnlyFetch for WithoutFetch<T> {}
impl<T> Clone for WithoutFetch<T> {
fn clone(&self) -> Self {
Self {
marker: self.marker,
}
}
}
impl<T> Copy for WithoutFetch<T> {}
/// A filter that tests if any of the given filters apply.
///
/// This is useful for example if a system with multiple components in a query only wants to run
/// when one or more of the components have changed.
///
/// The `And` equivalent to this filter is a [`prim@tuple`] testing that all the contained filters
/// apply instead.
///
/// # Examples
///
/// ```
/// # use bevy_ecs::component::Component;
/// # use bevy_ecs::entity::Entity;
/// # use bevy_ecs::query::Changed;
/// # use bevy_ecs::query::Or;
/// # use bevy_ecs::system::IntoSystem;
/// # use bevy_ecs::system::Query;
/// #
/// # #[derive(Component, Debug)]
/// # struct Color {};
/// # #[derive(Component)]
/// # struct Style {};
/// #
/// fn print_cool_entity_system(query: Query<Entity, Or<(Changed<Color>, Changed<Style>)>>) {
/// for entity in query.iter() {
/// println!("Entity {:?} got a new style or color", entity);
/// }
/// }
/// # bevy_ecs::system::assert_is_system(print_cool_entity_system);
/// ```
#[derive(Clone, Copy)]
pub struct Or<T>(pub T);
/// The [`Fetch`] of [`Or`].
#[derive(Clone, Copy)]
#[doc(hidden)]
pub struct OrFetch<'w, T: Fetch<'w>> {
fetch: T,
matches: bool,
_marker: PhantomData<&'w ()>,
}
macro_rules! impl_query_filter_tuple {
($(($filter: ident, $state: ident)),*) => {
#[allow(unused_variables)]
#[allow(non_snake_case)]
impl<$($filter: WorldQuery),*> WorldQuery for Or<($($filter,)*)> {
type State = Or<($($filter::State,)*)>;
fn shrink<'wlong: 'wshort, 'wshort>(item: super::QueryItem<'wlong, Self>) -> super::QueryItem<'wshort, Self> {
item
}
}
#[allow(unused_variables)]
#[allow(non_snake_case)]
impl<'w, $($filter: WorldQueryGats<'w>),*> WorldQueryGats<'w> for Or<($($filter,)*)> {
type Fetch = Or<($(OrFetch<'w, QueryFetch<'w, $filter>>,)*)>;
type ReadOnlyFetch = Or<($(OrFetch<'w, ROQueryFetch<'w, $filter>>,)*)>;
type _State = Or<($($filter::_State,)*)>;
}
#[allow(unused_variables)]
#[allow(non_snake_case)]
impl<'w, $($filter: Fetch<'w>),*> Fetch<'w> for Or<($(OrFetch<'w, $filter>,)*)> {
type State = Or<($(<$filter as Fetch<'w>>::State,)*)>;
type Item = bool;
const IS_DENSE: bool = true $(&& $filter::IS_DENSE)*;
const IS_ARCHETYPAL: bool = true $(&& $filter::IS_ARCHETYPAL)*;
unsafe fn init(world: &'w World, state: & Or<($(<$filter as Fetch<'w>>::State,)*)>, last_change_tick: u32, change_tick: u32) -> Self {
let ($($filter,)*) = &state.0;
Or(($(OrFetch {
fetch: <$filter as Fetch<'w>>::init(world, $filter, last_change_tick, change_tick),
matches: false,
_marker: PhantomData,
},)*))
}
#[inline]
unsafe fn set_table(&mut self, state: &Self::State, table: &'w Table) {
let ($($filter,)*) = &mut self.0;
let ($($state,)*) = &state.0;
$(
$filter.matches = $state.matches_component_set(&|id| table.has_column(id));
if $filter.matches {
$filter.fetch.set_table($state, table);
}
)*
}
#[inline]
unsafe fn set_archetype(&mut self, state: & Self::State, archetype: &'w Archetype, tables: &'w Tables) {
let ($($filter,)*) = &mut self.0;
let ($($state,)*) = &state.0;
$(
$filter.matches = $state.matches_component_set(&|id| archetype.contains(id));
if $filter.matches {
$filter.fetch.set_archetype($state, archetype, tables);
}
)*
}
#[inline]
unsafe fn table_fetch(&mut self, table_row: usize) -> bool {
let ($($filter,)*) = &mut self.0;
false $(|| ($filter.matches && $filter.fetch.table_filter_fetch(table_row)))*
}
#[inline]
unsafe fn archetype_fetch(&mut self, archetype_index: usize) -> bool {
let ($($filter,)*) = &mut self.0;
false $(|| ($filter.matches && $filter.fetch.archetype_filter_fetch(archetype_index)))*
}
#[inline]
unsafe fn table_filter_fetch(&mut self, table_row: usize) -> bool {
self.table_fetch(table_row)
}
#[inline]
unsafe fn archetype_filter_fetch(&mut self, archetype_index: usize) -> bool {
self.archetype_fetch(archetype_index)
}
}
// SAFETY: update_component_access and update_archetype_component_access are called for each item in the tuple
#[allow(unused_variables)]
#[allow(non_snake_case)]
unsafe impl<$($filter: FetchState),*> FetchState for Or<($($filter,)*)> {
fn init(world: &mut World) -> Self {
Or(($($filter::init(world),)*))
}
fn update_component_access(&self, access: &mut FilteredAccess<ComponentId>) {
let ($($filter,)*) = &self.0;
// We do not unconditionally add `$filter`'s `with`/`without` accesses to `access`
// as this would be unsound. For example the following two queries should conflict:
// - Query<&mut B, Or<(With<A>, ())>>
// - Query<&mut B, Without<A>>
//
// If we were to unconditionally add `$filter`'s `with`/`without` accesses then `Or<(With<A>, ())>`
// would have a `With<A>` access which is incorrect as this `WorldQuery` will match entities that
// do not have the `A` component. This is the same logic as the `AnyOf<...>: WorldQuery` impl.
//
// The correct thing to do here is to only add a `with`/`without` access to `_access` if all
// `$filter` params have that `with`/`without` access. Put more formally, we add the intersection
// of all `with`/`without` accesses of the `$filter` params to `access`.
let mut _intersected_access = access.clone();
let mut _not_first = false;
$(
if _not_first {
let mut intermediate = access.clone();
$filter.update_component_access(&mut intermediate);
_intersected_access.extend_intersect_filter(&intermediate);
_intersected_access.extend_access(&intermediate);
} else {
$filter.update_component_access(&mut _intersected_access);
_not_first = true;
}
)*
*access = _intersected_access;
}
fn update_archetype_component_access(&self, archetype: &Archetype, access: &mut Access<ArchetypeComponentId>) {
let ($($filter,)*) = &self.0;
$($filter.update_archetype_component_access(archetype, access);)*
}
fn matches_component_set(&self, _set_contains_id: &impl Fn(ComponentId) -> bool) -> bool {
let ($($filter,)*) = &self.0;
false $(|| $filter.matches_component_set(_set_contains_id))*
}
}
// SAFE: filters are read only
unsafe impl<'w, $($filter: Fetch<'w> + ReadOnlyFetch),*> ReadOnlyFetch for Or<($(OrFetch<'w, $filter>,)*)> {}
};
}
all_tuples!(impl_query_filter_tuple, 0, 15, F, S);
macro_rules! impl_tick_filter {
(
$(#[$meta:meta])*
$name: ident,
$(#[$state_meta:meta])*
$state_name: ident,
$(#[$fetch_meta:meta])*
$fetch_name: ident,
$is_detected: expr
) => {
$(#[$meta])*
pub struct $name<T>(PhantomData<T>);
#[doc(hidden)]
$(#[$fetch_meta])*
pub struct $fetch_name<'w, T> {
table_ticks: Option<ThinSlicePtr<'w, UnsafeCell<ComponentTicks>>>,
entity_table_rows: Option<ThinSlicePtr<'w, usize>>,
marker: PhantomData<T>,
entities: Option<ThinSlicePtr<'w, Entity>>,
sparse_set: Option<&'w ComponentSparseSet>,
last_change_tick: u32,
change_tick: u32,
}
#[doc(hidden)]
$(#[$state_meta])*
pub struct $state_name<T> {
component_id: ComponentId,
marker: PhantomData<T>,
}
impl<T: Component> WorldQuery for $name<T> {
type State = $state_name<T>;
fn shrink<'wlong: 'wshort, 'wshort>(item: super::QueryItem<'wlong, Self>) -> super::QueryItem<'wshort, Self> {
item
}
}
// SAFETY: this reads the T component. archetype component access and component access are updated to reflect that
unsafe impl<T: Component> FetchState for $state_name<T> {
fn init(world: &mut World) -> Self {
Self {
component_id: world.init_component::<T>(),
marker: PhantomData,
}
}
#[inline]
fn update_component_access(&self, access: &mut FilteredAccess<ComponentId>) {
if access.access().has_write(self.component_id) {
panic!("$state_name<{}> conflicts with a previous access in this query. Shared access cannot coincide with exclusive access.",
std::any::type_name::<T>());
}
access.add_read(self.component_id);
}
#[inline]
fn update_archetype_component_access(
&self,
archetype: &Archetype,
access: &mut Access<ArchetypeComponentId>,
) {
if let Some(archetype_component_id) = archetype.get_archetype_component_id(self.component_id) {
access.add_read(archetype_component_id);
}
}
fn matches_component_set(&self, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool {
set_contains_id(self.component_id)
}
}
impl<'w, T: Component> WorldQueryGats<'w> for $name<T> {
type Fetch = $fetch_name<'w, T>;
type ReadOnlyFetch = $fetch_name<'w, T>;
type _State = $state_name<T>;
}
impl<'w, T: Component> Fetch<'w> for $fetch_name<'w, T> {
type State = $state_name<T>;
type Item = bool;
unsafe fn init(world: &'w World, state: & $state_name<T>, last_change_tick: u32, change_tick: u32) -> Self {
Self {
table_ticks: None,
entities: None,
entity_table_rows: None,
sparse_set: (T::Storage::STORAGE_TYPE == StorageType::SparseSet)
.then(|| world.storages().sparse_sets.get(state.component_id).unwrap()),
marker: PhantomData,
last_change_tick,
change_tick,
}
}
const IS_DENSE: bool = {
match T::Storage::STORAGE_TYPE {
StorageType::Table => true,
StorageType::SparseSet => false,
}
};
const IS_ARCHETYPAL: bool = false;
unsafe fn set_table(&mut self, state: &Self::State, table: &'w Table) {
self.table_ticks = Some(table.get_column(state.component_id).unwrap().get_ticks_slice().into());
}
unsafe fn set_archetype(&mut self, state: &Self::State, archetype: &'w Archetype, tables: &'w Tables) {
match T::Storage::STORAGE_TYPE {
StorageType::Table => {
self.entity_table_rows = Some(archetype.entity_table_rows().into());
let table = &tables[archetype.table_id()];
self.table_ticks = Some(table.get_column(state.component_id).unwrap().get_ticks_slice().into());
}
StorageType::SparseSet => self.entities = Some(archetype.entities().into()),
}
}
unsafe fn table_fetch(&mut self, table_row: usize) -> bool {
$is_detected(&*(self.table_ticks.unwrap_or_else(|| debug_checked_unreachable()).get(table_row)).deref(), self.last_change_tick, self.change_tick)
}
unsafe fn archetype_fetch(&mut self, archetype_index: usize) -> bool {
match T::Storage::STORAGE_TYPE {
StorageType::Table => {
let table_row = *self.entity_table_rows.unwrap_or_else(|| debug_checked_unreachable()).get(archetype_index);
$is_detected(&*(self.table_ticks.unwrap_or_else(|| debug_checked_unreachable()).get(table_row)).deref(), self.last_change_tick, self.change_tick)
}
StorageType::SparseSet => {
let entity = *self.entities.unwrap_or_else(|| debug_checked_unreachable()).get(archetype_index);
let ticks = self
.sparse_set
.unwrap_or_else(|| debug_checked_unreachable())
.get_ticks(entity)
.map(|ticks| &*ticks.get())
.cloned()
.unwrap();
$is_detected(&ticks, self.last_change_tick, self.change_tick)
}
}
}
#[inline]
unsafe fn table_filter_fetch(&mut self, table_row: usize) -> bool {
self.table_fetch(table_row)
}
#[inline]
unsafe fn archetype_filter_fetch(&mut self, archetype_index: usize) -> bool {
self.archetype_fetch(archetype_index)
}
}
/// SAFETY: read-only access
unsafe impl<'w, T: Component> ReadOnlyFetch for $fetch_name<'w, T> {}
impl<T> Clone for $fetch_name<'_, T> {
fn clone(&self) -> Self {
Self {
table_ticks: self.table_ticks.clone(),
entity_table_rows: self.entity_table_rows.clone(),
marker: self.marker.clone(),
entities: self.entities.clone(),
sparse_set: self.sparse_set.clone(),
last_change_tick: self.last_change_tick.clone(),
change_tick: self.change_tick.clone(),
}
}
}
impl<T> Copy for $fetch_name<'_, T> {}
};
}
impl_tick_filter!(
/// A filter on a component that only retains results added after the system last ran.
///
/// A common use for this filter is one-time initialization.
///
/// To retain all results without filtering but still check whether they were added after the
/// system last ran, use [`ChangeTrackers<T>`](crate::query::ChangeTrackers).
///
/// # Examples
///
/// ```
/// # use bevy_ecs::component::Component;
/// # use bevy_ecs::query::Added;
/// # use bevy_ecs::system::IntoSystem;
/// # use bevy_ecs::system::Query;
/// #
/// # #[derive(Component, Debug)]
/// # struct Name {};
///
/// fn print_add_name_component(query: Query<&Name, Added<Name>>) {
/// for name in query.iter() {
/// println!("Named entity created: {:?}", name)
/// }
/// }
///
/// # bevy_ecs::system::assert_is_system(print_add_name_component);
/// ```
Added,
/// The [`FetchState`] of [`Added`].
AddedState,
/// The [`Fetch`] of [`Added`].
AddedFetch,
ComponentTicks::is_added
);
impl_tick_filter!(
/// A filter on a component that only retains results added or mutably dereferenced after the system last ran.
///
/// A common use for this filter is avoiding redundant work when values have not changed.
///
/// **Note** that simply *mutably dereferencing* a component is considered a change ([`DerefMut`](std::ops::DerefMut)).
/// Bevy does not compare components to their previous values.
///
/// To retain all results without filtering but still check whether they were changed after the
/// system last ran, use [`ChangeTrackers<T>`](crate::query::ChangeTrackers).
///
/// # Examples
///
/// ```
/// # use bevy_ecs::component::Component;
/// # use bevy_ecs::query::Changed;
/// # use bevy_ecs::system::IntoSystem;
/// # use bevy_ecs::system::Query;
/// #
/// # #[derive(Component, Debug)]
/// # struct Name {};
/// # #[derive(Component)]
/// # struct Transform {};
///
/// fn print_moving_objects_system(query: Query<&Name, Changed<Transform>>) {
/// for name in query.iter() {
/// println!("Entity Moved: {:?}", name);
/// }
/// }
///
/// # bevy_ecs::system::assert_is_system(print_moving_objects_system);
/// ```
Changed,
/// The [`FetchState`] of [`Changed`].
ChangedState,
/// The [`Fetch`] of [`Changed`].
ChangedFetch,
ComponentTicks::is_changed
);
| WithState |
stick.ts | import { IStickConfig, IInputConfig } from './../game.config.type';
import { Keyboard } from '../input/keyboard';
import { Mouse } from '../input/mouse';
import { GameConfig } from '../game.config';
import { Assets } from '../assets';
import { Canvas2D } from '../canvas';
import { Vector2 } from '../geom/vector2';
import { mapRange } from '../common/helper';
import { IAssetsConfig } from '../game.config.type';
//------Configurations------//
const inputConfig: IInputConfig = GameConfig.input;
const stickConfig: IStickConfig = GameConfig.stick;
const sprites: IAssetsConfig = GameConfig.sprites;
const sounds: IAssetsConfig = GameConfig.sounds;
export class | {
//------Members------//
private _sprite: HTMLImageElement = Assets.getSprite(sprites.paths.stick);
private _rotation: number = 0;
private _origin: Vector2 = Vector2.copy(stickConfig.origin);
private _power: number = 0;
private _movable: boolean = true;
private _visible: boolean = true;
//------Properties------//
public get position() : Vector2 {
return Vector2.copy(this._position);
}
public get rotation(): number {
return this._rotation;
}
public get power(): number {
return this._power;
}
public set movable(value: boolean) {
this._movable = value;
}
public get visible(): boolean {
return this._visible;
}
public set visible(value: boolean) {
this._visible = value;
}
public set rotation(value: number) {
this._rotation = value;
}
//------Constructor------//
constructor(private _position: Vector2) {}
//------Private Methods------//
private increasePower(): void {
this._power += stickConfig.powerToAddPerFrame;
this._origin.addToX(stickConfig.movementPerFrame);
}
private decreasePower(): void {
this._power -= stickConfig.powerToAddPerFrame;
this._origin.addToX(-stickConfig.movementPerFrame);
}
private isLessThanMaxPower(): boolean {
return this._power < stickConfig.maxPower;
}
private isMoreThanMinPower(): boolean {
return this._power >= 0;
}
private updatePower(): void {
if (Keyboard.isDown(inputConfig.increaseShotPowerKey) && this.isLessThanMaxPower()) {
this.increasePower();
}
else if (Keyboard.isDown(inputConfig.decreaseShotPowerKey) && this.isMoreThanMinPower()) {
this.decreasePower();
}
}
private updateRotation(): void {
const opposite: number = Mouse.position.y - this._position.y;
const adjacent: number = Mouse.position.x - this._position.x;
this._rotation = Math.atan2(opposite, adjacent);
}
//------Public Methods------//
public hide(): void {
this._power = 0;
this._visible = false;
this._movable = false;
}
public show(position: Vector2): void {
this._position = position;
this._origin = Vector2.copy(stickConfig.origin);
this._movable = true;
this._visible = true;
}
public shoot(): void {
this._origin = Vector2.copy(stickConfig.shotOrigin);
const volume: number = mapRange(this._power, 0, stickConfig.maxPower, 0, 1);
Assets.playSound(sounds.paths.strike, volume);
}
public update(): void {
if(this._movable) {
this.updateRotation();
this.updatePower();
}
}
public draw(): void {
if(this._visible) {
Canvas2D.drawImage(this._sprite, this._position, this._rotation, this._origin);
}
}
} | Stick |
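The `shoot()` method above maps the accumulated power onto a 0–1 volume with `mapRange` from `common/helper`. Assuming it is the usual linear rescaling helper, a minimal sketch (not the project's actual implementation) looks like this:

```python
def map_range(value: float, in_min: float, in_max: float,
              out_min: float, out_max: float) -> float:
    """Linearly rescale value from [in_min, in_max] onto [out_min, out_max]."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)

# e.g. half of the stick's max power maps to a volume of 0.5
print(map_range(50, 0, 100, 0, 1))  # 0.5
```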
27.removeElement.py | # In Python an assignment rebinds the identifier to a new object, so the result is written back with a slice assignment
class Solution():
def removeElement(self, nums: list, val: int) -> int:
lst=[]
for i in range(len(nums)):
if nums[i]!=val:
lst.append(nums[i])
nums[:]=lst
return len(lst)
| class Solution2:
def removeElement(self, nums, val):
c = nums.count(val)
i = 0
while i < c:
nums.remove(val)
i += 1
return len(nums)
# The standard solution uses the fast/slow two-pointer technique
class Solution1():
def removeElement(self, nums: list, val: int) -> int:
cur_next=0
for j in range(len(nums)):
if nums[j]!=val:
nums[cur_next]=nums[j]
cur_next+=1
return cur_next
# Another way of writing the solution above
class Solution4:
def removeElement(self, nums: list, val: int) -> int:
i = 0
while i < len(nums):
if nums[i] == val:
nums[i] = nums[-1]
del nums[-1]
else:
i += 1
return len(nums)
if __name__=="__main__":
a=Solution1()
print(a.removeElement([3,2,2,3],3)) | # Using Python's count and remove operations
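The first `Solution` above writes the filtered list back with `nums[:] = lst` rather than `nums = lst`. A minimal sketch of why that distinction matters (hypothetical helper names, not part of the original file):

```python
def rebind(nums: list) -> None:
    # Rebinds only the local name; the caller's list is untouched.
    nums = [x for x in nums if x != 3]

def mutate_in_place(nums: list) -> None:
    # Slice assignment writes through to the caller's list object.
    nums[:] = [x for x in nums if x != 3]

data = [3, 2, 2, 3]
rebind(data)
print(data)            # [3, 2, 2, 3] - unchanged
mutate_in_place(data)
print(data)            # [2, 2]
```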
models.py | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models import Count
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from taiga.base.utils.slug import slugify_uniquely
from taiga.base.utils.dicts import dict_sum
from taiga.projects.notifications.mixins import WatchedModelMixin
import itertools
import datetime
class Milestone(WatchedModelMixin, models.Model):
name = models.CharField(max_length=200, db_index=True, null=False, blank=False,
verbose_name=_("name"))
# TODO: Change the unique restriction to a unique together with the project id
slug = models.SlugField(max_length=250, db_index=True, null=False, blank=True,
verbose_name=_("slug"))
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="owned_milestones", verbose_name=_("owner"))
project = models.ForeignKey("projects.Project", null=False, blank=False,
related_name="milestones", verbose_name=_("project"))
estimated_start = models.DateField(verbose_name=_("estimated start date"))
estimated_finish = models.DateField(verbose_name=_("estimated finish date"))
created_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("created date"),
default=timezone.now)
modified_date = models.DateTimeField(null=False, blank=False,
verbose_name=_("modified date"))
closed = models.BooleanField(default=False, null=False, blank=True,
verbose_name=_("is closed"))
disponibility = models.FloatField(default=0.0, null=True, blank=True,
verbose_name=_("disponibility"))
order = models.PositiveSmallIntegerField(default=1, null=False, blank=False,
verbose_name=_("order"))
_importing = None
_total_closed_points_by_date = None
class Me | verbose_name = "milestone"
verbose_name_plural = "milestones"
ordering = ["project", "created_date"]
unique_together = [("name", "project"), ("slug", "project")]
permissions = (
("view_milestone", "Can view milestone"),
)
def __str__(self):
return self.name
def __repr__(self):
return "<Milestone {0}>".format(self.id)
def clean(self):
# The estimated start date must not be later than the estimated finish date.
if self.estimated_start and self.estimated_finish and self.estimated_start > self.estimated_finish:
raise ValidationError(_('The estimated start must be previous to the estimated finish.'))
def save(self, *args, **kwargs):
if not self._importing or not self.modified_date:
self.modified_date = timezone.now()
if not self.slug:
self.slug = slugify_uniquely(self.name, self.__class__)
super().save(*args, **kwargs)
@cached_property
def cached_user_stories(self):
return (self.user_stories.prefetch_related("role_points", "role_points__points")
.annotate(num_tasks=Count("tasks")))
def _get_user_stories_points(self, user_stories):
role_points = [us.role_points.all() for us in user_stories]
flat_role_points = itertools.chain(*role_points)
flat_role_dicts = map(lambda x: {x.role_id: x.points.value if x.points.value else 0}, flat_role_points)
return dict_sum(*flat_role_dicts)
@property
def total_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories]
)
@property
def closed_points(self):
return self._get_user_stories_points(
[us for us in self.cached_user_stories if us.is_closed]
)
def total_closed_points_by_date(self, date):
# Milestone instance will keep a cache of the total closed points by date
if self._total_closed_points_by_date is None:
self._total_closed_points_by_date = {}
# We need to keep the milestone user stories indexed by id in a dict
user_stories = {}
for us in self.cached_user_stories:
us._total_us_points = sum(self._get_user_stories_points([us]).values())
user_stories[us.id] = us
tasks = self.tasks.\
select_related("user_story").\
exclude(finished_date__isnull=True).\
exclude(user_story__isnull=True)
# For each finished task we try to know the proportional part of points
# it represents from the user story and add it to the closed points
# for that date.
# This calculation is the total user story points divided by its number of tasks.
for task in tasks:
user_story = user_stories.get(task.user_story.id, None)
if user_story is None:
total_us_points = 0
us_tasks_counter = 0
else:
total_us_points = user_story._total_us_points
us_tasks_counter = user_story.num_tasks
# If the task was finished before starting the sprint it needs
# to be included
finished_date = task.finished_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
if us_tasks_counter != 0:
points_by_date += total_us_points / us_tasks_counter
self._total_closed_points_by_date[finished_date] = points_by_date
for us in self.cached_user_stories:
if us.num_tasks > 0 or us.finish_date is None:
continue
finished_date = us.finish_date.date()
if finished_date < self.estimated_start:
finished_date = self.estimated_start
points_by_date = self._total_closed_points_by_date.get(finished_date, 0)
points_by_date += us._total_us_points
self._total_closed_points_by_date[finished_date] = points_by_date
# At this point self._total_closed_points_by_date keeps a dict where the
# finished date of the task is the key and the value is the increment of points
# We are transforming this dict of increments into an accumulated one that includes
# all the dates from the sprint
acumulated_date_points = 0
current_date = self.estimated_start
while current_date <= self.estimated_finish:
acumulated_date_points += self._total_closed_points_by_date.get(current_date, 0)
self._total_closed_points_by_date[current_date] = acumulated_date_points
current_date = current_date + datetime.timedelta(days=1)
return self._total_closed_points_by_date.get(date, 0)
| ta:
|
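`total_closed_points_by_date` above first collects per-date point increments and then folds them into a running total across the sprint dates. A stripped-down sketch of that accumulation step (illustrative names, plain `datetime.date` keys assumed):

```python
import datetime

def accumulate(increments: dict, start: datetime.date, finish: datetime.date) -> dict:
    """Turn {date: points closed on that date} into {date: points closed up to that date}."""
    totals = {}
    running = 0
    current = start
    while current <= finish:
        running += increments.get(current, 0)
        totals[current] = running
        current += datetime.timedelta(days=1)
    return totals

start = datetime.date(2017, 1, 1)
closed = {datetime.date(2017, 1, 2): 5, datetime.date(2017, 1, 4): 3}
print(accumulate(closed, start, datetime.date(2017, 1, 5)))
# {date(2017,1,1): 0, date(2017,1,2): 5, date(2017,1,3): 5, date(2017,1,4): 8, date(2017,1,5): 8}
```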
test_spancat.py | from numpy.testing import assert_equal
from spacy.language import Language
from spacy.training import Example
from spacy.util import fix_random_seed, registry
SPAN_KEY = "labeled_spans"
TRAIN_DATA = [
("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}),
(
"I like London and Berlin.",
{"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}},
),
]
def make_get_examples(nlp):
train_examples = []
for t in TRAIN_DATA:
eg = Example.from_dict(nlp.make_doc(t[0]), t[1])
train_examples.append(eg)
def get_examples():
return train_examples
return get_examples
def test_simple_train():
fix_random_seed(0)
nlp = Language()
spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
get_examples = make_get_examples(nlp)
nlp.initialize(get_examples)
sgd = nlp.create_optimizer()
assert len(spancat.labels) != 0
for i in range(40):
losses = {}
nlp.update(list(get_examples()), losses=losses, drop=0.1, sgd=sgd)
doc = nlp("I like London and Berlin.")
assert doc.spans[spancat.key] == doc.spans[SPAN_KEY]
assert len(doc.spans[spancat.key]) == 2
assert doc.spans[spancat.key][0].text == "London"
scores = nlp.evaluate(get_examples())
assert f"spans_{SPAN_KEY}_f" in scores
assert scores[f"spans_{SPAN_KEY}_f"] == 1.0
def test_ngram_suggester(en_tokenizer):
# test different n-gram lengths
for size in [1, 2, 3]:
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[size])
docs = [
en_tokenizer(text)
for text in [
"a",
"a b",
"a b c",
"a b c d",
"a b c d e",
"a " * 100,
]
]
ngrams = ngram_suggester(docs)
# span sizes are correct
for s in ngrams.data:
assert s[1] - s[0] == size
# spans are within docs
offset = 0
for i, doc in enumerate(docs):
spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]]
spans_set = set()
for span in spans:
assert 0 <= span[0] < len(doc)
assert 0 < span[1] <= len(doc)
spans_set.add((span[0], span[1]))
# spans are unique
assert spans.shape[0] == len(spans_set)
offset += ngrams.lengths[i]
# the number of spans is correct
assert_equal(ngrams.lengths, [max(0, len(doc) - (size - 1)) for doc in docs])
# test 1-3-gram suggestions
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1, 2, 3])
docs = [
en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [1, 3, 6, 9, 12])
assert_equal(
ngrams.data,
[
# doc 0
[0, 1],
# doc 1
[0, 1],
[1, 2],
[0, 2],
# doc 2
[0, 1],
[1, 2],
[2, 3],
[0, 2],
[1, 3],
[0, 3],
# doc 3
[0, 1],
[1, 2],
[2, 3], | [0, 2],
[1, 3],
[2, 4],
[0, 3],
[1, 4],
# doc 4
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[0, 2],
[1, 3],
[2, 4],
[3, 5],
[0, 3],
[1, 4],
[2, 5],
],
)
# test some empty docs
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1])
docs = [en_tokenizer(text) for text in ["", "a", ""]]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [len(doc) for doc in docs])
# test all empty docs
ngram_suggester = registry.misc.get("ngram_suggester.v1")(sizes=[1])
docs = [en_tokenizer(text) for text in ["", "", ""]]
ngrams = ngram_suggester(docs)
assert_equal(ngrams.lengths, [len(doc) for doc in docs]) | [3, 4], |
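The n-gram test above asserts that the suggester produces `max(0, len(doc) - (size - 1))` spans per document. A small stand-alone check of that identity (hypothetical helper, not the spaCy API):

```python
def ngram_spans(n_tokens: int, size: int) -> list:
    """All (start, end) spans of a fixed size over a doc of n_tokens tokens."""
    return [(i, i + size) for i in range(n_tokens - size + 1)]

for n_tokens in range(0, 6):
    for size in (1, 2, 3):
        spans = ngram_spans(n_tokens, size)
        assert len(spans) == max(0, n_tokens - (size - 1))
print("counts match max(0, n - (size - 1)) for all cases checked")
```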
bank_test.go | package database_test
import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/lib/pq"
dbtypes "github.com/forbole/bdjuno/database/types"
"github.com/forbole/bdjuno/types"
bddbtypes "github.com/forbole/bdjuno/database/types"
)
func (suite *DbTestSuite) TestSaveAccountBalance() {
_ = suite.getBlock(9)
_ = suite.getBlock(10)
_ = suite.getBlock(11)
address1 := suite.getAccount("cosmos140xsjjg6pwkjp0xjz8zru7ytha60l5aee9nlf7")
address2 := suite.getAccount("cosmos1tcpsdy9alvucwj0h23n56tey6zmrvkm7sndh9j")
// Save the data
err := suite.database.SaveAccountBalances([]types.AccountBalance{
types.NewAccountBalance(
address1.String(),
sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10)),
sdk.NewCoin("uatom", sdk.NewInt(20)),
),
10,
),
types.NewAccountBalance(
address2.String(),
sdk.NewCoins(
sdk.NewCoin("uatom", sdk.NewInt(100)),
),
10,
),
})
suite.Require().NoError(err)
// Verify the data
expected := []bddbtypes.AccountBalanceRow{
bddbtypes.NewAccountBalanceRow(
address1.String(),
dbtypes.NewDbCoins(sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10)),
sdk.NewCoin("uatom", sdk.NewInt(20)),
)),
10,
),
bddbtypes.NewAccountBalanceRow(
address2.String(),
dbtypes.NewDbCoins(sdk.NewCoins(
sdk.NewCoin("uatom", sdk.NewInt(100)),
)),
10,
),
}
var rows []bddbtypes.AccountBalanceRow
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM account_balance ORDER BY address`)
suite.Require().NoError(err)
suite.Require().Len(rows, len(expected))
for index, row := range rows {
suite.Require().True(row.Equal(expected[index]))
}
// Update the data
err = suite.database.SaveAccountBalances([]types.AccountBalance{
types.NewAccountBalance(
address1.String(),
sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10)),
),
9,
), | sdk.NewCoin("uatom", sdk.NewInt(100)),
sdk.NewCoin("desmos", sdk.NewInt(200)),
),
11,
),
})
suite.Require().NoError(err)
// Verify the data
expected = []bddbtypes.AccountBalanceRow{
bddbtypes.NewAccountBalanceRow(
address1.String(),
dbtypes.NewDbCoins(sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10)),
sdk.NewCoin("uatom", sdk.NewInt(20)),
)),
10,
),
bddbtypes.NewAccountBalanceRow(
address2.String(),
dbtypes.NewDbCoins(sdk.NewCoins(
sdk.NewCoin("uatom", sdk.NewInt(100)),
sdk.NewCoin("desmos", sdk.NewInt(200)),
)),
11,
),
}
rows = []bddbtypes.AccountBalanceRow{}
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM account_balance ORDER BY address`)
suite.Require().NoError(err)
suite.Require().Len(rows, len(expected))
for index, row := range rows {
suite.Require().True(row.Equal(expected[index]))
}
}
func (suite *DbTestSuite) TestBigDipperDb_SaveSupply() {
// Save the data
original := sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10000)),
sdk.NewCoin("uatom", sdk.NewInt(15)),
)
err := suite.database.SaveSupply(original, 10)
suite.Require().NoError(err)
// Verify the data
expected := bddbtypes.NewSupplyRow(dbtypes.NewDbCoins(original), 10)
var rows []bddbtypes.SupplyRow
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM supply`)
suite.Require().NoError(err)
suite.Require().Len(rows, 1, "supply table should contain only one row")
suite.Require().True(expected.Equals(rows[0]))
// ----------------------------------------------------------------------------------------------------------------
// Try updating with a lower height
coins := sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10000)),
sdk.NewCoin("uatom", sdk.NewInt(15)),
)
err = suite.database.SaveSupply(coins, 9)
suite.Require().NoError(err)
// Verify the data
rows = []bddbtypes.SupplyRow{}
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM supply`)
suite.Require().NoError(err)
suite.Require().Len(rows, 1, "supply table should contain only one row")
suite.Require().True(expected.Equals(rows[0]))
// ----------------------------------------------------------------------------------------------------------------
// Try updating with same height
coins = sdk.NewCoins(sdk.NewCoin("uakash", sdk.NewInt(10)))
err = suite.database.SaveSupply(coins, 10)
suite.Require().NoError(err)
// Verify the data
expected = bddbtypes.NewSupplyRow(dbtypes.NewDbCoins(coins), 10)
rows = []bddbtypes.SupplyRow{}
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM supply`)
suite.Require().NoError(err)
suite.Require().Len(rows, 1, "supply table should contain only one row")
suite.Require().True(expected.Equals(rows[0]))
// ----------------------------------------------------------------------------------------------------------------
// Try updating with higher height
coins = sdk.NewCoins(sdk.NewCoin("btc", sdk.NewInt(10)))
err = suite.database.SaveSupply(coins, 20)
suite.Require().NoError(err)
// Verify the data
expected = bddbtypes.NewSupplyRow(dbtypes.NewDbCoins(coins), 20)
rows = []bddbtypes.SupplyRow{}
err = suite.database.Sqlx.Select(&rows, `SELECT * FROM supply`)
suite.Require().NoError(err)
suite.Require().Len(rows, 1, "supply table should contain only one row")
suite.Require().True(expected.Equals(rows[0]))
}
func (suite *DbTestSuite) TestBigDipperDb_GetTokenNames() {
coins := sdk.NewCoins(
sdk.NewCoin("desmos", sdk.NewInt(10000)),
sdk.NewCoin("uatom", sdk.NewInt(15)),
)
_, err := suite.database.Sql.Exec("INSERT INTO supply(coins,height) VALUES ($1,$2) ", pq.Array(dbtypes.NewDbCoins(coins)), 10)
suite.Require().NoError(err)
expected := [2]string{"desmos", "uatom"}
result, err := suite.database.GetTokenNames()
suite.Require().NoError(err)
for i, row := range expected {
suite.Require().True(row == (result[i]))
}
} | types.NewAccountBalance(
address2.String(),
sdk.NewCoins( |
ini.rs | use indexmap::map::IndexMap;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, Config, Example, IntoPipelineData, PipelineData, ShellError, Signature, Span, Value,
};
#[derive(Clone)]
pub struct FromIni;
impl Command for FromIni {
fn name(&self) -> &str {
"from ini"
}
fn signature(&self) -> Signature {
Signature::build("from ini").category(Category::Formats)
}
fn usage(&self) -> &str {
"Parse text as .ini and create table"
}
fn examples(&self) -> Vec<Example> {
vec![Example {
example: "'[foo]
a=1
b=2' | from ini",
description: "Converts ini formatted string to table",
result: Some(Value::Record {
cols: vec!["foo".to_string()],
vals: vec![Value::Record {
cols: vec!["a".to_string(), "b".to_string()],
vals: vec![
Value::String {
val: "1".to_string(),
span: Span::test_data(),
},
Value::String {
val: "2".to_string(),
span: Span::test_data(),
},
],
span: Span::test_data(),
}],
span: Span::test_data(),
}),
}]
}
fn run(
&self,
_engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<nu_protocol::PipelineData, ShellError> {
let head = call.head;
let config = stack.get_config().unwrap_or_default();
from_ini(input, head, &config)
}
}
pub fn from_ini_string_to_value(s: String, span: Span) -> Result<Value, ShellError> |
fn from_ini(input: PipelineData, head: Span, config: &Config) -> Result<PipelineData, ShellError> {
let concat_string = input.collect_string("", config)?;
match from_ini_string_to_value(concat_string, head) {
Ok(x) => Ok(x.into_pipeline_data()),
Err(other) => Err(other),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_examples() {
use crate::test_examples;
test_examples(FromIni {})
}
}
| {
let v: Result<IndexMap<String, IndexMap<String, String>>, serde_ini::de::Error> =
serde_ini::from_str(&s);
match v {
Ok(index_map) => {
let (cols, vals) = index_map
.into_iter()
.fold((vec![], vec![]), |mut acc, (k, v)| {
let (cols, vals) = v.into_iter().fold((vec![], vec![]), |mut acc, (k, v)| {
acc.0.push(k);
acc.1.push(Value::String { val: v, span });
acc
});
acc.0.push(k);
acc.1.push(Value::Record { cols, vals, span });
acc
});
Ok(Value::Record { cols, vals, span })
}
Err(err) => Err(ShellError::UnsupportedInput(
format!("Could not load ini: {}", err),
span,
)),
}
} |
common.d.ts | export default rehypePrismCommon;
/**
* Rehype prism plugin that highlights code blocks with refractor (prismjs)
* Supported languages: https://github.com/wooorm/refractor#data
*
* Consider using rehypePrismGenerator to generate a plugin | */
declare const rehypePrismCommon: import("unified").Plugin<[import("./generator.js").Options?], import("hast").Root, import("hast").Root>; | * that supports your required languages. |
util.rs | use std::{io, fs};
use serde::{Serialize, Deserialize};
#[derive(Serialize, Deserialize)]
struct Credentials {
user: String,
pass: String,
#[serde(default)]
serial: Vec<String>,
}
type CredentialsFile = Vec<Credentials>;
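// Illustrative credentials file shape (an example, not taken from the original source):
// the file is a JSON array of objects matching `Credentials`, e.g.
// [
//   { "user": "admin", "pass": "secret", "serial": ["SN-123"] },
//   { "user": "fallback", "pass": "hunter2" }
// ]
// The `serial` field may be omitted thanks to `#[serde(default)]`.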
/// Load a credential file and return the first matching credentials.
pub fn load_credentials(path: &str, serial: Option<String>) -> io::Result<Option<(String, String)>>{
let cred_json = fs::read_to_string(path)?;
let cred_file: CredentialsFile = serde_json::from_str(&cred_json)?;
for cred in cred_file {
if serial.is_some() && cred.serial.len() > 0 {
if cred.serial.contains(serial.as_ref().unwrap()) {
return Ok(Some((cred.user, cred.pass)));
}
} else {
return Ok(Some((cred.user, cred.pass))); | }
return Ok(None);
} | } |
index.js | import React from 'react'
import { Header, Icon, Image, Menu, Segment, Sidebar } from 'semantic-ui-react'
import styled from 'styled-components';
import AddItemModal from '../AddItemModal'
const Tab = styled.a`
position:fixed;
padding-left:-50px;
z-index:100;
left:5px;
padding:10px;
background-color: black;
color:white;
cursor:pointer;
margin-left:-50px;
padding-left:60px;
`
const StyledPusher = styled(Sidebar.Pusher)`
overflow: unset;
`
// styled-components must be capitalized so JSX treats them as components rather than DOM tags
const Fixed = styled(Segment)`
display:flex;
flex-direction:column;
`
class | extends React.Component {
state = {
visible: true,
animation: 'push',
direction: 'right',
addItemModal: false,
}
openAddItemModal = () => {
this.setState({addItemModal:true})
}
handleModalClose = () => {
this.setState({addItemModal:false})
}
hideMenu = () => {
const { visible } = this.state
this.setState({visible:!visible})
}
render() {
const { children } = this.props
const {visible, addItemModal} = this.state
return (
<div>
<Sidebar.Pushable style={{transform:'none'}} >
<Sidebar as={Menu} animation='push' icon='labeled' inverted vertical visible={visible} width='thin' style={{position:'fixed'}}>
<Menu.Item as='a' onClick={this.openAddItemModal}>
<Icon name='plus'/>
New Item
</Menu.Item>
<Menu.Item as='a'>
<Icon name='shipping'/>
Orders
</Menu.Item>
<Menu.Item as='a'>
<Icon name='settings'/>
Edit Store
</Menu.Item>
<Menu.Item as='a'>
<Icon name='lightning'/>
Upgrades
</Menu.Item>
</Sidebar>
<Sidebar.Pusher style={{overflow:'unset'}} >
<Fixed>
<AddItemModal open={addItemModal} onClose={this.handleModalClose}/>
<Tab onClick={this.hideMenu}>Admin<br/>
Actions ></Tab>
{children}
</Fixed>
</Sidebar.Pusher>
</Sidebar.Pushable>
</div>
)
}
}
export default SidebarExampleVisible | SidebarExampleVisible |
xray_success.py | import time
import json
import socket | END_TIME = time.time()
SEGMENT_DOC["end_time"] = END_TIME
HEADER = json.dumps({"format": "json", "version": 1})
TRACE_DATA = HEADER + "\n" + json.dumps(SEGMENT_DOC)
UDP_IP = "127.0.0.1"
UDP_PORT = 2000
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(TRACE_DATA, (UDP_IP, UDP_PORT))
print json.dumps(SEGMENT_DOC) | import sys
SEGMENT_DOC = json.loads(sys.argv[1])
del SEGMENT_DOC["in_progress"] |
frame.go | package rtda
import "github.com/yunair/jvmgo/ch09/rtda/heap"
type Frame struct {
lower *Frame
localVars LocalVars
operandStack *OperandStack
method *heap.Method
thread *Thread
nextPC int
}
func | (thread *Thread, method *heap.Method) *Frame {
return &Frame{
thread: thread,
method: method,
localVars: newLocalVars(method.MaxLocals()),
operandStack: newOperandStack(method.MaxStack()),
}
}
func (self *Frame) LocalVars() LocalVars {
return self.localVars
}
func (self *Frame) OperandStack() *OperandStack {
return self.operandStack
}
func (self *Frame) SetNextPC(nextPC int) {
self.nextPC = nextPC
}
func (self *Frame) Thread() *Thread {
return self.thread
}
func (self *Frame) NextPC() int {
return self.nextPC
}
func (self *Frame) Method() *heap.Method {
return self.method
}
func (self *Frame) RevertNextPC() {
self.nextPC = self.thread.pc
}
| newFrame |
emptyTypeArgumentListWithNew.ts | class | <T> { }
new foo<>();
// https://github.com/microsoft/TypeScript/issues/33041
class noParams {}
new noParams<>(); | foo |
state.rs | //! State transition types
use {
crate::error::StakePoolError,
borsh::{BorshDeserialize, BorshSchema, BorshSerialize},
solana_program::{account_info::AccountInfo, msg, program_error::ProgramError, pubkey::Pubkey},
spl_math::checked_ceil_div::CheckedCeilDiv,
std::convert::TryFrom,
};
/// Enum representing the account type managed by the program
#[derive(Clone, Debug, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub enum AccountType {
/// If the account has not been initialized, the enum will be 0
Uninitialized,
/// Stake pool
StakePool,
/// Validator stake list
ValidatorList,
}
impl Default for AccountType {
fn default() -> Self {
AccountType::Uninitialized
}
}
/// Initialized program details.
#[repr(C)]
#[derive(Clone, Debug, Default, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub struct StakePool {
/// Account type, must be StakePool currently
pub account_type: AccountType,
/// Manager authority, allows for updating the staker, manager, and fee account
pub manager: Pubkey,
/// Staker authority, allows for adding and removing validators, and managing stake
/// distribution
pub staker: Pubkey,
/// Deposit authority
///
/// If a depositor pubkey is specified on initialization, then deposits must be
/// signed by this authority. If no deposit authority is specified,
/// then the stake pool will default to the result of:
/// `Pubkey::find_program_address(
/// &[&stake_pool_address.to_bytes()[..32], b"deposit"],
/// program_id,
/// )`
pub deposit_authority: Pubkey,
/// Withdrawal authority bump seed
/// for `create_program_address(&[state::StakePool account, "withdrawal"])`
pub withdraw_bump_seed: u8,
/// Validator stake list storage account
pub validator_list: Pubkey,
/// Reserve stake account, holds deactivated stake
pub reserve_stake: Pubkey,
/// Pool Mint
pub pool_mint: Pubkey,
/// Manager fee account
pub manager_fee_account: Pubkey,
/// Pool token program id
pub token_program_id: Pubkey,
/// Total stake under management.
/// Note that if `last_update_epoch` does not match the current epoch then
/// this field may not be accurate
pub total_stake_lamports: u64,
/// Total supply of pool tokens (should always match the supply in the Pool Mint)
pub pool_token_supply: u64,
/// Last epoch the `total_stake_lamports` field was updated
pub last_update_epoch: u64,
/// Fee applied to deposits
pub fee: Fee,
}
impl StakePool {
/// calculate the pool tokens that should be minted for a deposit of `stake_lamports`
pub fn calc_pool_tokens_for_deposit(&self, stake_lamports: u64) -> Option<u64> {
if self.total_stake_lamports == 0 || self.pool_token_supply == 0 {
return Some(stake_lamports);
}
u64::try_from(
(stake_lamports as u128)
.checked_mul(self.pool_token_supply as u128)?
.checked_div(self.total_stake_lamports as u128)?,
)
.ok()
}
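// Worked example (derived from the formula above, not in the original source):
// with total_stake_lamports = 100 and pool_token_supply = 50, depositing 10 lamports
// mints 10 * 50 / 100 = 5 pool tokens; an empty pool mints tokens 1:1 with lamports.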
/// calculate the pool tokens that should be burned for a withdrawal of `stake_lamports`
pub fn calc_pool_tokens_for_withdraw(&self, stake_lamports: u64) -> Option<u64> {
let (quotient, _) = (stake_lamports as u128)
.checked_mul(self.pool_token_supply as u128)?
.checked_ceil_div(self.total_stake_lamports as u128)?;
u64::try_from(quotient).ok()
}
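// Worked example (derived, not in the original source): withdrawing 10 lamports from a
// pool with pool_token_supply = 33 and total_stake_lamports = 100 burns
// ceil(10 * 33 / 100) = ceil(3.3) = 4 pool tokens; the ceiling division rounds in the
// pool's favor so a withdrawal never burns too few tokens.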
/// calculate lamports amount on withdrawal
pub fn calc_lamports_withdraw_amount(&self, pool_tokens: u64) -> Option<u64> {
u64::try_from(
(pool_tokens as u128)
.checked_mul(self.total_stake_lamports as u128)?
.checked_div(self.pool_token_supply as u128)?,
)
.ok()
}
/// calculate the fee in pool tokens that goes to the manager
pub fn calc_fee_amount(&self, reward_lamports: u64) -> Option<u64> {
if self.fee.denominator == 0 {
return Some(0);
}
let pool_amount = self.calc_pool_tokens_for_deposit(reward_lamports)?;
u64::try_from(
(pool_amount as u128)
.checked_mul(self.fee.numerator as u128)?
.checked_div(self.fee.denominator as u128)?,
)
.ok()
}
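// Worked example (derived, not in the original source): with total_stake_lamports = 100,
// pool_token_supply = 50 and a fee of 1/10, a reward of 100 lamports converts to
// 100 * 50 / 100 = 50 pool tokens, of which 50 * 1 / 10 = 5 go to the manager.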
/// Checks that the withdraw or deposit authority is valid
fn check_authority(
authority_address: &Pubkey,
program_id: &Pubkey,
stake_pool_address: &Pubkey,
authority_seed: &[u8],
bump_seed: u8,
) -> Result<(), ProgramError> {
let expected_address = Pubkey::create_program_address(
&[
&stake_pool_address.to_bytes()[..32],
authority_seed,
&[bump_seed],
],
program_id,
)?;
if *authority_address == expected_address {
Ok(())
} else {
msg!(
"Incorrect authority provided, expected {}, received {}",
expected_address,
authority_address
);
Err(StakePoolError::InvalidProgramAddress.into())
}
}
/// Checks that the withdraw authority is valid
pub(crate) fn check_authority_withdraw(
&self,
withdraw_authority: &Pubkey,
program_id: &Pubkey,
stake_pool_address: &Pubkey,
) -> Result<(), ProgramError> {
Self::check_authority(
withdraw_authority,
program_id,
stake_pool_address,
crate::AUTHORITY_WITHDRAW,
self.withdraw_bump_seed,
)
}
/// Checks that the deposit authority is valid
pub(crate) fn check_deposit_authority(
&self,
deposit_authority: &Pubkey,
) -> Result<(), ProgramError> {
if self.deposit_authority == *deposit_authority {
Ok(())
} else {
Err(StakePoolError::InvalidProgramAddress.into())
}
}
/// Check that the given mint account is the stake pool's pool mint
pub(crate) fn check_mint(&self, mint_info: &AccountInfo) -> Result<(), ProgramError> {
if *mint_info.key != self.pool_mint {
Err(StakePoolError::WrongPoolMint.into())
} else {
Ok(())
}
}
/// Check manager validity and signature
pub(crate) fn check_manager(&self, manager_info: &AccountInfo) -> Result<(), ProgramError> {
if *manager_info.key != self.manager {
msg!(
"Incorrect manager provided, expected {}, received {}",
self.manager,
manager_info.key
);
return Err(StakePoolError::WrongManager.into());
}
if !manager_info.is_signer {
msg!("Manager signature missing");
return Err(StakePoolError::SignatureMissing.into());
}
Ok(())
}
/// Check staker validity and signature
pub(crate) fn check_staker(&self, staker_info: &AccountInfo) -> Result<(), ProgramError> {
if *staker_info.key != self.staker {
msg!(
"Incorrect staker provided, expected {}, received {}",
self.staker,
staker_info.key
);
return Err(StakePoolError::WrongStaker.into());
}
if !staker_info.is_signer {
msg!("Staker signature missing");
return Err(StakePoolError::SignatureMissing.into()); |
/// Check the validator list is valid
pub fn check_validator_list(
&self,
validator_list_info: &AccountInfo,
) -> Result<(), ProgramError> {
if *validator_list_info.key != self.validator_list {
msg!(
"Invalid validator list provided, expected {}, received {}",
self.validator_list,
validator_list_info.key
);
Err(StakePoolError::InvalidValidatorStakeList.into())
} else {
Ok(())
}
}
/// Check that the given account is the stake pool's reserve stake account
pub fn check_reserve_stake(
&self,
reserve_stake_info: &AccountInfo,
) -> Result<(), ProgramError> {
if *reserve_stake_info.key != self.reserve_stake {
msg!(
"Invalid reserve stake provided, expected {}, received {}",
self.reserve_stake,
reserve_stake_info.key
);
Err(StakePoolError::InvalidProgramAddress.into())
} else {
Ok(())
}
}
/// Check if StakePool is actually initialized as a stake pool
pub fn is_valid(&self) -> bool {
self.account_type == AccountType::StakePool
}
/// Check if StakePool is currently uninitialized
pub fn is_uninitialized(&self) -> bool {
self.account_type == AccountType::Uninitialized
}
}
/// Storage list for all validator stake accounts in the pool.
#[repr(C)]
#[derive(Clone, Debug, Default, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub struct ValidatorList {
/// Account type, must be ValidatorList currently
pub account_type: AccountType,
/// Maximum allowable number of validators
pub max_validators: u32,
/// List of stake info for each validator in the pool
pub validators: Vec<ValidatorStakeInfo>,
}
/// Status of the stake account in the validator list, for accounting
#[derive(Copy, Clone, Debug, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub enum StakeStatus {
/// Stake account is active, there may be a transient stake as well
Active,
/// Only transient stake account exists, when a transient stake is
/// deactivating during validator removal
DeactivatingTransient,
/// No more validator stake accounts exist, entry ready for removal during
/// `UpdateStakePoolBalance`
ReadyForRemoval,
}
impl Default for StakeStatus {
fn default() -> Self {
Self::Active
}
}
/// Information about a single validator stake account
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub struct ValidatorStakeInfo {
/// Status of the validator stake account
pub status: StakeStatus,
/// Validator vote account address
pub vote_account_address: Pubkey,
/// Amount of stake delegated to this validator
/// Note that if `last_update_epoch` does not match the current epoch then this field may not
/// be accurate
pub stake_lamports: u64,
/// Last epoch the `stake_lamports` field was updated
pub last_update_epoch: u64,
}
impl ValidatorList {
/// Create an empty instance containing space for `max_validators`
pub fn new(max_validators: u32) -> Self {
Self {
account_type: AccountType::ValidatorList,
max_validators,
validators: vec![ValidatorStakeInfo::default(); max_validators as usize],
}
}
/// Calculate the number of validator entries that fit in the provided length
pub fn calculate_max_validators(buffer_length: usize) -> usize {
let header_size = 1 + 4 + 4;
buffer_length.saturating_sub(header_size) / 49
}
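// Where 49 and the 1 + 4 + 4 header come from (derived from the layouts in this file,
// not stated explicitly in the original): 1 byte for the borsh AccountType enum,
// 4 bytes for `max_validators`, 4 bytes for the Vec length prefix, and each
// ValidatorStakeInfo packs to 1 (status) + 32 (vote_account_address)
// + 8 (stake_lamports) + 8 (last_update_epoch) = 49 bytes.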
/// Check if contains validator with particular pubkey
pub fn contains(&self, vote_account_address: &Pubkey) -> bool {
self.validators
.iter()
.any(|x| x.vote_account_address == *vote_account_address)
}
/// Get a mutable reference to the stake info of the validator with the given vote account pubkey, if any
pub fn find_mut(&mut self, vote_account_address: &Pubkey) -> Option<&mut ValidatorStakeInfo> {
self.validators
.iter_mut()
.find(|x| x.vote_account_address == *vote_account_address)
}
/// Get the stake info of the validator with the given vote account pubkey, if any
pub fn find(&self, vote_account_address: &Pubkey) -> Option<&ValidatorStakeInfo> {
self.validators
.iter()
.find(|x| x.vote_account_address == *vote_account_address)
}
/// Check if validator stake list is actually initialized as a validator stake list
pub fn is_valid(&self) -> bool {
self.account_type == AccountType::ValidatorList
}
/// Check if the validator stake list is uninitialized
pub fn is_uninitialized(&self) -> bool {
self.account_type == AccountType::Uninitialized
}
}
/// Fee rate as a ratio, minted on `UpdateStakePoolBalance` as a proportion of
/// the rewards
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)]
pub struct Fee {
/// denominator of the fee ratio
pub denominator: u64,
/// numerator of the fee ratio
pub numerator: u64,
}
#[cfg(test)]
mod test {
use {
super::*,
crate::borsh::{get_instance_packed_len, try_from_slice_unchecked},
proptest::prelude::*,
solana_program::borsh::get_packed_len,
};
#[test]
fn test_state_packing() {
let max_validators = 10_000;
let size = get_instance_packed_len(&ValidatorList::new(max_validators)).unwrap();
// Not initialized
let stake_list = ValidatorList {
account_type: AccountType::Uninitialized,
max_validators: 0,
validators: vec![],
};
let mut byte_vec = vec![0u8; size];
let mut bytes = byte_vec.as_mut_slice();
stake_list.serialize(&mut bytes).unwrap();
let stake_list_unpacked = try_from_slice_unchecked::<ValidatorList>(&byte_vec).unwrap();
assert_eq!(stake_list_unpacked, stake_list);
// Empty
let stake_list = ValidatorList {
account_type: AccountType::ValidatorList,
max_validators: 0,
validators: vec![],
};
let mut byte_vec = vec![0u8; size];
let mut bytes = byte_vec.as_mut_slice();
stake_list.serialize(&mut bytes).unwrap();
let stake_list_unpacked = try_from_slice_unchecked::<ValidatorList>(&byte_vec).unwrap();
assert_eq!(stake_list_unpacked, stake_list);
// With several accounts
let stake_list = ValidatorList {
account_type: AccountType::ValidatorList,
max_validators,
validators: vec![
ValidatorStakeInfo {
status: StakeStatus::Active,
vote_account_address: Pubkey::new_from_array([1; 32]),
stake_lamports: 123456789,
last_update_epoch: 987654321,
},
ValidatorStakeInfo {
status: StakeStatus::DeactivatingTransient,
vote_account_address: Pubkey::new_from_array([2; 32]),
stake_lamports: 998877665544,
last_update_epoch: 11223445566,
},
ValidatorStakeInfo {
status: StakeStatus::ReadyForRemoval,
vote_account_address: Pubkey::new_from_array([3; 32]),
stake_lamports: 0,
last_update_epoch: 999999999999999,
},
],
};
let mut byte_vec = vec![0u8; size];
let mut bytes = byte_vec.as_mut_slice();
stake_list.serialize(&mut bytes).unwrap();
let stake_list_unpacked = try_from_slice_unchecked::<ValidatorList>(&byte_vec).unwrap();
assert_eq!(stake_list_unpacked, stake_list);
}
proptest! {
#[test]
fn stake_list_size_calculation(test_amount in 0..=100_000_u32) {
let validators = ValidatorList::new(test_amount);
let size = get_instance_packed_len(&validators).unwrap();
assert_eq!(ValidatorList::calculate_max_validators(size), test_amount as usize);
assert_eq!(ValidatorList::calculate_max_validators(size.saturating_add(1)), test_amount as usize);
assert_eq!(ValidatorList::calculate_max_validators(size.saturating_add(get_packed_len::<ValidatorStakeInfo>())), (test_amount + 1) as usize);
assert_eq!(ValidatorList::calculate_max_validators(size.saturating_sub(1)), (test_amount.saturating_sub(1)) as usize);
}
}
} | }
Ok(())
} |
admin.py | from .models import *
# Register your models here.
admin.site.register([Content,Profile,Comment]) | from django.contrib import admin |
|
constants.go | package delegation_backend
import (
"os"
"time"
)
const MAX_SUBMIT_PAYLOAD_SIZE = 50000000 // max payload size in bytes
const REQUESTS_PER_PK_HOURLY = 120
const DELEGATION_BACKEND_LISTEN_TO = ":8080"
const TIME_DIFF_DELTA time.Duration = -5 * 60 * 1000000000 // -5m
const WHITELIST_REFRESH_INTERVAL = 10 * 60 * 1000000000 // 10m
const DELEGATION_WHITELIST_LIST = "Form Responses 1"
const DELEGATION_WHITELIST_COLUMN = "E"
// Production
const PROD_WHITELIST_SPREADSHEET_ID = "1xiKppb0BFUo8IKM2itIx2EWIQbBzUlFxgtZlKdnrLCU"
const PROD_CLOUD_BUCKET_NAME = "foundation-delegation-uptime"
const TEST_WHITELIST_SPREADSHEET_ID = "1NODwwcVxLNnCI4XnIrGdGBSjointN4MZ8QZ7wqgtSTQ"
const TEST_CLOUD_BUCKET_NAME = "georgeee-delegation-test-1"
func CloudBucketName() string {
if os.Getenv("TEST") == "" | else {
return TEST_CLOUD_BUCKET_NAME
}
}
func WhitelistSpreadsheetId() string {
if os.Getenv("TEST") == "" {
return PROD_WHITELIST_SPREADSHEET_ID
} else {
return TEST_WHITELIST_SPREADSHEET_ID
}
}
var PK_PREFIX = [...]byte{1, 1}
var SIG_PREFIX = [...]byte{1}
var BLOCK_HASH_PREFIX = [...]byte{1}
const NETWORK_ID = 1 // mainnet
const PK_LENGTH = 33 // one field element (32B) + 1 bit (encoded as full byte)
const SIG_LENGTH = 64 // one field element (32B) and one scalar (32B)
// we reuse the state hash version byte here, although this value is not a state hash
const BASE58CHECK_VERSION_BLOCK_HASH byte = 0x10
const BASE58CHECK_VERSION_PK byte = 0xCB
const BASE58CHECK_VERSION_SIG byte = 0x9A
| {
return PROD_CLOUD_BUCKET_NAME
} |
prefixes.go | package peering
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// PrefixesClient is the peering Client
type PrefixesClient struct {
BaseClient
}
// NewPrefixesClient creates an instance of the PrefixesClient client.
func NewPrefixesClient(subscriptionID string) PrefixesClient {
return NewPrefixesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewPrefixesClientWithBaseURI creates an instance of the PrefixesClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewPrefixesClientWithBaseURI(baseURI string, subscriptionID string) PrefixesClient {
return PrefixesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates a new prefix with the specified name under the given subscription, resource group and peering
// service.
// Parameters:
// resourceGroupName - the name of the resource group.
// peeringServiceName - the name of the peering service.
// prefixName - the name of the prefix.
// peeringServicePrefix - the properties needed to create a prefix.
func (client PrefixesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, peeringServicePrefix ServicePrefix) (result ServicePrefix, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, peeringServiceName, prefixName, peeringServicePrefix)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client PrefixesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, peeringServicePrefix ServicePrefix) (*http.Request, error) {
pathParameters := map[string]interface{}{
"peeringServiceName": autorest.Encode("path", peeringServiceName),
"prefixName": autorest.Encode("path", prefixName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-09-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
autorest.WithJSON(peeringServicePrefix),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client PrefixesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client PrefixesClient) CreateOrUpdateResponder(resp *http.Response) (result ServicePrefix, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes an existing prefix with the specified name under the given subscription, resource group and peering
// service.
// Parameters:
// resourceGroupName - the name of the resource group.
// peeringServiceName - the name of the peering service.
// prefixName - the name of the prefix.
func (client PrefixesClient) Delete(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, peeringServiceName, prefixName)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client PrefixesClient) DeletePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"peeringServiceName": autorest.Encode("path", peeringServiceName),
"prefixName": autorest.Encode("path", prefixName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-09-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client PrefixesClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client PrefixesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets an existing prefix with the specified name under the given subscription, resource group and peering
// service.
// Parameters:
// resourceGroupName - the name of the resource group.
// peeringServiceName - the name of the peering service.
// prefixName - the name of the prefix.
// expand - the properties to be expanded.
func (client PrefixesClient) Get(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, expand string) (result ServicePrefix, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, peeringServiceName, prefixName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client PrefixesClient) GetPreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"peeringServiceName": autorest.Encode("path", peeringServiceName),
"prefixName": autorest.Encode("path", prefixName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-09-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client PrefixesClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client PrefixesClient) GetResponder(resp *http.Response) (result ServicePrefix, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByPeeringService lists all prefixes under the given subscription, resource group and peering service.
// Parameters:
// resourceGroupName - the name of the resource group.
// peeringServiceName - the name of the peering service.
// expand - the properties to be expanded.
func (client PrefixesClient) ListByPeeringService(ctx context.Context, resourceGroupName string, peeringServiceName string, expand string) (result ServicePrefixListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.ListByPeeringService")
defer func() {
sc := -1
if result.splr.Response.Response != nil {
sc = result.splr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByPeeringServiceNextResults
req, err := client.ListByPeeringServicePreparer(ctx, resourceGroupName, peeringServiceName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", nil, "Failure preparing request")
return
}
resp, err := client.ListByPeeringServiceSender(req)
if err != nil {
result.splr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", resp, "Failure sending request")
return
}
result.splr, err = client.ListByPeeringServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", resp, "Failure responding to request")
}
return
}
// ListByPeeringServicePreparer prepares the ListByPeeringService request.
func (client PrefixesClient) ListByPeeringServicePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"peeringServiceName": autorest.Encode("path", peeringServiceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-09-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByPeeringServiceSender sends the ListByPeeringService request. The method will close the
// http.Response Body if it receives an error.
func (client PrefixesClient) ListByPeeringServiceSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
}
// ListByPeeringServiceResponder handles the response to the ListByPeeringService request. The method always
// closes the http.Response Body.
func (client PrefixesClient) ListByPeeringServiceResponder(resp *http.Response) (result ServicePrefixListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByPeeringServiceNextResults retrieves the next set of results, if any.
func (client PrefixesClient) listByPeeringServiceNextResults(ctx context.Context, lastResults ServicePrefixListResult) (result ServicePrefixListResult, err error) {
req, err := lastResults.servicePrefixListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByPeeringServiceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByPeeringServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByPeeringServiceComplete enumerates all values, automatically crossing page boundaries as required.
func (client PrefixesClient) ListByPeeringServiceComplete(ctx context.Context, resourceGroupName string, peeringServiceName string, expand string) (result ServicePrefixListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.ListByPeeringService")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByPeeringService(ctx, resourceGroupName, peeringServiceName, expand) | return
} | |
cli.js | var root = require('find-parent-dir').sync(__dirname, 'package.json');
var expect = require('expect.js');
var cp = require('child_process');
var Modernizr = require(root + 'lib/cli');
describe('cli', function() {
| });
it('exposes a metadata function', function() {
expect(Modernizr.metadata).to.be.a('function');
});
it('does not throw when being executed', function(done) {
cp.exec('node ' + root + '/bin/modernizr -f adownload -d modernizr-test.js', done);
});
it('does not throw when setClasses is used as an option', function(done) {
cp.exec('node ' + root + '/bin/modernizr -o setClasses', done);
});
}); | it('exposes a build function', function() {
expect(Modernizr.build).to.be.a('function'); |
loadbalancer.go | package yapi
import (
"context"
"fmt"
"log"
"strings"
"github.com/yandex-cloud/go-genproto/yandex/cloud/loadbalancer/v1"
"github.com/yandex-cloud/go-genproto/yandex/cloud/operation"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/sets"
)
type LoadBalancerService struct {
cloudCtx *CloudContext
LbSvc loadbalancer.NetworkLoadBalancerServiceClient
TgSvc loadbalancer.TargetGroupServiceClient
}
func NewLoadBalancerService(lbSvc loadbalancer.NetworkLoadBalancerServiceClient, tgSvc loadbalancer.TargetGroupServiceClient,
cloudCtx *CloudContext) *LoadBalancerService {
return &LoadBalancerService{
cloudCtx: cloudCtx,
LbSvc: lbSvc,
TgSvc: tgSvc,
}
}
func (ySvc *LoadBalancerService) CreateOrUpdateLB(ctx context.Context, name string, listenerSpec []*loadbalancer.ListenerSpec, attachedTGs []*loadbalancer.AttachedTargetGroup) (string, error) {
var nlbType = loadbalancer.NetworkLoadBalancer_EXTERNAL
for _, listener := range listenerSpec {
if _, ok := listener.Address.(*loadbalancer.ListenerSpec_InternalAddressSpec); ok {
nlbType = loadbalancer.NetworkLoadBalancer_INTERNAL
break
}
}
log.Printf("Getting LB by name: %q", name)
lb, err := ySvc.GetLbByName(ctx, name)
if err != nil {
if status.Code(err) == codes.NotFound {
log.Println("LB not found, creating new LB")
} else {
return "", err
}
}
lbCreateRequest := &loadbalancer.CreateNetworkLoadBalancerRequest{
FolderId: ySvc.cloudCtx.FolderID,
Name: name,
RegionId: ySvc.cloudCtx.RegionID,
Type: nlbType,
ListenerSpecs: listenerSpec,
AttachedTargetGroups: attachedTGs,
}
if lb == nil {
log.Printf("Creating LoadBalancer: %+v", *lbCreateRequest)
result, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.Create(ctx, lbCreateRequest)
})
if err != nil {
return "", err
}
return result.(*loadbalancer.NetworkLoadBalancer).Listeners[0].Address, nil
}
if lb != nil && shouldRecreate(lb, lbCreateRequest) {
log.Printf("Re-creating LoadBalancer: %+v", *lbCreateRequest)
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.Delete(ctx, &loadbalancer.DeleteNetworkLoadBalancerRequest{NetworkLoadBalancerId: lb.Id})
})
if err != nil {
return "", err
}
result, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.Create(ctx, lbCreateRequest)
})
if err != nil {
return "", err
}
return result.(*loadbalancer.NetworkLoadBalancer).Listeners[0].Address, nil
}
log.Printf("LB %q already exists, attempting an update\n", name)
dirty := false
listenersToAdd, listenersToRemove := diffListeners(listenerSpec, lb.Listeners)
for _, listener := range listenersToRemove {
req := &loadbalancer.RemoveNetworkLoadBalancerListenerRequest{
NetworkLoadBalancerId: lb.Id,
ListenerName: listener.Name,
}
log.Printf("Removing Listener: %+v", *req)
// todo(31337Ghost) it would be better to send these requests concurrently
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.RemoveListener(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
for _, listener := range listenersToAdd {
req := &loadbalancer.AddNetworkLoadBalancerListenerRequest{
NetworkLoadBalancerId: lb.Id,
ListenerSpec: listener,
}
log.Printf("Adding Listener: %+v", *req)
// todo(31337Ghost) it would be better to send these requests concurrently
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.AddListener(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
tgsToAttach, tgsToDetach := diffAttachedTargetGroups(attachedTGs, lb.AttachedTargetGroups)
for _, tg := range tgsToDetach {
req := &loadbalancer.DetachNetworkLoadBalancerTargetGroupRequest{
NetworkLoadBalancerId: lb.Id,
TargetGroupId: tg.TargetGroupId,
}
log.Printf("Detaching TargetGroup: %+v", *req)
// todo(31337Ghost) it would be better to send these requests concurrently
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.DetachTargetGroup(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
for _, tg := range tgsToAttach {
req := &loadbalancer.AttachNetworkLoadBalancerTargetGroupRequest{
NetworkLoadBalancerId: lb.Id,
AttachedTargetGroup: tg,
}
log.Printf("Attaching TargetGroup: %+v", *req)
// todo(31337Ghost) it would be better to send these requests concurrently
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.AttachTargetGroup(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
// Ensure that the LoadBalancer still exists in the cloud after all of the manipulations above.
if dirty {
log.Printf("Retrieving LoadBalancer %q after update", name)
lb, err = ySvc.GetLbByName(ctx, name)
if err != nil {
return "", err
}
}
return lb.Listeners[0].Address, nil
}
func (ySvc *LoadBalancerService) GetTGsByClusterName(ctx context.Context, clusterName string) (ret []*loadbalancer.TargetGroup, err error) {
result, err := ySvc.TgSvc.List(ctx, &loadbalancer.ListTargetGroupsRequest{
FolderId: ySvc.cloudCtx.FolderID,
// FIXME: properly implement iterator
PageSize: 1000,
})
if err != nil {
return nil, err
}
for _, tg := range result.TargetGroups {
if strings.HasPrefix(tg.Name, clusterName) {
ret = append(ret, tg)
}
}
return
}
func (ySvc *LoadBalancerService) RemoveLBByName(ctx context.Context, name string) error {
log.Printf("Retrieving LB by name %q", name)
lb, err := ySvc.GetLbByName(ctx, name)
if err != nil {
return err
}
if lb == nil {
log.Printf("LB by Name %q does not exist, skipping deletion\n", name)
return nil
}
lbDeleteRequest := &loadbalancer.DeleteNetworkLoadBalancerRequest{
NetworkLoadBalancerId: lb.Id,
}
log.Printf("Deleting LB by ID %q", lb.Id)
_, _, err = ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.LbSvc.Delete(ctx, lbDeleteRequest)
})
if err != nil {
if status.Code(err) == codes.NotFound {
log.Printf("LB %q does not exist, skipping\n", name)
} else {
return err
}
}
return nil
}
func (ySvc *LoadBalancerService) CreateOrUpdateTG(ctx context.Context, tgName string, targets []*loadbalancer.Target) (string, error) {
log.Printf("retrieving TargetGroup by name %q", tgName)
tg, err := ySvc.GetTgByName(ctx, tgName)
if err != nil {
if status.Code(err) == codes.NotFound {
log.Println("TG not found, creating new TG")
} else {
return "", err
}
}
if tg == nil {
tgCreateRequest := &loadbalancer.CreateTargetGroupRequest{
FolderId: ySvc.cloudCtx.FolderID,
Name: tgName,
RegionId: ySvc.cloudCtx.RegionID,
Targets: targets,
}
log.Printf("Creating TargetGroup: %+v", *tgCreateRequest)
result, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.TgSvc.Create(ctx, tgCreateRequest)
})
if err != nil {
return "", err
}
return result.(*loadbalancer.TargetGroup).Id, nil
}
dirty := false
targetsToAdd, targetsToRemove := diffTargetGroupTargets(targets, tg.Targets)
if len(targetsToAdd) > 0 {
req := &loadbalancer.AddTargetsRequest{
TargetGroupId: tg.Id,
Targets: targetsToAdd,
}
log.Printf("Adding Targets: %+v", *req)
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.TgSvc.AddTargets(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
if len(targetsToRemove) > 0 {
req := &loadbalancer.RemoveTargetsRequest{
TargetGroupId: tg.Id,
Targets: targetsToRemove,
}
log.Printf("Removing Targets: %+v", *req)
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.TgSvc.RemoveTargets(ctx, req)
})
if err != nil {
return "", err
}
dirty = true
}
// Ensure that the TargetGroup still exists in the cloud after all of the manipulations above.
if dirty {
log.Printf("Retrieving TargetGroup %q after update", tgName)
tg, err = ySvc.GetTgByName(ctx, tgName)
if err != nil {
return "", err
}
}
return tg.Id, nil
}
func (ySvc *LoadBalancerService) RemoveTGByID(ctx context.Context, tgId string) error {
tgDeleteRequest := &loadbalancer.DeleteTargetGroupRequest{
TargetGroupId: tgId,
}
log.Printf("Removing TargetGroup: %+v", *tgDeleteRequest)
_, _, err := ySvc.cloudCtx.OperationWaiter(ctx, func() (*operation.Operation, error) {
return ySvc.TgSvc.Delete(ctx, tgDeleteRequest)
})
if err != nil {
if status.Code(err) == codes.NotFound {
log.Printf("TG by ID %q does not exist, skipping\n", tgId)
} else {
return err
}
}
return nil
}
func (ySvc *LoadBalancerService) GetLbByName(ctx context.Context, name string) (*loadbalancer.NetworkLoadBalancer, error) {
result, err := ySvc.LbSvc.List(ctx, &loadbalancer.ListNetworkLoadBalancersRequest{
FolderId: ySvc.cloudCtx.FolderID,
PageSize: 2,
Filter: fmt.Sprintf("name = \"%s\"", name),
})
if err != nil {
return nil, err
}
if len(result.NetworkLoadBalancers) > 1 {
return nil, fmt.Errorf("more than 1 LoadBalancers found by the name %q", name)
}
if len(result.NetworkLoadBalancers) == 0 {
return nil, nil
}
return result.NetworkLoadBalancers[0], nil
}
func (ySvc *LoadBalancerService) GetTgByName(ctx context.Context, name string) (*loadbalancer.TargetGroup, error) {
result, err := ySvc.TgSvc.List(ctx, &loadbalancer.ListTargetGroupsRequest{
FolderId: ySvc.cloudCtx.FolderID,
PageSize: 2,
Filter: fmt.Sprintf("name = \"%s\"", name),
})
if err != nil {
return nil, err
}
if len(result.TargetGroups) > 1 {
return nil, fmt.Errorf("more than 1 TargetGroups found by the name %q", name)
}
if len(result.TargetGroups) == 0 {
return nil, nil
}
return result.TargetGroups[0], nil
}
func shouldRecreate(oldBalancer *loadbalancer.NetworkLoadBalancer, newBalancerSpec *loadbalancer.CreateNetworkLoadBalancerRequest) bool |
func diffTargetGroupTargets(expectedTargets []*loadbalancer.Target, actualTargets []*loadbalancer.Target) (targetsToAdd []*loadbalancer.Target, targetsToRemove []*loadbalancer.Target) {
expectedTargetsByUID := make(map[string]*loadbalancer.Target, len(expectedTargets))
for _, target := range expectedTargets {
targetUID := fmt.Sprintf("%v:%v", target.SubnetId, target.Address)
expectedTargetsByUID[targetUID] = target
}
actualTargetsByUID := make(map[string]*loadbalancer.Target, len(actualTargets))
for _, target := range actualTargets {
targetUID := fmt.Sprintf("%v:%v", target.SubnetId, target.Address)
actualTargetsByUID[targetUID] = target
}
expectedTargetsUIDs := sets.StringKeySet(expectedTargetsByUID)
actualTargetsUIDs := sets.StringKeySet(actualTargetsByUID)
for _, targetUID := range expectedTargetsUIDs.Difference(actualTargetsUIDs).List() {
targetsToAdd = append(targetsToAdd, expectedTargetsByUID[targetUID])
}
for _, targetUID := range actualTargetsUIDs.Difference(expectedTargetsUIDs).List() {
targetsToRemove = append(targetsToRemove, actualTargetsByUID[targetUID])
}
return targetsToAdd, targetsToRemove
}
func diffListeners(expectedListeners []*loadbalancer.ListenerSpec, actualListeners []*loadbalancer.Listener) (listenersToAdd []*loadbalancer.ListenerSpec, listenersToRemove []*loadbalancer.Listener) {
foundSet := make(map[string]bool)
for _, actual := range actualListeners {
found := false
for _, expected := range expectedListeners {
if nlbListenersAreEqual(actual, expected) {
// The current listener on the actual
// nlb is in the set of desired listeners.
foundSet[expected.Name] = true
found = true
break
}
}
if !found {
listenersToRemove = append(listenersToRemove, actual)
}
}
for _, expected := range expectedListeners {
if !foundSet[expected.Name] {
listenersToAdd = append(listenersToAdd, expected)
}
}
return listenersToAdd, listenersToRemove
}
func nlbListenersAreEqual(actual *loadbalancer.Listener, expected *loadbalancer.ListenerSpec) bool {
if actual.Protocol != expected.Protocol {
return false
}
if actual.Port != expected.Port {
return false
}
if actual.TargetPort != expected.TargetPort {
return false
}
return true
}
func diffAttachedTargetGroups(expectedTGs []*loadbalancer.AttachedTargetGroup, actualTGs []*loadbalancer.AttachedTargetGroup) (tgsToAttach []*loadbalancer.AttachedTargetGroup, tgsToDetach []*loadbalancer.AttachedTargetGroup) {
foundSet := make(map[string]bool)
for _, actual := range actualTGs {
found := false
for _, expected := range expectedTGs {
if nlbAttachedTargetGroupsAreEqual(actual, expected) {
foundSet[expected.TargetGroupId] = true
found = true
break
}
}
if !found {
tgsToDetach = append(tgsToDetach, actual)
}
}
for _, expected := range expectedTGs {
if !foundSet[expected.TargetGroupId] {
tgsToAttach = append(tgsToAttach, expected)
}
}
return tgsToAttach, tgsToDetach
}
func nlbAttachedTargetGroupsAreEqual(actual *loadbalancer.AttachedTargetGroup, expected *loadbalancer.AttachedTargetGroup) bool {
if actual.TargetGroupId != expected.TargetGroupId {
return false
}
if len(actual.HealthChecks) == 0 {
return false
}
actualHealthCheck := actual.HealthChecks[0]
expectedHealthCheck := expected.HealthChecks[0]
if actualHealthCheck.Name != expectedHealthCheck.Name {
return false
}
if actualHealthCheck.UnhealthyThreshold != expectedHealthCheck.UnhealthyThreshold {
return false
}
if actualHealthCheck.HealthyThreshold != expectedHealthCheck.HealthyThreshold {
return false
}
actualHealthCheckHttpOptions := actualHealthCheck.GetHttpOptions()
if actualHealthCheckHttpOptions == nil {
return false
}
expectedHealthCheckHttpOptions := expectedHealthCheck.GetHttpOptions()
if actualHealthCheckHttpOptions.Port != expectedHealthCheckHttpOptions.Port {
return false
}
if actualHealthCheckHttpOptions.Path != expectedHealthCheckHttpOptions.Path {
return false
}
return true
}
| {
if newBalancerSpec.Type != oldBalancer.Type {
log.Println("LB type mismatch, recreating")
return true
}
return false
} |
utils.py | import warnings
import django
from django.db import transaction
from django.db.models import ManyToManyField
from django.forms.models import model_to_dict
from simple_history.exceptions import AlternativeManagerError, NotHistoricalModelError
def update_change_reason(instance, reason):
attrs = {}
model = type(instance)
manager = instance if instance.id is not None else model
history = get_history_manager_for_model(manager)
history_fields = [field.attname for field in history.model._meta.fields]
for field in instance._meta.fields:
if field.attname not in history_fields:
continue
value = getattr(instance, field.attname)
if field.primary_key is True:
if value is not None:
attrs[field.attname] = value
else:
attrs[field.attname] = value
record = history.filter(**attrs).order_by("-history_date").first()
record.history_change_reason = reason
record.save()
def get_history_manager_for_model(model):
"""Return the history manager for a given app model."""
try:
manager_name = model._meta.simple_history_manager_attribute
except AttributeError:
raise NotHistoricalModelError(
"Cannot find a historical model for {model}.".format(model=model)
)
return getattr(model, manager_name)
def get_history_model_for_model(model):
"""Return the history model for a given app model."""
return get_history_manager_for_model(model).model
def | (
objs,
model,
batch_size=None,
ignore_conflicts=False,
default_user=None,
default_change_reason=None,
default_date=None,
):
"""
Bulk create the objects specified by objs while also bulk creating
their history (all in one transaction).
Because bulk_create does not set the primary key attribute on the created objects
on any database except Postgres (https://docs.djangoproject.com/en/2.2/ref/models/querysets/#bulk-create),
the process is split into two transactions for the other databases.
:param objs: List of objs (not yet saved to the db) of type model
:param model: Model class that should be created
:param batch_size: Number of objects that should be created in each batch
:param default_user: Optional user to specify as the history_user in each historical
record
:param default_change_reason: Optional change reason to specify as the change_reason
in each historical record
:param default_date: Optional date to specify as the history_date in each historical
record
:return: List of objs with IDs
"""
# Exclude ManyToManyFields because they end up as invalid kwargs to
# model.objects.filter(...) below.
exclude_fields = [
field.name
for field in model._meta.get_fields()
if isinstance(field, ManyToManyField)
]
history_manager = get_history_manager_for_model(model)
model_manager = model._default_manager
second_transaction_required = True
with transaction.atomic(savepoint=False):
objs_with_id = model_manager.bulk_create(
objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts
)
if objs_with_id and objs_with_id[0].pk and not ignore_conflicts:
second_transaction_required = False
history_manager.bulk_history_create(
objs_with_id,
batch_size=batch_size,
default_user=default_user,
default_change_reason=default_change_reason,
default_date=default_date,
)
if second_transaction_required:
obj_list = []
with transaction.atomic(savepoint=False):
for obj in objs_with_id:
attributes = dict(
filter(
lambda x: x[1] is not None,
model_to_dict(obj, exclude=exclude_fields).items(),
)
)
obj_list += model_manager.filter(**attributes)
history_manager.bulk_history_create(
obj_list,
batch_size=batch_size,
default_user=default_user,
default_change_reason=default_change_reason,
default_date=default_date,
)
objs_with_id = obj_list
return objs_with_id
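# Usage sketch (Poll is the same placeholder model as above; the two-transaction
# fallback described in the docstring is transparent to the caller):
#
#   data = [Poll(question="q{}".format(i)) for i in range(1000)]
#   created = bulk_create_with_history(data, Poll, batch_size=500,
#                                      default_change_reason="initial import")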
def bulk_update_with_history(
objs,
model,
fields,
batch_size=None,
default_user=None,
default_change_reason=None,
default_date=None,
manager=None,
):
"""
Bulk update the objects specified by objs while also bulk creating
their history (all in one transaction).
:param objs: List of objs of type model to be updated
:param model: Model class that should be updated
:param fields: The fields that are updated
:param batch_size: Number of objects that should be updated in each batch
:param default_user: Optional user to specify as the history_user in each historical
record
:param default_change_reason: Optional change reason to specify as the change_reason
in each historical record
:param default_date: Optional date to specify as the history_date in each historical
record
:param manager: Optional model manager to use for the model instead of the default
manager
"""
history_manager = get_history_manager_for_model(model)
model_manager = manager or model._default_manager
if model_manager.model is not model:
raise AlternativeManagerError("The given manager does not belong to the model.")
with transaction.atomic(savepoint=False):
model_manager.bulk_update(objs, fields, batch_size=batch_size)
history_manager.bulk_history_create(
objs,
batch_size=batch_size,
update=True,
default_user=default_user,
default_change_reason=default_change_reason,
default_date=default_date,
)
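# Usage sketch (continuing the Poll placeholder):
#
#   polls = list(Poll.objects.all())
#   for poll in polls:
#       poll.question = poll.question.strip()
#   bulk_update_with_history(polls, Poll, ["question"], batch_size=500)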
def get_change_reason_from_object(obj):
if hasattr(obj, "_change_reason"):
return getattr(obj, "_change_reason")
if hasattr(obj, "changeReason"):
warning_msg = (
"Using the attr changeReason to populate history_change_reason is"
" deprecated in 2.10.0 and will be removed in 3.0.0. Use "
"_change_reason instead. "
)
warnings.warn(warning_msg, DeprecationWarning)
return getattr(obj, "changeReason")
return None
| bulk_create_with_history |
x_simple_mail_message.py | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License. | # You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.system
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.system import XSimpleMailMessage as XSimpleMailMessage
setattr(XSimpleMailMessage, '__ooo_ns__', 'com.sun.star.system')
setattr(XSimpleMailMessage, '__ooo_full_ns__', 'com.sun.star.system.XSimpleMailMessage')
setattr(XSimpleMailMessage, '__ooo_type_name__', 'interface')
else:
from ...lo.system.x_simple_mail_message import XSimpleMailMessage as XSimpleMailMessage
__all__ = ['XSimpleMailMessage'] | |
youtube.ts | /**
* Youtube room chat-plugin.
* Supports adding channels and selecting a random channel.
* Also supports showing video data on request.
* Written by mia-pi, with some code / design concepts from Asheviere.
*/
import {Net} from '../../lib/net';
import {FS} from '../../lib/fs';
import {Utils} from '../../lib/utils';
const ROOT = 'https://www.googleapis.com/youtube/v3/';
const CHANNEL = `${ROOT}channels`;
const STORAGE_PATH = 'config/chat-plugins/youtube.json';
let channelData: AnyObject;
try {
channelData = JSON.parse(FS(STORAGE_PATH).readIfExistsSync() || "{}");
} catch (e) {
channelData = {};
}
export class YoutubeInterface {
interval: NodeJS.Timer | null;
intervalTime: number;
constructor() {
this.interval = null;
this.intervalTime = 0;
}
async getChannelData(link: string, username?: string) {
if (!Config.youtubeKey) throw new Error("Must set up Config.youtubeKey");
const id = this.getId(link);
if (!id) return null;
const queryUrl = `${CHANNEL}?part=snippet%2Cstatistics&id=${encodeURIComponent(id)}&key=${Config.youtubeKey}`;
const raw = await Net(queryUrl).get();
const res = JSON.parse(raw);
if (!res || !res.items) return null;
const data = res.items[0];
const cache = {
name: data.snippet.title,
description: data.snippet.description,
url: data.snippet.customUrl,
icon: data.snippet.thumbnails.medium.url,
videos: Number(data.statistics.videoCount),
subs: Number(data.statistics.subscriberCount),
views: Number(data.statistics.viewCount),
username: username,
};
channelData[id] = {...cache};
FS(STORAGE_PATH).writeUpdate(() => JSON.stringify(channelData));
return cache;
}
async generateChannelDisplay(link: string) {
const id = this.getId(link);
if (!id) return;
// url isn't needed but it destructures wrong without it
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const {name, description, url, icon, videos, subs, views, username} = await this.get(id);
// credits asheviere for most of the html
let buf = `<div class="infobox"><table style="margin:0px;"><tr>`;
buf += `<td style="margin:5px;padding:5px;min-width:175px;max-width:160px;text-align:center;border-bottom:0px;">`;
buf += `<div style="padding:5px;background:white;border:1px solid black;margin:auto;max-width:100px;max-height:100px;">`;
buf += `<a href="${ROOT}channel/${id}"><img src="${icon}" width=100px height=100px/></a>`;
buf += `</div><p style="margin:5px 0px 4px 0px;word-wrap:break-word;">`;
buf += `<a style="font-weight:bold;color:#c70000;font-size:12pt;" href="https://www.youtube.com/channel/${id}">${name}</a>`;
buf += `</p></td><td style="padding: 0px 25px;font-size:10pt;background:rgb(220,20,60);width:100%;border-bottom:0px;vertical-align:top;">`;
buf += `<p style="padding: 5px;border-radius:8px;color:white;font-weight:bold;text-align:center;">`;
buf += `${videos} videos | ${subs} subscribers | ${views} video views</p>`;
buf += `<p style="margin-left: 5px; font-size:9pt;color:white;">`;
buf += `${description.slice(0, 400).replace(/\n/g, ' ')}${description.length > 400 ? '(...)' : ''}</p>`;
if (username) {
buf += `<p style="text-align:left;font-style:italic;color:white;">PS username: ${username}</p></td></tr></table></div>`;
} else {
buf += '</td></tr></table></div>';
}
return buf;
}
randChannel() {
const keys = Object.keys(channelData);
const id = Utils.shuffle(keys)[0].trim();
return this.generateChannelDisplay(id);
}
get(id: string, username?: string) {
if (!(id in channelData)) return this.getChannelData(id, username);
return {...channelData[id]};
}
channelSearch(search: string) {
let channel;
if (channelData[search]) {
channel = search;
} else {
for (const id of Object.keys(channelData)) {
if (toID(channelData[id].name) === toID(search)) {
channel = id;
break; // don't iterate through everything once a match is found
}
}
}
return channel;
}
getId(link: string) {
let id = '';
if (!link) return null;
if (channelData[link]) return link;
if (!link.includes('channel')) {
if (link.includes('youtube')) {
id = link.split('v=')[1];
} else if (link.includes('youtu.be')) {
id = link.split('/')[3];
} else {
return null;
}
} else {
id = link.split('channel/')[1];
}
if (id.includes('&')) id = id.split('&')[0];
if (id.includes('?')) id = id.split('?')[0];
return id;
}
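// Link-parsing examples for getId (the IDs below are placeholders for illustration):
//   getId('https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=30') -> 'dQw4w9WgXcQ'
//   getId('https://youtu.be/dQw4w9WgXcQ')                     -> 'dQw4w9WgXcQ'
//   getId('https://www.youtube.com/channel/UC123abc?view=0')  -> 'UC123abc'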
async generateVideoDisplay(link: string) {
if (!Config.youtubeKey) throw new Error("Must set up Config.youtubeKey");
const id = this.getId(link);
if (!id) return null;
const queryUrl = `${ROOT}videos?part=snippet%2Cstatistics&id=${encodeURIComponent(id)}&key=${Config.youtubeKey}`;
const raw = await Net(queryUrl).get();
const res = JSON.parse(raw);
if (!res.items) return;
const video = res.items[0];
const info = {
title: video.snippet.title,
date: new Date(video.snippet.publishedAt),
description: video.snippet.description,
channel: video.snippet.channelTitle,
channelUrl: video.snippet.channelId,
views: video.statistics.viewCount,
thumbnail: video.snippet.thumbnails.default.url,
likes: video.statistics.likeCount,
dislikes: video.statistics.dislikeCount,
};
let buf = `<table style="margin:0px;"><tr>`;
buf += `<td style="margin:5px;padding:5px;min-width:175px;max-width:160px;text-align:center;border-bottom:0px;">`;
buf += `<div style="padding:5px;background:#b0b0b0;border:1px solid black;margin:auto;max-width:100px;max-height:100px;">`;
buf += `<a href="${ROOT}channel/${id}"><img src="${info.thumbnail}" width=100px height=100px/></a>`;
buf += `</div><p style="margin:5px 0px 4px 0px;word-wrap:break-word;">`;
buf += `<a style="font-weight:bold;color:#c70000;font-size:12pt;" href="https://www.youtube.com/watch?v=${id}">${info.title}</a>`;
buf += `</p></td><td style="padding: 0px 25px;font-size:10pt;max-width:100px;background:`;
buf += `white;width:100%;border-bottom:0px;vertical-align:top;">`;
buf += `<p style="background: #e22828; padding: 5px;border-radius:8px;color:white;font-weight:bold;text-align:center;">`;
buf += `${info.likes} likes | ${info.dislikes} dislikes | ${info.views} video views<br><br>`;
buf += `<small>Published on ${info.date} | ID: ${id}</small><br>Uploaded by: ${info.channel}</p>`;
buf += `<br><details><summary>Video Description</summary>`;
buf += `<p style="background: #e22828;max-width:500px;padding: 5px;border-radius:8px;color:white;font-weight:bold;text-align:center;">`;
buf += `<i>${info.description.slice(0, 400).replace(/\n/g, ' ')}${info.description.length > 400 ? '(...)' : ''}</i></p></details></td>`;
return buf;
}
}
const YouTube = new YoutubeInterface();
export const commands: ChatCommands = {
async randchannel(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
if (Object.keys(channelData).length < 1) return this.errorReply(`No channels in the database.`);
this.runBroadcast();
const data = await YouTube.randChannel();
if (!data) return this.errorReply(`Error in getting channel data.`);
if (this.broadcasting) {
if (!this.can('broadcast', null, room)) return false;
this.addBox(data);
room.update();
} else {
return this.sendReplyBox(data);
}
},
randchannelhelp: [`/randchannel - View data of a random channel from the YouTube database.`],
yt: 'youtube',
youtube: {
async addchannel(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
const [id, name] = target.split(',');
if (!id) return this.errorReply('Specify a channel ID.');
const data = await YouTube.getChannelData(id, name);
if (!data) {
return this.errorReply(`Error in retrieving channel data.`);
}
this.modlog('ADDCHANNEL', null, `${id} ${name ? `username: ${name}` : ''}`);
return this.privateModAction(`(Added channel with id ${id} ${name ? `and username (${name}) ` : ''} to the random channel pool.)`);
},
addchannelhelp: [`/addchannel - Add channel data to the YouTube database. Requires: % @ #`],
removechannel(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
if (!this.can('ban', null, room)) return false;
const id = YouTube.channelSearch(target);
if (!id) return this.errorReply(`Channel with ID or name ${target} not found.`);
delete channelData[id];
FS(STORAGE_PATH).writeUpdate(() => JSON.stringify(channelData));
this.privateModAction(`(${user.name} deleted channel with ID or name ${target}.)`);
return this.modlog(`REMOVECHANNEL`, null, id);
},
removechannelhelp: [`/youtube removechannel - Delete channel data from the YouTube database. Requires: % @ #`],
async channel(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
const channel = YouTube.channelSearch(target);
if (!channel) return this.errorReply(`No channels with ID or name ${target} found.`);
const data = await YouTube.generateChannelDisplay(channel);
if (!data) return this.errorReply(`Error in getting channel data.`);
this.runBroadcast();
if (this.broadcasting) {
this.addBox(data);
return room.update();
} else {
return this.sendReplyBox(data);
}
},
channelhelp: [
'/youtube channel - View the data of a specified channel. Can be either channel ID or channel name.',
],
async video(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
if (!target) return this.errorReply(`Provide a valid youtube link.`);
const html = await YouTube.generateVideoDisplay(target);
if (!html) return this.errorReply(`This url is invalid. Please use a youtu.be link or a youtube.com link.`);
this.runBroadcast();
if (this.broadcasting) {
this.addBox(html);
return room.update();
} else { | },
videohelp: [`/youtube video - View data of a specified video. Takes a youtube.com or youtu.be video link.`],
channels(target, room, user) {
let all;
if (toID(target) === 'all') all = true;
return this.parse(`/j view-channels${all ? '-all' : ''}`);
},
help(target, room, user) {
return this.parse('/help youtube');
},
update(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
if (!this.can('ban', null, room)) return false;
const [channel, name] = target.split(',');
const id = YouTube.channelSearch(channel);
if (!id) return this.errorReply(`Channel ${channel} is not in the database.`);
channelData[id].username = name;
this.modlog(`UPDATECHANNEL`, null, name);
this.privateModAction(`(${user.name} updated channel ${id}'s username to ${name}.)`);
return FS(STORAGE_PATH).writeUpdate(() => JSON.stringify(channelData));
},
interval: 'repeat',
async repeat(target, room, user) {
if (room.roomid !== 'youtube') return this.errorReply(`This command can only be used in the YouTube room.`);
if (!this.can('declare', null, room)) return false;
if (!target) return this.sendReply(`Interval is currently set to ${Chat.toDurationString(YouTube.intervalTime)}.`);
if (Object.keys(channelData).length < 1) return this.errorReply(`No channels in the database.`);
if (isNaN(parseInt(target))) return this.errorReply(`Specify a number (in minutes) for the interval.`);
let interval = Number(target);
if (interval < 10) return this.errorReply(`${interval} is too low - set it to at least 10 minutes.`);
interval = interval * 60 * 1000;
const channel = await YouTube.randChannel();
// no channels
if (!channel) return this.errorReply(`Error in getting channel data.`);
YouTube.intervalTime = interval;
if (YouTube.interval) clearInterval(YouTube.interval);
YouTube.interval = setInterval(() => {
void (async () => {
const res = await YouTube.randChannel();
this.addBox(res!);
room.update();
})();
}, interval);
this.privateModAction(`(${user.name} set a randchannel interval to ${target} minutes)`);
return this.modlog(`CHANNELINTERVAL`, null, `${target} minutes`);
},
},
youtubehelp: [
`YouTube commands:`,
`/randchannel - View data of a random channel from the YouTube database.`,
`/youtube addchannel [channel] - Add channel data to the YouTube database. Requires: % @ #`,
`/youtube removechannel [channel]- Delete channel data from the YouTube database. Requires: % @ #`,
`/youtube channel [channel] - View the data of a specified channel. Can be either channel ID or channel name.`,
`/youtube video [video] - View data of a specified video. Takes a youtube.com or youtu.be video link.`,
`/youtube update [channel], [name] - sets a channel's PS username to [name]. Requires: % @ #`,
`/youtube repeat [time] - Sets an interval for [time] minutes, showing a random channel each time. Requires: # &`,
],
};
export const pages: PageTable = {
async channels(args, user) {
const all = toID(args[0]) === 'all';
this.title = `[Channels] ${all ? 'All' : ''}`;
let buffer = `<div class="pad"><h4>Channels in the YouTube database:`;
if (all) buffer += `(All)`;
buffer += `<br/ ><button class="button" name="send" value="/join view-channels${all ? '' : '-all'}"">`;
buffer += `<i class="fa fa-refresh"></i>${all ? 'Usernames only' : 'All channels'}</button>`;
buffer += `<button class="button" name="send" value="/join view-channels${all ? '-all' : ''}"">`;
buffer += `<i class="fa fa-refresh"></i> Refresh</button><br />`;
buffer += `</h4><hr />`;
const isStaff = user.can('mute', null, Rooms.get('youtube'));
for (const id of Utils.shuffle(Object.keys(channelData))) {
const name = YouTube.get(id).name;
const psid = YouTube.get(id).username;
if (!all && !psid) continue;
buffer += `<details><summary>${name}`;
if (isStaff) buffer += `<small><i> (Channel ID: ${id})</i></small>`;
if (psid) buffer += ` <small>(PS name: ${psid})</small>`;
buffer += `</summary>`;
buffer += await YouTube.generateChannelDisplay(id);
if (!isStaff) buffer += `<i>(Channel ID: ${id})</i>`;
buffer += `</details><hr/ >`;
}
buffer += `</div>`;
return buffer;
},
}; | return this.sendReplyBox(html);
} |
impl_parser.rs | use std::collections::HashSet;
use proc_macro2::{Ident, TokenStream};
use quote::ToTokens;
use syn::{ImplItem, ItemImpl, Type};
use syn::spanned::Spanned;
use super::{
ParsedAttr, ParsedFn
};
pub struct ParsedImpl {
pub name: Ident,
pub attrs: HashSet<ParsedAttr>,
pub functions: Vec<ParsedFn>,
item_impl: ItemImpl
}
impl ToTokens for ParsedImpl {
fn to_tokens(&self, tokens: &mut TokenStream) {
tokens.extend(self.item_impl.to_token_stream());
}
}
impl ParsedImpl {
pub fn parse(input: &proc_macro::TokenStream, attrs: &proc_macro::TokenStream) -> syn::Result<Self> |
}
| {
let item_impl = syn::parse::<ItemImpl>(input.clone())?;
let impl_clone = item_impl.clone();
// extract impl name
let name = match *item_impl.self_ty.clone() {
Type::Path(p) => {
p.path.segments.last().unwrap().ident.clone()
}
n => return Err(syn::Error::new(n.span(), "Wrong impl type"))
};
let mut functions = vec![];
for impl_item in &item_impl.items {
match impl_item {
ImplItem::Method(m) => {
// only insert if Some. If it's None, it was annotated with jignore
if let Some(v) = ParsedFn::parse_impl_fn(m.clone(), name.clone(), &attrs)? {
functions.push(v);
}
}
// ignore everything that's not a method
_ => continue
}
}
let mut attrs = HashSet::new();
for attr in item_impl.attrs {
attrs.insert(
ParsedAttr::parse(
&attr.path.segments.last().unwrap().ident,
&attr.tokens.into()
)?
);
}
Ok(Self{
name,
attrs,
functions,
item_impl: impl_clone
})
} |
test_table.py | # imports - module imports
from pipupgrade import cli
from pipupgrade.table import _sanitize_string, Table
def test__sanitize_string():
assert _sanitize_string(cli.format("foobar", cli.GREEN)) == "foobar"
assert _sanitize_string(cli.format("foobar", cli.BOLD)) == "foobar"
def | ():
table = Table()
assert table.empty
dummy = ["foo", "bar"]
table.insert(dummy)
assert not table.empty
string = table.render()
assert string.count("\n") == 1
table.header = dummy
string = table.render()
assert string.count("\n") == 2
table.insert(dummy)
string = table.render()
assert string.count("\n") == 3 | test_table |
josephus_survivor.py | # ********JOSEPHUS SURVIVOR********
#codewars
# In this kata you have to correctly return who is the "survivor", i.e. the last element of a Josephus permutation.
# Basically you have to assume that n people are put into a circle and that they are eliminated in steps of k elements, like this:
# josephus_survivor(7,3) => means 7 people in a circle;
# one every 3 is eliminated until one remains
# [1,2,3,4,5,6,7] - initial sequence
# [1,2,4,5,6,7] => 3 is counted out
# [1,2,4,5,7] => 6 is counted out
# [1,4,5,7] => 2 is counted out
# [1,4,5] => 7 is counted out
# [1,4] => 5 is counted out
# [4] => 1 counted out, 4 is the last element - the survivor!
#Answer
def | (n,k):
lst = list(range(1,n+1))
while len(lst) > 1 :
if len(lst) >= k :
lst = lst[k:] + lst[:k-1]
else:
i = (k % len(lst)) - 1
if i == -1:
lst = lst[:-1]
else:
lst = lst[i+1:] + lst[:i]
return lst[0]
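# A quick sanity sketch (not part of the kata solution); the first value matches
# the walkthrough above, the second follows from the standard Josephus recurrence:
assert josephus_survivor(7, 3) == 4
assert josephus_survivor(11, 19) == 10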
| josephus_survivor |
tienda.py | # -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: [email protected]
# @Filename: models.py
# @Last modified by: valle
# @Last modified time: 15-Feb-2018
# @License: Apache license vesion 2.0
from __future__ import unicode_literals
from django.db.models import Q
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from adminshop.models import (Clientes, Direcciones, Proveedores,
Productos, Presupuesto)
# Create your models here.
CHOICES_TIPO_PAGO = (
('EF', 'Efectivo'),
('TJ', 'Tarjeta'),
('TB', 'Transferencia bancaria'),
('PY', 'Paypal'),
('CR', 'Contrarembolso'),
)
CHOICES_TIPO_VENDEDOR = (
('CL', 'Cliente'),
('PV', 'Proveedor'),
('NO', 'No asignado')
)
CHOICES_TIPO_DOC = (
('CP', 'Compra'),
('FT', 'Factura'),
('RP', 'Reparacion'),
('AB', 'Abono'),
('OS', 'Testeo')
)
class DocumentSendPolice(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentSendGestoria(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentoTesteo(models.Model):
cliente = models.ForeignKey("clientes", on_delete=models.CASCADE )
producto = models.ForeignKey("Productos", on_delete=models.CASCADE )
empleado = models.ForeignKey(User, on_delete=models.CASCADE )
firma = models.FileField(upload_to='firmas', null=True)
frimado = models.BooleanField(default=False)
fecha = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.cliente)
class Meta:
ordering = ["-id"]
class ConfigSite(models.Model):
ISP = models.IntegerField(blank=True, default=21)
email_policia = models.EmailField(max_length=100, blank=True)
email_gestoria = models.EmailField(max_length=100, blank=True)
codigo_compra = models.IntegerField("Inicio contador", default=3023)
firma_tienda = models.FileField(upload_to='config', blank=True)
logo_tienda = models.FileField(upload_to='config', blank=True)
class Compras(models.Model):
vendedor_id = models.IntegerField(null=True)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha_entrada = models.DateTimeField(auto_now_add=True)
codigo_compra = models.CharField(max_length=150, null=True)
firma = models.FileField(upload_to='firmas', null=True)
tipo_compra = models.CharField(max_length=4, default="REBU", choices=[("REBU","REBU"), ("ISP","ISP")])
doc_proveedor = models.FileField(upload_to='doc_proveedor', null=True, default=None, max_length=500)
enviar_policia = models.BooleanField("Enviar a la policia", blank=True, default=True)
tipo_vendedor = models.CharField(
max_length=2,
choices=CHOICES_TIPO_VENDEDOR,
default="NO",
)
def set_vendedor(self, vendedor):
if vendedor != None:
self.vendedor_id = vendedor.id
if type(vendedor) == Clientes:
self.tipo_vendedor = "CL"
else:
|
else:
self.tipo_vendedor = "NO"
def get_vendedor(self):
if self.tipo_vendedor == "CL":
clientes = Clientes.objects.filter(Q(pk=self.vendedor_id))
if len(clientes) > 0:
cliente = clientes[0]
vendedor = {}
vendedor["DNI"] = cliente.DNI
vendedor["nombre"] = cliente.nombre_completo
direcciones = Direcciones.objects.filter(cliente_id=self.vendedor_id)
if len(direcciones) > 0:
direccion = direcciones[0]
else:
direccion = ""
vendedor["direccion"] = direccion
vendedor["telefono"] = cliente.telefono
vendedor["email"] = cliente.email
vendedor["id"] = cliente.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
elif self.tipo_vendedor == "PV":
ps = Proveedores.objects.filter(Q(pk=self.vendedor_id))
if len(ps) > 0:
p = ps[0]
vendedor = {}
vendedor["DNI"] = p.CIF
vendedor["nombre"] = p.razon_social
vendedor["direccion"] = p.direccion
vendedor["telefono"] = p.telefono
vendedor["email"] = p.email
vendedor["id"] = p.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
def save(self, *args, **kwargs):
super(Compras, self).save()
if self.codigo_compra == None:
self.codigo_compra = ConfigSite.objects.all()[0].codigo_compra+self.pk
super(Compras, self).save()
class Meta:
ordering= ["-id"]
class Ventas(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
entrega = models.DecimalField(max_digits=10, decimal_places=2, default=0)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasVentas(models.Model):
venta = models.ForeignKey("Ventas", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=6, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Abonos(models.Model):
factura = models.ForeignKey("Ventas", on_delete=models.CASCADE)
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasAbonos(models.Model):
abono = models.ForeignKey("Abonos", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=5, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Historial(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.CASCADE)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha = models.DateTimeField(auto_now_add=True)
detalle = models.CharField(max_length=150)
def __unicode__(self):
return self.detalle
class Meta:
ordering = ["-id"]
class Firmas(models.Model):
tipo_documento = models.CharField(
max_length=2,
choices=CHOICES_TIPO_DOC,
default="CP",
)
empleado_id = models.IntegerField()
documento_id = models.IntegerField()
fecha = models.DateTimeField(auto_now=True)
firmado = models.BooleanField(default=False)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
def get_nombre_cliente(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
except:
vendedor = { "nombre": "Documento borrado"}
return vendedor["nombre"]
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente.nombre_completo
except:
cliente = "Documento borrado"
return cliente
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
return cliente.nombre_completo
def get_ns_imei(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.ns_imei
except:
return "Documento borrado"
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.ns_imei
except:
ns_imei = "Documento borrado"
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.ns_imei
def get_producto_pk(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.id
except:
return 0
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.id
except:
ns_imei = 0
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.pk
def get_documento(self):
if self.tipo_documento == "CP":
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
datos_send= {
"pk": compra.pk,
"id_producto": compra.producto.pk,
'nombre': vendedor["nombre"],
"DNI": vendedor["DNI"],
"ns_imei": compra.producto.ns_imei,
"precio_compra": str(compra.producto.precio_compra),
}
return "tienda/sign/sign_compras.html", datos_send
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_reparacion.html", datos_send
except:
self.delete()
return None, None
elif self.tipo_documento == "OS":
try:
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_testeo.html", datos_send
except:
self.delete()
return None, None
class Meta:
ordering = ["-fecha"]
| self.tipo_vendedor = "PV" |
user.py | # (C) 2022 GoodData Corporation
from __future__ import annotations
from pathlib import Path
from typing import List, Optional, Type
import attr
from gooddata_metadata_client.model.declarative_user import DeclarativeUser
from gooddata_metadata_client.model.declarative_users import DeclarativeUsers
from gooddata_sdk.catalog.base import Base
from gooddata_sdk.catalog.identifier import CatalogUserGroupIdentifier
from gooddata_sdk.utils import create_directory, read_layout_from_file, write_layout_to_file
LAYOUT_USERS_DIR = "users"
LAYOUT_USERS_FILE = "users.yaml"
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUsers(Base):
users: List[CatalogDeclarativeUser]
@staticmethod
def client_class() -> Type[DeclarativeUsers]:
return DeclarativeUsers
@classmethod
def load_from_disk(cls, layout_organization_folder: Path) -> CatalogDeclarativeUsers:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
data = read_layout_from_file(users_file)
users = []
for record in data:
users.append(CatalogDeclarativeUser.from_dict(record, camel_case=True))
return cls(users=users)
def store_to_disk(self, layout_organization_folder: Path) -> None:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
create_directory(users_directory)
users = [user.to_dict(camel_case=True) for user in self.users]
write_layout_to_file(users_file, users)
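# Resulting layout on disk (a sketch):
#   <layout_organization_folder>/users/users.yaml   <- one serialized entry per declarative user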
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUser(Base):
| id: str
auth_id: Optional[str] = None
user_groups: List[CatalogUserGroupIdentifier] = []
@staticmethod
def client_class() -> Type[DeclarativeUser]:
return DeclarativeUser |
|
key.py | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Key(object):
@classmethod
def from_path(cls, *args, **kwds):
raise NotImplementedError("Paths are not currently supported")
def __init__(self, encoded=None, obj=None):
self.name = None
if obj:
|
else:
self.id = None
self.kind = None
def app(self):
raise NotImplementedError("Applications are not currently supported")
def kind(self):
return self.kind
def id(self):
return self.id
def name(self):
raise NotImplementedError("Key Names are not currently supported")
def id_or_name(self):
return self.id
def has_id_or_name(self):
return self.id is not None
def parent(self):
raise NotImplementedError("Key parents are not currently supported")
def __str__(self):
return self.id_or_name()
| self.id = obj.id
self.kind = obj.kind() |
steihaug.rs | // Copyright 2018-2020 argmin developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! # References:
//!
//! [0] Jorge Nocedal and Stephen J. Wright (2006). Numerical Optimization.
//! Springer. ISBN 0-387-30303-0.
use crate::prelude::*;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
/// The Steihaug method is a conjugate gradients based approach for finding an approximate solution
/// to the second order approximation of the cost function within the trust region.
///
/// # References:
///
/// [0] Jorge Nocedal and Stephen J. Wright (2006). Numerical Optimization.
/// Springer. ISBN 0-387-30303-0.
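/// # Example
///
/// A construction sketch (executor / operator wiring elided; `Vec<f64>` as the parameter
/// type is only an assumption for illustration):
///
/// ```ignore
/// let subproblem: Steihaug<Vec<f64>, f64> = Steihaug::new()
///     .epsilon(1e-10)?
///     .max_iters(20);
/// ```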
#[derive(Clone, Serialize, Deserialize, Debug, Copy, PartialEq, PartialOrd, Default)]
pub struct Steihaug<P, F> {
/// Radius
radius: F,
/// epsilon
epsilon: F,
/// p
p: P,
/// residual
r: P,
/// r^Tr
rtr: F,
/// initial residual
r_0_norm: F,
/// direction
d: P,
/// max iters
max_iters: u64,
}
impl<P, F> Steihaug<P, F>
where
P: Default + Clone + ArgminMul<F, P> + ArgminDot<P, F> + ArgminAdd<P, P>,
F: ArgminFloat,
{
/// Constructor
pub fn new() -> Self {
Steihaug {
radius: F::nan(),
epsilon: F::from_f64(10e-10).unwrap(),
p: P::default(),
r: P::default(),
rtr: F::nan(),
r_0_norm: F::nan(),
d: P::default(),
max_iters: std::u64::MAX,
}
}
/// Set epsilon
pub fn epsilon(mut self, epsilon: F) -> Result<Self, Error> {
if epsilon <= F::from_f64(0.0).unwrap() {
return Err(ArgminError::InvalidParameter {
text: "Steihaug: epsilon must be > 0.0.".to_string(),
}
.into());
}
self.epsilon = epsilon;
Ok(self)
}
/// set maximum number of iterations
pub fn max_iters(mut self, iters: u64) -> Self {
self.max_iters = iters;
self
}
/// evaluate m(p) (without considering f_init because it is not available)
fn eval_m<H>(&self, p: &P, g: &P, h: &H) -> F
where
P: ArgminWeightedDot<P, F, H>,
{
// self.cur_grad().dot(&p) + 0.5 * p.weighted_dot(&self.cur_hessian(), &p)
g.dot(p) + F::from_f64(0.5).unwrap() * p.weighted_dot(h, p)
}
/// calculate all possible step lengths
#[allow(clippy::many_single_char_names)]
fn tau<G, H>(&self, filter_func: G, eval: bool, g: &P, h: &H) -> F
where
G: Fn(F) -> bool,
H: ArgminDot<P, P>,
{
let a = self.p.dot(&self.p);
let b = self.d.dot(&self.d);
let c = self.p.dot(&self.d);
let delta = self.radius.powi(2);
let t1 = (-a * b + b * delta + c.powi(2)).sqrt();
let tau1 = -(t1 + c) / b;
let tau2 = (t1 - c) / b;
let mut t = vec![tau1, tau2];
// Maybe calculating tau3 should only be done if b is close to zero?
if tau1.is_nan() || tau2.is_nan() || tau1.is_infinite() || tau2.is_infinite() {
let tau3 = (delta - a) / (F::from_f64(2.0).unwrap() * c);
t.push(tau3);
}
let v = if eval {
// remove NAN taus and calculate m (without f_init) for all taus, then sort them based
// on their result and return the tau which corresponds to the lowest m
let mut v = t
.iter()
.cloned()
.enumerate()
.filter(|(_, tau)| (!tau.is_nan() && !tau.is_infinite()) && filter_func(*tau))
.map(|(i, tau)| {
let p = self.p.add(&self.d.mul(&tau));
(i, self.eval_m(&p, g, h))
})
.filter(|(_, m)| !m.is_nan() && !m.is_infinite())
.collect::<Vec<(usize, F)>>();
v.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
v
} else {
let mut v = t
.iter()
.cloned()
.enumerate()
.filter(|(_, tau)| (!tau.is_nan() && !tau.is_infinite()) && filter_func(*tau))
.collect::<Vec<(usize, F)>>();
v.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
v
};
t[v[0].0]
}
}
impl<P, O, F> Solver<O> for Steihaug<P, F>
where
O: ArgminOp<Param = P, Output = F, Float = F>,
P: Clone
+ Serialize
+ DeserializeOwned
+ Default
+ ArgminMul<F, P>
+ ArgminWeightedDot<P, F, O::Hessian>
+ ArgminNorm<F>
+ ArgminDot<P, F>
+ ArgminAdd<P, P>
+ ArgminSub<P, P>
+ ArgminZeroLike
+ ArgminMul<F, P>,
O::Hessian: ArgminDot<P, P>,
F: ArgminFloat,
{
const NAME: &'static str = "Steihaug";
fn init(
&mut self,
_op: &mut OpWrapper<O>,
state: &IterState<O>,
) -> Result<Option<ArgminIterData<O>>, Error> {
self.r = state.get_grad().unwrap();
self.r_0_norm = self.r.norm();
self.rtr = self.r.dot(&self.r);
self.d = self.r.mul(&F::from_f64(-1.0).unwrap());
self.p = self.r.zero_like();
Ok(if self.r_0_norm < self.epsilon {
Some(
ArgminIterData::new()
.param(self.p.clone())
.termination_reason(TerminationReason::TargetPrecisionReached),
)
} else {
None
})
}
fn | (
&mut self,
_op: &mut OpWrapper<O>,
state: &IterState<O>,
) -> Result<ArgminIterData<O>, Error> {
let grad = state.get_grad().unwrap();
let h = state.get_hessian().unwrap();
let dhd = self.d.weighted_dot(&h, &self.d);
// Current search direction d is a direction of zero curvature or negative curvature
if dhd <= F::from_f64(0.0).unwrap() {
let tau = self.tau(|_| true, true, &grad, &h);
return Ok(ArgminIterData::new()
.param(self.p.add(&self.d.mul(&tau)))
.termination_reason(TerminationReason::TargetPrecisionReached));
}
let alpha = self.rtr / dhd;
let p_n = self.p.add(&self.d.mul(&alpha));
// new p violates trust region bound
if p_n.norm() >= self.radius {
let tau = self.tau(|x| x >= F::from_f64(0.0).unwrap(), false, &grad, &h);
return Ok(ArgminIterData::new()
.param(self.p.add(&self.d.mul(&tau)))
.termination_reason(TerminationReason::TargetPrecisionReached));
}
let r_n = self.r.add(&h.dot(&self.d).mul(&alpha));
if r_n.norm() < self.epsilon * self.r_0_norm {
return Ok(ArgminIterData::new()
.param(p_n)
.termination_reason(TerminationReason::TargetPrecisionReached));
}
let rjtrj = r_n.dot(&r_n);
let beta = rjtrj / self.rtr;
self.d = r_n.mul(&F::from_f64(-1.0).unwrap()).add(&self.d.mul(&beta));
self.r = r_n;
self.p = p_n;
self.rtr = rjtrj;
Ok(ArgminIterData::new()
.param(self.p.clone())
.cost(self.rtr)
.grad(grad)
.hessian(h))
}
fn terminate(&mut self, state: &IterState<O>) -> TerminationReason {
if state.get_iter() >= self.max_iters {
TerminationReason::MaxItersReached
} else {
TerminationReason::NotTerminated
}
}
}
impl<P: Clone + Serialize, F: ArgminFloat> ArgminTrustRegion<F> for Steihaug<P, F> {
fn set_radius(&mut self, radius: F) {
self.radius = radius;
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_trait_impl;
test_trait_impl!(steihaug, Steihaug<MinimalNoOperator, f64>);
}
| next_iter |
webpack.mix.js | const mix = require('laravel-mix');
/*
|--------------------------------------------------------------------------
| Mix Asset Management
|--------------------------------------------------------------------------
| | |
*/
mix.js('resources/js/app.js', 'public/js')
//.sass('resources/sass/app.scss', 'public/css'); | | Mix provides a clean, fluent API for defining some Webpack build steps
| for your Laravel application. By default, we are compiling the Sass
| file for the application as well as bundling up all the JS files. |
hocs.ts | import type { ChallengePayload } from './api'
import type { DocumentSides } from './commons'
import type {
SupportedLanguages,
TranslatedTagParser,
TranslateCallback,
} from './locales'
import type { RequestedVariant } from './steps'
| challenges: ChallengePayload[]
challengesId: string
}
export type WithLocalisedProps = {
language: SupportedLanguages
parseTranslatedTags: TranslatedTagParser
translate: TranslateCallback
}
export type WithCameraDetectionProps = {
hasCamera?: boolean | null
}
export type WithFailureHandlingProps = {
onError?: (error: Error) => void
}
export type TrackScreenCallback = (
screenNameHierarchy?: string | string[],
properties?: Record<string, unknown>
) => void
export type WithTrackingProps = {
trackScreen: TrackScreenCallback
}
export type WithCaptureVariantProps = {
forceCrossDevice?: boolean
isPoA?: boolean
requestedVariant?: RequestedVariant
side?: DocumentSides
}
export type WithThemeProps = {
back?: () => void
disableNavigation?: boolean
}
export type WithPermissionsFlowProps = {
hasGrantedPermission?: boolean
}
export type WithBlobPreviewProps = {
blob: Blob
} | export type WithChallengesProps = { |
mojo.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Mojo(recipe_util.Recipe):
"""Basic Recipe class for Mojo."""
@staticmethod
def fetch_spec(props):
url = 'https://github.com/domokit/mojo.git'
solution = {
'name' :'src',
'url' : url,
'deps_file': 'DEPS',
'managed' : False,
'custom_deps': {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
}
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
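# Example of the returned spec (a sketch; the props value is hypothetical):
#   fetch_spec({'target_os': 'android,linux'})
#   -> {'type': 'gclient_git',
#       'gclient_git_spec': {'solutions': [solution], 'target_os': ['android', 'linux']}}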
@staticmethod
def expected_root(_props):
|
def main(argv=None):
return Mojo().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| return 'src' |
pool_cname.go | // Copyright e-Xpert Solutions SA. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gtm
import "github.com/e-XpertSolutions/f5-rest-client/f5"
// PoolCNAMEEndpoint represents the REST resource for managing PoolCNAME.
const PoolCNAMEEndpoint = "/pool/cname"
// PoolCNAMEResource provides an API to manage PoolCNAME configurations.
type PoolCNAMEResource struct {
c *f5.Client
}
// ListAll lists all the PoolCNAME configurations.
func (r *PoolCNAMEResource) ListAll() (*PoolList, error) {
var list PoolList
if err := r.c.ReadQuery(BasePath+PoolCNAMEEndpoint, &list); err != nil {
return nil, err
}
return &list, nil
}
// Get a single PoolCNAME configuration identified by id.
func (r *PoolCNAMEResource) Get(id string) (*Pool, error) {
var item Pool
if err := r.c.ReadQuery(BasePath+PoolCNAMEEndpoint+"/"+id, &item); err != nil {
return nil, err
}
return &item, nil
}
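// A minimal usage sketch (assumes r is an initialized *PoolCNAMEResource and
// "example-pool" is a placeholder id; error handling elided):
//
//   list, _ := r.ListAll()
//   pool, _ := r.Get("example-pool")
//   _ = r.Edit("example-pool", *pool)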
// GetCNAMEMembers lists the members of the CNAME pool identified by id.
func (r *PoolCNAMEResource) GetCNAMEMembers(id string) (*PoolMembersList, error) {
var list PoolMembersList
if err := r.c.ReadQuery(BasePath+PoolCNAMEEndpoint+"/"+id+"/members", &list); err != nil {
return nil, err
}
return &list, nil
}
// Create a new PoolCNAME configuration.
func (r *PoolCNAMEResource) Create(item Pool) error {
if err := r.c.ModQuery("POST", BasePath+PoolCNAMEEndpoint, item); err != nil {
return err
}
return nil
}
// Edit a PoolCNAME configuration identified by id.
func (r *PoolCNAMEResource) Edit(id string, item Pool) error {
if err := r.c.ModQuery("PUT", BasePath+PoolCNAMEEndpoint+"/"+id, item); err != nil {
return err
}
return nil
}
// Delete a single PoolCNAME configuration identified by id.
func (r *PoolCNAMEResource) Delete(id string) error {
if err := r.c.ModQuery("DELETE", BasePath+PoolCNAMEEndpoint+"/"+id, nil); err != nil {
return err
} | var item PoolStatsList
if err := pr.c.ReadQuery(BasePath+PoolCNAMEEndpoint+"/"+id+"/stats", &item); err != nil {
return nil, err
}
return &item, nil
}
func (pr *PoolCNAMEResource) ShowAllCNAMEStats() (*PoolStatsList, error) {
var item PoolStatsList
if err := pr.c.ReadQuery(BasePath+PoolCNAMEEndpoint+"/stats", &item); err != nil {
return nil, err
}
return &item, nil
} | return nil
}
func (pr *PoolCNAMEResource) ShowCNAMEStats(id string) (*PoolStatsList, error) { |
timepicker.ts | import {
ChangeDetectorRef,
Component,
forwardRef,
Input,
OnChanges,
SimpleChanges,
ViewEncapsulation
} from '@angular/core';
import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';
import {isInteger, isNumber, padNumber, toInteger} from '../util/util';
import {NgbTime} from './ngb-time';
import {NgbTimepickerConfig} from './timepicker-config';
import {NgbTimeAdapter} from './ngb-time-adapter';
import {NgbTimepickerI18n} from './timepicker-i18n';
const FILTER_REGEX = /[^0-9]/g;
/**
* A directive that helps with picking hours, minutes and seconds.
*/
@Component({
selector: 'ngb-timepicker',
encapsulation: ViewEncapsulation.None,
styleUrls: ['./timepicker.scss'],
template: `
<fieldset [disabled]="disabled" [class.disabled]="disabled">
<div class="ngb-tp">
<div class="ngb-tp-input-container ngb-tp-hour">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeHour(hourStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron"></span>
<span class="sr-only" i18n="@@ngb.timepicker.increment-hours">Increment hours</span>
</button>
<input type="text" class="ngb-tp-input form-control" [class.form-control-sm]="isSmallSize"
[class.form-control-lg]="isLargeSize"
maxlength="2" inputmode="numeric" placeholder="HH" i18n-placeholder="@@ngb.timepicker.HH"
[value]="formatHour(model?.hour)" (change)="updateHour($any($event).target.value)"
[readOnly]="readonlyInputs" [disabled]="disabled" aria-label="Hours" i18n-aria-label="@@ngb.timepicker.hours"
(input)="formatInput($any($event).target)"
(keydown.ArrowUp)="changeHour(hourStep); $event.preventDefault()"
(keydown.ArrowDown)="changeHour(-hourStep); $event.preventDefault()">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeHour(-hourStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron bottom"></span>
<span class="sr-only" i18n="@@ngb.timepicker.decrement-hours">Decrement hours</span>
</button>
</div>
<div class="ngb-tp-spacer">:</div>
<div class="ngb-tp-input-container ngb-tp-minute">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeMinute(minuteStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron"></span>
<span class="sr-only" i18n="@@ngb.timepicker.increment-minutes">Increment minutes</span>
</button>
<input type="text" class="ngb-tp-input form-control" [class.form-control-sm]="isSmallSize" [class.form-control-lg]="isLargeSize"
maxlength="2" inputmode="numeric" placeholder="MM" i18n-placeholder="@@ngb.timepicker.MM"
[value]="formatMinSec(model?.minute)" (change)="updateMinute($any($event).target.value)"
[readOnly]="readonlyInputs" [disabled]="disabled" aria-label="Minutes" i18n-aria-label="@@ngb.timepicker.minutes"
(input)="formatInput($any($event).target)"
(keydown.ArrowUp)="changeMinute(minuteStep); $event.preventDefault()"
(keydown.ArrowDown)="changeMinute(-minuteStep); $event.preventDefault()">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeMinute(-minuteStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron bottom"></span>
<span class="sr-only" i18n="@@ngb.timepicker.decrement-minutes">Decrement minutes</span>
</button>
</div>
<div *ngIf="seconds" class="ngb-tp-spacer">:</div>
<div *ngIf="seconds" class="ngb-tp-input-container ngb-tp-second">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeSecond(secondStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron"></span>
<span class="sr-only" i18n="@@ngb.timepicker.increment-seconds">Increment seconds</span>
</button>
<input type="text" class="ngb-tp-input form-control" [class.form-control-sm]="isSmallSize" [class.form-control-lg]="isLargeSize"
maxlength="2" inputmode="numeric" placeholder="SS" i18n-placeholder="@@ngb.timepicker.SS"
[value]="formatMinSec(model?.second)" (change)="updateSecond($any($event).target.value)"
[readOnly]="readonlyInputs" [disabled]="disabled" aria-label="Seconds" i18n-aria-label="@@ngb.timepicker.seconds"
(input)="formatInput($any($event).target)"
(keydown.ArrowUp)="changeSecond(secondStep); $event.preventDefault()"
(keydown.ArrowDown)="changeSecond(-secondStep); $event.preventDefault()">
<button *ngIf="spinners" tabindex="-1" type="button" (click)="changeSecond(-secondStep)"
class="btn btn-link" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize" [class.disabled]="disabled"
[disabled]="disabled">
<span class="chevron ngb-tp-chevron bottom"></span>
<span class="sr-only" i18n="@@ngb.timepicker.decrement-seconds">Decrement seconds</span>
</button>
</div>
<div *ngIf="meridian" class="ngb-tp-spacer"></div>
<div *ngIf="meridian" class="ngb-tp-meridian">
<button type="button" class="btn btn-outline-primary" [class.btn-sm]="isSmallSize" [class.btn-lg]="isLargeSize"
[disabled]="disabled" [class.disabled]="disabled"
(click)="toggleMeridian()">
<ng-container *ngIf="model && model.hour >= 12; else am"
i18n="@@ngb.timepicker.PM">{{ i18n.getAfternoonPeriod() }}</ng-container>
<ng-template #am i18n="@@ngb.timepicker.AM">{{ i18n.getMorningPeriod() }}</ng-template>
</button>
</div>
</div>
</fieldset>
`,
providers: [{provide: NG_VALUE_ACCESSOR, useExisting: forwardRef(() => NgbTimepicker), multi: true}]
})
export class | implements ControlValueAccessor,
OnChanges {
static ngAcceptInputType_size: string;
disabled: boolean;
model: NgbTime;
private _hourStep: number;
private _minuteStep: number;
private _secondStep: number;
/**
* Whether to display 12H or 24H mode.
*/
@Input() meridian: boolean;
/**
* If `true`, the spinners above and below inputs are visible.
*/
@Input() spinners: boolean;
/**
* If `true`, it is possible to select seconds.
*/
@Input() seconds: boolean;
/**
* The number of hours to add/subtract when clicking hour spinners.
*/
@Input()
set hourStep(step: number) {
this._hourStep = isInteger(step) ? step : this._config.hourStep;
}
get hourStep(): number { return this._hourStep; }
/**
* The number of minutes to add/subtract when clicking minute spinners.
*/
@Input()
set minuteStep(step: number) {
this._minuteStep = isInteger(step) ? step : this._config.minuteStep;
}
get minuteStep(): number { return this._minuteStep; }
/**
* The number of seconds to add/subtract when clicking second spinners.
*/
@Input()
set secondStep(step: number) {
this._secondStep = isInteger(step) ? step : this._config.secondStep;
}
get secondStep(): number { return this._secondStep; }
/**
* If `true`, the timepicker is readonly and can't be changed.
*/
@Input() readonlyInputs: boolean;
/**
* The size of inputs and buttons.
*/
@Input() size: 'small' | 'medium' | 'large';
constructor(
private readonly _config: NgbTimepickerConfig, private _ngbTimeAdapter: NgbTimeAdapter<any>,
private _cd: ChangeDetectorRef, public i18n: NgbTimepickerI18n) {
this.meridian = _config.meridian;
this.spinners = _config.spinners;
this.seconds = _config.seconds;
this.hourStep = _config.hourStep;
this.minuteStep = _config.minuteStep;
this.secondStep = _config.secondStep;
this.disabled = _config.disabled;
this.readonlyInputs = _config.readonlyInputs;
this.size = _config.size;
}
onChange = (_: any) => {};
onTouched = () => {};
writeValue(value) {
const structValue = this._ngbTimeAdapter.fromModel(value);
this.model = structValue ? new NgbTime(structValue.hour, structValue.minute, structValue.second) : new NgbTime();
if (!this.seconds && (!structValue || !isNumber(structValue.second))) {
this.model.second = 0;
}
this._cd.markForCheck();
}
registerOnChange(fn: (value: any) => any): void { this.onChange = fn; }
registerOnTouched(fn: () => any): void { this.onTouched = fn; }
setDisabledState(isDisabled: boolean) { this.disabled = isDisabled; }
changeHour(step: number) {
this.model.changeHour(step);
this.propagateModelChange();
}
changeMinute(step: number) {
this.model.changeMinute(step);
this.propagateModelChange();
}
changeSecond(step: number) {
this.model.changeSecond(step);
this.propagateModelChange();
}
updateHour(newVal: string) {
const isPM = this.model.hour >= 12;
const enteredHour = toInteger(newVal);
if (this.meridian && (isPM && enteredHour < 12 || !isPM && enteredHour === 12)) {
this.model.updateHour(enteredHour + 12);
} else {
this.model.updateHour(enteredHour);
}
this.propagateModelChange();
}
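// Examples of the 12h -> 24h mapping above (a sketch; assumes NgbTime normalises hours into 0-23):
//   model.hour = 14 (PM), user enters "3"  -> updateHour(15)
//   model.hour = 9  (AM), user enters "12" -> updateHour(24), i.e. midnight once normalised
//   model.hour = 14 (PM), user enters "12" -> updateHour(12), i.e. noon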
updateMinute(newVal: string) {
this.model.updateMinute(toInteger(newVal));
this.propagateModelChange();
}
updateSecond(newVal: string) {
this.model.updateSecond(toInteger(newVal));
this.propagateModelChange();
}
toggleMeridian() {
if (this.meridian) {
this.changeHour(12);
}
}
formatInput(input: HTMLInputElement) { input.value = input.value.replace(FILTER_REGEX, ''); }
formatHour(value?: number) {
if (isNumber(value)) {
if (this.meridian) {
return padNumber(value % 12 === 0 ? 12 : value % 12);
} else {
return padNumber(value % 24);
}
} else {
return padNumber(NaN);
}
}
formatMinSec(value?: number) { return padNumber(isNumber(value) ? value : NaN); }
get isSmallSize(): boolean { return this.size === 'small'; }
get isLargeSize(): boolean { return this.size === 'large'; }
ngOnChanges(changes: SimpleChanges): void {
if (changes['seconds'] && !this.seconds && this.model && !isNumber(this.model.second)) {
this.model.second = 0;
this.propagateModelChange(false);
}
}
private propagateModelChange(touched = true) {
if (touched) {
this.onTouched();
}
if (this.model.isValid(this.seconds)) {
this.onChange(
this._ngbTimeAdapter.toModel({hour: this.model.hour, minute: this.model.minute, second: this.model.second}));
} else {
this.onChange(this._ngbTimeAdapter.toModel(null));
}
}
}
| NgbTimepicker |
identity_manager.py | # -*- coding: utf-8 -*-
from spaceone.core.manager import BaseManager
from spaceone.secret.connector.identity_connector import IdentityConnector
class IdentityManager(BaseManager): |
def get_service_account(self, service_account_id, domain_id):
return self.identity_conn.get_service_account(service_account_id, domain_id)
def list_service_accounts(self, query, domain_id):
return self.identity_conn.list_service_accounts(query, domain_id)
def get_project(self, project_id, domain_id):
return self.identity_conn.get_project(project_id, domain_id)
def list_projects(self, query, domain_id):
return self.identity_conn.list_projects(query, domain_id) |
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.identity_conn: IdentityConnector = self.locator.get_connector('IdentityConnector') |
test_user.py | from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
"""
Test user creation manager
"""
def test_create_user(self):
"""
Creates a new user with email as primary identifier instead of username
"""
User = get_user_model()
user = User.objects.create_user(email="[email protected]", password="foo")
self.assertEqual(user.email, "[email protected]")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def | (self):
"""
Creates a superuser with the custom user model
"""
User = get_user_model()
admin_user = User.objects.create_superuser(email="[email protected]", password="foo")
self.assertEqual(admin_user.email, "[email protected]")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(email="[email protected]", password="foo", is_superuser=False)
| test_create_superuser |
_model_serializer.py | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
import base64
import gzip
import json
from abc import ABC
from typing import Sequence, Dict, Any, Optional
def add_if_exists(d: Dict[str, Any], k: str, v: Any) -> None:
if v is not None:
d[k] = v
class ModelSerializer(ABC):
def __init__(
self,
feature_names: Sequence[str],
target_type: Optional[str] = None,
classification_labels: Optional[Sequence[str]] = None,
):
self._target_type = target_type
self._feature_names = feature_names
self._classification_labels = classification_labels
def to_dict(self) -> Dict[str, Any]:
d: Dict[str, Any] = {}
add_if_exists(d, "target_type", self._target_type)
add_if_exists(d, "feature_names", self._feature_names)
add_if_exists(d, "classification_labels", self._classification_labels)
return d
@property
def feature_names(self) -> Sequence[str]:
return self._feature_names
def serialize_model(self) -> Dict[str, Any]:
return {"trained_model": self.to_dict()}
def serialize_and_compress_model(self) -> str:
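# Compact JSON -> gzip -> base64, so the whole model definition travels as a single ASCII string.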
json_string = json.dumps(self.serialize_model(), separators=(",", ":"))
return base64.b64encode(gzip.compress(json_string.encode("utf-8"))).decode(
"ascii"
)
class TreeNode:
def __init__(
self,
node_idx: int,
default_left: Optional[bool] = None,
decision_type: Optional[str] = None,
left_child: Optional[int] = None,
right_child: Optional[int] = None,
split_feature: Optional[int] = None,
threshold: Optional[float] = None,
leaf_value: Optional[float] = None,
):
self._node_idx = node_idx
self._decision_type = decision_type
self._left_child = left_child
self._right_child = right_child
self._split_feature = split_feature
self._threshold = threshold
self._leaf_value = leaf_value
self._default_left = default_left
def to_dict(self) -> Dict[str, Any]:
d: Dict[str, Any] = {}
add_if_exists(d, "node_index", self._node_idx)
add_if_exists(d, "decision_type", self._decision_type)
if self._leaf_value is None:
add_if_exists(d, "left_child", self._left_child)
add_if_exists(d, "right_child", self._right_child)
add_if_exists(d, "split_feature", self._split_feature)
add_if_exists(d, "threshold", self._threshold)
else:
add_if_exists(d, "leaf_value", self._leaf_value)
return d
class Tree(ModelSerializer):
|
class Ensemble(ModelSerializer):
def __init__(
self,
feature_names: Sequence[str],
trained_models: Sequence[ModelSerializer],
output_aggregator: Dict[str, Any],
target_type: Optional[str] = None,
classification_labels: Optional[Sequence[str]] = None,
classification_weights: Optional[Sequence[float]] = None,
):
super().__init__(
feature_names=feature_names,
target_type=target_type,
classification_labels=classification_labels,
)
self._trained_models = trained_models
self._classification_weights = classification_weights
self._output_aggregator = output_aggregator
def to_dict(self) -> Dict[str, Any]:
d = super().to_dict()
trained_models = None
if self._trained_models:
trained_models = [t.to_dict() for t in self._trained_models]
add_if_exists(d, "trained_models", trained_models)
add_if_exists(d, "classification_weights", self._classification_weights)
add_if_exists(d, "aggregate_output", self._output_aggregator)
return {"ensemble": d}
| def __init__(
self,
feature_names: Sequence[str],
target_type: Optional[str] = None,
tree_structure: Optional[Sequence[TreeNode]] = None,
classification_labels: Optional[Sequence[str]] = None,
):
super().__init__(
feature_names=feature_names,
target_type=target_type,
classification_labels=classification_labels,
)
if target_type == "regression" and classification_labels:
raise ValueError("regression does not support classification_labels")
self._tree_structure = tree_structure or []
def to_dict(self) -> Dict[str, Any]:
d = super().to_dict()
add_if_exists(d, "tree_structure", [t.to_dict() for t in self._tree_structure])
return {"tree": d} |
FormControlLabelPosition.hooks.js | import React from 'react';
import Radio from '@material-ui/core/Radio'; | import FormControlLabel from '@material-ui/core/FormControlLabel';
import FormControl from '@material-ui/core/FormControl';
import FormLabel from '@material-ui/core/FormLabel';
function FormControlLabelPosition() {
const [value, setValue] = React.useState('female');
function handleChange(event) {
setValue(event.target.value);
}
return (
<FormControl component="fieldset">
<FormLabel component="legend">labelPlacement</FormLabel>
<RadioGroup aria-label="position" name="position" value={value} onChange={handleChange} row>
<FormControlLabel
value="top"
control={<Radio color="primary" />}
label="Top"
labelPlacement="top"
/>
<FormControlLabel
value="start"
control={<Radio color="primary" />}
label="Start"
labelPlacement="start"
/>
<FormControlLabel
value="bottom"
control={<Radio color="primary" />}
label="Bottom"
labelPlacement="bottom"
/>
<FormControlLabel
value="end"
control={<Radio color="primary" />}
label="End"
labelPlacement="end"
/>
</RadioGroup>
</FormControl>
);
}
export default FormControlLabelPosition; | import RadioGroup from '@material-ui/core/RadioGroup'; |
library_desc.rs | #[allow(unused_imports)] use crate::*;
use crate::ctypes::*;
use crate::d3d::*;
use winapi::um::d3d11shader::*;
/// \[[docs.microsoft.com](https://docs.microsoft.com/en-us/windows/win32/api/d3d11shader/ns-d3d11shader-d3d11_library_desc)\]
/// D3D11_LIBRARY_DESC
///
/// ### Example
/// ```rust
/// # use thindx::{*, d3d::*}; let d3dc = Compiler::new(47).unwrap();
/// let shader = d3dc.compile_from_file(
/// r"test\data\library.hlsl", None, None, (), "lib_5_0",
/// Compile::Debug, CompileEffect::None
/// ).unwrap();
///
/// let r : d3d11::LibraryReflection = d3dc.reflect_library(&shader).unwrap();
/// let desc : d3d11::LibraryDesc = r.get_desc().unwrap();
/// println!("{:#?}", desc);
/// assert!(desc.function_count > 0);
/// ```
///
/// ### Output
/// ```text
/// LibraryDesc {
/// creator: Some(
/// "Microsoft (R) HLSL Shader Compiler 10.1",
/// ),
/// flags: Compile::None,
/// function_count: 1,
/// }
/// ```
///
/// ### See Also
/// * [d3d11::LibraryReflection::get_desc]
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)] pub struct | <'s> {
/// e.g. "Microsoft (R) HLSL Shader Compiler 10.1"
pub creator: CStrPtr<'s>, // maybe never null?
pub flags: Compile,
pub function_count: u32,
}
impl LibraryDesc<'_> {
pub(crate) fn as_mut_ptr(&mut self) -> *mut D3D11_LIBRARY_DESC {
self as *mut Self as *mut _
}
}
test_layout! { LibraryDesc => D3D11_LIBRARY_DESC {
creator => Creator,
flags => Flags,
function_count => FunctionCount,
}}
| LibraryDesc |
conftest.py | import pytest
from core.models import UL_ORG_ADMIN
from sushi.models import CounterReportType, SushiCredentials
from organizations.tests.conftest import organizations # noqa
from publications.tests.conftest import platforms # noqa
from logs.tests.conftest import report_type_nd # noqa
@pytest.fixture()
def counter_report_type_named(report_type_nd):
def fn(name, version=5):
|
yield fn
@pytest.fixture()
def counter_report_type(report_type_nd):
report_type = report_type_nd(0)
yield CounterReportType.objects.create(
code='TR', counter_version=5, name='Title report', report_type=report_type
)
@pytest.fixture()
def counter_report_type_wrap_report_type(report_type_nd):
def fun(report_type, code='TR', counter_version=5, name='Title report'):
return CounterReportType.objects.create(
code=code, counter_version=counter_version, name=name, report_type=report_type
)
return fun
@pytest.fixture()
def credentials(organizations, platforms):
credentials = SushiCredentials.objects.create(
organization=organizations[0],
platform=platforms[0],
counter_version=5,
lock_level=UL_ORG_ADMIN,
url='http://a.b.c/',
)
yield credentials
| rt = report_type_nd(0, short_name=name + 'rt')
return CounterReportType.objects.create(
code=name, counter_version=version, name=name + ' title', report_type=rt
) |
privateoffice.module.ts | class | {
name: number;
position: number;
weight: number;
symbol: number;
}
| PeriodicElement |
api_artifact.py | # coding: utf-8
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiArtifact(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'checksum': 'str',
'id': 'str',
'names': 'list[str]'
}
attribute_map = {
'name': 'name',
'checksum': 'checksum',
'id': 'id',
'names': 'names'
}
def __init__(self, name=None, checksum=None, id=None, names=None): # noqa: E501
"""ApiArtifact - a model defined in Swagger""" # noqa: E501
self._name = None
self._checksum = None
self._id = None
self._names = None
self.discriminator = None
if name is not None:
self.name = name
if checksum is not None:
self.checksum = checksum
if id is not None:
self.id = id
if names is not None:
self.names = names
@property
def name(self):
"""Gets the name of this ApiArtifact. # noqa: E501
Name of the artifact. This may be the path to a binary or jar file, or in the case of a container build, the name used to push the container image to Google Container Registry, as presented to `docker push`. This field is deprecated in favor of the plural `names` field; it continues to exist here to allow existing BuildProvenance serialized to json in google.devtools.containeranalysis.v1alpha1.BuildDetails.provenance_bytes to deserialize back into proto. # noqa: E501
:return: The name of this ApiArtifact. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ApiArtifact.
Name of the artifact. This may be the path to a binary or jar file, or in the case of a container build, the name used to push the container image to Google Container Registry, as presented to `docker push`. This field is deprecated in favor of the plural `names` field; it continues to exist here to allow existing BuildProvenance serialized to json in google.devtools.containeranalysis.v1alpha1.BuildDetails.provenance_bytes to deserialize back into proto. # noqa: E501
:param name: The name of this ApiArtifact. # noqa: E501
:type: str
""" |
self._name = name
@property
def checksum(self):
"""Gets the checksum of this ApiArtifact. # noqa: E501
Hash or checksum value of a binary, or Docker Registry 2.0 digest of a container. # noqa: E501
:return: The checksum of this ApiArtifact. # noqa: E501
:rtype: str
"""
return self._checksum
@checksum.setter
def checksum(self, checksum):
"""Sets the checksum of this ApiArtifact.
Hash or checksum value of a binary, or Docker Registry 2.0 digest of a container. # noqa: E501
:param checksum: The checksum of this ApiArtifact. # noqa: E501
:type: str
"""
self._checksum = checksum
@property
def id(self):
"""Gets the id of this ApiArtifact. # noqa: E501
:return: The id of this ApiArtifact. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiArtifact.
:param id: The id of this ApiArtifact. # noqa: E501
:type: str
"""
self._id = id
@property
def names(self):
"""Gets the names of this ApiArtifact. # noqa: E501
Related artifact names. This may be the path to a binary or jar file, or in the case of a container build, the name used to push the container image to Google Container Registry, as presented to `docker push`. Note that a single Artifact ID can have multiple names, for example if two tags are applied to one image. # noqa: E501
:return: The names of this ApiArtifact. # noqa: E501
:rtype: list[str]
"""
return self._names
@names.setter
def names(self, names):
"""Sets the names of this ApiArtifact.
Related artifact names. This may be the path to a binary or jar file, or in the case of a container build, the name used to push the container image to Google Container Registry, as presented to `docker push`. Note that a single Artifact ID can have multiple names, for example if two tags are applied to one image. # noqa: E501
:param names: The names of this ApiArtifact. # noqa: E501
:type: list[str]
"""
self._names = names
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiArtifact, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiArtifact):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | |
stream_edit.js | var stream_edit = (function () {
var exports = {};
function setup_subscriptions_stream_hash(sub) {
var id = sub.stream_id;
subs.change_state.prevent_once();
window.location.hash = "#streams" + "/" + id + "/" + hash_util.encodeHashComponent(sub.name);
}
function settings_for_sub(sub) {
var id = parseInt(sub.stream_id, 10);
return $("#subscription_overlay .subscription_settings[data-stream-id='" + id + "']");
}
exports.is_sub_settings_active = function (sub) {
// This function returns whether the given sub object is
// currently being viewed/edited in the stream edit UI. This is
// used to determine whether we need to rerender the stream edit
// UI when a sub object is modified by an event.
var active_stream = subs.active_stream();
if (active_stream !== undefined && active_stream.id === sub.stream_id) {
return true;
}
return false;
};
function get_email_of_subscribers(subscribers) {
var emails = [];
subscribers.each(function (o, i) {
var email = people.get_person_from_user_id(i).email;
emails.push(email);
});
return emails;
}
exports.rerender_subscribers_list = function (sub) {
if (!sub.can_access_subscribers) {
$(".subscriber_list_settings_container").hide();
} else {
var emails = get_email_of_subscribers(sub.subscribers);
var subscribers_list = list_render.get("stream_subscribers/" + sub.stream_id);
// Changing the data clears the rendered list and the list needs to be re-rendered.
// Perform re-rendering only when the stream settings form of the corresponding
// stream is open.
if (subscribers_list) {
exports.sort_but_pin_current_user_on_top(emails);
subscribers_list.data(emails);
subscribers_list.render();
ui.update_scrollbar($(".subscriber_list_container"));
}
$(".subscriber_list_settings_container").show();
}
};
exports.hide_sub_settings = function (sub) {
var $settings = $(".subscription_settings[data-stream-id='" + sub.stream_id + "']");
$settings.find(".regular_subscription_settings").removeClass('in');
// Clear email address widget
$settings.find(".email-address").html("");
if (!sub.can_change_stream_permissions) {
$settings.find(".change-stream-privacy").hide();
}
};
exports.show_sub_settings = function (sub) {
if (!exports.is_sub_settings_active(sub)) {
return;
}
var $settings = $(".subscription_settings[data-stream-id='" + sub.stream_id + "']");
if ($settings.find(".email-address").val().length === 0) {
// Render the stream email address if it hasn't been rendered yet.
$settings.find(".email-address").text(sub.email_address);
$settings.find(".stream-email-box").show();
}
$settings.find(".regular_subscription_settings").addClass('in');
};
exports.show_stream_row = function (node, show_settings) {
$(".display-type #add_new_stream_title").hide();
$(".display-type #stream_settings_title, .right .settings").show();
$(".stream-row.active").removeClass("active");
if (show_settings) {
subs.show_subs_pane.settings();
$(node).addClass("active");
stream_edit.show_settings_for(node);
} else {
subs.show_subs_pane.nothing_selected();
}
};
function format_member_list_elem(email) {
var person = people.get_by_email(email);
return templates.render('stream_member_list_entry',
{name: person.full_name, email: email,
displaying_for_admin: page_params.is_admin});
}
function get_subscriber_list(sub_row) {
var id = sub_row.data("stream-id");
return $('.subscription_settings[data-stream-id="' + id + '"] .subscriber-list');
}
exports.update_stream_name = function (sub, new_name) {
var sub_settings = settings_for_sub(sub);
sub_settings.find(".email-address").text(sub.email_address);
sub_settings.find(".stream-name-editable").text(new_name);
};
exports.update_stream_description = function (sub) {
var stream_settings = settings_for_sub(sub);
stream_settings.find('input.description').val(sub.description);
stream_settings.find('.stream-description-editable').html(sub.rendered_description);
};
exports.invite_user_to_stream = function (user_email, sub, success, failure) {
// TODO: use stream_id when backend supports it
var stream_name = sub.name;
return channel.post({
url: "/json/users/me/subscriptions",
data: {subscriptions: JSON.stringify([{name: stream_name}]),
principals: JSON.stringify([user_email])},
success: success,
error: failure,
});
};
exports.remove_user_from_stream = function (user_email, sub, success, failure) {
// TODO: use stream_id when backend supports it
var stream_name = sub.name;
return channel.del({
url: "/json/users/me/subscriptions",
data: {subscriptions: JSON.stringify([stream_name]),
principals: JSON.stringify([user_email])},
success: success,
error: failure,
});
};
function get_stream_id(target) {
if (target.constructor !== jQuery) {
target = $(target);
}
return target.closest(".stream-row, .subscription_settings").attr("data-stream-id");
}
function get_sub_for_target(target) {
var stream_id = get_stream_id(target);
if (!stream_id) {
blueslip.error('Cannot find stream id for target');
return;
}
var sub = stream_data.get_sub_by_id(stream_id);
if (!sub) {
blueslip.error('get_sub_for_target() failed id lookup: ' + stream_id);
return;
}
return sub;
}
exports.sort_but_pin_current_user_on_top = function (emails) {
if (emails === undefined) {
blueslip.error("Undefined emails are passed to function sort_but_pin_current_user_on_top");
return;
}
// Set current user top of subscription list, if subscribed.
if (emails.indexOf(people.my_current_email()) > -1) {
emails.splice(emails.indexOf(people.my_current_email()), 1);
emails.sort();
emails.unshift(people.my_current_email());
} else {
emails.sort();
}
};
function show_subscription_settings(sub_row) {
var stream_id = sub_row.data("stream-id"); | var colorpicker = sub_settings.find('.colorpicker');
var color = stream_data.get_color(sub.name);
stream_color.set_colorpicker_color(colorpicker, color);
if (!sub.render_subscribers) {
return;
}
// fetch subscriber list from memory.
var list = get_subscriber_list(sub_settings);
list.empty();
var emails = get_email_of_subscribers(sub.subscribers);
exports.sort_but_pin_current_user_on_top(emails);
list_render.create(list, emails, {
name: "stream_subscribers/" + stream_id,
modifier: function (item) {
return format_member_list_elem(item);
},
filter: {
element: $("[data-stream-id='" + stream_id + "'] .search"),
callback: function (item, value) {
var person = people.get_by_email(item);
if (person) {
var email = person.email.toLocaleLowerCase();
var full_name = person.full_name.toLowerCase();
return email.indexOf(value) > -1 || full_name.indexOf(value) > -1;
}
},
},
}).init();
ui.set_up_scrollbar($(".subscriber_list_container"));
sub_settings.find('input[name="principal"]').typeahead({
source: people.get_realm_persons, // This is a function.
items: 5,
highlighter: function (item) {
return typeahead_helper.render_person(item);
},
matcher: function (item) {
var query = $.trim(this.query.toLowerCase());
if (query === '' || query === item.email) {
return false;
}
// Case-insensitive.
var item_matches = item.email.toLowerCase().indexOf(query) !== -1 ||
item.full_name.toLowerCase().indexOf(query) !== -1;
var is_subscribed = stream_data.is_user_subscribed(sub.name, item.user_id);
return item_matches && !is_subscribed;
},
sorter: function (matches) {
var current_stream = compose_state.stream_name();
return typeahead_helper.sort_recipientbox_typeahead(
this.query, matches, current_stream);
},
updater: function (item) {
return item.email;
},
});
}
exports.show_settings_for = function (node) {
var stream_id = get_stream_id(node);
var sub = stream_data.get_sub_by_id(stream_id);
stream_data.update_calculated_fields(sub);
var html = templates.render('subscription_settings', sub);
$('.subscriptions .right .settings').html(html);
var sub_settings = settings_for_sub(sub);
$(".nothing-selected").hide();
ui.update_scrollbar($("#subscription_overlay .settings"));
sub_settings.addClass("show");
show_subscription_settings(sub_settings);
};
function stream_home_view_clicked(e) {
var sub = get_sub_for_target(e.target);
if (!sub) {
blueslip.error('stream_home_view_clicked() fails');
return;
}
var sub_settings = settings_for_sub(sub);
var notification_checkboxes = sub_settings.find(".sub_notification_setting");
subs.toggle_home(sub);
if (sub.in_home_view) {
sub_settings.find(".mute-note").addClass("hide-mute-note");
notification_checkboxes.removeClass("muted-sub");
notification_checkboxes.find("input[type='checkbox']").prop("disabled", false);
} else {
sub_settings.find(".mute-note").removeClass("hide-mute-note");
notification_checkboxes.addClass("muted-sub");
notification_checkboxes.find("input[type='checkbox']").attr("disabled", true);
}
}
exports.bulk_set_stream_property = function (sub_data) {
return channel.post({
url: '/json/users/me/subscriptions/properties',
data: {subscription_data: JSON.stringify(sub_data)},
timeout: 10 * 1000,
});
};
exports.set_stream_property = function (sub, property, value) {
var sub_data = {stream_id: sub.stream_id, property: property, value: value};
exports.bulk_set_stream_property([sub_data]);
};
exports.set_notification_setting_for_all_streams = function (notification_type, new_setting) {
var sub_data = [];
_.each(stream_data.subscribed_subs(), function (sub) {
if (sub[notification_type] !== new_setting) {
sub_data.push({
stream_id: sub.stream_id,
property: notification_type,
value: new_setting,
});
}
});
exports.bulk_set_stream_property(sub_data);
};
function redraw_privacy_related_stuff(sub_row, sub) {
var stream_settings = settings_for_sub(sub);
var html;
stream_data.update_calculated_fields(sub);
html = templates.render('subscription_setting_icon', sub);
sub_row.find('.icon').expectOne().replaceWith($(html));
html = templates.render('subscription_type', sub);
stream_settings.find('.subscription-type-text').expectOne().html(html);
if (sub.invite_only) {
stream_settings.find(".large-icon")
.removeClass("hash").addClass("lock")
.html("<i class='fa fa-lock' aria-hidden='true'></i>");
} else {
stream_settings.find(".large-icon")
.addClass("hash").removeClass("lock")
.html("");
}
stream_list.redraw_stream_privacy(sub);
}
function change_stream_privacy(e) {
e.stopPropagation();
var stream_id = $(e.target).data("stream-id");
var sub = stream_data.get_sub_by_id(stream_id);
var privacy_setting = $('#stream_privacy_modal input[name=privacy]:checked').val();
var is_announcement_only = $('#stream_privacy_modal input[name=is-announcement-only]').prop('checked');
var invite_only;
var history_public_to_subscribers;
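// Map the selected radio value onto the two flags sent to the server; only the plain invite-only option hides history from subscribers.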
if (privacy_setting === 'invite-only') {
invite_only = true;
history_public_to_subscribers = false;
} else if (privacy_setting === 'invite-only-public-history') {
invite_only = true;
history_public_to_subscribers = true;
} else {
invite_only = false;
history_public_to_subscribers = true;
}
$(".stream_change_property_info").hide();
var data = {
stream_name: sub.name,
// toggle the privacy setting
is_private: JSON.stringify(invite_only),
is_announcement_only: JSON.stringify(is_announcement_only),
history_public_to_subscribers: JSON.stringify(history_public_to_subscribers),
};
channel.patch({
url: "/json/streams/" + stream_id,
data: data,
success: function () {
sub = stream_data.get_sub_by_id(stream_id);
var sub_row = $(".stream-row[data-stream-id='" + stream_id + "']");
// save new privacy settings.
sub.invite_only = invite_only;
sub.is_announcement_only = is_announcement_only;
sub.history_public_to_subscribers = history_public_to_subscribers;
redraw_privacy_related_stuff(sub_row, sub);
$("#stream_privacy_modal").remove();
// For auto update, without rendering whole template
stream_data.update_calculated_fields(sub);
if (!sub.can_change_stream_permissions) {
$(".change-stream-privacy").hide();
}
},
error: function () {
$("#change-stream-privacy-button").text(i18n.t("Try again"));
},
});
}
function stream_desktop_notifications_clicked(e) {
var sub = get_sub_for_target(e.target);
sub.desktop_notifications = !sub.desktop_notifications;
exports.set_stream_property(sub, 'desktop_notifications', sub.desktop_notifications);
}
function stream_audible_notifications_clicked(e) {
var sub = get_sub_for_target(e.target);
sub.audible_notifications = !sub.audible_notifications;
exports.set_stream_property(sub, 'audible_notifications', sub.audible_notifications);
}
function stream_push_notifications_clicked(e) {
var sub = get_sub_for_target(e.target);
sub.push_notifications = !sub.push_notifications;
exports.set_stream_property(sub, 'push_notifications', sub.push_notifications);
}
function stream_email_notifications_clicked(e) {
var sub = get_sub_for_target(e.target);
sub.email_notifications = !sub.email_notifications;
exports.set_stream_property(sub, 'email_notifications', sub.email_notifications);
}
function stream_pin_clicked(e) {
var sub = get_sub_for_target(e.target);
if (!sub) {
blueslip.error('stream_pin_clicked() fails');
return;
}
subs.toggle_pin_to_top_stream(sub);
}
exports.change_stream_name = function (e) {
e.preventDefault();
var sub_settings = $(e.target).closest('.subscription_settings');
var stream_id = $(e.target).closest(".subscription_settings").attr("data-stream-id");
var new_name_box = sub_settings.find('.stream-name-editable');
var new_name = $.trim(new_name_box.text());
$(".stream_change_property_info").hide();
channel.patch({
// Stream names might contain unsafe characters so we must encode it first.
url: "/json/streams/" + stream_id,
data: {new_name: JSON.stringify(new_name)},
success: function () {
new_name_box.val('');
ui_report.success(i18n.t("The stream has been renamed!"), $(".stream_change_property_info"));
},
error: function (xhr) {
new_name_box.text(stream_data.maybe_get_stream_name(stream_id));
ui_report.error(i18n.t("Error"), xhr, $(".stream_change_property_info"));
ui.update_scrollbar($("#subscription_overlay .settings"));
},
});
};
exports.change_stream_description = function (e) {
e.preventDefault();
var sub_settings = $(e.target).closest('.subscription_settings');
var sub = get_sub_for_target(sub_settings);
if (!sub) {
blueslip.error('change_stream_description() fails');
return;
}
var stream_id = sub.stream_id;
var description = sub_settings.find('.stream-description-editable').text().trim();
$(".stream_change_property_info").hide();
channel.patch({
// Description might contain unsafe characters so we must encode it first.
url: '/json/streams/' + stream_id,
data: {
description: JSON.stringify(description),
},
success: function () {
// The event from the server will update the rest of the UI
ui_report.success(i18n.t("The stream description has been updated!"),
$(".stream_change_property_info"));
},
error: function (xhr) {
sub_settings.find('.stream-description-editable').html(sub.rendered_description);
ui_report.error(i18n.t("Error"), xhr, $(".stream_change_property_info"));
ui.update_scrollbar($("#subscription_overlay .settings"));
},
});
};
exports.delete_stream = function (stream_id, alert_element, stream_row) {
channel.del({
url: '/json/streams/' + stream_id,
error: function (xhr) {
ui_report.error(i18n.t("Failed"), xhr, alert_element);
},
success: function () {
stream_row.remove();
},
});
};
exports.initialize = function () {
$("#zfilt").on("click", ".stream_sub_unsub_button", function (e) {
e.preventDefault();
e.stopPropagation();
var stream_name = narrow_state.stream();
if (stream_name === undefined) {
return;
}
var sub = stream_data.get_sub(stream_name);
subs.sub_or_unsub(sub);
});
$("#subscriptions_table").on("click", ".change-stream-privacy", function (e) {
var stream_id = get_stream_id(e.target);
var stream = stream_data.get_sub_by_id(stream_id);
var template_data = {
stream_id: stream_id,
stream_name: stream.name,
is_announcement_only: stream.is_announcement_only,
is_public: !stream.invite_only,
is_private: stream.invite_only && !stream.history_public_to_subscribers,
is_private_with_public_history: stream.invite_only &&
stream.history_public_to_subscribers,
};
var change_privacy_modal = templates.render("subscription_stream_privacy_modal", template_data);
$("#stream_privacy_modal").remove();
$("#subscriptions_table").append(change_privacy_modal);
overlays.open_modal('stream_privacy_modal');
});
$("#subscriptions_table").on('click', '#change-stream-privacy-button',
change_stream_privacy);
$("#subscriptions_table").on('click', '.close-privacy-modal', function (e) {
// This fixes a weird bug in which subscription_settings hides
// unexpectedly when the cancel button is clicked.
e.stopPropagation();
});
$("#subscriptions_table").on("click", "#sub_setting_not_in_home_view",
stream_home_view_clicked);
$("#subscriptions_table").on("click", "#sub_desktop_notifications_setting",
stream_desktop_notifications_clicked);
$("#subscriptions_table").on("click", "#sub_audible_notifications_setting",
stream_audible_notifications_clicked);
$("#subscriptions_table").on("click", "#sub_push_notifications_setting",
stream_push_notifications_clicked);
$("#subscriptions_table").on("click", "#sub_email_notifications_setting",
stream_email_notifications_clicked);
$("#subscriptions_table").on("click", "#sub_pin_setting",
stream_pin_clicked);
$("#subscriptions_table").on("submit", ".subscriber_list_add form", function (e) {
e.preventDefault();
var settings_row = $(e.target).closest('.subscription_settings');
var sub = get_sub_for_target(settings_row);
if (!sub) {
blueslip.error('.subscriber_list_add form submit fails');
return;
}
var text_box = settings_row.find('input[name="principal"]');
var principal = $.trim(text_box.val());
var stream_subscription_info_elem = $('.stream_subscription_info').expectOne();
function invite_success(data) {
text_box.val('');
if (data.subscribed.hasOwnProperty(principal)) {
stream_subscription_info_elem.text(i18n.t("Subscribed successfully!"));
// The rest of the work is done via the subscription -> add event we will get
} else {
stream_subscription_info_elem.text(i18n.t("User already subscribed."));
}
stream_subscription_info_elem.addClass("text-success")
.removeClass("text-error");
}
function invite_failure(xhr) {
var error = JSON.parse(xhr.responseText);
stream_subscription_info_elem.text(error.msg)
.addClass("text-error").removeClass("text-success");
}
exports.invite_user_to_stream(principal, sub, invite_success, invite_failure);
});
$("#subscriptions_table").on("submit", ".subscriber_list_remove form", function (e) {
e.preventDefault();
var list_entry = $(e.target).closest("tr");
var principal = list_entry.children(".subscriber-email").text();
var settings_row = $(e.target).closest('.subscription_settings');
var sub = get_sub_for_target(settings_row);
if (!sub) {
blueslip.error('.subscriber_list_remove form submit fails');
return;
}
var stream_subscription_info_elem = $('.stream_subscription_info').expectOne();
function removal_success(data) {
if (data.removed.length > 0) {
// Remove the user from the subscriber list.
list_entry.remove();
stream_subscription_info_elem.text(i18n.t("Unsubscribed successfully!"));
// The rest of the work is done via the subscription -> remove event we will get
} else {
stream_subscription_info_elem.text(i18n.t("User is already not subscribed."));
}
stream_subscription_info_elem.addClass('text-success')
.removeClass('text-error');
ui.update_scrollbar($("#subscription_overlay .settings"));
}
function removal_failure() {
stream_subscription_info_elem.text(i18n.t("Error removing user from this stream."))
.addClass("text-error").removeClass("text-success");
}
exports.remove_user_from_stream(principal, sub, removal_success,
removal_failure);
});
// This handler isn't part of the normal edit interface; it's the convenient
// checkmark in the subscriber list.
$("#subscriptions_table").on("click", ".sub_unsub_button", function (e) {
var sub = get_sub_for_target(e.target);
var stream_row = $(this).parent();
subs.sub_or_unsub(sub);
var sub_settings = settings_for_sub(sub);
var regular_sub_settings = sub_settings.find(".regular_subscription_settings");
if (!sub.subscribed) {
regular_sub_settings.addClass("in");
exports.show_stream_row(stream_row, true);
} else {
regular_sub_settings.removeClass("in");
}
setup_subscriptions_stream_hash(sub);
e.preventDefault();
e.stopPropagation();
});
$("#subscriptions_table").on("click", ".deactivate", function (e) {
e.preventDefault();
e.stopPropagation();
var stream_id = get_stream_id(e.target);
if (!stream_id) {
ui_report.message(i18n.t("Invalid stream id"), $(".stream_change_property_info"), 'alert-error');
return;
}
var stream_name = stream_data.maybe_get_stream_name(stream_id);
var deactivate_stream_modal = templates.render("deactivation-stream-modal", {stream_name: stream_name});
$(".subscription_settings").append(deactivate_stream_modal);
overlays.open_modal('deactivation_stream_modal');
});
$("#subscriptions_table").on("click", "#do_deactivate_stream_button", function (e) {
var stream_id = get_stream_id(e.target);
overlays.close_modal('deactivation_stream_modal');
$("#deactivation_stream_modal").remove();
if (!stream_id) {
ui_report.message(i18n.t("Invalid stream id"), $(".stream_change_property_info"), 'alert-error');
return;
}
var row = $(".stream-row.active");
exports.delete_stream(stream_id, $(".stream_change_property_info"), row);
});
$("#subscriptions_table").on("hide.bs.modal", "#deactivation_stream_modal", function () {
$("#deactivation_stream_modal").remove();
});
$("#subscriptions_table").on("click", ".stream-row", function (e) {
if ($(e.target).closest(".check, .subscription_settings").length === 0) {
exports.show_stream_row(this, true);
var stream_id = $(this).attr("data-stream-id");
var sub = stream_data.get_sub_by_id(stream_id);
setup_subscriptions_stream_hash(sub);
}
});
$(document).on('peer_subscribe.zulip', function (e, data) {
var sub = stream_data.get_sub(data.stream_name);
subs.rerender_subscriptions_settings(sub);
});
$(document).on('peer_unsubscribe.zulip', function (e, data) {
var sub = stream_data.get_sub(data.stream_name);
subs.rerender_subscriptions_settings(sub);
});
};
return exports;
}());
if (typeof module !== 'undefined') {
module.exports = stream_edit;
}
window.stream_edit = stream_edit; | var sub = stream_data.get_sub_by_id(stream_id);
var sub_settings = settings_for_sub(sub);
|
either.go | package either
type Either[T any] struct {
x *T
err error
}
func (et *Either[T]) IsOk() bool {
return et.err == nil
}
func (et Either[T]) Unwrap() T {
return *et.x
}
func (et Either[T]) UnwrapErr() error {
return et.err
}
func Ok[T any](x T) Either[T] {
return Either[T]{x: &x, err: nil}
}
func Err[T any](err error) Either[T] {
return Either[T]{x: nil, err: err}
}
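// Fmap applies f to the contained value when et is Ok and wraps the result; otherwise it carries the original error through unchanged.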
func Fmap[T any, R any](et Either[T], f func(x T) R) Either[R] {
if et.IsOk() | else {
return Either[R]{x: nil, err: et.UnwrapErr()}
}
}
| {
y := f(et.Unwrap())
return Either[R]{x: &y, err: nil}
} |
uninstall_test.go | package action
import (
"errors"
"io/ioutil"
"testing"
"github.com/cnabio/cnab-go/claim"
"github.com/cnabio/cnab-go/driver"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// makes sure Uninstall implements Action interface
var _ Action = &Uninstall{}
func TestUninstall_Run(t *testing.T) {
out := func(op *driver.Operation) error {
op.Out = ioutil.Discard
return nil
}
t.Run("happy-path", func(t *testing.T) {
c := newClaim()
uninst := &Uninstall{
Driver: &mockDriver{
shouldHandle: true,
Result: driver.OperationResult{
Outputs: map[string]string{
"/tmp/some/path": "SOME CONTENT",
},
},
Error: nil,
},
}
err := uninst.Run(c, mockSet, out)
assert.NoError(t, err)
assert.NotEqual(t, c.Created, c.Modified, "Claim was not updated with a modified timestamp after the uninstall action")
assert.Equal(t, claim.ActionUninstall, c.Result.Action, "Claim result action not successfully updated.")
assert.Equal(t, claim.StatusSuccess, c.Result.Status, "Claim result status not successfully updated.")
assert.Equal(t, map[string]interface{}{"some-output": "SOME CONTENT"}, c.Outputs)
})
| Result: driver.OperationResult{
Outputs: map[string]string{
"/tmp/some/path": "SOME CONTENT",
},
},
Error: nil,
}
inst := &Uninstall{Driver: d}
addFile := func(op *driver.Operation) error {
op.Files["/tmp/another/path"] = "ANOTHER FILE"
return nil
}
require.NoError(t, inst.Run(c, mockSet, out, addFile))
assert.Contains(t, d.Operation.Files, "/tmp/another/path")
})
t.Run("error case: configure operation", func(t *testing.T) {
c := newClaim()
d := &mockDriver{
shouldHandle: true,
Result: driver.OperationResult{
Outputs: map[string]string{
"/tmp/some/path": "SOME CONTENT",
},
},
Error: nil,
}
inst := &Uninstall{Driver: d}
sabotage := func(op *driver.Operation) error {
return errors.New("oops")
}
require.EqualError(t, inst.Run(c, mockSet, out, sabotage), "oops")
})
t.Run("when there are no outputs in the bundle", func(t *testing.T) {
c := newClaim()
c.Bundle.Outputs = nil
uninst := &Uninstall{
Driver: &mockDriver{
shouldHandle: true,
Result: driver.OperationResult{},
Error: nil,
},
}
err := uninst.Run(c, mockSet, out)
assert.NoError(t, err)
assert.NotEqual(t, c.Created, c.Modified, "Claim was not updated with a modified timestamp after the uninstall action")
assert.Equal(t, claim.ActionUninstall, c.Result.Action, "Claim result action not successfully updated.")
assert.Equal(t, claim.StatusSuccess, c.Result.Status, "Claim result status not successfully updated.")
assert.Empty(t, c.Outputs)
})
t.Run("error case: driver doesn't handle image", func(t *testing.T) {
c := newClaim()
uninst := &Uninstall{Driver: &mockDriver{
Error: errors.New("I always fail"),
shouldHandle: false,
}}
err := uninst.Run(c, mockSet, out)
assert.Error(t, err)
assert.Empty(t, c.Outputs)
})
t.Run("error case: driver does handle image", func(t *testing.T) {
c := newClaim()
uninst := &Uninstall{Driver: &mockDriver{
Result: driver.OperationResult{
Outputs: map[string]string{
"/tmp/some/path": "SOME CONTENT",
},
},
Error: errors.New("I always fail"),
shouldHandle: true,
}}
err := uninst.Run(c, mockSet, out)
assert.Error(t, err)
assert.NotEqual(t, "", c.Result.Message, "Expected error message in claim result message")
assert.Equal(t, claim.ActionUninstall, c.Result.Action)
assert.Equal(t, claim.StatusFailure, c.Result.Status)
assert.Equal(t, map[string]interface{}{"some-output": "SOME CONTENT"}, c.Outputs)
})
} | t.Run("configure operation", func(t *testing.T) {
c := newClaim()
d := &mockDriver{
shouldHandle: true, |
localFromImport.after.py | def | ():
for _ in range(10):
from package.module import foo
foo
# <ref> | func |
ir_attachment.py | # -*- coding: utf-8 -*-
import logging
import psycopg2
from odoo import api, models
logger = logging.getLogger(__name__)
LARGE_OBJECT_LOCATION = "postgresql:lobject"
class IrAttachment(models.Model):
"""Provide storage as PostgreSQL large objects of attachements with filestore location ``postgresql:lobject``.
Works by overriding the storage handling methods of ``ir.attachment``, as intended by the
default implementation. The overrides call :func:`super`, so that this is transparent
for other locations.
"""
_name = "ir.attachment"
_inherit = "ir.attachment"
@api.model
def lobject(self, cr, *args):
return cr._cnx.lobject(*args)
@api.model
def | (self, value, checksum):
"""Write the content in a newly created large object.
:param value: base64 encoded payload
:returns str: object id (will be considered the file storage name)
"""
location = self._storage()
if location != LARGE_OBJECT_LOCATION:
return super(IrAttachment, self)._file_write(value, checksum)
lobj = self.lobject(self.env.cr, 0, "wb") # oid=0 means creation
lobj.write(value.decode("base64"))
oid = lobj.oid
return str(oid)
def _file_delete(self, fname):
filestore = False
try:
oid = long(fname)
except Exception:
filestore = True
if not filestore:
try:
return self.lobject(self.env.cr, oid, "rb").unlink()
except (psycopg2.OperationalError, ValueError):
filestore = True
return super(IrAttachment, self)._file_delete(fname)
def _lobject_read(self, fname, bin_size):
"""Read the large object, base64 encoded.
:param fname: file storage name, must be the oid as a string.
"""
lobj = self.lobject(self.env.cr, long(fname), "rb")
if bin_size:
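# In bin_size mode, seeking to the end yields the object's length instead of reading its payload.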
return lobj.seek(0, 2)
return lobj.read().encode(
"base64"
) # GR TODO it must be possible to read-encode in chunks
@api.depends("store_fname", "db_datas")
def _compute_datas(self):
bin_size = self._context.get("bin_size")
for attach in self:
try:
attach.datas = self._lobject_read(attach.store_fname, bin_size)
except (psycopg2.OperationalError, ValueError):
super(IrAttachment, attach)._compute_datas()
| _file_write |
CreateSpecializationModal.tsx | import { useState } from 'react';
import Container from 'react-bootstrap/Container';
import Form from 'react-bootstrap/Form';
import SheetModal from './SheetModal';
type CreateSpecializationModalProps = {
onCreate(name: string): void;
show: boolean;
onHide(): void;
}
export default function CreateSpecializationModal(props: CreateSpecializationModalProps) {
const [name, setName] = useState('');
function reset() {
setName('');
} |
return (
<SheetModal title='Nova Especialização' onExited={reset} show={props.show} onHide={props.onHide}
applyButton={{ name: 'Criar', onApply: () => props.onCreate(name) }}>
<Container fluid>
<Form.Group controlId='createSpecializationName'>
<Form.Label>Nome</Form.Label>
<Form.Control className='theme-element' value={name}
onChange={ev => setName(ev.currentTarget.value)} />
</Form.Group>
</Container>
</SheetModal>
);
} | |
goyeelight.go | // Package goyeelight - Control the Yeelight LED Bulb with Go
package goyeelight
import (
"bufio"
"encoding/json"
"fmt"
"net"
"time"
)
const timeout = time.Duration(10 * time.Second)
// Yeelight instance.
// Create an instance of Yeelight by using New().
type Yeelight struct {
host, port string
}
type (
// Result struct is used on the standard response message
Result struct {
Status bool `json:"status"`
Data interface{} `json:"data"`
}
// ResponseOk struct is used on the success responses
ResponseOk struct {
ID int `json:"id"`
Result interface{} `json:"result"`
}
// ResponseError struct is used on the error responses
ResponseError struct {
ID int `json:"id"`
Error Error `json:"error"`
}
// Error struct is used on the ResponseError payload
Error struct {
Code int `json:"code"`
Message string `json:"message"`
}
)
// Makes the request
func (y *Yeelight) request(cmd string) string {
conn, err := net.DialTimeout("tcp", y.host+":"+y.port, timeout)
if err != nil {
return result(false, err.Error())
}
conn.SetReadDeadline(time.Now().Add(timeout))
fmt.Fprintf(conn, cmd+"\r\n")
data, err := bufio.NewReader(conn).ReadString('\n')
conn.Close()
if err != nil {
return result(false, err.Error())
}
return response(data)
}
// Handles the response
func response(data string) string {
res := ResponseOk{}
json.Unmarshal([]byte(data), &res)
if res.Result == nil |
// okay
return result(true, res)
}
// Creates a standard response message
func result(status bool, data interface{}) string {
r := Result{Status: status, Data: data}
result, _ := json.Marshal(r)
return string(result)
}
// New returns a new Yeelight instance.
func New(host, port string) *Yeelight {
y := &Yeelight{host: host, port: port}
return y
}
// GetProp method is used to retrieve current property of smart LED.
func (y *Yeelight) GetProp(values string) string {
cmd := `{"id":1,"method":"get_prop","params":[` + values + `]}`
return y.request(cmd)
}
// SetCtAbx method is used to change the color temperature of a smart LED.
func (y *Yeelight) SetCtAbx(value, effect, duration string) string {
cmd := `{"id":2,"method":"set_ct_abx","params":[` + value + `,"` + effect + `",` + duration + `]}`
return y.request(cmd)
}
// SetRGB method is used to change the color RGB of a smart LED.
func (y *Yeelight) SetRGB(value, effect, duration string) string {
cmd := `{"id":3,"method":"set_rgb","params":[` + value + `,"` + effect + `",` + duration + `]}`
return y.request(cmd)
}
// SetHSV method is used to change the color of a smart LED.
func (y *Yeelight) SetHSV(hue, sat, effect, duration string) string {
cmd := `{"id":4,"method":"set_hsv","params":[` + hue + `,` + sat + `,"` + effect + `",` + duration + `]}`
return y.request(cmd)
}
// SetBright method is used to change the brightness of a smart LED.
func (y *Yeelight) SetBright(brightness, effect, duration string) string {
cmd := `{"id":5,"method":"set_bright","params":[` + brightness + `,"` + effect + `",` + duration + `]}`
return y.request(cmd)
}
// SetPower method is used to switch on or off the smart LED (software managed on/off).
func (y *Yeelight) SetPower(power, effect, duration string) string {
cmd := `{"id":6,"method":"set_power","params":["` + power + `","` + effect + `",` + duration + `]}`
return y.request(cmd)
}
// Toogle method is used to toggle the smart LED.
// Note: This method is defined because sometimes user may just want
// to flip the state without knowing the current state.
func (y *Yeelight) Toogle() string {
cmd := `{"id":7,"method":"toggle","params":[]}`
return y.request(cmd)
}
// SetDefault method is used to save current state of smart LED in persistent
// memory. So if user powers off and then powers on the smart LED again (hard power reset),
// the smart LED will show last saved state.
func (y *Yeelight) SetDefault() string {
cmd := `{"id":8,"method":"set_default","params":[]}`
return y.request(cmd)
}
// StartCf method is used to start a color flow. Color flow is a series of smart
// LED visible state changing. It can be brightness changing, color changing or color
// temperature changing. This is the most powerful command. All our recommended scenes,
// e.g. the Sunrise/Sunset effect, are implemented using this method. With the flow expression, the user
// can actually “program” the light effect.
func (y *Yeelight) StartCf(count, action, flowExpression string) string {
cmd := `{"id":9,"method":"start_cf","params":[` + count + `,` + action + `,"` + flowExpression + `"]}`
return y.request(cmd)
}
// StopCf method is used to stop a running color flow.
func (y *Yeelight) StopCf() string {
cmd := `{"id":10,"method":"stop_cf","params":[]}`
return y.request(cmd)
}
// SetScene method is used to set the smart LED directly to specified state.
// If the smart LED is off, then it will turn on the smart LED firstly and then
// apply the specified command.
func (y *Yeelight) SetScene(class, values string) string {
cmd := `{"id":11,"method":"set_scene","params":["` + class + `",` + values + `]}`
fmt.Println(cmd)
return y.request(cmd)
}
// CronAdd method is used to start a timer job on the smart LED.
func (y *Yeelight) CronAdd(t, value string) string {
cmd := `{"id":12,"method":"cron_add","params":[` + t + `,` + value + `]}`
return y.request(cmd)
}
// CronGet method is used to retrieve the setting of the current cron job of the specified type.
func (y *Yeelight) CronGet(t string) string {
cmd := `{"id":13,"method":"cron_get","params":[` + t + `]}`
return y.request(cmd)
}
// CronDel method is used to stop the specified cron job.
func (y *Yeelight) CronDel(t string) string {
cmd := `{"id":14,"method":"cron_del","params":[` + t + `]}`
return y.request(cmd)
}
// SetAdjust method is used to change brightness, CT or color of a smart LED
// without knowing the current value, it's main used by controllers.
func (y *Yeelight) SetAdjust(action, prop string) string {
cmd := `{"id":15,"method":"set_adjust","params":["` + action + `","` + prop + `"]}`
return y.request(cmd)
}
// SetName method is used to name the device. The name will be stored on the
// device and reported in the discovery response. The user can also read the name
// through the “get_prop” method.
func (y *Yeelight) SetName(name string) string {
cmd := `{"id":16,"method":"set_name","params":["` + name + `"]}`
return y.request(cmd)
}
// On method is used to switch on the smart LED
func (y *Yeelight) On() string {
return y.SetPower("on", "smooth", "1000")
}
// Off method is used to switch off the smart LED
func (y *Yeelight) Off() string {
return y.SetPower("off", "smooth", "1000")
}
| {
// error
res := ResponseError{}
json.Unmarshal([]byte(data), &res)
return result(false, res)
} |
TestIntegrate.py | import numpy as np
import scipy.special as sp
from termcolor import colored
import sys
if sys.platform == 'linux':
sys.path.append(r'../lib')
else:
sys.path.append(r'../build/x64/Release')
import NumCpp
####################################################################################
NUM_DECIMALS_ROUND = 1
####################################################################################
def | ():
print(colored('Testing Integration Module', 'magenta'))
print(colored('Testing gauss_legendre', 'cyan'))
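# Strategy for each test below: build a random polynomial, integrate it analytically via numpy's poly1d, and compare with NumCpp's numerical result over a random interval.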
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_gauss_legendre(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing romberg', 'cyan'))
PERCENT_LEEWAY = 0.1
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_romberg(polyC, a, b), NUM_DECIMALS_ROUND)
# romberg is much less accurate so let's give it some leeway
areaLow, areaHigh = np.sort([area * (1 - PERCENT_LEEWAY), area * (1 + PERCENT_LEEWAY)])
if areaLow < areaC < areaHigh:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing simpson', 'cyan'))
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_simpson(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing trapazoidal', 'cyan'))
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_trapazoidal(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
####################################################################################
if __name__ == '__main__':
doTest()
| doTest |
expression.rs | use super::command::{Command, CommandContext};
use super::debugger::Debugger;
use anyhow::{anyhow, Result};
use std::convert::TryInto;
use wasminspect_vm::WasmValue;
pub struct ExpressionCommand {}
impl ExpressionCommand {
pub fn new() -> Self {
Self {}
}
}
use structopt::StructOpt;
#[derive(StructOpt)]
struct Opts {
#[structopt(name = "SYMBOL")]
symbol: String,
}
impl<D: Debugger> Command<D> for ExpressionCommand {
fn name(&self) -> &'static str {
"expression"
}
fn description(&self) -> &'static str {
"Evaluate an expression on the process (only support variable name now)."
}
fn | (&self, debugger: &mut D, context: &CommandContext, args: Vec<&str>) -> Result<()> {
let opts = Opts::from_iter_safe(args)?;
let (insts, next_index) = debugger.instructions()?;
let current_index = if next_index == 0 { 0 } else { next_index - 1 };
let current_inst = insts[current_index].clone();
let argument_count = debugger
.current_frame()
.ok_or(anyhow!("function frame not found"))?
.argument_count;
let locals = debugger.locals();
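// The frame base pointer is stored among the locals at a fixed offset past the call arguments in this debugger's frame layout.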
let rbp = match locals
.get(argument_count + 2)
.ok_or(anyhow!("failed to get rbp"))?
{
WasmValue::I32(v) => v,
x => return Err(anyhow!("invalid type rbp: '{:?}'", x)),
};
context.subroutine.display_variable(
current_inst.offset,
TryInto::<u32>::try_into(*rbp)?,
&debugger.memory()?,
opts.symbol,
)?;
Ok(())
}
}
| run |
driver_arguments.py | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def | (self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_swiftpm = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# --tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require a '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], store_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], store_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], store_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftevolve'], store_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-pythonkit'], toggle_true('install_pythonkit'),
help='install PythonKit')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
store_true('install_playgroundsupport'),
help='install playground support')
option('--pythonkit', store_true('build_pythonkit'),
help='build PythonKit')
option('--tensorflow-swift-apis', store_true('build_tensorflow_swift_apis'),
help='build TensorFlow Swift APIs')
option('--install-tensorflow-swift-apis',
store_true('install_tensorflow_swift_apis'),
help='install TensorFlow Swift APIs')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], store_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
' SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
option('--test-pythonkit', toggle_true('test_pythonkit'),
help='test PythonKit')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build external benchmarks')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects, especially if you
intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
| parse_known_args |
sidebar.module.ts | import {NgModule} from '@angular/core';
import {CommonModule} from '@angular/common';
import {FuiSidebar} from './components/sidebar';
import {FuiSidebarContainer} from './components/sidebar-container';
import {FuiSidebarSibling} from './components/sidebar-sibling';
@NgModule({
imports: [
CommonModule
],
declarations: [
FuiSidebar,
FuiSidebarContainer,
FuiSidebarSibling
],
exports: [
FuiSidebar,
FuiSidebarContainer,
FuiSidebarSibling
]
})
export class | {
}
| FuiSidebarModule |
redis.js | "use strict"; | function RedisDatabase(options) {
this.options = options;
if (options.databaseConfig.redis.nodes && options.databaseConfig.redis.nodes.length != 0) {
this._redis = new Redis.Cluster(options.databaseConfig.redis.nodes, options.databaseConfig.redis.options);
}
else {
this._redis = new Redis(options.databaseConfig.redis);
}
}
RedisDatabase.prototype.get = function (key) {
var _this = this;
return new Promise(function (resolve, reject) {
_this._redis.get(key).then(function (value) { return resolve(JSON.parse(value)); });
});
};
RedisDatabase.prototype.set = function (key, value) {
this._redis.set(key, JSON.stringify(value));
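    // When publishPresence is enabled, every write to a "presence-*:members" key
    // re-broadcasts the full member list on the 'PresenceChannelUpdated' channel so
    // other processes can react to presence changes.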
if (this.options.databaseConfig.publishPresence === true && /^presence-.*:members$/.test(key)) {
this._redis.publish('PresenceChannelUpdated', JSON.stringify({
"event": {
"channel": key,
"members": value
}
}));
}
};
return RedisDatabase;
}());
exports.RedisDatabase = RedisDatabase; | Object.defineProperty(exports, "__esModule", { value: true });
var Redis = require('ioredis');
var RedisDatabase = (function () { |
qcompilation.py | import qtm.base
import qtm.optimizer
import qtm.loss
import qtm.utilities
import numpy as np
import typing, types
import qiskit
import matplotlib.pyplot as plt
class QuantumCompilation():
def __init__(self) -> None:
self.u = None
self.vdagger = None
self.is_trained = False
self.optimizer = None
self.loss_func = None
self.thetas = None
self.thetass = []
self.loss_values = []
self.fidelities = []
self.traces = []
self.kwargs = None
return
def __init__(self, u: typing.Union[types.FunctionType, qiskit.QuantumCircuit], vdagger: typing.Union[types.FunctionType, qiskit.QuantumCircuit], optimizer: typing.Union[types.FunctionType, str], loss_func: typing.Union[types.FunctionType, str], thetas: np.ndarray = np.array([]), **kwargs):
"""_summary_
Args:
- u (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): In the quantum state preparation problem, this is the ansatz. In tomography, this is the circuit that generates a random Haar state.
- vdagger (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): In the quantum tomography problem, this is the ansatz. In state preparation, this is the circuit that generates a random Haar state.
- optimizer (typing.Union[types.FunctionType, str]): Either a function or a string. If a string, one of the built-in optimizers is used: 'sgd', 'adam', 'qng-fubini-study', 'qng-qfim', 'qng-adam'.
- loss_func (typing.Union[types.FunctionType, str]): Either a function or a string. If a string, one of the built-in loss functions is used: 'loss-basic' (1 - p0) or 'loss-fubini-study' (\sqrt{1 - p0}).
- thetas (np.ndarray, optional): initial parameters. Note that it must fit with your ansatz. Defaults to np.array([]).
"""
self.set_u(u)
self.set_vdagger(vdagger)
self.set_optimizer(optimizer)
self.set_loss_func(loss_func)
self.set_kwargs(**kwargs)
self.set_thetas(thetas)
return
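# A minimal usage sketch (my_ansatz, haar_state_circuit and the num_layers kwarg are
# hypothetical placeholders; only the constructor arguments documented above are assumed):
#
#     compiler = QuantumCompilation(
#         u=my_ansatz,                 # callable: thetas -> qiskit.QuantumCircuit
#         vdagger=haar_state_circuit,  # qiskit.QuantumCircuit or callable
#         optimizer='adam',
#         loss_func='loss-fubini-study',
#         thetas=np.random.uniform(0, 2 * np.pi, 10),
#         num_layers=2)                # extra kwargs are forwarded to u / vdagger
#     compiler.fit(num_steps=100, verbose=1)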
def set_u(self, _u: typing.Union[types.FunctionType, qiskit.QuantumCircuit]):
"""In quantum state preparation problem, this is the ansatz. In tomography, this is the circuit that generate random Haar state.
Args:
- _u (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): init circuit
"""
if callable(_u) or isinstance(_u, qiskit.QuantumCircuit):
self.u = _u
else:
raise ValueError('The U part must be a function f: thetas -> qiskit.QuantumCircuit or a determined quantum circuit')
return
def set_vdagger(self, _vdagger):
"""In quantum state tomography problem, this is the ansatz. In state preparation, this is the circuit that generate random Haar state.
Args:
- _vdagger (typing.Union[types.FunctionType, qiskit.QuantumCircuit]): init circuit
"""
if callable(_vdagger) or isinstance(_vdagger, qiskit.QuantumCircuit):
self.vdagger = _vdagger
else:
raise ValueError('The V dagger part must be a function f: thetas -> qiskit.QuantumCircuit or a determined quantum circuit')
return
def set_loss_func(self, _loss_func: typing.Union[types.FunctionType, str]):
"""Set the loss function for compiler
Args:
- _loss_func (typing.Union[types.FunctionType, str])
Raises:
ValueError: when you pass wrong type
"""
if callable(_loss_func):
self.loss_func = _loss_func
elif isinstance(_loss_func, str):
if _loss_func == 'loss-basic':
self.loss_func = qtm.loss.loss_basis
elif _loss_func == 'loss-fubini-study':
self.loss_func = qtm.loss.loss_fubini_study
else:
raise ValueError('The loss function must be a function f: measurement value -> loss value or a string in ["loss-basic", "loss-fubini-study"]')
return
def set_optimizer(self, _optimizer: typing.Union[types.FunctionType, str]):
"""Change the optimizer of the compiler
Args:
- _optimizer (typing.Union[types.FunctionType, str])
Raises:
ValueError: when you pass a value of the wrong type
"""
if callable(_optimizer):
self.optimizer = _optimizer
elif isinstance(_optimizer,str):
if _optimizer == 'sgd':
self.optimizer = qtm.optimizer.sgd
elif _optimizer == 'adam':
self.optimizer = qtm.optimizer.adam
elif _optimizer == 'qng-fubini-study':
self.optimizer = qtm.optimizer.qng_fubini_study
elif _optimizer == 'qng-qfim':
self.optimizer = qtm.optimizer.qng_qfim
elif _optimizer == 'qng-adam':
self.optimizer = qtm.optimizer.qng_adam
else:
raise ValueError('The optimizer must be a function f: thetas -> thetas or a string in ["sgd", "adam", "qng-qfim", "qng-fubini-study", "qng-adam"]')
return
def set_num_step(self, _num_step: int):
"""Set the number of iteration for compiler
Args:
- _num_step (int): number of iterations
Raises:
ValueError: when you pass a nasty value
"""
if _num_step > 0 and isinstance(_num_step, int):
self.num_step = _num_step
else:
raise ValueError('Number of iterations must be a positive integer, such as 10 or 100.')
return
def set_thetas(self, _thetas: np.ndarray):
"""Set parameter, it will be updated at each iteration
Args:
_thetas (np.ndarray): parameter for u or vdagger
"""
if isinstance(_thetas, np.ndarray):
self.thetas = _thetas
else:
raise ValueError('The parameter must be numpy array')
return
def set_kwargs(self, **kwargs):
"""Arguments supported for u or vdagger only. Ex: number of layer
"""
self.__dict__.update(**kwargs)
self.kwargs = kwargs
return
def fit(self, num_steps: int = 100, verbose: int = 0):
"""Optimize the thetas parameters
Args:
- num_steps: number of iterations
- verbose (int, optional): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per 10 steps. Verbose 1 is good for timing the training, verbose 2 is useful if you want to log loss values to a file. Please install the tqdm package if you want to use verbose 1.
"""
self.thetass, self.loss_values = qtm.base.fit(
self.u, self.vdagger, self.thetas, num_steps, self.loss_func, self.optimizer, verbose, is_return_all_thetas=True, **self.kwargs)
self.is_trained = True
if callable(self.u):
self.traces, self.fidelities = qtm.utilities.calculate_state_preparation_metrics(self.u, self.vdagger, self.thetass, **self.kwargs)
else:
self.traces, self.fidelities = qtm.utilities.calculate_state_tomography_metrics(self.u, self.vdagger, self.thetass, **self.kwargs)
return
def save(self, metric: str = "", text = "", path = './', save_all: bool = False):
|
def reset(self):
"""Delete all current property of compiler
"""
self.u = None
self.vdagger = None
self.is_trained = False
self.optimizer = None
self.loss_func = None
self.num_step = 0
self.thetas = None
self.thetass = []
self.loss_values = []
return
| """_summary_
Args:
- metric (str): one of 'thetas', 'fidelity', 'trace' or 'loss_value'
- text (str, optional): additional file name suffix. Defaults to ''.
- path (str, optional): Defaults to './'.
- save_all (bool, optional): Save thetass, fidelity, trace and loss_value if save_all = True
Raises:
ValueError: if save_all = False and metric is not one of the supported values.
"""
if save_all:
np.savetxt(path + "/thetass" + text + ".csv", self.thetass, delimiter=",")
np.savetxt(path + "/fidelities"+ text + ".csv", self.fidelities, delimiter=",")
np.savetxt(path + "/traces" + text + ".csv", self.traces, delimiter=",")
np.savetxt(path + "/loss_values" + text + ".csv", self.loss_values, delimiter=",")
else:
if metric == 'thetas':
np.savetxt(path + "/thetass" + text + ".csv", self.thetass, delimiter=",")
elif metric == 'fidelity':
np.savetxt(path + "/fidelities" + text + ".csv", self.fidelities, delimiter=",")
elif metric == 'trace':
np.savetxt(path + "/traces" + text + ".csv", self.traces, delimiter=",")
elif metric == 'loss_value':
np.savetxt(path + "/loss_values" + text + ".csv", self.loss_values, delimiter=",")
else:
raise ValueError('The metric must be thetas, fidelity, trace or loss_value')
print("Saved " + metric + " at " + path)
return |
models.py | from django.db import models
from django.utils import timezone
from users.models import CustomUser
from primaseru import choices
class StudentStatus(models.Model):
| student = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
accepted = models.BooleanField('Diterima', db_index=True, null=True)
major = models.CharField('Diterima dijurusan', choices=choices.MAJOR, max_length=4, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f'{self.student} Status' |
|
ProfileBanner.js | import React, { useEffect, useState } from "react";
import styled from "styled-components";
import {
getUserBio,
getUserImg,
isUserLoggedIn,
} from "../../features/authentication/authSlice.js";
import { useSelector } from "react-redux";
import {
faCog,
faPlus,
faCheckCircle,
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { useHistory } from "react-router-dom";
import { CheckFollowing } from "../../services/profiles";
import { Follow } from "../../services/profiles";
import { UnFollow } from "../../services/profiles";
function | ({ username, LoggedInUsername }) {
const bio = useSelector(getUserBio);
const [isFollowing, setIsFollowing] = useState(false);
const history = useHistory();
const isLoggedIn = useSelector(isUserLoggedIn);
const image =
useSelector(getUserImg) ||
"https://static.productionready.io/images/smiley-cyrus.jpg";
const goToSettings = () => {
history.push("/settings");
};
useEffect(() => {
const initializeState = async () => {
const res = await CheckFollowing(username);
if (res?.profile) setIsFollowing(res.profile.following);
};
initializeState();
}, []);
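// Following requires an authenticated session (otherwise redirect to /signin);
// unfollowing simply calls the profiles service and mirrors the returned state.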
const followUser = async () => {
if (isLoggedIn === true) {
const profile = await Follow(username);
setIsFollowing(profile.profile.following);
} else {
history.push("/signin");
}
};
const UnfollowUser = async () => {
const profile = await UnFollow(username);
setIsFollowing(profile.profile.following);
};
return (
<BannerContainer>
<ProfileDetails>
<img src={image} alt="profile" />
<h3>{username}</h3>
<p>{bio}</p>
</ProfileDetails>
{LoggedInUsername !== username ? (
<button onClick={isFollowing ? UnfollowUser : followUser}>
<FontAwesomeIcon icon={isFollowing ? faCheckCircle : faPlus} />
{isFollowing ? <span>Unfollow</span> : <span>Follow</span>}
</button>
) : (
<button onClick={goToSettings}>
<span>
<FontAwesomeIcon icon={faCog} />
</span>
<span> Edit Profile Settings</span>
</button>
)}
</BannerContainer>
);
}
const BannerContainer = styled.div`
background: #f3f3f3;
padding-top: 1.5rem;
> button {
display: inline-block;
position: absolute;
right: 10vw;
cursor: pointer;
padding: 0.25rem 0.5rem;
font-size: 0.875rem;
border-radius: 0.2rem;
border: none;
color: #999;
border: 1px solid #999;
z-index: 0;
}
`;
const ProfileDetails = styled.div`
display: flex;
flex-direction: column;
place-items: center;
> h3 {
margin-bottom: 0px;
}
> p {
margin-top: 0px;
}
> img {
width: 100px;
height: 100px;
border-radius: 100px;
}
`;
export default ProfileBanner;
| ProfileBanner |
legacy.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"strconv"
"github.com/hyperledger/fabric-chaincode-go/shim"
pb "github.com/hyperledger/fabric-protos-go/peer"
)
// SimpleChaincode example simple Chaincode implementation
type SimpleChaincode struct {
}
// Init - initialize the state
func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {
fmt.Printf("########### example_cc0 Init ###########\n")
_, args := stub.GetFunctionAndParameters()
var A, B string // Entities
var Aval, Bval int // Asset holdings
var err error
if len(args) != 4 {
return shim.Error("Incorrect number of arguments. Expecting 4")
}
// Initialize the chaincode
A = args[0]
Aval, err = strconv.Atoi(args[1])
if err != nil {
return shim.Error("Expecting integer value for asset holding")
}
B = args[2]
Bval, err = strconv.Atoi(args[3])
if err != nil {
return shim.Error("Expecting integer value for asset holding")
}
fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
// Write the state to the ledger
err = stub.PutState(A, []byte(strconv.Itoa(Aval)))
if err != nil {
return shim.Error(err.Error())
}
err = stub.PutState(B, []byte(strconv.Itoa(Bval)))
if err != nil |
fmt.Printf("########### example_cc0 Init Complete###########\n")
return shim.Success(nil)
}
// Invoke - Transaction makes payment of X units from A to B
func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {
fmt.Printf("########### example_cc0 Invoke ###########")
function, args := stub.GetFunctionAndParameters()
fmt.Printf("\nfunction: ", function)
fmt.Printf("\nargs: ", args)
if function == "delete" {
// Deletes an entity from its state
return t.delete(stub, args)
}
if function == "query" {
// queries an entity state
return t.query(stub, args)
}
if function == "throwError" {
return t.throwError(stub, args)
}
if function == "move" {
		// Transfers X units from A to B
return t.move(stub, args)
}
if function == "init" {
		// Re-initializes the chaincode state
return t.Init(stub)
}
fmt.Printf("Unknown action, check the first argument, must be one of 'delete', 'query', or 'move'. But got: %v", function)
return shim.Error(fmt.Sprintf("Unknown action, check the first argument, must be one of 'delete', 'query', or 'move'. But got: %v", function))
}
func (t *SimpleChaincode) move(stub shim.ChaincodeStubInterface, args []string) pb.Response {
// must be an invoke
var A, B string // Entities
var Aval, Bval int // Asset holdings
var X int // Transaction value
var err error
if len(args) != 3 {
return shim.Error("Incorrect number of arguments. Expecting 4, function followed by 2 names and 1 value")
}
A = args[0]
B = args[1]
// Get the state from the ledger
// TODO: will be nice to have a GetAllState call to ledger
Avalbytes, err := stub.GetState(A)
if err != nil {
return shim.Error("Failed to get state")
}
if Avalbytes == nil {
return shim.Error("Entity not found")
}
Aval, _ = strconv.Atoi(string(Avalbytes))
Bvalbytes, err := stub.GetState(B)
if err != nil {
return shim.Error("Failed to get state")
}
if Bvalbytes == nil {
return shim.Error("Entity not found")
}
Bval, _ = strconv.Atoi(string(Bvalbytes))
// Perform the execution
X, err = strconv.Atoi(args[2])
if err != nil {
return shim.Error("Invalid transaction amount, expecting a integer value")
}
Aval = Aval - X
Bval = Bval + X
fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
// Write the state back to the ledger
err = stub.PutState(A, []byte(strconv.Itoa(Aval)))
if err != nil {
return shim.Error(err.Error())
}
err = stub.PutState(B, []byte(strconv.Itoa(Bval)))
if err != nil {
return shim.Error(err.Error())
}
return shim.Success([]byte("move succeed"))
}
// Deletes an entity from state
func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
A := args[0]
// Delete the key from the state in ledger
err := stub.DelState(A)
if err != nil {
return shim.Error("Failed to delete state")
}
return shim.Success(nil)
}
// Query callback representing the query of a chaincode
func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response {
var A string // Entities
var err error
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting name of the person to query")
}
A = args[0]
// Get the state from the ledger
Avalbytes, err := stub.GetState(A)
if err != nil {
jsonResp := "{\"Error\":\"Failed to get state for " + A + "\"}"
return shim.Error(jsonResp)
}
if Avalbytes == nil {
jsonResp := "{\"Error\":\"Nil amount for " + A + "\"}"
return shim.Error(jsonResp)
}
jsonResp := "{\"Name\":\"" + A + "\",\"Amount\":\"" + string(Avalbytes) + "\"}"
fmt.Printf("Query Response:%s\n", jsonResp)
return shim.Success(Avalbytes)
}
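// throwError always returns an error response; it exists to exercise error-handling paths in callers.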
func (t *SimpleChaincode) throwError(stub shim.ChaincodeStubInterface, args []string) pb.Response {
err := fmt.Errorf("throwError: an error occurred")
return shim.Error(err.Error())
}
func main() {
err := shim.Start(new(SimpleChaincode))
if err != nil {
fmt.Printf("Error starting Simple chaincode: %s", err)
}
}
| {
return shim.Error(err.Error())
} |
local_rootNotDirectory.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that attempts to create local filesystems with roots that are not directories fail as expected
"""
def te | :
# support
import pyre.primitives
# my package
import pyre.filesystem
# make a path out of a regular file
dummy = pyre.primitives.path("./local_rootNotDirectory.py")
# attempt to
try:
# mount a filesystem there
pyre.filesystem.local(root=dummy)
# which should fail so we can't reach here
assert False
# if it fails as expected
except pyre.filesystem.MountPointError as error:
# check that the error message is correct
assert str(error) == (
"error while mounting '{}': invalid mount point".format(dummy.resolve()))
# all done
return
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
| st() |
views.py | from flask import Blueprint, render_template, redirect, url_for, flash, jsonify
from sqlalchemy import exc
from application import db
from application.routes.leads.models import Lead
from application.routes.leads.forms import AddLeadForm
leads = Blueprint("leads", __name__)
@leads.route("/")
def index():
return render_template("leads_index.html", leads=Lead.query.all())
@leads.route("/add", methods=['GET', 'POST'])
def add():
|
@leads.route("/json/names")
def json_names():
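    # Return every lead as a "#id - name <email> {company}" string in a JSON array.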
names = tuple("#%d - %s <%s> {%s}" % (int(lead.id), lead.name, lead.email, lead.company) for lead in Lead.query.all())
return jsonify(names)
| form = AddLeadForm()
if form.validate_on_submit():
print(form)
item = Lead(**form.to_dict())
db.session.add(item)
try:
db.session.commit()
except exc.IntegrityError as e:
flash("Lead already exists for this email.")
print(e)
except exc.SQLAlchemyError as e:
flash("An unknown error occurred while adding Lead.")
print(e)
else:
return redirect(url_for("leads.index"))
elif form.errors:
flash(form.errors)
return render_template("leads_add.html", form=form) |
service.py | # -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
try:
from pyarrow import plasma
except ImportError: # pragma: no cover
plasma = None
from ..config import options
from .. import resource
from ..utils import parse_readable_size, readable_size
from ..compat import six
from .status import StatusActor
from .quota import QuotaActor, MemQuotaActor
from .chunkholder import ChunkHolderActor
from .dispatcher import DispatchActor
from .events import EventsActor
from .execution import ExecutionActor
from .calc import CpuCalcActor
from .transfer import ReceiverActor, SenderActor
from .prochelper import ProcessHelperActor
from .transfer import ResultSenderActor
from .spill import SpillActor
from .utils import WorkerClusterInfoActor
logger = logging.getLogger(__name__)
class WorkerService(object):
def | (self, **kwargs):
self._plasma_store = None
self._chunk_holder_ref = None
self._task_queue_ref = None
self._mem_quota_ref = None
self._dispatch_ref = None
self._events_ref = None
self._status_ref = None
self._execution_ref = None
self._daemon_ref = None
self._cluster_info_ref = None
self._cpu_calc_actors = []
self._sender_actors = []
self._receiver_actors = []
self._spill_actors = []
self._process_helper_actors = []
self._result_sender_ref = None
self._advertise_addr = kwargs.pop('advertise_addr', None)
self._n_cpu_process = int(kwargs.pop('n_cpu_process', None) or resource.cpu_count())
self._n_net_process = int(kwargs.pop('n_net_process', None) or '1')
self._spill_dirs = kwargs.pop('spill_dirs', None)
if self._spill_dirs:
if isinstance(self._spill_dirs, six.string_types):
from .spill import parse_spill_dirs
self._spill_dirs = options.worker.spill_directory = parse_spill_dirs(self._spill_dirs)
else:
options.worker.spill_directory = self._spill_dirs
else:
self._spill_dirs = options.worker.spill_directory = []
options.worker.disk_compression = kwargs.pop('disk_compression', None) or \
options.worker.disk_compression
options.worker.transfer_compression = kwargs.pop('transfer_compression', None) or \
options.worker.transfer_compression
self._total_mem = kwargs.pop('total_mem', None)
self._cache_mem_limit = kwargs.pop('cache_mem_limit', None)
self._soft_mem_limit = kwargs.pop('soft_mem_limit', None) or '80%'
self._hard_mem_limit = kwargs.pop('hard_mem_limit', None) or '90%'
self._ignore_avail_mem = kwargs.pop('ignore_avail_mem', None) or False
self._min_mem_size = kwargs.pop('min_mem_size', None) or 128 * 1024 ** 2
self._soft_quota_limit = self._soft_mem_limit
self._calc_memory_limits()
if kwargs: # pragma: no cover
raise TypeError('Keyword arguments %r cannot be recognized.' % ', '.join(kwargs))
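    # Total process count: one main process, plus the CPU calc processes, the network
    # transfer processes, and one extra process when spill directories are configured.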
@property
def n_process(self):
return 1 + self._n_cpu_process + self._n_net_process + (1 if self._spill_dirs else 0)
def _calc_memory_limits(self):
def _calc_size_limit(limit_str, total_size):
if limit_str is None:
return None
if isinstance(limit_str, int):
return limit_str
mem_limit, is_percent = parse_readable_size(limit_str)
if is_percent:
return int(total_size * mem_limit)
else:
return int(mem_limit)
mem_stats = resource.virtual_memory()
if self._total_mem:
self._total_mem = _calc_size_limit(self._total_mem, mem_stats.total)
else:
self._total_mem = mem_stats.total
self._min_mem_size = _calc_size_limit(self._min_mem_size, self._total_mem)
self._hard_mem_limit = _calc_size_limit(self._hard_mem_limit, self._total_mem)
self._cache_mem_limit = _calc_size_limit(self._cache_mem_limit, self._total_mem)
if self._cache_mem_limit is None:
self._cache_mem_limit = mem_stats.free // 2
self._soft_mem_limit = _calc_size_limit(self._soft_mem_limit, self._total_mem)
actual_used = self._total_mem - mem_stats.available
if self._ignore_avail_mem:
self._soft_quota_limit = self._soft_mem_limit
else:
self._soft_quota_limit = self._soft_mem_limit - self._cache_mem_limit - actual_used
if self._soft_quota_limit < self._min_mem_size:
raise MemoryError('Memory not enough. soft_limit=%s, cache_limit=%s, used=%s' %
tuple(readable_size(k) for k in (
self._soft_mem_limit, self._cache_mem_limit, actual_used)))
logger.info('Setting soft limit to %s.', readable_size(self._soft_quota_limit))
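    # Launch the plasma object store sized to the cache memory limit and record its
    # socket path in the worker options.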
def start_plasma(self):
self._plasma_store = plasma.start_plasma_store(self._cache_mem_limit)
options.worker.plasma_socket, _ = self._plasma_store.__enter__()
def start(self, endpoint, pool, distributed=True, discoverer=None, process_start_index=0):
# create plasma key mapper
from .chunkstore import PlasmaKeyMapActor
pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
# create WorkerClusterInfoActor
self._cluster_info_ref = pool.create_actor(
WorkerClusterInfoActor, discoverer, uid=WorkerClusterInfoActor.default_uid())
if distributed:
# create process daemon
from .daemon import WorkerDaemonActor
actor_holder = self._daemon_ref = pool.create_actor(
WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
# create StatusActor
if ':' not in self._advertise_addr:
self._advertise_addr += ':' + endpoint.rsplit(':', 1)[-1]
self._status_ref = pool.create_actor(
StatusActor, self._advertise_addr, uid=StatusActor.default_uid())
else:
# create StatusActor
self._status_ref = pool.create_actor(
StatusActor, endpoint, uid=StatusActor.default_uid())
actor_holder = pool
if self._ignore_avail_mem:
# start a QuotaActor instead of MemQuotaActor to avoid memory size detection
# for debug purpose only, DON'T USE IN PRODUCTION
self._mem_quota_ref = pool.create_actor(
QuotaActor, self._soft_mem_limit, uid=MemQuotaActor.default_uid())
else:
self._mem_quota_ref = pool.create_actor(
MemQuotaActor, self._soft_quota_limit, self._hard_mem_limit, uid=MemQuotaActor.default_uid())
# create ChunkHolderActor
self._chunk_holder_ref = pool.create_actor(
ChunkHolderActor, self._cache_mem_limit, uid=ChunkHolderActor.default_uid())
# create DispatchActor
self._dispatch_ref = pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
# create EventsActor
self._events_ref = pool.create_actor(EventsActor, uid=EventsActor.default_uid())
# create ExecutionActor
self._execution_ref = pool.create_actor(ExecutionActor, uid=ExecutionActor.default_uid())
# create CpuCalcActor
if not distributed:
self._n_cpu_process = pool.cluster_info.n_process - 1 - process_start_index
for cpu_id in range(self._n_cpu_process):
uid = 'w:%d:mars-calc-%d-%d' % (cpu_id + 1, os.getpid(), cpu_id)
actor = actor_holder.create_actor(CpuCalcActor, uid=uid)
self._cpu_calc_actors.append(actor)
start_pid = 1 + process_start_index + self._n_cpu_process
if distributed:
# create SenderActor and ReceiverActor
for sender_id in range(self._n_net_process):
uid = 'w:%d:mars-sender-%d-%d' % (start_pid + sender_id, os.getpid(), sender_id)
actor = actor_holder.create_actor(SenderActor, uid=uid)
self._sender_actors.append(actor)
# Mutable requires ReceiverActor (with LocalClusterSession)
for receiver_id in range(2 * self._n_net_process):
uid = 'w:%d:mars-receiver-%d-%d' % (start_pid + receiver_id // 2, os.getpid(), receiver_id)
actor = actor_holder.create_actor(ReceiverActor, uid=uid)
self._receiver_actors.append(actor)
# create ProcessHelperActor
for proc_id in range(pool.cluster_info.n_process - process_start_index):
uid = 'w:%d:mars-process-helper-%d-%d' % (proc_id, os.getpid(), proc_id)
actor = actor_holder.create_actor(ProcessHelperActor, uid=uid)
self._process_helper_actors.append(actor)
# create ResultSenderActor
self._result_sender_ref = pool.create_actor(ResultSenderActor, uid=ResultSenderActor.default_uid())
# create SpillActor
start_pid = pool.cluster_info.n_process - 1
if options.worker.spill_directory:
for spill_id in range(len(options.worker.spill_directory) * 2):
uid = 'w:%d:mars-spill-%d-%d' % (start_pid, os.getpid(), spill_id)
actor = actor_holder.create_actor(SpillActor, uid=uid)
self._spill_actors.append(actor)
# worker can be registered when everything is ready
self._status_ref.enable_status_upload(_tell=True)
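    # Restart halted sub-processes and let the daemon actor recover the actors they hosted.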
def handle_process_down(self, pool, proc_indices):
logger.debug('Process %r halt. Trying to recover.', proc_indices)
for pid in proc_indices:
pool.restart_process(pid)
self._daemon_ref.handle_process_down(proc_indices, _tell=True)
def stop(self):
try:
if self._result_sender_ref:
self._result_sender_ref.destroy(wait=False)
if self._status_ref:
self._status_ref.destroy(wait=False)
if self._chunk_holder_ref:
self._chunk_holder_ref.destroy(wait=False)
if self._events_ref:
self._events_ref.destroy(wait=False)
if self._dispatch_ref:
self._dispatch_ref.destroy(wait=False)
if self._execution_ref:
self._execution_ref.destroy(wait=False)
for actor in (self._cpu_calc_actors + self._sender_actors
+ self._receiver_actors + self._spill_actors + self._process_helper_actors):
actor.destroy(wait=False)
finally:
self._plasma_store.__exit__(None, None, None)
| __init__ |
icon-digital-library-regular.tsx | /*
* This file was autogenerated. Don't edit this file!
*
* To update, execute "yarn start" inside "import-mistica-icons"
*/
import * as React from 'react';
import {useTheme} from '../../hooks';
import {useIsInverseVariant} from '../../theme-variant-context';
import type {IconProps} from '../../utils/types';
const IconDigitalLibraryRegular: React.FC<IconProps> = ({color, size = 24, children, ...rest}) => {
const {colors} = useTheme();
const isInverse = useIsInverseVariant();
const fillColor = color ?? (isInverse ? colors.inverse : colors.neutralHigh);
return (
<svg width={size} height={size} viewBox="0 0 24 24" role="presentation" {...rest}>
<path
d="M13.266 17.677a13.95 13.95 0 00-4.428.958v-7.149c1.66-.81 3.523-.927 4.428-.93v7.12zm-10.059-7.12c.905.002 2.762.117 4.429.93v7.148a13.95 13.95 0 00-4.429-.958v-7.12zM5.79 7.03c0-.664.157-1.163.462-1.485.317-.33.818-.501 1.49-.501l10.79-.006c.672 0 1.177.168 1.49.501.306.323.46.821.46 1.485v5.224c0 .667-.154 1.171-.462 1.499-.317.339-.818.51-1.488.51l-4.064.002V9.954a.614.614 0 00-.555-.619c-.129-.008-3.075-.23-5.68 1.078a9.927 9.927 0 00-2.443-.831V7.03zm12.745 8.467c1.008 0 1.801-.3 2.356-.89.53-.566.798-1.362.798-2.359V7.024c0-1-.272-1.79-.804-2.35-.552-.58-1.342-.874-2.353-.874l-10.79.006c-1.01 0-1.8.294-2.353.874-.532.563-.804 1.352-.804 2.352v2.362a11.704 11.704 0 00-2.03-.056.616.616 0 00-.555.619v8.33a.61.61 0 00.602.62c3.031 0 5.33 1.215 5.353 1.226a.57.57 0 00.277.07c.017 0 .031-.01.045-.014a.572.572 0 00.236-.058c.022-.012 2.305-1.227 5.353-1.227a.61.61 0 00.602-.62v-.77h6.616a.61.61 0 00.602-.619.61.61 0 00-.602-.619h-6.616V15.5l4.067-.003z"
fill={fillColor}
/>
</svg>
);
};
export default IconDigitalLibraryRegular; | ||
aggregate_test.go | // Copyright 2021 VMware, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intermediate
import (
"container/heap"
"net"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/vmware/go-ipfix/pkg/entities"
"github.com/vmware/go-ipfix/pkg/registry"
)
var (
fields = []string{
"sourcePodName",
"sourcePodNamespace",
"sourceNodeName",
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
"destinationClusterIPv4",
"destinationClusterIPv6",
"destinationServicePort",
"ingressNetworkPolicyRuleAction",
"egressNetworkPolicyRuleAction",
"ingressNetworkPolicyRulePriority",
}
nonStatsElementList = []string{
"flowEndSeconds",
"flowEndReason",
"tcpState",
}
statsElementList = []string{
"packetTotalCount",
"packetDeltaCount",
"octetTotalCount",
"reversePacketTotalCount",
"reversePacketDeltaCount",
"reverseOctetTotalCount",
}
antreaSourceStatsElementList = []string{
"packetTotalCountFromSourceNode",
"packetDeltaCountFromSourceNode",
"octetTotalCountFromSourceNode",
"reversePacketTotalCountFromSourceNode",
"reversePacketDeltaCountFromSourceNode",
"reverseOctetTotalCountFromSourceNode",
}
antreaDestinationStatsElementList = []string{
"packetTotalCountFromDestinationNode",
"packetDeltaCountFromDestinationNode",
"octetTotalCountFromDestinationNode",
"reversePacketTotalCountFromDestinationNode",
"reversePacketDeltaCountFromDestinationNode",
"reverseOctetTotalCountFromDestinationNode",
}
antreaFlowEndSecondsElementList = []string{
"flowEndSecondsFromSourceNode",
"flowEndSecondsFromDestinationNode",
}
antreaThroughputElementList = []string{
"throughput",
"reverseThroughput",
}
antreaSourceThroughputElementList = []string{
"throughputFromSourceNode",
"reverseThroughputFromSourceNode",
}
antreaDestinationThroughputElementList = []string{
"throughputFromDestinationNode",
"reverseThroughputFromDestinationNode",
}
)
func init() {
registry.LoadRegistry()
MaxRetries = 1
MinExpiryTime = 0
}
const (
testTemplateID = uint16(256)
testActiveExpiry = 100 * time.Millisecond
testInactiveExpiry = 150 * time.Millisecond
testMaxRetries = 2
)
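// createMsgwithTemplateSet builds an IPFIX message carrying a template set with the
// test flow elements, using IPv4 or IPv6 address elements depending on isIPv6.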
func createMsgwithTemplateSet(isIPv6 bool) *entities.Message {
set := entities.NewSet(true)
set.PrepareSet(entities.Template, testTemplateID)
elements := make([]entities.InfoElementWithValue, 0)
ie3 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("sourceTransportPort", 7, 2, 0, 2), 0)
ie4 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationTransportPort", 11, 2, 0, 2), 0)
ie5 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("protocolIdentifier", 4, 1, 0, 1), 0)
ie6 := entities.NewStringInfoElement(entities.NewInfoElement("sourcePodName", 101, 13, registry.AntreaEnterpriseID, 65535), "")
ie7 := entities.NewStringInfoElement(entities.NewInfoElement("destinationPodName", 103, 13, registry.AntreaEnterpriseID, 65535), "")
ie9 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationServicePort", 107, 2, registry.AntreaEnterpriseID, 2), 0)
var ie1, ie2, ie8 entities.InfoElementWithValue
if !isIPv6 {
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv4Address", 8, 18, 0, 4), nil)
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv4Address", 12, 18, 0, 4), nil)
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv4", 106, 18, registry.AntreaEnterpriseID, 4), nil)
} else {
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv6Address", 8, 19, 0, 16), nil)
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv6Address", 12, 19, 0, 16), nil)
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv6", 106, 19, registry.AntreaEnterpriseID, 16), nil)
}
ie10 := entities.NewDateTimeSecondsInfoElement(entities.NewInfoElement("flowEndSeconds", 151, 14, 0, 4), 0)
ie11 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("flowType", 137, 1, registry.AntreaEnterpriseID, 1), 0)
ie12 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("ingressNetworkPolicyRuleAction", 139, 1, registry.AntreaEnterpriseID, 1), 0)
ie13 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("egressNetworkPolicyRuleAction", 140, 1, registry.AntreaEnterpriseID, 1), 0)
ie14 := entities.NewSigned32InfoElement(entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, registry.AntreaEnterpriseID, 4), 0)
elements = append(elements, ie1, ie2, ie3, ie4, ie5, ie6, ie7, ie8, ie9, ie10, ie11, ie12, ie13, ie14)
set.AddRecord(elements, 256)
message := entities.NewMessage(true)
message.SetVersion(10)
message.SetMessageLen(40)
message.SetSequenceNum(1)
message.SetObsDomainID(5678)
message.SetExportTime(0)
if isIPv6 {
message.SetExportAddress("::1")
} else {
message.SetExportAddress("127.0.0.1")
}
message.AddSet(set)
return message
}
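// createDataMsgForSrc builds a data record as exported from the source Node, covering
// intra-Node, inter-Node, to-external, and egress-deny variants.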
// TODO: Clean up this function using a loop to make it easier to add elements for testing.
func createDataMsgForSrc(t *testing.T, isIPv6 bool, isIntraNode bool, isUpdatedRecord bool, isToExternal bool, isEgressDeny bool) *entities.Message {
set := entities.NewSet(true)
set.PrepareSet(entities.Data, testTemplateID)
elements := make([]entities.InfoElementWithValue, 0)
var srcPod, dstPod string
srcPod = "pod1"
if !isIntraNode {
dstPod = ""
} else {
dstPod = "pod2"
}
ie3 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("sourceTransportPort", 7, 2, 0, 2), uint16(1234))
ie4 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationTransportPort", 11, 2, 0, 2), uint16(5678))
ie5 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("protocolIdentifier", 4, 1, 0, 1), uint8(6))
ie6 := entities.NewStringInfoElement(entities.NewInfoElement("sourcePodName", 101, 13, registry.AntreaEnterpriseID, 65535), srcPod)
ie7 := entities.NewStringInfoElement(entities.NewInfoElement("destinationPodName", 103, 13, registry.AntreaEnterpriseID, 65535), dstPod)
ie9 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationServicePort", 107, 2, registry.AntreaEnterpriseID, 2), uint16(4739))
var ie1, ie2, ie8, ie10, ie11, ie12, ie13, ie14, ie15, ie16 entities.InfoElementWithValue
if !isIPv6 {
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv4Address", 8, 18, 0, 4), net.ParseIP("10.0.0.1").To4())
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv4Address", 12, 18, 0, 4), net.ParseIP("10.0.0.2").To4())
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv4", 106, 18, registry.AntreaEnterpriseID, 4), net.ParseIP("192.168.0.1").To4())
} else {
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv6Address", 8, 19, 0, 16), net.ParseIP("2001:0:3238:DFE1:63::FEFB"))
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv6Address", 12, 19, 0, 16), net.ParseIP("2001:0:3238:DFE1:63::FEFC"))
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv6", 106, 19, registry.AntreaEnterpriseID, 16), net.ParseIP("2001:0:3238:BBBB:63::AAAA"))
}
tmpFlowStartSecs, _ := registry.GetInfoElement("flowStartSeconds", registry.IANAEnterpriseID)
tmpFlowEndSecs, _ := registry.GetInfoElement("flowEndSeconds", registry.IANAEnterpriseID)
tmpFlowEndReason, _ := registry.GetInfoElement("flowEndReason", registry.IANAEnterpriseID)
tmpTCPState, _ := registry.GetInfoElement("tcpState", registry.AntreaEnterpriseID)
if !isUpdatedRecord {
ie10 = entities.NewDateTimeSecondsInfoElement(tmpFlowEndSecs, uint32(1))
ie12 = entities.NewUnsigned8InfoElement(tmpFlowEndReason, registry.ActiveTimeoutReason)
ie13 = entities.NewStringInfoElement(tmpTCPState, "ESTABLISHED")
} else {
ie10 = entities.NewDateTimeSecondsInfoElement(tmpFlowEndSecs, uint32(10))
ie12 = entities.NewUnsigned8InfoElement(tmpFlowEndReason, registry.EndOfFlowReason)
ie13 = entities.NewStringInfoElement(tmpTCPState, "TIME_WAIT")
}
if isToExternal {
ie11 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("flowType", 137, 1, registry.AntreaEnterpriseID, 1), registry.FlowTypeToExternal)
ie16 = entities.NewSigned32InfoElement(entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, registry.AntreaEnterpriseID, 4), int32(50000))
} else if !isIntraNode {
ie11 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("flowType", 137, 1, registry.AntreaEnterpriseID, 1), registry.FlowTypeInterNode)
ie16 = entities.NewSigned32InfoElement(entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, registry.AntreaEnterpriseID, 4), int32(0))
} else {
ie11 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("flowType", 137, 1, registry.AntreaEnterpriseID, 1), registry.FlowTypeIntraNode)
ie16 = entities.NewSigned32InfoElement(entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, registry.AntreaEnterpriseID, 4), int32(50000))
}
ie14 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("ingressNetworkPolicyRuleAction", 139, 1, registry.AntreaEnterpriseID, 1), registry.NetworkPolicyRuleActionNoAction)
if isEgressDeny {
ie15 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("egressNetworkPolicyRuleAction", 140, 1, registry.AntreaEnterpriseID, 1), registry.NetworkPolicyRuleActionDrop)
} else {
ie15 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("egressNetworkPolicyRuleAction", 140, 1, registry.AntreaEnterpriseID, 1), registry.NetworkPolicyRuleActionNoAction)
}
ie17 := entities.NewDateTimeSecondsInfoElement(tmpFlowStartSecs, uint32(0))
elements = append(elements, ie1, ie2, ie3, ie4, ie5, ie6, ie7, ie8, ie9, ie10, ie11, ie12, ie13, ie14, ie15, ie16, ie17)
// Add all elements in statsElements.
for _, element := range statsElementList {
var e *entities.InfoElement
if !strings.Contains(element, "reverse") {
e, _ = registry.GetInfoElement(element, registry.IANAEnterpriseID)
} else {
e, _ = registry.GetInfoElement(element, registry.IANAReversedEnterpriseID)
}
var ieWithValue entities.InfoElementWithValue
switch element {
case "packetTotalCount", "reversePacketTotalCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(500))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(1000))
}
case "packetDeltaCount", "reversePacketDeltaCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(0))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(500))
}
case "octetTotalCount", "reverseOctetTotalCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(1000))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(2000))
}
}
elements = append(elements, ieWithValue)
}
err := set.AddRecord(elements, 256)
assert.NoError(t, err)
message := entities.NewMessage(true)
message.SetVersion(10)
message.SetMessageLen(32)
message.SetSequenceNum(1)
message.SetObsDomainID(1234)
message.SetExportTime(0)
if isIPv6 {
message.SetExportAddress("::1")
} else {
message.SetExportAddress("127.0.0.1")
}
message.AddSet(set)
return message
}
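// createDataMsgForDst builds a data record as exported from the destination Node,
// optionally marking the ingress rule action as reject or drop.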
func createDataMsgForDst(t *testing.T, isIPv6 bool, isIntraNode bool, isUpdatedRecord bool, isIngressReject bool, isIngressDrop bool) *entities.Message {
set := entities.NewSet(true)
set.PrepareSet(entities.Data, testTemplateID)
elements := make([]entities.InfoElementWithValue, 0)
var srcAddr, dstAddr, svcAddr []byte
var flowStartTime, flowEndTime uint32
var flowEndReason, ingressNetworkPolicyRuleAction, antreaFlowType uint8
var srcPod, dstPod, tcpState string
var svcPort uint16
srcPort := uint16(1234)
dstPort := uint16(5678)
proto := uint8(6)
if isIngressReject {
ingressNetworkPolicyRuleAction = registry.NetworkPolicyRuleActionReject
} else if isIngressDrop {
ingressNetworkPolicyRuleAction = registry.NetworkPolicyRuleActionDrop
} else {
ingressNetworkPolicyRuleAction = registry.NetworkPolicyRuleActionNoAction
}
egressNetworkPolicyRuleAction := registry.NetworkPolicyRuleActionNoAction
ingressNetworkPolicyRulePriority := int32(50000)
if !isIntraNode {
svcPort = uint16(0)
srcPod = ""
} else {
svcPort = uint16(4739)
srcPod = "pod1"
}
dstPod = "pod2"
ie3 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("sourceTransportPort", 7, 2, 0, 2), srcPort)
ie4 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationTransportPort", 11, 2, 0, 2), dstPort)
ie5 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("protocolIdentifier", 4, 1, 0, 1), proto)
ie6 := entities.NewStringInfoElement(entities.NewInfoElement("sourcePodName", 101, 13, registry.AntreaEnterpriseID, 65535), srcPod)
ie7 := entities.NewStringInfoElement(entities.NewInfoElement("destinationPodName", 103, 13, registry.AntreaEnterpriseID, 65535), dstPod)
ie9 := entities.NewUnsigned16InfoElement(entities.NewInfoElement("destinationServicePort", 107, 2, registry.AntreaEnterpriseID, 2), svcPort)
var ie1, ie2, ie8, ie11 entities.InfoElementWithValue
if !isIPv6 {
srcAddr = net.ParseIP("10.0.0.1").To4()
dstAddr = net.ParseIP("10.0.0.2").To4()
svcAddr = net.ParseIP("0.0.0.0").To4()
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv4Address", 8, 18, 0, 4), srcAddr)
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv4Address", 12, 18, 0, 4), dstAddr)
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv4", 106, 18, registry.AntreaEnterpriseID, 4), svcAddr)
} else {
srcAddr = net.ParseIP("2001:0:3238:DFE1:63::FEFB")
dstAddr = net.ParseIP("2001:0:3238:DFE1:63::FEFC")
if !isIntraNode {
svcAddr = net.ParseIP("::0")
} else {
svcAddr = net.ParseIP("2001:0:3238:BBBB:63::AAAA")
}
ie1 = entities.NewIPAddressInfoElement(entities.NewInfoElement("sourceIPv6Address", 8, 19, 0, 16), srcAddr)
ie2 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationIPv6Address", 12, 19, 0, 16), dstAddr)
ie8 = entities.NewIPAddressInfoElement(entities.NewInfoElement("destinationClusterIPv6", 106, 19, registry.AntreaEnterpriseID, 16), svcAddr)
}
flowStartTime = uint32(0)
if !isUpdatedRecord {
flowEndTime = uint32(1)
flowEndReason = registry.ActiveTimeoutReason
tcpState = "ESTABLISHED"
} else {
flowEndTime = uint32(10)
flowEndReason = registry.EndOfFlowReason
tcpState = "TIME_WAIT"
}
tmpElement, _ := registry.GetInfoElement("flowStartSeconds", registry.IANAEnterpriseID)
ie17 := entities.NewDateTimeSecondsInfoElement(tmpElement, flowStartTime)
tmpElement, _ = registry.GetInfoElement("flowEndSeconds", registry.IANAEnterpriseID)
ie10 := entities.NewDateTimeSecondsInfoElement(tmpElement, flowEndTime)
if !isIntraNode {
antreaFlowType = registry.FlowTypeInterNode
} else {
antreaFlowType = registry.FlowTypeIntraNode
}
ie11 = entities.NewUnsigned8InfoElement(entities.NewInfoElement("flowType", 137, 1, registry.AntreaEnterpriseID, 1), antreaFlowType)
tmpElement, _ = registry.GetInfoElement("flowEndReason", registry.IANAEnterpriseID)
ie12 := entities.NewUnsigned8InfoElement(tmpElement, flowEndReason)
tmpElement, _ = registry.GetInfoElement("tcpState", registry.AntreaEnterpriseID)
ie13 := entities.NewStringInfoElement(tmpElement, tcpState)
ie14 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("ingressNetworkPolicyRuleAction", 139, 1, registry.AntreaEnterpriseID, 1), ingressNetworkPolicyRuleAction)
ie15 := entities.NewUnsigned8InfoElement(entities.NewInfoElement("egressNetworkPolicyRuleAction", 140, 1, registry.AntreaEnterpriseID, 1), egressNetworkPolicyRuleAction)
ie16 := entities.NewSigned32InfoElement(entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, registry.AntreaEnterpriseID, 4), ingressNetworkPolicyRulePriority)
elements = append(elements, ie1, ie2, ie3, ie4, ie5, ie6, ie7, ie8, ie9, ie10, ie11, ie12, ie13, ie14, ie15, ie16, ie17)
// Add all elements in statsElements.
for _, element := range statsElementList {
var e *entities.InfoElement
if !strings.Contains(element, "reverse") {
e, _ = registry.GetInfoElement(element, registry.IANAEnterpriseID)
} else {
e, _ = registry.GetInfoElement(element, registry.IANAReversedEnterpriseID)
}
var ieWithValue entities.InfoElementWithValue
switch element {
case "packetTotalCount", "reversePacketTotalCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(502))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(1005))
}
case "packetDeltaCount", "reversePacketDeltaCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(0))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(503))
}
case "octetTotalCount", "reverseOctetTotalCount":
if !isUpdatedRecord {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(1020))
} else {
ieWithValue = entities.NewUnsigned64InfoElement(e, uint64(2050))
}
}
elements = append(elements, ieWithValue)
}
err := set.AddRecord(elements, 256)
assert.NoError(t, err)
message := entities.NewMessage(true)
message.SetVersion(10)
message.SetMessageLen(32)
message.SetSequenceNum(1)
message.SetObsDomainID(1234)
message.SetExportTime(0)
if isIPv6 {
message.SetExportAddress("::1")
} else {
message.SetExportAddress("127.0.0.1")
}
message.AddSet(set)
return message
}
func TestInitAggregationProcess(t *testing.T) {
input := AggregationInput{
MessageChan: nil,
WorkerNum: 2,
CorrelateFields: fields,
}
aggregationProcess, err := InitAggregationProcess(input)
assert.NotNil(t, err)
assert.Nil(t, aggregationProcess)
messageChan := make(chan *entities.Message)
input.MessageChan = messageChan
aggregationProcess, err = InitAggregationProcess(input)
assert.Nil(t, err)
assert.Equal(t, 2, aggregationProcess.workerNum)
}
func TestGetTupleRecordMap(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
}
aggregationProcess, _ := InitAggregationProcess(input)
assert.Equal(t, aggregationProcess.flowKeyRecordMap, aggregationProcess.flowKeyRecordMap)
}
func TestAggregateMsgByFlowKey(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
aggregationProcess, _ := InitAggregationProcess(input)
// Template records with IPv4 fields should be ignored
message := createMsgwithTemplateSet(false)
err := aggregationProcess.AggregateMsgByFlowKey(message)
assert.NoError(t, err)
assert.Empty(t, aggregationProcess.flowKeyRecordMap)
assert.Empty(t, aggregationProcess.expirePriorityQueue.Len())
// Data records should be processed and stored with corresponding flow key
message = createDataMsgForSrc(t, false, false, false, false, false)
err = aggregationProcess.AggregateMsgByFlowKey(message)
assert.NoError(t, err)
	assert.NotZero(t, aggregationProcess.GetNumFlows())
assert.NotZero(t, aggregationProcess.expirePriorityQueue.Len())
flowKey := FlowKey{"10.0.0.1", "10.0.0.2", 6, 1234, 5678}
aggRecord := aggregationProcess.flowKeyRecordMap[flowKey]
assert.NotNil(t, aggregationProcess.flowKeyRecordMap[flowKey])
item := aggregationProcess.expirePriorityQueue.Peek()
assert.NotNil(t, item)
ieWithValue, _, exist := aggRecord.Record.GetInfoElementWithValue("sourceIPv4Address")
assert.Equal(t, true, exist)
assert.Equal(t, net.IP{0xa, 0x0, 0x0, 0x1}, ieWithValue.GetIPAddressValue())
assert.Equal(t, message.GetSet().GetRecords()[0], aggRecord.Record)
// Template records with IPv6 fields should be ignored
message = createMsgwithTemplateSet(true)
err = aggregationProcess.AggregateMsgByFlowKey(message)
assert.NoError(t, err)
// It should have only data record with IPv4 fields that is added before.
assert.Equal(t, int64(1), aggregationProcess.GetNumFlows())
assert.Equal(t, 1, aggregationProcess.expirePriorityQueue.Len())
// Data record with IPv6 addresses should be processed and stored correctly
message = createDataMsgForSrc(t, true, false, false, false, false)
err = aggregationProcess.AggregateMsgByFlowKey(message)
assert.NoError(t, err)
assert.Equal(t, int64(2), aggregationProcess.GetNumFlows())
assert.Equal(t, 2, aggregationProcess.expirePriorityQueue.Len())
flowKey = FlowKey{"2001:0:3238:dfe1:63::fefb", "2001:0:3238:dfe1:63::fefc", 6, 1234, 5678}
assert.NotNil(t, aggregationProcess.flowKeyRecordMap[flowKey])
aggRecord = aggregationProcess.flowKeyRecordMap[flowKey]
ieWithValue, _, exist = aggRecord.Record.GetInfoElementWithValue("sourceIPv6Address")
assert.Equal(t, true, exist)
assert.Equal(t, net.IP{0x20, 0x1, 0x0, 0x0, 0x32, 0x38, 0xdf, 0xe1, 0x0, 0x63, 0x0, 0x0, 0x0, 0x0, 0xfe, 0xfb}, ieWithValue.GetIPAddressValue())
assert.Equal(t, message.GetSet().GetRecords()[0], aggRecord.Record)
// Test data record with invalid "flowEndSeconds" field
element, _, exists := message.GetSet().GetRecords()[0].GetInfoElementWithValue("flowEndSeconds")
assert.True(t, exists)
element.ResetValue()
err = aggregationProcess.AggregateMsgByFlowKey(message)
assert.NoError(t, err)
}
func TestAggregationProcess(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
}
aggregationProcess, _ := InitAggregationProcess(input)
dataMsg := createDataMsgForSrc(t, false, false, false, false, false)
go func() {
messageChan <- createMsgwithTemplateSet(false)
time.Sleep(time.Second)
messageChan <- dataMsg
time.Sleep(time.Second)
close(messageChan)
aggregationProcess.Stop()
}()
	// Start() blocks until the goroutine above calls Stop().
	// In normal usage, Start() should run in its own goroutine, fed by an external message channel.
aggregationProcess.Start()
flowKey := FlowKey{
"10.0.0.1", "10.0.0.2", 6, 1234, 5678,
}
aggRecord := aggregationProcess.flowKeyRecordMap[flowKey]
assert.Equalf(t, aggRecord.Record, dataMsg.GetSet().GetRecords()[0], "records should be equal")
}
func TestCorrelateRecordsForInterNodeFlow(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Test IPv4 fields.
// Test the scenario, where record1 is added first and then record2.
record1 := createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
record2 := createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, record2, false, false, true)
// Cleanup the flowKeyMap in aggregation process.
flowKey1, _, _ := getFlowKeyFromRecord(record1)
err := ap.deleteFlowKeyFromMap(*flowKey1)
assert.NoError(t, err)
heap.Pop(&ap.expirePriorityQueue)
// Test the scenario, where record2 is added first and then record1.
record1 = createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
record2 = createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record2, record1, false, false, true)
// Cleanup the flowKeyMap in aggregation process.
err = ap.deleteFlowKeyFromMap(*flowKey1)
assert.NoError(t, err)
heap.Pop(&ap.expirePriorityQueue)
// Test IPv6 fields.
// Test the scenario, where record1 is added first and then record2.
record1 = createDataMsgForSrc(t, true, false, false, false, false).GetSet().GetRecords()[0]
record2 = createDataMsgForDst(t, true, false, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, record2, true, false, true)
// Cleanup the flowKeyMap in aggregation process.
flowKey1, _, _ = getFlowKeyFromRecord(record1)
err = ap.deleteFlowKeyFromMap(*flowKey1)
assert.NoError(t, err)
heap.Pop(&ap.expirePriorityQueue)
// Test the scenario, where record2 is added first and then record1.
record1 = createDataMsgForSrc(t, true, false, false, false, false).GetSet().GetRecords()[0]
record2 = createDataMsgForDst(t, true, false, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record2, record1, true, false, true)
}
func TestCorrelateRecordsForInterNodeDenyFlow(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
}
ap, _ := InitAggregationProcess(input)
// Test the scenario, where src record has egress deny rule
record1 := createDataMsgForSrc(t, false, false, false, false, true).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, nil, false, false, false)
// Cleanup the flowKeyMap in aggregation process.
flowKey1, _, _ := getFlowKeyFromRecord(record1)
ap.deleteFlowKeyFromMap(*flowKey1)
heap.Pop(&ap.expirePriorityQueue)
// Test the scenario, where dst record has ingress reject rule
record2 := createDataMsgForDst(t, false, false, false, true, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record2, nil, false, false, false)
// Cleanup the flowKeyMap in aggregation process.
ap.deleteFlowKeyFromMap(*flowKey1)
heap.Pop(&ap.expirePriorityQueue)
// Test the scenario, where dst record has ingress drop rule
record1 = createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
record2 = createDataMsgForDst(t, false, false, false, false, true).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, record2, false, false, true)
// Cleanup the flowKeyMap in aggregation process.
ap.deleteFlowKeyFromMap(*flowKey1)
}
func TestCorrelateRecordsForIntraNodeFlow(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Test IPv4 fields.
record1 := createDataMsgForSrc(t, false, true, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, nil, false, true, false)
// Cleanup the flowKeyMap in aggregation process.
flowKey1, _, _ := getFlowKeyFromRecord(record1)
err := ap.deleteFlowKeyFromMap(*flowKey1)
assert.NoError(t, err)
heap.Pop(&ap.expirePriorityQueue)
// Test IPv6 fields.
record1 = createDataMsgForSrc(t, true, true, false, false, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, nil, true, true, false)
}
func TestCorrelateRecordsForToExternalFlow(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Test IPv4 fields.
record1 := createDataMsgForSrc(t, false, true, false, true, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, nil, false, true, false)
// Cleanup the flowKeyMap in aggregation process.
flowKey1, _, _ := getFlowKeyFromRecord(record1)
err := ap.deleteFlowKeyFromMap(*flowKey1)
assert.NoError(t, err)
heap.Pop(&ap.expirePriorityQueue)
// Test IPv6 fields.
record1 = createDataMsgForSrc(t, true, true, false, true, false).GetSet().GetRecords()[0]
runCorrelationAndCheckResult(t, ap, record1, nil, true, true, false)
}
func TestAggregateRecordsForInterNodeFlow(t *testing.T) {
messageChan := make(chan *entities.Message)
aggElements := &AggregationElements{
NonStatsElements: nonStatsElementList,
StatsElements: statsElementList,
AggregatedSourceStatsElements: antreaSourceStatsElementList,
AggregatedDestinationStatsElements: antreaDestinationStatsElementList,
AntreaFlowEndSecondsElements: antreaFlowEndSecondsElementList,
ThroughputElements: antreaThroughputElementList,
SourceThroughputElements: antreaSourceThroughputElementList,
DestinationThroughputElements: antreaDestinationThroughputElementList,
}
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
AggregateElements: aggElements,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Test the scenario (added in order): srcRecord, dstRecord, record1_updated, record2_updated
srcRecord := createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
dstRecord := createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
latestSrcRecord := createDataMsgForSrc(t, false, false, true, false, false).GetSet().GetRecords()[0]
latestDstRecord := createDataMsgForDst(t, false, false, true, false, false).GetSet().GetRecords()[0]
runAggregationAndCheckResult(t, ap, srcRecord, dstRecord, latestSrcRecord, latestDstRecord, false)
}
func TestDeleteFlowKeyFromMapWithLock(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
}
aggregationProcess, _ := InitAggregationProcess(input)
message := createDataMsgForSrc(t, false, false, false, false, false)
flowKey1 := FlowKey{"10.0.0.1", "10.0.0.2", 6, 1234, 5678}
flowKey2 := FlowKey{"2001:0:3238:dfe1:63::fefb", "2001:0:3238:dfe1:63::fefc", 6, 1234, 5678}
aggFlowRecord := &AggregationFlowRecord{
Record: message.GetSet().GetRecords()[0],
PriorityQueueItem: &ItemToExpire{},
ReadyToSend: true,
waitForReadyToSendRetries: 0,
areCorrelatedFieldsFilled: false,
areExternalFieldsFilled: false,
}
aggregationProcess.flowKeyRecordMap[flowKey1] = aggFlowRecord
assert.Equal(t, int64(1), aggregationProcess.GetNumFlows())
err := aggregationProcess.deleteFlowKeyFromMap(flowKey2)
assert.Error(t, err)
assert.Equal(t, int64(1), aggregationProcess.GetNumFlows())
err = aggregationProcess.deleteFlowKeyFromMap(flowKey1)
assert.NoError(t, err)
assert.Empty(t, aggregationProcess.flowKeyRecordMap)
}
func TestGetExpiryFromExpirePriorityQueue(t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Add records with IPv4 fields.
recordIPv4Src := createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
recordIPv4Dst := createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
// Add records with IPv6 fields.
recordIPv6Src := createDataMsgForSrc(t, true, false, false, false, false).GetSet().GetRecords()[0]
recordIPv6Dst := createDataMsgForDst(t, true, false, false, false, false).GetSet().GetRecords()[0]
testCases := []struct {
name string
records []entities.Record
}{
{
"empty queue",
nil,
},
{
"One aggregation record",
[]entities.Record{recordIPv4Src, recordIPv4Dst},
},
{
"Two aggregation records",
[]entities.Record{recordIPv4Src, recordIPv4Dst, recordIPv6Src, recordIPv6Dst},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
for _, record := range tc.records {
flowKey, isIPv4, _ := getFlowKeyFromRecord(record)
err := ap.addOrUpdateRecordInMap(flowKey, record, isIPv4)
assert.NoError(t, err)
}
expiryTime := ap.GetExpiryFromExpirePriorityQueue()
assert.LessOrEqualf(t, expiryTime.Nanoseconds(), testActiveExpiry.Nanoseconds(), "incorrect expiry time")
})
}
}
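// assertElementMap verifies the flattened element map returned by GetRecords for a
// correlated inter-Node flow.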
func assertElementMap(t *testing.T, record map[string]interface{}, ipv6 bool) {
if ipv6 {
assert.Equal(t, net.ParseIP("2001:0:3238:dfe1:63::fefb"), record["sourceIPv6Address"])
assert.Equal(t, net.ParseIP("2001:0:3238:dfe1:63::fefc"), record["destinationIPv6Address"])
assert.Equal(t, net.ParseIP("2001:0:3238:bbbb:63::aaaa"), record["destinationClusterIPv6"])
} else {
assert.Equal(t, net.ParseIP("10.0.0.1").To4(), record["sourceIPv4Address"])
assert.Equal(t, net.ParseIP("10.0.0.2").To4(), record["destinationIPv4Address"])
assert.Equal(t, net.ParseIP("192.168.0.1").To4(), record["destinationClusterIPv4"])
}
assert.Equal(t, uint16(1234), record["sourceTransportPort"])
assert.Equal(t, uint16(5678), record["destinationTransportPort"])
assert.Equal(t, uint8(6), record["protocolIdentifier"])
assert.Equal(t, "pod1", record["sourcePodName"])
assert.Equal(t, "pod2", record["destinationPodName"])
assert.Equal(t, uint16(4739), record["destinationServicePort"])
assert.Equal(t, uint32(0), record["flowStartSeconds"])
assert.Equal(t, uint32(1), record["flowEndSeconds"])
assert.Equal(t, uint32(1), record["flowEndSecondsFromSourceNode"])
assert.Equal(t, uint32(1), record["flowEndSecondsFromDestinationNode"])
assert.Equal(t, uint8(2), record["flowType"])
assert.Equal(t, uint8(2), record["flowEndReason"])
assert.Equal(t, "ESTABLISHED", record["tcpState"])
assert.Equal(t, uint8(0), record["ingressNetworkPolicyRuleAction"])
assert.Equal(t, uint8(0), record["egressNetworkPolicyRuleAction"])
assert.Equal(t, int32(50000), record["ingressNetworkPolicyRulePriority"])
assert.Equal(t, uint64(502), record["packetTotalCount"])
assert.Equal(t, uint64(502), record["reversePacketTotalCount"])
assert.Equal(t, uint64(1020), record["octetTotalCount"])
assert.Equal(t, uint64(1020), record["reverseOctetTotalCount"])
assert.Equal(t, uint64(1020*8), record["throughput"])
assert.Equal(t, uint64(1020*8), record["reverseThroughput"])
assert.Equal(t, uint64(1000*8), record["throughputFromSourceNode"])
assert.Equal(t, uint64(1000*8), record["reverseThroughputFromSourceNode"])
assert.Equal(t, uint64(1020*8), record["throughputFromDestinationNode"])
assert.Equal(t, uint64(1020*8), record["reverseThroughputFromDestinationNode"])
assert.Equal(t, uint64(0), record["packetDeltaCount"])
assert.Equal(t, uint64(502), record["reversePacketTotalCount"])
assert.Equal(t, uint64(0), record["reversePacketDeltaCount"])
}
func TestGetRecords(t *testing.T) {
messageChan := make(chan *entities.Message)
aggElements := &AggregationElements{
NonStatsElements: nonStatsElementList,
StatsElements: statsElementList,
AggregatedSourceStatsElements: antreaSourceStatsElementList,
AggregatedDestinationStatsElements: antreaDestinationStatsElementList,
AntreaFlowEndSecondsElements: antreaFlowEndSecondsElementList,
ThroughputElements: antreaThroughputElementList,
SourceThroughputElements: antreaSourceThroughputElementList,
DestinationThroughputElements: antreaDestinationThroughputElementList,
}
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
AggregateElements: aggElements,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Add records with IPv4 fields.
recordIPv4Src := createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
recordIPv4Dst := createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
// Add records with IPv6 fields.
recordIPv6Src := createDataMsgForSrc(t, true, false, false, false, false).GetSet().GetRecords()[0]
recordIPv6Dst := createDataMsgForDst(t, true, false, false, false, false).GetSet().GetRecords()[0]
records := []entities.Record{recordIPv4Src, recordIPv4Dst, recordIPv6Src, recordIPv6Dst}
for _, record := range records {
flowKey, isIPv4, _ := getFlowKeyFromRecord(record)
err := ap.addOrUpdateRecordInMap(flowKey, record, isIPv4)
assert.NoError(t, err)
}
flowKeyIPv4, _, _ := getFlowKeyFromRecord(recordIPv4Src)
partialFlowKeyIPv6 := &FlowKey{
SourceAddress: "2001:0:3238:dfe1:63::fefb",
}
testCases := []struct {
name string
flowKey *FlowKey
expectedLen int
}{
{
"Empty flowkey",
nil,
2,
},
{
"IPv4 flowkey",
flowKeyIPv4,
1,
},
{
"IPv6 flowkey",
partialFlowKeyIPv6,
1,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
records := ap.GetRecords(tc.flowKey)
assert.Equalf(t, tc.expectedLen, len(records), "%s: Number of records string is incorrect, expected %d got %d", tc.name, tc.expectedLen, len(records))
if tc.flowKey != nil {
assertElementMap(t, records[0], tc.name == "IPv6 flowkey")
} else {
if _, ok := records[0]["sourceIPv6Address"]; ok {
assertElementMap(t, records[0], true)
assertElementMap(t, records[1], false)
} else {
assertElementMap(t, records[0], false)
assertElementMap(t, records[1], true)
}
}
})
}
}
func | (t *testing.T) {
messageChan := make(chan *entities.Message)
input := AggregationInput{
MessageChan: messageChan,
WorkerNum: 2,
CorrelateFields: fields,
ActiveExpiryTimeout: testActiveExpiry,
InactiveExpiryTimeout: testInactiveExpiry,
}
ap, _ := InitAggregationProcess(input)
// Add records with IPv4 fields.
recordIPv4Src := createDataMsgForSrc(t, false, false, false, false, false).GetSet().GetRecords()[0]
recordIPv4Dst := createDataMsgForDst(t, false, false, false, false, false).GetSet().GetRecords()[0]
// Add records with IPv6 fields.
recordIPv6Src := createDataMsgForSrc(t, true, false, false, false, false).GetSet().GetRecords()[0]
recordIPv6Dst := createDataMsgForDst(t, true, false, false, false, false).GetSet().GetRecords()[0]
numExecutions := 0
testCallback := func(key FlowKey, record *AggregationFlowRecord) error {
numExecutions = numExecutions + 1
return nil
}
testCases := []struct {
name string
records []entities.Record
expectedExecutions int
expectedPQLen int
}{
{
"empty queue",
nil,
0,
0,
},
{
"One aggregation record and none expired",
[]entities.Record{recordIPv4Src, recordIPv4Dst},
0,
1,
},
{
"One aggregation record and one expired",
[]entities.Record{recordIPv4Src, recordIPv4Dst},
1,
1,
},
{
"Two aggregation records and one expired",
[]entities.Record{recordIPv4Src, recordIPv4Dst, recordIPv6Src, recordIPv6Dst},
1,
2,
},
{
"Two aggregation records and two expired",
[]entities.Record{recordIPv4Src, recordIPv4Dst, recordIPv6Src, recordIPv6Dst},
2,
0,
},
{
"One aggregation record and waitForReadyToSendRetries reach maximum",
[]entities.Record{recordIPv4Src},
0,
0,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numExecutions = 0
for _, record := range tc.records {
flowKey, isIPv4, _ := getFlowKeyFromRecord(record)
err := ap.addOrUpdateRecordInMap(flowKey, record, isIPv4)
assert.NoError(t, err)
}
switch tc.name {
case "One aggregation record and one expired":
time.Sleep(testActiveExpiry)
err := ap.ForAllExpiredFlowRecordsDo(testCallback)
assert.NoError(t, err)
case "Two aggregation records and one expired":
time.Sleep(testActiveExpiry)
secondAggRec := ap.expirePriorityQueue[1]
ap.expirePriorityQueue.Update(secondAggRec, secondAggRec.flowKey,
secondAggRec.flowRecord, secondAggRec.activeExpireTime.Add(testActiveExpiry), secondAggRec.inactiveExpireTime.Add(testInactiveExpiry))
err := ap.ForAllExpiredFlowRecordsDo(testCallback)
assert.NoError(t, err)
case "Two aggregation records and two expired":
time.Sleep(2 * testActiveExpiry)
err := ap.ForAllExpiredFlowRecordsDo(testCallback)
assert.NoError(t, err)
case "One aggregation record and waitForReadyToSendRetries reach maximum":
for i := 0; i < testMaxRetries; i++ {
time.Sleep(testActiveExpiry)
err := ap.ForAllExpiredFlowRecordsDo(testCallback)
assert.NoError(t, err)
}
default:
break
}
assert.Equalf(t, tc.expectedExecutions, numExecutions, "number of callback executions is incorrect")
assert.Equalf(t, tc.expectedPQLen, ap.expirePriorityQueue.Len(), "priority queue length is incorrect")
})
}
}
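// runCorrelationAndCheckResult adds record1 (and record2 for inter-Node flows that require
// correlation) to the aggregation process, then verifies the flow map and expiry priority queue
// state as well as the correlated fields (Pod names, destination cluster IP, service port and
// rule priority) on the stored aggregation record.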
func runCorrelationAndCheckResult(t *testing.T, ap *AggregationProcess, record1, record2 entities.Record, isIPv6, isIntraNode, needsCorrelation bool) {
flowKey1, isIPv4, _ := getFlowKeyFromRecord(record1)
err := ap.addOrUpdateRecordInMap(flowKey1, record1, isIPv4)
assert.NoError(t, err)
item := ap.expirePriorityQueue.Peek()
oldActiveExpiryTime := item.activeExpireTime
oldInactiveExpiryTime := item.inactiveExpireTime
if !isIntraNode && needsCorrelation {
flowKey2, isIPv4, _ := getFlowKeyFromRecord(record2)
assert.Equalf(t, *flowKey1, *flowKey2, "flow keys should be equal.")
err = ap.addOrUpdateRecordInMap(flowKey2, record2, isIPv4)
assert.NoError(t, err)
}
assert.Equal(t, int64(1), ap.GetNumFlows())
assert.Equal(t, 1, ap.expirePriorityQueue.Len())
aggRecord, _ := ap.flowKeyRecordMap[*flowKey1]
item = ap.expirePriorityQueue.Peek()
assert.Equal(t, *aggRecord, *item.flowRecord)
assert.Equal(t, oldActiveExpiryTime, item.activeExpireTime)
if !isIntraNode && needsCorrelation {
assert.NotEqual(t, oldInactiveExpiryTime, item.inactiveExpireTime)
assert.True(t, ap.AreCorrelatedFieldsFilled(*aggRecord))
}
if !isIntraNode && !needsCorrelation {
// for inter-Node deny connections, either src or dst Pod info will be resolved.
sourcePodName, _, _ := aggRecord.Record.GetInfoElementWithValue("sourcePodName")
destinationPodName, _, _ := aggRecord.Record.GetInfoElementWithValue("destinationPodName")
assert.True(t, sourcePodName.GetStringValue() == "" || destinationPodName.GetStringValue() == "")
egress, _, _ := aggRecord.Record.GetInfoElementWithValue("egressNetworkPolicyRuleAction")
ingress, _, _ := aggRecord.Record.GetInfoElementWithValue("ingressNetworkPolicyRuleAction")
assert.True(t, egress.GetUnsigned8Value() != 0 || ingress.GetUnsigned8Value() != 0)
assert.False(t, ap.AreCorrelatedFieldsFilled(*aggRecord))
} else {
ieWithValue, _, _ := aggRecord.Record.GetInfoElementWithValue("sourcePodName")
assert.Equal(t, "pod1", ieWithValue.GetStringValue())
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationPodName")
assert.Equal(t, "pod2", ieWithValue.GetStringValue())
if !isIPv6 {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationClusterIPv4")
assert.Equal(t, net.ParseIP("192.168.0.1").To4(), ieWithValue.GetIPAddressValue())
} else {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationClusterIPv6")
assert.Equal(t, net.ParseIP("2001:0:3238:BBBB:63::AAAA"), ieWithValue.GetIPAddressValue())
}
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationServicePort")
assert.Equal(t, uint16(4739), ieWithValue.GetUnsigned16Value())
ingressPriority, _, _ := aggRecord.Record.GetInfoElementWithValue("ingressNetworkPolicyRulePriority")
assert.Equal(t, ingressPriority.GetSigned32Value(), int32(50000))
assert.True(t, ap.AreCorrelatedFieldsFilled(*aggRecord))
}
}
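// runAggregationAndCheckResult feeds the original and latest src/dst records of a single flow
// into the aggregation process and checks that the aggregated record's non-stats fields, stats,
// per-node stats and throughput elements match the expected values from the latest records.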
func runAggregationAndCheckResult(t *testing.T, ap *AggregationProcess, srcRecord, dstRecord, srcRecordLatest, dstRecordLatest entities.Record, isIntraNode bool) {
flowKey, isIPv4, _ := getFlowKeyFromRecord(srcRecord)
err := ap.addOrUpdateRecordInMap(flowKey, srcRecord, isIPv4)
assert.NoError(t, err)
item := ap.expirePriorityQueue.Peek()
oldActiveExpiryTime := item.activeExpireTime
oldInactiveExpiryTime := item.inactiveExpireTime
if !isIntraNode {
err = ap.addOrUpdateRecordInMap(flowKey, dstRecord, isIPv4)
assert.NoError(t, err)
}
err = ap.addOrUpdateRecordInMap(flowKey, srcRecordLatest, isIPv4)
assert.NoError(t, err)
if !isIntraNode {
err = ap.addOrUpdateRecordInMap(flowKey, dstRecordLatest, isIPv4)
assert.NoError(t, err)
}
assert.Equal(t, int64(1), ap.GetNumFlows())
assert.Equal(t, 1, ap.expirePriorityQueue.Len())
aggRecord, _ := ap.flowKeyRecordMap[*flowKey]
item = ap.expirePriorityQueue.Peek()
assert.Equal(t, *aggRecord, *item.flowRecord)
assert.Equal(t, oldActiveExpiryTime, item.activeExpireTime)
if !isIntraNode {
assert.NotEqual(t, oldInactiveExpiryTime, item.inactiveExpireTime)
}
ieWithValue, _, _ := aggRecord.Record.GetInfoElementWithValue("sourcePodName")
assert.Equal(t, "pod1", ieWithValue.GetStringValue())
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationPodName")
assert.Equal(t, "pod2", ieWithValue.GetStringValue())
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationClusterIPv4")
assert.Equal(t, net.ParseIP("192.168.0.1").To4(), ieWithValue.GetIPAddressValue())
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("destinationServicePort")
assert.Equal(t, uint16(4739), ieWithValue.GetUnsigned16Value())
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue("ingressNetworkPolicyRuleAction")
assert.Equal(t, registry.NetworkPolicyRuleActionNoAction, ieWithValue.GetUnsigned8Value())
for _, e := range nonStatsElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
expectedIE, _, _ := dstRecordLatest.GetInfoElementWithValue(e)
switch ieWithValue.GetDataType() {
case entities.Unsigned8:
assert.Equal(t, ieWithValue.GetUnsigned8Value(), expectedIE.GetUnsigned8Value())
case entities.String:
assert.Equal(t, ieWithValue.GetStringValue(), expectedIE.GetStringValue())
case entities.Signed32:
assert.Equal(t, ieWithValue.GetSigned32Value(), expectedIE.GetSigned32Value())
}
}
for _, e := range statsElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
aggVal := ieWithValue.GetUnsigned64Value()
latestRecord, _, _ := dstRecordLatest.GetInfoElementWithValue(e)
latestVal := latestRecord.GetUnsigned64Value()
if !strings.Contains(e, "Delta") {
assert.Equalf(t, latestVal, aggVal, "values should be equal for element %v", e)
} else {
assert.Equalf(t, latestVal, aggVal, "values should be equal for element %v", e)
}
}
for i, e := range antreaSourceStatsElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
latestRecord, _, _ := srcRecordLatest.GetInfoElementWithValue(statsElementList[i])
assert.Equalf(t, latestRecord.GetUnsigned64Value(), ieWithValue.GetUnsigned64Value(), "values should be equal for element %v", e)
}
for i, e := range antreaDestinationStatsElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
latestRecord, _, _ := dstRecordLatest.GetInfoElementWithValue(statsElementList[i])
assert.Equalf(t, latestRecord.GetUnsigned64Value(), ieWithValue.GetUnsigned64Value(), "values should be equal for element %v", e)
}
for _, e := range antreaThroughputElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
expectedVal := 915
assert.Equalf(t, uint64(expectedVal), ieWithValue.GetUnsigned64Value(), "values should be equal for element %v", e)
}
for _, e := range antreaSourceThroughputElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
expectedVal := 888
assert.Equalf(t, uint64(expectedVal), ieWithValue.GetUnsigned64Value(), "values should be equal for element %v", e)
}
for _, e := range antreaDestinationThroughputElementList {
ieWithValue, _, _ = aggRecord.Record.GetInfoElementWithValue(e)
expectedVal := 915
assert.Equalf(t, uint64(expectedVal), ieWithValue.GetUnsigned64Value(), "values should be equal for element %v", e)
}
}
| TestForAllExpiredFlowRecordsDo |
applicationMenu.ts | import { Menu, MenuItemConstructorOptions } from 'electron';
import { IS_DEVELOPEMENT } from '@constants';
import {
appMenuItem,
editMenuItem,
developerMenuItem
} from './menuItems';
/**
* Menu template for creating application menu displayed in macOS
*/
const applicationMenuTemplate: MenuItemConstructorOptions[] = [
appMenuItem, | editMenuItem
// Temporarily disable `copyFormatMenuItem` and `themeMenuItem`,
// since importing `applicationMenu` and `settingMenu` in `setAppTheme()`
// breaks the functionality of other menu items
// TODO: Figure out a way to sync checked option value between two menus
];
if (IS_DEVELOPEMENT) {
applicationMenuTemplate.push(developerMenuItem);
}
/**
* Application menu displayed in macOS
*/
const applicationMenu = Menu.buildFromTemplate(applicationMenuTemplate);
export default applicationMenu; | |
CloudAPI.go | // Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import context "context"
import ec2 "github.com/aws/aws-sdk-go/service/ec2"
import ec2metadata "github.com/aws/aws-sdk-go/aws/ec2metadata"
import elbv2 "github.com/aws/aws-sdk-go/service/elbv2"
import mock "github.com/stretchr/testify/mock"
import resourcegroupstaggingapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
import types "github.com/kubernetes-sigs/aws-alb-ingress-controller/pkg/util/types"
import waf "github.com/aws/aws-sdk-go/service/waf"
import wafregional "github.com/aws/aws-sdk-go/service/wafregional"
// CloudAPI is an autogenerated mock type for the CloudAPI type
type CloudAPI struct {
mock.Mock
}
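// Illustrative usage sketch (not part of the generated code; names such as "vpc-123" are
// assumptions): tests typically program expectations on the mock via testify's mock package
// and verify them afterwards, e.g.
//
//	c := &CloudAPI{}
//	c.On("GetVPCID").Return(aws.String("vpc-123"), nil)
//	// ... exercise the code under test with c ...
//	c.AssertExpectations(t)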
// AssociateWAF provides a mock function with given fields: ctx, resourceArn, webACLId
func (_m *CloudAPI) AssociateWAF(ctx context.Context, resourceArn *string, webACLId *string) (*wafregional.AssociateWebACLOutput, error) {
ret := _m.Called(ctx, resourceArn, webACLId)
var r0 *wafregional.AssociateWebACLOutput
if rf, ok := ret.Get(0).(func(context.Context, *string, *string) *wafregional.AssociateWebACLOutput); ok {
r0 = rf(ctx, resourceArn, webACLId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*wafregional.AssociateWebACLOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *string, *string) error); ok {
r1 = rf(ctx, resourceArn, webACLId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// AuthorizeSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) AuthorizeSecurityGroupIngressWithContext(_a0 context.Context, _a1 *ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.AuthorizeSecurityGroupIngressOutput
if rf, ok := ret.Get(0).(func(context.Context, *ec2.AuthorizeSecurityGroupIngressInput) *ec2.AuthorizeSecurityGroupIngressOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.AuthorizeSecurityGroupIngressOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *ec2.AuthorizeSecurityGroupIngressInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateListenerWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateListenerWithContext(_a0 context.Context, _a1 *elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.CreateListenerOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.CreateListenerInput) *elbv2.CreateListenerOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.CreateListenerOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.CreateListenerInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateLoadBalancerWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateLoadBalancerWithContext(_a0 context.Context, _a1 *elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.CreateLoadBalancerOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.CreateLoadBalancerInput) *elbv2.CreateLoadBalancerOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.CreateLoadBalancerOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.CreateLoadBalancerInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateRuleWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateRuleWithContext(_a0 context.Context, _a1 *elbv2.CreateRuleInput) (*elbv2.CreateRuleOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.CreateRuleOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.CreateRuleInput) *elbv2.CreateRuleOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.CreateRuleOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.CreateRuleInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateSecurityGroupWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateSecurityGroupWithContext(_a0 context.Context, _a1 *ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.CreateSecurityGroupOutput
if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateSecurityGroupInput) *ec2.CreateSecurityGroupOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.CreateSecurityGroupOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *ec2.CreateSecurityGroupInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateTagsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateTagsWithContext(_a0 context.Context, _a1 *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.CreateTagsOutput
if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateTagsInput) *ec2.CreateTagsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.CreateTagsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *ec2.CreateTagsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateTargetGroupWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) CreateTargetGroupWithContext(_a0 context.Context, _a1 *elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.CreateTargetGroupOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.CreateTargetGroupInput) *elbv2.CreateTargetGroupOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.CreateTargetGroupOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.CreateTargetGroupInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteListenersByArn provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DeleteListenersByArn(_a0 context.Context, _a1 string) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteLoadBalancerByArn provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DeleteLoadBalancerByArn(_a0 context.Context, _a1 string) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteRuleWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DeleteRuleWithContext(_a0 context.Context, _a1 *elbv2.DeleteRuleInput) (*elbv2.DeleteRuleOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DeleteRuleOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DeleteRuleInput) *elbv2.DeleteRuleOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DeleteRuleOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DeleteRuleInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteSecurityGroupByID provides a mock function with given fields: _a0
func (_m *CloudAPI) DeleteSecurityGroupByID(_a0 string) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteTargetGroupByArn provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DeleteTargetGroupByArn(_a0 context.Context, _a1 string) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeregisterTargetsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DeregisterTargetsWithContext(_a0 context.Context, _a1 *elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DeregisterTargetsOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DeregisterTargetsInput) *elbv2.DeregisterTargetsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DeregisterTargetsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DeregisterTargetsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DescribeELBV2TagsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DescribeELBV2TagsWithContext(_a0 context.Context, _a1 *elbv2.DescribeTagsInput) (*elbv2.DescribeTagsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DescribeTagsOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DescribeTagsInput) *elbv2.DescribeTagsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DescribeTagsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DescribeTagsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DescribeLoadBalancerAttributesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DescribeLoadBalancerAttributesWithContext(_a0 context.Context, _a1 *elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DescribeLoadBalancerAttributesOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DescribeLoadBalancerAttributesInput) *elbv2.DescribeLoadBalancerAttributesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DescribeLoadBalancerAttributesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DescribeLoadBalancerAttributesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DescribeTargetGroupAttributesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DescribeTargetGroupAttributesWithContext(_a0 context.Context, _a1 *elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DescribeTargetGroupAttributesOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DescribeTargetGroupAttributesInput) *elbv2.DescribeTargetGroupAttributesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DescribeTargetGroupAttributesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DescribeTargetGroupAttributesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DescribeTargetHealthWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) DescribeTargetHealthWithContext(_a0 context.Context, _a1 *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.DescribeTargetHealthOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.DescribeTargetHealthInput) *elbv2.DescribeTargetHealthOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.DescribeTargetHealthOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.DescribeTargetHealthInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DisassociateWAF provides a mock function with given fields: ctx, resourceArn
func (_m *CloudAPI) DisassociateWAF(ctx context.Context, resourceArn *string) (*wafregional.DisassociateWebACLOutput, error) {
ret := _m.Called(ctx, resourceArn)
var r0 *wafregional.DisassociateWebACLOutput
if rf, ok := ret.Get(0).(func(context.Context, *string) *wafregional.DisassociateWebACLOutput); ok {
r0 = rf(ctx, resourceArn)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*wafregional.DisassociateWebACLOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok {
r1 = rf(ctx, resourceArn)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetClusterSubnets provides a mock function with given fields:
func (_m *CloudAPI) GetClusterSubnets() (map[string]types.EC2Tags, error) {
ret := _m.Called()
var r0 map[string]types.EC2Tags
if rf, ok := ret.Get(0).(func() map[string]types.EC2Tags); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]types.EC2Tags)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetInstanceIdentityDocument provides a mock function with given fields:
func (_m *CloudAPI) GetInstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) {
ret := _m.Called()
var r0 ec2metadata.EC2InstanceIdentityDocument
if rf, ok := ret.Get(0).(func() ec2metadata.EC2InstanceIdentityDocument); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(ec2metadata.EC2InstanceIdentityDocument)
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetInstancesByIDs provides a mock function with given fields: _a0
func (_m *CloudAPI) GetInstancesByIDs(_a0 []string) ([]*ec2.Instance, error) {
ret := _m.Called(_a0)
var r0 []*ec2.Instance
if rf, ok := ret.Get(0).(func([]string) []*ec2.Instance); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*ec2.Instance)
}
}
var r1 error
if rf, ok := ret.Get(1).(func([]string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetLoadBalancerByArn provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetLoadBalancerByArn(_a0 context.Context, _a1 string) (*elbv2.LoadBalancer, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.LoadBalancer
if rf, ok := ret.Get(0).(func(context.Context, string) *elbv2.LoadBalancer); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.LoadBalancer)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetLoadBalancerByName provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetLoadBalancerByName(_a0 context.Context, _a1 string) (*elbv2.LoadBalancer, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.LoadBalancer
if rf, ok := ret.Get(0).(func(context.Context, string) *elbv2.LoadBalancer); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.LoadBalancer)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetResourcesByFilters provides a mock function with given fields: tagFilters, resourceTypeFilters
func (_m *CloudAPI) GetResourcesByFilters(tagFilters map[string][]string, resourceTypeFilters ...string) ([]string, error) {
_va := make([]interface{}, len(resourceTypeFilters))
for _i := range resourceTypeFilters {
_va[_i] = resourceTypeFilters[_i]
}
var _ca []interface{}
_ca = append(_ca, tagFilters)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 []string
if rf, ok := ret.Get(0).(func(map[string][]string, ...string) []string); ok {
r0 = rf(tagFilters, resourceTypeFilters...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(map[string][]string, ...string) error); ok {
r1 = rf(tagFilters, resourceTypeFilters...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRules provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetRules(_a0 context.Context, _a1 string) ([]*elbv2.Rule, error) {
ret := _m.Called(_a0, _a1)
var r0 []*elbv2.Rule
if rf, ok := ret.Get(0).(func(context.Context, string) []*elbv2.Rule); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*elbv2.Rule)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetSecurityGroupByID provides a mock function with given fields: _a0
func (_m *CloudAPI) GetSecurityGroupByID(_a0 string) (*ec2.SecurityGroup, error) {
ret := _m.Called(_a0)
var r0 *ec2.SecurityGroup
if rf, ok := ret.Get(0).(func(string) *ec2.SecurityGroup); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.SecurityGroup)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetSecurityGroupByName provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetSecurityGroupByName(_a0 string, _a1 string) (*ec2.SecurityGroup, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.SecurityGroup
if rf, ok := ret.Get(0).(func(string, string) *ec2.SecurityGroup); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.SecurityGroup)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetSecurityGroupsByName provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetSecurityGroupsByName(_a0 context.Context, _a1 []string) ([]*ec2.SecurityGroup, error) {
ret := _m.Called(_a0, _a1)
var r0 []*ec2.SecurityGroup
if rf, ok := ret.Get(0).(func(context.Context, []string) []*ec2.SecurityGroup); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil |
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetSubnetsByNameOrID provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetSubnetsByNameOrID(_a0 context.Context, _a1 []string) ([]*ec2.Subnet, error) {
ret := _m.Called(_a0, _a1)
var r0 []*ec2.Subnet
if rf, ok := ret.Get(0).(func(context.Context, []string) []*ec2.Subnet); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*ec2.Subnet)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetTargetGroupByArn provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetTargetGroupByArn(_a0 context.Context, _a1 string) (*elbv2.TargetGroup, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.TargetGroup
if rf, ok := ret.Get(0).(func(context.Context, string) *elbv2.TargetGroup); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.TargetGroup)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetTargetGroupByName provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) GetTargetGroupByName(_a0 context.Context, _a1 string) (*elbv2.TargetGroup, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.TargetGroup
if rf, ok := ret.Get(0).(func(context.Context, string) *elbv2.TargetGroup); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.TargetGroup)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetVPC provides a mock function with given fields: _a0
func (_m *CloudAPI) GetVPC(_a0 *string) (*ec2.Vpc, error) {
ret := _m.Called(_a0)
var r0 *ec2.Vpc
if rf, ok := ret.Get(0).(func(*string) *ec2.Vpc); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.Vpc)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetVPCID provides a mock function with given fields:
func (_m *CloudAPI) GetVPCID() (*string, error) {
ret := _m.Called()
var r0 *string
if rf, ok := ret.Get(0).(func() *string); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*string)
}
}
var r1 error
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetWebACLSummary provides a mock function with given fields: ctx, resourceArn
func (_m *CloudAPI) GetWebACLSummary(ctx context.Context, resourceArn *string) (*waf.WebACLSummary, error) {
ret := _m.Called(ctx, resourceArn)
var r0 *waf.WebACLSummary
if rf, ok := ret.Get(0).(func(context.Context, *string) *waf.WebACLSummary); ok {
r0 = rf(ctx, resourceArn)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*waf.WebACLSummary)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok {
r1 = rf(ctx, resourceArn)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IsNodeHealthy provides a mock function with given fields: _a0
func (_m *CloudAPI) IsNodeHealthy(_a0 string) (bool, error) {
ret := _m.Called(_a0)
var r0 bool
if rf, ok := ret.Get(0).(func(string) bool); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ListListenersByLoadBalancer provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ListListenersByLoadBalancer(_a0 context.Context, _a1 string) ([]*elbv2.Listener, error) {
ret := _m.Called(_a0, _a1)
var r0 []*elbv2.Listener
if rf, ok := ret.Get(0).(func(context.Context, string) []*elbv2.Listener); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*elbv2.Listener)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyListenerWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyListenerWithContext(_a0 context.Context, _a1 *elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.ModifyListenerOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.ModifyListenerInput) *elbv2.ModifyListenerOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.ModifyListenerOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.ModifyListenerInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyLoadBalancerAttributesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyLoadBalancerAttributesWithContext(_a0 context.Context, _a1 *elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.ModifyLoadBalancerAttributesOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.ModifyLoadBalancerAttributesInput) *elbv2.ModifyLoadBalancerAttributesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.ModifyLoadBalancerAttributesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.ModifyLoadBalancerAttributesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyNetworkInterfaceAttributeWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyNetworkInterfaceAttributeWithContext(_a0 context.Context, _a1 *ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.ModifyNetworkInterfaceAttributeOutput
if rf, ok := ret.Get(0).(func(context.Context, *ec2.ModifyNetworkInterfaceAttributeInput) *ec2.ModifyNetworkInterfaceAttributeOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.ModifyNetworkInterfaceAttributeOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *ec2.ModifyNetworkInterfaceAttributeInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyRuleWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyRuleWithContext(_a0 context.Context, _a1 *elbv2.ModifyRuleInput) (*elbv2.ModifyRuleOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.ModifyRuleOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.ModifyRuleInput) *elbv2.ModifyRuleOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.ModifyRuleOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.ModifyRuleInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyTargetGroupAttributesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyTargetGroupAttributesWithContext(_a0 context.Context, _a1 *elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.ModifyTargetGroupAttributesOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.ModifyTargetGroupAttributesInput) *elbv2.ModifyTargetGroupAttributesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.ModifyTargetGroupAttributesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.ModifyTargetGroupAttributesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ModifyTargetGroupWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) ModifyTargetGroupWithContext(_a0 context.Context, _a1 *elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.ModifyTargetGroupOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.ModifyTargetGroupInput) *elbv2.ModifyTargetGroupOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.ModifyTargetGroupOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.ModifyTargetGroupInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RegisterTargetsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) RegisterTargetsWithContext(_a0 context.Context, _a1 *elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.RegisterTargetsOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.RegisterTargetsInput) *elbv2.RegisterTargetsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.RegisterTargetsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.RegisterTargetsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RevokeSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) RevokeSecurityGroupIngressWithContext(_a0 context.Context, _a1 *ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *ec2.RevokeSecurityGroupIngressOutput
if rf, ok := ret.Get(0).(func(context.Context, *ec2.RevokeSecurityGroupIngressInput) *ec2.RevokeSecurityGroupIngressOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*ec2.RevokeSecurityGroupIngressOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *ec2.RevokeSecurityGroupIngressInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SetIpAddressTypeWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) SetIpAddressTypeWithContext(_a0 context.Context, _a1 *elbv2.SetIpAddressTypeInput) (*elbv2.SetIpAddressTypeOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.SetIpAddressTypeOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.SetIpAddressTypeInput) *elbv2.SetIpAddressTypeOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.SetIpAddressTypeOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.SetIpAddressTypeInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SetSecurityGroupsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) SetSecurityGroupsWithContext(_a0 context.Context, _a1 *elbv2.SetSecurityGroupsInput) (*elbv2.SetSecurityGroupsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.SetSecurityGroupsOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.SetSecurityGroupsInput) *elbv2.SetSecurityGroupsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.SetSecurityGroupsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.SetSecurityGroupsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SetSubnetsWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) SetSubnetsWithContext(_a0 context.Context, _a1 *elbv2.SetSubnetsInput) (*elbv2.SetSubnetsOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *elbv2.SetSubnetsOutput
if rf, ok := ret.Get(0).(func(context.Context, *elbv2.SetSubnetsInput) *elbv2.SetSubnetsOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*elbv2.SetSubnetsOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *elbv2.SetSubnetsInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// StatusACM provides a mock function with given fields:
func (_m *CloudAPI) StatusACM() func() error {
ret := _m.Called()
var r0 func() error
if rf, ok := ret.Get(0).(func() func() error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(func() error)
}
}
return r0
}
// StatusEC2 provides a mock function with given fields:
func (_m *CloudAPI) StatusEC2() func() error {
ret := _m.Called()
var r0 func() error
if rf, ok := ret.Get(0).(func() func() error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(func() error)
}
}
return r0
}
// StatusELBV2 provides a mock function with given fields:
func (_m *CloudAPI) StatusELBV2() func() error {
ret := _m.Called()
var r0 func() error
if rf, ok := ret.Get(0).(func() func() error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(func() error)
}
}
return r0
}
// StatusIAM provides a mock function with given fields:
func (_m *CloudAPI) StatusIAM() func() error {
ret := _m.Called()
var r0 func() error
if rf, ok := ret.Get(0).(func() func() error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(func() error)
}
}
return r0
}
// TagResourcesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) TagResourcesWithContext(_a0 context.Context, _a1 *resourcegroupstaggingapi.TagResourcesInput) (*resourcegroupstaggingapi.TagResourcesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *resourcegroupstaggingapi.TagResourcesOutput
if rf, ok := ret.Get(0).(func(context.Context, *resourcegroupstaggingapi.TagResourcesInput) *resourcegroupstaggingapi.TagResourcesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*resourcegroupstaggingapi.TagResourcesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *resourcegroupstaggingapi.TagResourcesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UntagResourcesWithContext provides a mock function with given fields: _a0, _a1
func (_m *CloudAPI) UntagResourcesWithContext(_a0 context.Context, _a1 *resourcegroupstaggingapi.UntagResourcesInput) (*resourcegroupstaggingapi.UntagResourcesOutput, error) {
ret := _m.Called(_a0, _a1)
var r0 *resourcegroupstaggingapi.UntagResourcesOutput
if rf, ok := ret.Get(0).(func(context.Context, *resourcegroupstaggingapi.UntagResourcesInput) *resourcegroupstaggingapi.UntagResourcesOutput); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*resourcegroupstaggingapi.UntagResourcesOutput)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *resourcegroupstaggingapi.UntagResourcesInput) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// WebACLExists provides a mock function with given fields: ctx, webACLId
func (_m *CloudAPI) WebACLExists(ctx context.Context, webACLId *string) (bool, error) {
ret := _m.Called(ctx, webACLId)
var r0 bool
if rf, ok := ret.Get(0).(func(context.Context, *string) bool); ok {
r0 = rf(ctx, webACLId)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok {
r1 = rf(ctx, webACLId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
| {
r0 = ret.Get(0).([]*ec2.SecurityGroup)
} |
syncmanager.py | import asyncio
import traceback
from datetime import datetime
from neo.Network.core.header import Header
from typing import TYPE_CHECKING, List
from neo.Network.flightinfo import FlightInfo
from neo.Network.requestinfo import RequestInfo
from neo.Network.payloads.inventory import InventoryType
from neo.Network.common import msgrouter
from neo.Network.common.singleton import Singleton
from contextlib import suppress
from neo.Network.core.uint256 import UInt256
from neo.logging import log_manager
logger = log_manager.getLogger('syncmanager')
# log_manager.config_stdio([('syncmanager', 10)])
if TYPE_CHECKING:
from neo.Network.nodemanager import NodeManager
from neo.Network.payloads import Block
class SyncManager(Singleton):
HEADER_MAX_LOOK_AHEAD = 6000
HEADER_REQUEST_TIMEOUT = 5
BLOCK_MAX_CACHE_SIZE = 500
BLOCK_NETWORK_REQ_LIMIT = 500
BLOCK_REQUEST_TIMEOUT = 5
def init(self, nodemgr: 'NodeManager'):
self.nodemgr = nodemgr
self.controller = None
self.block_requests = dict() # header_hash:RequestInfo
self.header_request = None # type: RequestInfo
self.ledger = None
self.block_cache = []
self.header_cache = []
self.raw_block_cache = []
self.is_persisting_blocks = False
self.is_persisting_headers = False
self.keep_running = True
self.service_task = None
self.persist_task = None
self.health_task = None
msgrouter.on_headers += self.on_headers_received
msgrouter.on_block += self.on_block_received
async def start(self) -> None:
while not self.nodemgr.running:
await asyncio.sleep(0.1)
self.service_task = asyncio.create_task(self.run_service())
self.health_task = asyncio.create_task(self.block_health())
async def shutdown(self):
print("Shutting down sync manager...", end='')
self.keep_running = False
self.block_cache = []
shutdown_tasks = []
# start up errors can cause the tasks to not have been assigned,
# so we must validate their presence before feeding them to `gather`
if self.service_task:
shutdown_tasks.append(self.service_task)
if self.health_task:
shutdown_tasks.append(self.health_task)
if self.persist_task:
shutdown_tasks.append(self.persist_task)
await asyncio.gather(*shutdown_tasks, return_exceptions=True)
print("DONE")
async def block_health(self):
# TODO: move this to nodemanager, once the network in general supports ping/pong
# we can then make smarter choices by looking at individual nodes advancing or not and dropping just those
error_counter = 0
last_height = await self.ledger.cur_block_height()
while self.keep_running:
await asyncio.sleep(15)
cur_height = await self.ledger.cur_block_height()
if cur_height == last_height: | error_counter += 1
if error_counter == 3:
to_disconnect = list(map(lambda n: n, self.nodemgr.nodes))
logger.debug(f"Block height not advancing. Replacing nodes: {to_disconnect}")
for n in to_disconnect:
await self.nodemgr.replace_node(n)
else:
error_counter = 0
last_height = cur_height
async def run_service(self):
while self.keep_running:
await self.check_timeout()
await self.sync()
await asyncio.sleep(1)
async def sync(self) -> None:
await self.sync_header()
await self.sync_block()
await self.persist_headers()
if not self.is_persisting_blocks:
self.persist_task = asyncio.create_task(self.persist_blocks())
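# Request the next batch of headers only when no header request is outstanding and the header
# chain is not already too far ahead of the block chain.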
async def sync_header(self) -> None:
if self.header_request:
return
cur_header_height = await self.ledger.cur_header_height()
cur_block_height = await self.ledger.cur_block_height()
if cur_header_height - cur_block_height >= self.HEADER_MAX_LOOK_AHEAD:
return
node = self.nodemgr.get_next_node(cur_header_height + 1)
if not node:
# No connected nodes or no nodes with our height. We'll wait for node manager to resolve this
# or for the nodes to increase their height on the next produced block
return
self.header_request = RequestInfo(cur_header_height + 1)
self.header_request.add_new_flight(FlightInfo(node.nodeid, cur_header_height + 1))
cur_header_hash = await self.ledger.header_hash_by_height(cur_header_height)
await node.get_headers(hash_start=cur_header_hash)
logger.debug(f"Requested headers starting at {cur_header_height + 1} from node {node.nodeid_human}")
node.nodeweight.append_new_request_time()
async def persist_headers(self):
self.is_persisting_headers = True
if len(self.header_cache) > 0:
while self.keep_running:
try:
headers = self.header_cache.pop(0)
try:
await self.ledger.add_headers(headers)
except Exception as e:
print(traceback.format_exc())
await asyncio.sleep(0)
except IndexError:
# cache empty
break
# reset header_request so that a new header sync task can be added
self.header_request = None
logger.debug("Finished processing headers")
self.is_persisting_headers = False
async def sync_block(self) -> None:
# to simplify syncing, don't ask for more data if we still have requests in flight
if len(self.block_requests) > 0:
return
# the block cache might not have been fully processed, so we want to avoid asking for data we actually already have
best_block_height = await self.get_best_stored_block_height()
cur_header_height = await self.ledger.cur_header_height()
blocks_to_fetch = cur_header_height - best_block_height
if blocks_to_fetch <= 0:
return
block_cache_space = self.BLOCK_MAX_CACHE_SIZE - len(self.block_cache)
if block_cache_space <= 0:
return
if blocks_to_fetch > block_cache_space or blocks_to_fetch > self.BLOCK_NETWORK_REQ_LIMIT:
blocks_to_fetch = min(block_cache_space, self.BLOCK_NETWORK_REQ_LIMIT)
try:
best_node_height = max(map(lambda node: node.best_height, self.nodemgr.nodes))
except ValueError:
# max() raises ValueError when the node list is empty
return
node = self.nodemgr.get_next_node(best_node_height)
if not node:
# no nodes with our desired height. We'll wait for node manager to resolve this
# or for the nodes to increase their height on the next produced block
return
hashes = []
endheight = None
for i in range(1, blocks_to_fetch + 1):
next_block_height = best_block_height + i
if self.is_in_blockcache(next_block_height):
continue
if next_block_height > best_node_height:
break
next_header_hash = await self.ledger.header_hash_by_height(next_block_height)
if next_header_hash == UInt256.zero():
# we do not have enough headers to fill the block cache. That's fine, just return
break
endheight = next_block_height
hashes.append(next_header_hash)
self.add_block_flight_info(node.nodeid, next_block_height, next_header_hash)
if len(hashes) > 0:
logger.debug(f"Asking for blocks {best_block_height + 1} - {endheight} from {node.nodeid_human}")
await node.get_data(InventoryType.block, hashes)
node.nodeweight.append_new_request_time()
async def persist_blocks(self) -> None:
self.is_persisting_blocks = True
while self.keep_running:
try:
b = self.block_cache.pop(0)
raw_b = self.raw_block_cache.pop(0)
await self.ledger.add_block(raw_b)
await asyncio.sleep(0.001)
except IndexError:
# cache empty
break
self.is_persisting_blocks = False
async def check_timeout(self) -> None:
task1 = asyncio.create_task(self.check_header_timeout())
task2 = asyncio.create_task(self.check_block_timeout())
try:
await asyncio.gather(task1, task2)
except Exception:
logger.debug(traceback.format_exc())
async def check_header_timeout(self) -> None:
if not self.header_request:
# no data requests outstanding
return
last_flight_info = self.header_request.most_recent_flight()
now = datetime.utcnow().timestamp()
delta = now - last_flight_info.start_time
if delta < self.HEADER_REQUEST_TIMEOUT:
# we're still good on time
return
node = self.nodemgr.get_node_by_nodeid(last_flight_info.node_id)
if node:
logger.debug(f"Header timeout limit exceeded by {delta - self.HEADER_REQUEST_TIMEOUT:.2f}s for node {node.nodeid_human}")
cur_header_height = await self.ledger.cur_header_height()
if last_flight_info.height <= cur_header_height:
# it has already come in in the meantime
# reset so sync_header will request new headers
self.header_request = None
return
# punish node that is causing header_timeout and retry using another node
self.header_request.mark_failed_node(last_flight_info.node_id)
await self.nodemgr.add_node_timeout_count(last_flight_info.node_id)
# retry with a new node
node = self.nodemgr.get_node_with_min_failed_time(self.header_request)
if node is None:
# only happens if there are no nodes that have data matching our needed height
self.header_request = None
return
hash = await self.ledger.header_hash_by_height(last_flight_info.height - 1)
logger.debug(f"Retry requesting headers starting at {last_flight_info.height} from new node {node.nodeid_human}")
await node.get_headers(hash_start=hash)
# restart start_time of flight info or else we'll timeout too fast for the next node
self.header_request.add_new_flight(FlightInfo(node.nodeid, last_flight_info.height))
node.nodeweight.append_new_request_time()
async def check_block_timeout(self) -> None:
if len(self.block_requests) == 0:
# no data requests outstanding
return
now = datetime.utcnow().timestamp()
block_timeout_flights = dict()
# test for timeout
for block_hash, request_info in self.block_requests.items(): # type: _, RequestInfo
flight_info = request_info.most_recent_flight()
if now - flight_info.start_time > self.BLOCK_REQUEST_TIMEOUT:
block_timeout_flights[block_hash] = flight_info
if len(block_timeout_flights) == 0:
# no timeouts
return
# 1) we first filter out invalid requests as some might have come in by now
# 2) for each block_sync cycle we requested blocks in batches of max 500 per node, now when resending we try to
# create another batch
# 3) Blocks arrive one by one in 'inv' messages. In the block_sync cycle we created a FlightInfo object per
# requested block such that we can determine speed among others. If one block in a request times out all
# others for the same request will of course do as well (as they arrive in a linear fashion from the same node).
# As such we only want to tag the individual node once (per request) for being slower than our timeout threshold not 500 times.
remaining_requests = []
nodes_to_tag_for_timeout = set()
nodes_to_mark_failed = dict()
best_stored_block_height = await self.get_best_stored_block_height()
for block_hash, fi in block_timeout_flights.items(): # type: _, FlightInfo
nodes_to_tag_for_timeout.add(fi.node_id)
try:
request_info = self.block_requests[block_hash]
except KeyError:
# means on_block_received popped it off the list
# we don't have to retry for data anymore
continue
if fi.height <= best_stored_block_height:
with suppress(KeyError):
self.block_requests.pop(block_hash)
continue
nodes_to_mark_failed[request_info] = fi.node_id
remaining_requests.append((block_hash, fi.height, request_info))
for nodeid in nodes_to_tag_for_timeout:
await self.nodemgr.add_node_timeout_count(nodeid)
for request_info, node_id in nodes_to_mark_failed.items():
request_info.mark_failed_node(node_id)
# for the remaining requests that need to be queued again, we create new FlightInfo objects that use a new node
# and ask them in a single batch from that new node.
hashes = []
if len(remaining_requests) > 0:
# retry the batch with a new node
ri_first = remaining_requests[0][2]
ri_last = remaining_requests[-1][2]
# using `ri_last` because this has the highest block height and we want a node that supports that
node = self.nodemgr.get_node_with_min_failed_time(ri_last)
if not node:
return
for block_hash, height, ri in remaining_requests: # type: _, int, RequestInfo
ri.add_new_flight(FlightInfo(node.nodeid, height))
hashes.append(block_hash)
if len(hashes) > 0:
logger.debug(f"Block time out for blocks {ri_first.height} - {ri_last.height}. Trying again using new node {node.nodeid_human} {hashes[0]}")
await node.get_data(InventoryType.block, hashes)
node.nodeweight.append_new_request_time()
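# Returns 1 when the headers are accepted into the cache; negative values indicate why the batch
# was ignored (empty list, no outstanding request, unexpected start height, duplicate batch, or
# headers we already have).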
async def on_headers_received(self, from_nodeid, headers: List[Header]) -> int:
if len(headers) == 0:
return -1
if self.header_request is None:
return -2
height = headers[0].index
if height != self.header_request.height:
# received headers we did not ask for
return -3
logger.debug(f"Headers received {headers[0].index} - {headers[-1].index}")
if headers in self.header_cache:
return -4
cur_header_height = await self.ledger.cur_header_height()
if height <= cur_header_height:
return -5
self.header_cache.append(headers)
return 1
async def on_block_received(self, from_nodeid, block: 'Block', raw_block) -> None:
# TODO: take out raw_block and raw_block_cache once we can serialize a full block
# print(f"{block.index} {block.hash} received")
next_header_height = await self.ledger.cur_header_height() + 1
if block.index > next_header_height:
return
cur_block_height = await self.ledger.cur_block_height()
if block.index <= cur_block_height:
return
try:
ri = self.block_requests.pop(block.hash) # type: RequestInfo
fi = ri.flights.pop(from_nodeid) # type: FlightInfo
now = datetime.utcnow().timestamp()
delta_time = now - fi.start_time
speed = (block._size / 1024) / delta_time # KB/s
node = self.nodemgr.get_node_by_nodeid(fi.node_id)
if node:
node.nodeweight.append_new_speed(speed)
except KeyError:
# it's a block we did not ask for
# this can either be caused by rogue actors sending bad blocks
# or as a reply to our `get_data` on a broadcasted `inv` message by the node.
# (neo-cli nodes broadcast `inv` messages with their latest hash, we currently need to do a `get_data`
# and receive the full block to know what their best height is as we have no other mechanism (yet))
# TODO: remove once the network all start using neo-cli 2.10.1 or above which support ping/pong for height
sync_distance = block.index - cur_block_height
if sync_distance != 1:
return
# but if the distance is 1 we're in sync so we add the block anyway
# to avoid having the `sync_block` task request the same data again
# this is also necessary for neo-cli nodes because they maintain a TaskSession and refuse to send recently requested data
if not self.is_in_blockcache(block.index) and self.keep_running:
self.block_cache.append(block)
self.raw_block_cache.append(raw_block)
async def get_best_stored_block_height(self) -> int:
"""
Helper to return the highest block in our possession (either in ledger or in block_cache)
"""
best_block_cache_height = 0
if len(self.block_cache) > 0:
best_block_cache_height = self.block_cache[-1].index
ledger_height = await self.ledger.cur_block_height()
return max(ledger_height, best_block_cache_height)
def is_in_blockcache(self, block_height: int) -> bool:
for b in self.block_cache:
if b.index == block_height:
return True
else:
return False
def add_block_flight_info(self, nodeid, height, header_hash) -> None:
request_info = self.block_requests.get(header_hash, None) # type: RequestInfo
if request_info is None:
# no outstanding requests for this particular hash, so we create it
req = RequestInfo(height)
req.add_new_flight(FlightInfo(nodeid, height))
self.block_requests[header_hash] = req
else:
request_info.flights.update({nodeid: FlightInfo(nodeid, height)})
def reset(self) -> None:
self.header_request = None
self.block_requests = dict()
self.block_cache = []
self.raw_block_cache = [] | |
insert_sort.py | """
The loop invariant is: A[0, j-1] (the prefix A[0..j-1] is already sorted at the start of each iteration of the outer loop)
"""
from random_array import random
def insert_sort(arr):
if len(arr) < | == '__main__':
arr=random(10)
print(insert_sort(arr))
| 2:
raise Exception("array is too short to sort")
for j in range(1, len(arr)):
pivot = arr[j]
i = j - 1
while i >= 0 and arr[i] > pivot:
arr[i+1] = arr[i]
i -= 1
arr[i+1] = pivot
return arr
if __name__ |
town.rs | use super::{BuilderChain, BuilderMap, InitialMapBuilder, TileType, Position};
use std::collections::HashSet;
pub fn town_builder(new_depth: i32, _rng: &mut rltk::RandomNumberGenerator, width: i32, height: i32) -> BuilderChain {
let mut chain = BuilderChain::new(new_depth, width, height);
chain.start_with(TownBuilder::new());
chain
}
pub struct TownBuilder {}
impl InitialMapBuilder for TownBuilder {
#[allow(dead_code)]
fn build_map(&mut self, rng: &mut rltk::RandomNumberGenerator, build_data : &mut BuilderMap) {
self.build_rooms(rng, build_data);
}
}
enum BuildingTag {
Pub, Temple, Blacksmith, Clothier, Alchemist, PlayerHouse, Hovel, Abandoned, Unassigned
}
impl TownBuilder {
pub fn new() -> Box<TownBuilder> {
Box::new(TownBuilder{})
}
pub fn build_rooms(&mut self, rng: &mut rltk::RandomNumberGenerator, build_data : &mut BuilderMap) {
self.grass_layer(build_data);
self.water_and_piers(rng, build_data);
let (mut available_building_tiles, wall_gap_y) = self.town_walls(rng, build_data);
let mut buildings = self.buildings(rng, build_data, &mut available_building_tiles);
let doors = self.add_doors(rng, build_data, &mut buildings, wall_gap_y);
self.add_paths(build_data, &doors);
let exit_idx = build_data.map.xy_idx(build_data.width-5, wall_gap_y);
build_data.map.tiles[exit_idx] = TileType::DownStairs;
let building_size = self.sort_buildings(&buildings);
self.building_factory(rng, build_data, &buildings, &building_size);
// Make visible for screenshot
for t in build_data.map.visible_tiles.iter_mut() {
*t = true;
}
build_data.take_snapshot();
}
fn grass_layer(&mut self, build_data : &mut BuilderMap) {
// We'll start with a nice layer of grass
for t in build_data.map.tiles.iter_mut() {
*t = TileType::Grass;
}
build_data.take_snapshot();
}
fn water_and_piers(&mut self, rng: &mut rltk::RandomNumberGenerator, build_data : &mut BuilderMap) {
let mut n = (rng.roll_dice(1, 65535) as f32) / 65535f32;
let mut water_width : Vec<i32> = Vec::new();
for y in 0..build_data.height {
let n_water = (f32::sin(n) * 10.0) as i32 + 14 + rng.roll_dice(1, 6);
water_width.push(n_water);
n += 0.1;
for x in 0..n_water {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::DeepWater;
}
for x in n_water .. n_water+3 {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::ShallowWater;
}
}
build_data.take_snapshot();
// Add piers
for _i in 0..rng.roll_dice(1, 4)+6 {
let y = rng.roll_dice(1, build_data.height)-1;
for x in 2 + rng.roll_dice(1, 6) .. water_width[y as usize] + 4 {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::WoodFloor;
}
}
build_data.take_snapshot();
}
fn | (&mut self, rng: &mut rltk::RandomNumberGenerator, build_data : &mut BuilderMap)
-> (HashSet<usize>, i32)
{
let mut available_building_tiles : HashSet<usize> = HashSet::new();
let wall_gap_y = rng.roll_dice(1, build_data.height - 9) + 5;
for y in 1 .. build_data.height-2 {
if !(y > wall_gap_y-4 && y < wall_gap_y+4) {
let idx = build_data.map.xy_idx(30, y);
build_data.map.tiles[idx] = TileType::Wall;
build_data.map.tiles[idx-1] = TileType::Floor;
let idx_right = build_data.map.xy_idx(build_data.width - 2, y);
build_data.map.tiles[idx_right] = TileType::Wall;
for x in 31 .. build_data.width-2 {
let gravel_idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[gravel_idx] = TileType::Gravel;
if y > 2 && y < build_data.height-1 {
available_building_tiles.insert(gravel_idx);
}
}
} else {
for x in 30 .. build_data.width {
let road_idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[road_idx] = TileType::Road;
}
}
}
build_data.take_snapshot();
for x in 30 .. build_data.width-1 {
let idx_top = build_data.map.xy_idx(x, 1);
build_data.map.tiles[idx_top] = TileType::Wall;
let idx_bot = build_data.map.xy_idx(x, build_data.height-2);
build_data.map.tiles[idx_bot] = TileType::Wall;
}
build_data.take_snapshot();
(available_building_tiles, wall_gap_y)
}
fn buildings(&mut self,
rng: &mut rltk::RandomNumberGenerator,
build_data : &mut BuilderMap,
available_building_tiles : &mut HashSet<usize>)
-> Vec<(i32, i32, i32, i32)>
{
let mut buildings : Vec<(i32, i32, i32, i32)> = Vec::new();
let mut n_buildings = 0;
while n_buildings < 12 {
let bx = rng.roll_dice(1, build_data.map.width - 32) + 30;
let by = rng.roll_dice(1, build_data.map.height)-2;
let bw = rng.roll_dice(1, 8)+4;
let bh = rng.roll_dice(1, 8)+4;
let mut possible = true;
for y in by .. by+bh {
for x in bx .. bx+bw {
if x < 0 || x > build_data.width-1 || y < 0 || y > build_data.height-1 {
possible = false;
} else {
let idx = build_data.map.xy_idx(x, y);
if !available_building_tiles.contains(&idx) { possible = false; }
}
}
}
if possible {
n_buildings += 1;
buildings.push((bx, by, bw, bh));
for y in by .. by+bh {
for x in bx .. bx+bw {
let idx = build_data.map.xy_idx(x, y);
build_data.map.tiles[idx] = TileType::WoodFloor;
available_building_tiles.remove(&idx);
available_building_tiles.remove(&(idx+1));
available_building_tiles.remove(&(idx+build_data.width as usize));
available_building_tiles.remove(&(idx-1));
available_building_tiles.remove(&(idx-build_data.width as usize));
}
}
build_data.take_snapshot();
}
}
// Outline buildings
let mut mapclone = build_data.map.clone();
for y in 2..build_data.height-2 {
for x in 32..build_data.width-2 {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::WoodFloor {
let mut neighbors = 0;
if build_data.map.tiles[idx - 1] != TileType::WoodFloor { neighbors +=1; }
if build_data.map.tiles[idx + 1] != TileType::WoodFloor { neighbors +=1; }
if build_data.map.tiles[idx-build_data.width as usize] != TileType::WoodFloor { neighbors +=1; }
if build_data.map.tiles[idx+build_data.width as usize] != TileType::WoodFloor { neighbors +=1; }
if neighbors > 0 {
mapclone.tiles[idx] = TileType::Wall;
}
}
}
}
build_data.map = mapclone;
build_data.take_snapshot();
buildings
}
fn add_doors(&mut self,
rng: &mut rltk::RandomNumberGenerator,
build_data : &mut BuilderMap,
buildings: &mut Vec<(i32, i32, i32, i32)>,
wall_gap_y : i32)
-> Vec<usize>
{
let mut doors = Vec::new();
for building in buildings.iter() {
let door_x = building.0 + 1 + rng.roll_dice(1, building.2 - 3);
let cy = building.1 + (building.3 / 2);
let idx = if cy > wall_gap_y {
// Door on the north wall
build_data.map.xy_idx(door_x, building.1)
} else {
build_data.map.xy_idx(door_x, building.1 + building.3 - 1)
};
build_data.map.tiles[idx] = TileType::Floor;
build_data.spawn_list.push((idx, "Door".to_string()));
doors.push(idx);
}
build_data.take_snapshot();
doors
}
fn add_paths(&mut self,
build_data : &mut BuilderMap,
doors : &[usize])
{
let mut roads = Vec::new();
for y in 0..build_data.height {
for x in 0..build_data.width {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::Road {
roads.push(idx);
}
}
}
build_data.map.populate_blocked();
for door_idx in doors.iter() {
let mut nearest_roads : Vec<(usize, f32)> = Vec::new();
let door_pt = rltk::Point::new( *door_idx as i32 % build_data.map.width as i32, *door_idx as i32 / build_data.map.width as i32 );
for r in roads.iter() {
nearest_roads.push((
*r,
rltk::DistanceAlg::PythagorasSquared.distance2d(
door_pt,
rltk::Point::new( *r as i32 % build_data.map.width, *r as i32 / build_data.map.width )
)
));
}
nearest_roads.sort_by(|a,b| a.1.partial_cmp(&b.1).unwrap());
let destination = nearest_roads[0].0;
let path = rltk::a_star_search(*door_idx, destination, &build_data.map);
if path.success {
for step in path.steps.iter() {
let idx = *step as usize;
build_data.map.tiles[idx] = TileType::Road;
roads.push(idx);
}
}
build_data.take_snapshot();
}
}
fn sort_buildings(&mut self, buildings: &[(i32, i32, i32, i32)]) -> Vec<(usize, i32, BuildingTag)>
{
let mut building_size : Vec<(usize, i32, BuildingTag)> = Vec::new();
for (i,building) in buildings.iter().enumerate() {
building_size.push((
i,
building.2 * building.3,
BuildingTag::Unassigned
));
}
building_size.sort_by(|a,b| b.1.cmp(&a.1));
building_size[0].2 = BuildingTag::Pub;
building_size[1].2 = BuildingTag::Temple;
building_size[2].2 = BuildingTag::Blacksmith;
building_size[3].2 = BuildingTag::Clothier;
building_size[4].2 = BuildingTag::Alchemist;
building_size[5].2 = BuildingTag::PlayerHouse;
for b in building_size.iter_mut().skip(6) {
b.2 = BuildingTag::Hovel;
}
let last_index = building_size.len()-1;
building_size[last_index].2 = BuildingTag::Abandoned;
building_size
}
fn building_factory(&mut self,
rng: &mut rltk::RandomNumberGenerator,
build_data : &mut BuilderMap,
buildings: &[(i32, i32, i32, i32)],
building_index : &[(usize, i32, BuildingTag)])
{
for (i,building) in buildings.iter().enumerate() {
let build_type = &building_index[i].2;
match build_type {
BuildingTag::Pub => self.build_pub(&building, build_data, rng),
BuildingTag::Temple => self.build_temple(&building, build_data, rng),
BuildingTag::Blacksmith => self.build_smith(&building, build_data, rng),
BuildingTag::Clothier => self.build_clothier(&building, build_data, rng),
BuildingTag::Alchemist => self.build_alchemist(&building, build_data, rng),
BuildingTag::PlayerHouse => self.build_my_house(&building, build_data, rng),
BuildingTag::Hovel => self.build_hovel(&building, build_data, rng),
BuildingTag::Abandoned => self.build_abandoned_house(&building, build_data, rng),
_ => {}
}
}
}
fn random_building_spawn(
&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator,
to_place : &mut Vec<&str>,
player_idx : usize)
{
for y in building.1 .. building.1 + building.3 {
for x in building.0 .. building.0 + building.2 {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::WoodFloor && idx != player_idx && rng.roll_dice(1, 3)==1 && !to_place.is_empty() {
let entity_tag = to_place[0];
to_place.remove(0);
build_data.spawn_list.push((idx, entity_tag.to_string()));
}
}
}
}
fn build_pub(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place the player
build_data.starting_position = Some(Position{
x : building.0 + (building.2 / 2),
y : building.1 + (building.3 / 2)
});
let player_idx = build_data.map.xy_idx(building.0 + (building.2 / 2),
building.1 + (building.3 / 2));
// Place other items
let mut to_place : Vec<&str> = vec!["Barkeep", "Shady Salesman", "Patron", "Patron", "Keg",
"Table", "Chair", "Table", "Chair"];
self.random_building_spawn(building, build_data, rng, &mut to_place, player_idx);
}
fn build_temple(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Priest", "Parishioner", "Parishioner", "Chair", "Chair", "Candle", "Candle"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_smith(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Blacksmith", "Anvil", "Water Trough", "Weapon Rack", "Armor Stand"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_clothier(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Clothier", "Cabinet", "Table", "Loom", "Hide Rack"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_alchemist(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Alchemist", "Chemistry Set", "Dead Thing", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_my_house(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Mom", "Bed", "Cabinet", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_hovel(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
// Place items
let mut to_place : Vec<&str> = vec!["Peasant", "Bed", "Chair", "Table"];
self.random_building_spawn(building, build_data, rng, &mut to_place, 0);
}
fn build_abandoned_house(&mut self,
building: &(i32, i32, i32, i32),
build_data : &mut BuilderMap,
rng: &mut rltk::RandomNumberGenerator)
{
for y in building.1 .. building.1 + building.3 {
for x in building.0 .. building.0 + building.2 {
let idx = build_data.map.xy_idx(x, y);
if build_data.map.tiles[idx] == TileType::WoodFloor && idx != 0 && rng.roll_dice(1, 2)==1 {
build_data.spawn_list.push((idx, "Rat".to_string()));
}
}
}
}
}
| town_walls |
urlencoder.go | // The MIT License (MIT)
//
// Copyright (c) 2016 Fredy Wijaya
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"fmt"
"net/url"
"os"
)
func urlEncode(s string) string {
return url.QueryEscape(s)
}
func | () {
if len(os.Args) != 2 {
printUsage()
}
}
func printUsage() {
fmt.Println("Usage:", os.Args[0], "<string to encode>")
os.Exit(0)
}
func main() {
validateArgs()
s := os.Args[1]
fmt.Println(urlEncode(s))
}
| validateArgs |
text.py | from pathlib import Path
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.RegexpTokenizer(r"([A-Z][A-Z0-9.]+|[0-9]+[,.][0-9]+|[cdjlmnst]'|qu'|[\w'-]+|\S)")
class Sentence:
def __init__(self, text, nth):
self.text = text
self.nth = nth
def __len__(self):
return len(tokenizer.tokenize(self.text))
@property
def id(self):
return self.nth
def contains_pos(self, postag):
return False
def count_pos(self, postag):
|
def read_corpus(path):
corpus = []
with open(path) as input_stream:
content = input_stream.read()
sents = [item.replace("\n", " ") for item in sent_tokenize(content)]
for nth, sent in enumerate(sents):
corpus.append(Sentence(sent, nth))
return corpus
| return 0 |
_io.py | """
Credits:
This file was adapted from: https://github.com/pydata/xarray # noqa
Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa
"""
import contextlib
import os
import re
import threading
import warnings
import numpy as np
import rasterio
from packaging import version
from rasterio.errors import NotGeoreferencedWarning
from rasterio.vrt import WarpedVRT
from xarray import Dataset, IndexVariable
from xarray.backends.common import BackendArray
from xarray.backends.file_manager import CachingFileManager, FileManager
from xarray.backends.locks import SerializableLock
from xarray.coding import times, variables
from xarray.core import indexing
from xarray.core.dataarray import DataArray
from xarray.core.dtypes import maybe_promote
from xarray.core.utils import is_scalar
from xarray.core.variable import as_variable
from rioxarray.exceptions import RioXarrayError
from rioxarray.rioxarray import _generate_spatial_coords
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
NO_LOCK = contextlib.nullcontext()
class FileHandleLocal(threading.local):
"""
This contains the thread local ThreadURIManager
"""
def __init__(self): # pylint: disable=super-init-not-called
self.thread_manager = None # Initialises in each thread
class ThreadURIManager:
"""
This handles opening & closing file handles in each thread.
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._file_handle = None
@property
def file_handle(self):
"""
File handle returned by the opener.
"""
if self._file_handle is not None:
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
def close(self):
"""
Close file handle.
"""
if self._file_handle is not None:
self._file_handle.close()
self._file_handle = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
class URIManager(FileManager):
"""
The URI manager is used for lockless reading
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._local = FileHandleLocal()
def acquire(self, needs_lock=True):
if self._local.thread_manager is None:
self._local.thread_manager = ThreadURIManager(
self._opener, *self._args, mode=self._mode, kwargs=self._kwargs
)
return self._local.thread_manager.file_handle
@contextlib.contextmanager
def acquire_context(self, needs_lock=True):
try:
yield self.acquire(needs_lock=needs_lock)
except Exception:
self.close(needs_lock=needs_lock)
raise
def close(self, needs_lock=True):
if self._local.thread_manager is not None:
self._local.thread_manager.close()
self._local.thread_manager = None
def __del__(self):
self.close(needs_lock=False)
def __getstate__(self):
"""State for pickling."""
return (self._opener, self._args, self._mode, self._kwargs)
def __setstate__(self, state):
"""Restore from a pickle."""
opener, args, mode, kwargs = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
manager,
lock,
name,
vrt_params=None,
masked=False,
mask_and_scale=False,
unsigned=False,
):
self.manager = manager
self.lock = lock
self.masked = masked or mask_and_scale
self.mask_and_scale = mask_and_scale
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
self._dtype = None
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
dtype = _rasterio_to_numpy_dtype(dtypes)
# handle unsigned case
if mask_and_scale and unsigned and dtype.kind == "i":
self._dtype = np.dtype(f"u{dtype.itemsize}")
elif mask_and_scale and unsigned:
warnings.warn(
f"variable {name!r} has _Unsigned attribute but is not "
"of integer type. Ignoring attribute.",
variables.SerializationWarning,
stacklevel=3,
)
self._fill_value = riods.nodata
if self._dtype is None:
if self.masked:
self._dtype, self._fill_value = maybe_promote(dtype)
else:
self._dtype = dtype
@property
def dtype(self):
"""
Data type of the array
"""
return self._dtype
@property
def fill_value(self):
"""
Fill value of the array
"""
return self._fill_value
@property
def shape(self):
"""
Shape of the array
"""
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
        Parameters
        ----------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
if len(key) != 3:
raise RioXarrayError("rasterio datasets should always be 3D")
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - iii))
start = ikey
stop = ikey + 1
else:
start, stop = np.min(ikey), np.max(ikey) + 1
np_inds.append(ikey - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
def _getitem(self, key):
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window, masked=self.masked)
if self.masked:
out = np.ma.filled(out.astype(self.dtype), self.fill_value)
if self.mask_and_scale:
for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):
out[iii] = (
out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]
)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(value):
return np.fromstring(value.strip("{}"), dtype="float", sep=",")
def default(value):
return value.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}
return parsed_meta
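# Illustrative sketch (made-up tag values): vector-valued ENVI keys become numpy
# arrays, everything else just has the surrounding braces stripped, roughly:
# >>> _parse_envi({"wavelength": "{462.6, 555.0, 645.5}", "description": "{Example}"})
# {'wavelength': array([462.6, 555. , 645.5]), 'description': 'Example'}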
def _rasterio_to_numpy_dtype(dtypes):
"""Numpy dtype from first entry of rasterio dataset.dtypes"""
# rasterio has some special dtype names (complex_int16 -> np.complex64)
if dtypes[0] == "complex_int16":
dtype = np.dtype("complex64")
else:
dtype = np.dtype(dtypes[0])
return dtype
def _to_numeric(value):
"""
Convert the value to a number
"""
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
def _parse_tag(key, value):
# NC_GLOBAL is appended to tags with netcdf driver and is not really needed
key = key.split("NC_GLOBAL#")[-1]
if value.startswith("{") and value.endswith("}"):
try:
new_val = np.fromstring(value.strip("{}"), dtype="float", sep=",")
# pylint: disable=len-as-condition
value = new_val if len(new_val) else _to_numeric(value)
except ValueError:
value = _to_numeric(value)
else:
value = _to_numeric(value)
return key, value
def _parse_tags(tags):
parsed_tags = {}
for key, value in tags.items():
key, value = _parse_tag(key, value)
parsed_tags[key] = value
return parsed_tags
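# Illustrative sketch (hypothetical tag values): the NC_GLOBAL# prefix is dropped,
# brace-wrapped lists become numpy arrays, and plain numbers are coerced, roughly:
# >>> _parse_tags({"NC_GLOBAL#scale_factor": "0.01", "valid_range": "{0,255}"})
# {'scale_factor': 0.01, 'valid_range': array([  0., 255.])}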
NETCDF_DTYPE_MAP = {
0: object, # NC_NAT
1: np.byte, # NC_BYTE
2: np.char, # NC_CHAR
3: np.short, # NC_SHORT
4: np.int_, # NC_INT, NC_LONG
5: float, # NC_FLOAT
6: np.double, # NC_DOUBLE
7: np.ubyte, # NC_UBYTE
8: np.ushort, # NC_USHORT
9: np.uint, # NC_UINT
10: np.int64, # NC_INT64
11: np.uint64, # NC_UINT64
12: object, # NC_STRING
}
def _load_netcdf_attrs(tags, data_array):
"""
Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value
"""
for key, value in tags.items():
key, value = _parse_tag(key, value)
key_split = key.split("#")
if len(key_split) != 2:
continue
variable_name, attr_name = key_split
if variable_name in data_array.coords:
data_array.coords[variable_name].attrs.update({attr_name: value})
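# Illustrative sketch (hypothetical tag): a pair like
# {"time#units": "seconds since 1970-01-01"} attaches {"units": "..."} to an
# existing "time" coordinate; keys without exactly one "#" separator are skipped.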
def _load_netcdf_1d_coords(tags):
"""
Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)
"""
dim_names = tags.get("NETCDF_DIM_EXTRA")
if not dim_names:
return {}
dim_names = dim_names.strip("{}").split(",")
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f"NETCDF_DIM_{dim_name}_DEF")
if not dim_def:
continue
# pylint: disable=unused-variable
dim_size, dim_dtype = dim_def.strip("{}").split(",")
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f"NETCDF_DIM_{dim_name}_VALUES"].strip("{}")
coords[dim_name] = IndexVariable(
dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=",")
)
return coords
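# Worked sketch using the tag layout documented above (values from the docstring):
# tags = {"NETCDF_DIM_EXTRA": "{time}",
#         "NETCDF_DIM_time_DEF": "{2,6}",
#         "NETCDF_DIM_time_VALUES": "{0,872712.659688}"}
# _load_netcdf_1d_coords(tags) returns {"time": IndexVariable} holding the two
# float64 values 0 and 872712.659688 (dtype code 6 maps to np.double in
# NETCDF_DTYPE_MAP above).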
def build_subdataset_filter(group_names, variable_names):
"""
Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()
"""
variable_query = r"\w+"
if variable_names is not None:
if not isinstance(variable_names, (tuple, list)):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = rf"(?:{'|'.join(variable_names)})"
if group_names is not None:
if not isinstance(group_names, (tuple, list)):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = rf"(?:{'|'.join(group_names)})"
else:
return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"]))
return re.compile(
r"".join(
[r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"]
)
)
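# Usage sketch (subdataset string borrowed from the docstring example above):
# >>> flt = build_subdataset_filter("MODIS_Grid_2D", "sur_refl_b01_1")
# >>> bool(flt.match('HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":'
# ...                'MODIS_Grid_2D:sur_refl_b01_1'))
# True
# With group_names=None only the trailing variable name is constrained.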
def _rio_transform(riods):
"""
Get the transform from a rasterio dataset
    regardless of rasterio version.
"""
try:
return riods.transform
except AttributeError:
return riods.affine # rasterio < 1.0
def _get_rasterio_attrs(riods):
|
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
"""
Decide the datetime based on CF conventions
"""
if decode_timedelta is None:
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if decode_times and "since" in data_array[coord].attrs.get("units", ""):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(
as_variable(data_array[coord]), name=coord
)
elif (
decode_timedelta
and data_array[coord].attrs.get("units") in times.TIME_UNITS
):
time_var = times.CFTimedeltaCoder().decode(
as_variable(data_array[coord]), name=coord
)
if time_var is not None:
dimensions, data, attributes, encoding = variables.unpack_for_decoding(
time_var
)
data_array = data_array.assign_coords(
{
coord: IndexVariable(
dims=dimensions,
data=data,
attrs=attributes,
encoding=encoding,
)
}
)
return data_array
def _parse_driver_tags(riods, attrs, coords):
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for key, value in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:
coords[key] = ("band", np.asarray(value))
else:
attrs[key] = value
def _load_subdatasets(
riods,
group,
variable,
parse_coordinates,
chunks,
cache,
lock,
masked,
mask_and_scale,
decode_times,
decode_timedelta,
**open_kwargs,
):
"""
Load in rasterio subdatasets
"""
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if subdataset_filter is not None and not subdataset_filter.match(subdataset):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(
subdataset,
parse_coordinates=shape not in dim_groups and parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"),
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if shape not in dim_groups:
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if len(dim_groups) > 1:
dataset = [
Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()
]
elif not dim_groups:
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset
def _prepare_dask(result, riods, filename, chunks):
"""
Prepare the data for dask computations
"""
# pylint: disable=import-outside-toplevel
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
if chunks in (True, "auto"):
import dask
from dask.array.core import normalize_chunks
if version.parse(dask.__version__) < version.parse("0.18.0"):
msg = (
"Automatic chunking requires dask.__version__ >= 0.18.0 . "
f"You currently have version {dask.__version__}"
)
raise NotImplementedError(msg)
block_shape = (1,) + riods.block_shapes[0]
chunks = normalize_chunks(
chunks=(1, "auto", "auto"),
shape=(riods.count, riods.height, riods.width),
dtype=riods.dtypes[0],
previous_chunks=tuple((c,) for c in block_shape),
)
token = tokenize(filename, mtime, chunks)
name_prefix = f"open_rasterio-{token}"
return result.chunk(chunks, name_prefix=name_prefix, token=token)
def _handle_encoding(result, mask_and_scale, masked, da_name):
"""
Make sure encoding handled properly
"""
if "grid_mapping" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "grid_mapping", name=da_name)
if mask_and_scale:
if "scale_factor" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "scale_factor", name=da_name
)
if "add_offset" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "add_offset", name=da_name)
if masked:
if "_FillValue" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "_FillValue", name=da_name)
if "missing_value" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "missing_value", name=da_name
)
def open_rasterio(
filename,
parse_coordinates=None,
chunks=None,
cache=None,
lock=None,
masked=False,
mask_and_scale=False,
variable=None,
group=None,
default_name=None,
decode_times=True,
decode_timedelta=None,
**open_kwargs,
):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_times.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).
"""
parse_coordinates = True if parse_coordinates is None else parse_coordinates
masked = masked or mask_and_scale
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,
crs=vrt.crs.to_string() if vrt.crs else None,
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock in (True, None):
lock = RASTERIO_LOCK
elif lock is False:
lock = NO_LOCK
# ensure default for sharing is False
# ref https://github.com/mapbox/rasterio/issues/1504
open_kwargs["sharing"] = open_kwargs.get("sharing", False)
with warnings.catch_warnings(record=True) as rio_warnings:
if lock is not NO_LOCK:
manager = CachingFileManager(
rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs
)
else:
manager = URIManager(rasterio.open, filename, mode="r", kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
# raise the NotGeoreferencedWarning if applicable
for rio_warning in captured_warnings:
if not riods.subdatasets or not isinstance(
rio_warning.message, NotGeoreferencedWarning
):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
# open the subdatasets if they exist
if riods.subdatasets:
return _load_subdatasets(
riods=riods,
group=group,
variable=variable,
parse_coordinates=parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
# parse tags & load alternate coords
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if f"NETCDF_DIM_{coord}" in attrs:
coord_name = coord
attrs.pop(f"NETCDF_DIM_{coord}")
break
else:
coord_name = "band"
coords[coord_name] = np.asarray(riods.indexes)
# Get geospatial coordinates
if parse_coordinates:
coords.update(
_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)
)
unsigned = False
encoding = {}
if mask_and_scale and "_Unsigned" in attrs:
unsigned = variables.pop_to(attrs, encoding, "_Unsigned") == "true"
if masked:
encoding["dtype"] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop("NETCDF_VARNAME", default_name)
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(
manager,
lock,
name=da_name,
vrt_params=vrt_params,
masked=masked,
mask_and_scale=mask_and_scale,
unsigned=unsigned,
)
)
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(
data=data, dims=(coord_name, "y", "x"), coords=coords, attrs=attrs, name=da_name
)
result.encoding = encoding
    # update attributes from NetCDF attributes
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(
result, decode_times=decode_times, decode_timedelta=decode_timedelta
)
# make sure the _FillValue is correct dtype
if "_FillValue" in attrs:
attrs["_FillValue"] = result.dtype.type(attrs["_FillValue"])
# handle encoding
_handle_encoding(result, mask_and_scale, masked, da_name)
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if hasattr(riods, "crs") and riods.crs:
result.rio.write_crs(riods.crs, inplace=True)
if chunks is not None:
result = _prepare_dask(result, riods, filename, chunks)
# Make the file closeable
result.set_close(manager.close)
result.rio._manager = manager
# add file path to encoding
result.encoding["source"] = riods.name
result.encoding["rasterio_dtype"] = str(riods.dtypes[0])
return result
| """
Get rasterio specific attributes
"""
# pylint: disable=too-many-branches
# Add rasterio attributes
attrs = _parse_tags(riods.tags(1))
if hasattr(riods, "nodata") and riods.nodata is not None:
# The nodata values for the raster bands
attrs["_FillValue"] = riods.nodata
if hasattr(riods, "scales"):
# The scale values for the raster bands
if len(set(riods.scales)) > 1:
attrs["scales"] = riods.scales
warnings.warn(
"Offsets differ across bands. The 'scale_factor' attribute will "
"not be added. See the 'scales' attribute."
)
else:
attrs["scale_factor"] = riods.scales[0]
if hasattr(riods, "offsets"):
# The offset values for the raster bands
if len(set(riods.offsets)) > 1:
attrs["offsets"] = riods.offsets
warnings.warn(
"Offsets differ across bands. The 'add_offset' attribute will "
"not be added. See the 'offsets' attribute."
)
else:
attrs["add_offset"] = riods.offsets[0]
if hasattr(riods, "descriptions") and any(riods.descriptions):
if len(set(riods.descriptions)) == 1:
attrs["long_name"] = riods.descriptions[0]
else:
# Descriptions for each dataset band
attrs["long_name"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
if len(riods.units) == 1:
attrs["units"] = riods.units[0]
else:
attrs["units"] = riods.units
return attrs |
generic.py | from flask import g, abort, redirect, request, render_template, send_from_directory, url_for
from http import HTTPStatus
from os import getenv, path
from lnbits.core import core_app
from lnbits.decorators import check_user_exists, validate_uuids
from lnbits.settings import SERVICE_FEE
from ..crud import (
create_account,
get_user,
update_user_extension,
create_wallet,
delete_wallet,
)
@core_app.route("/favicon.ico")
def favicon():
return send_from_directory(path.join(core_app.root_path, "static"), "favicon.ico")
@core_app.route("/")
def home():
return render_template("core/index.html", lnurl=request.args.get("lightning", None))
@core_app.route("/extensions")
@validate_uuids(["usr"], required=True)
@check_user_exists()
def extensions():
extension_to_enable = request.args.get("enable", type=str)
extension_to_disable = request.args.get("disable", type=str)
if extension_to_enable and extension_to_disable:
abort(HTTPStatus.BAD_REQUEST, "You can either `enable` or `disable` an extension.")
if extension_to_enable:
update_user_extension(user_id=g.user.id, extension=extension_to_enable, active=1)
elif extension_to_disable:
update_user_extension(user_id=g.user.id, extension=extension_to_disable, active=0)
return render_template("core/extensions.html", user=get_user(g.user.id))
@core_app.route("/wallet")
@validate_uuids(["usr", "wal"])
def wallet():
user_id = request.args.get("usr", type=str)
wallet_id = request.args.get("wal", type=str)
wallet_name = request.args.get("nme", type=str)
service_fee = int(SERVICE_FEE) if int(SERVICE_FEE) == SERVICE_FEE else SERVICE_FEE
# just wallet_name: create a new user, then create a new wallet for user with wallet_name
# just user_id: return the first user wallet or create one if none found (with default wallet_name)
# user_id and wallet_name: create a new wallet for user with wallet_name
# user_id and wallet_id: return that wallet if user is the owner
# nothing: create everything
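    # Illustrative requests (ids are placeholders): GET /wallet?usr=<user-id>
    # opens that user's first wallet (creating one if none exists), while
    # GET /wallet?usr=<user-id>&nme=Savings creates and opens a wallet named "Savings".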
if not user_id:
user = get_user(create_account().id)
else:
user = get_user(user_id) or abort(HTTPStatus.NOT_FOUND, "User does not exist.")
allowed_users = getenv("LNBITS_ALLOWED_USERS", "all")
if allowed_users != "all" and user_id not in allowed_users.split(","):
        abort(HTTPStatus.UNAUTHORIZED, "User not authorized.")
if not wallet_id:
if user.wallets and not wallet_name:
wallet = user.wallets[0]
else:
wallet = create_wallet(user_id=user.id, wallet_name=wallet_name)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
if wallet_id not in user.wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
return render_template("core/wallet.html", user=user, wallet=user.get_wallet(wallet_id), service_fee=service_fee)
@core_app.route("/deletewallet")
@validate_uuids(["usr", "wal"], required=True)
@check_user_exists()
def | ():
wallet_id = request.args.get("wal", type=str)
user_wallet_ids = g.user.wallet_ids
if wallet_id not in user_wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
else:
delete_wallet(user_id=g.user.id, wallet_id=wallet_id)
user_wallet_ids.remove(wallet_id)
if user_wallet_ids:
return redirect(url_for("core.wallet", usr=g.user.id, wal=user_wallet_ids[0]))
return redirect(url_for("core.home"))
| deletewallet |
prometheus_test.go | /* Copyright 2016 Csergő Bálint github.com/deathowl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prom
// Forked from github.com/deathowl
// Some parts of this file have been modified to make it functional in this package
import (
"github.com/go-chassis/go-chassis/core/config"
"github.com/go-chassis/go-chassis/core/config/model"
m "github.com/go-chassis/go-chassis/metrics"
"github.com/rcrowley/go-metrics"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
var (
makeCounterFunc = func() interface{} { return metrics.NewCounter() }
makeTimerFunc = func() interface{} { return metrics.NewTimer() }
makeGaugeFunc = func() interface{} { return metrics.NewGauge() }
makeGaugeFloat64Func = func() interface{} { return metrics.NewGaugeFloat64() }
makeHistogramFunc = func() interface{} { return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)) }
makeMeterFunc = func() interface{} { return metrics.NewMeter() }
)
func TestPrometheusSinker_UpdatePrometheusMetrics(t *testing.T) {
config.GlobalDefinition = new(model.GlobalCfg)
config.GlobalDefinition.Cse.Metrics.EnableGoRuntimeMetrics = false
prometheusSinker := GetPrometheusSinker(m.GetSystemRegistry())
t.Log("registering various metric types to go-metrics registry")
c, _ := m.GetSystemRegistry().GetOrRegister("server.attempts", makeCounterFunc).(metrics.Counter)
c.Inc(1)
c, _ = m.GetSystemRegistry().GetOrRegister("server.successes", makeCounterFunc).(metrics.Counter)
c.Inc(1)
timer, _ := m.GetSystemRegistry().GetOrRegister("server.totalDuration", makeTimerFunc).(metrics.Timer)
timer.Update(time.Millisecond * 10)
g, _ := m.GetSystemRegistry().GetOrRegister("server.memory", makeGaugeFunc).(metrics.Gauge)
g.Update(1200)
gFloat := m.GetSystemRegistry().GetOrRegister("server.thread", makeGaugeFloat64Func).(metrics.GaugeFloat64)
gFloat.Update(341)
h, _ := m.GetSystemRegistry().GetOrRegister("server.requestDuration", makeHistogramFunc).(metrics.Histogram)
h.Update(23)
meter, _ := m.GetSystemRegistry().GetOrRegister("foo", makeMeterFunc).(metrics.Meter)
meter.Mark(12)
prometheusSinker.UpdatePrometheusMetricsOnce()
metricsFamilies, _ := m.GetSystemPrometheusRegistry().Gather()
assert.Equal(t, len(metricsFamilies), 6)
} | Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, |
test_bitmex_trio_websocket.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `bitmex_trio_websocket` package."""
from bitmex_trio_websocket.exceptions import BitMEXWebsocketApiError
import os
from random import random
import pytest
from async_generator import aclosing
import pendulum
from trio_websocket import ConnectionRejected, WebSocketConnection, ConnectionClosed
from bitmex_trio_websocket import open_bitmex_websocket, BitMEXWebsocket
from slurry import Pipeline
from slurry.sections import Group
async def test_auth_fail():
with pytest.raises(ConnectionRejected):
async with open_bitmex_websocket('testnet', 'abcd1234', 'efgh5678') as bws:
async with aclosing(bws.listen('position')) as aiter:
async for item in aiter:
assert False
# async def test_auth_success():
# bitmex_websocket = BitMEXWebsocket()
# try:
# async with bitmex_websocket._connect('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET'), False):
# async with aclosing(bitmex_websocket._websocket_parser()) as agen:
# assert isinstance(bitmex_websocket._ws, WebSocketConnection)
# await bitmex_websocket._ws.send_message(ujson.dumps({'op': 'subscribe', 'args': ['margin', 'position', 'order', 'execution']}))
# async for msg in agen:
# assert isinstance(msg, dict)
# assert 'action' in msg
# await bitmex_websocket._ws.aclose()
# except ConnectionClosed as e:
# assert e.reason.code == 1000
# async def test_multisymbol():
# bitmex_websocket = BitMEXWebsocket()
# try:
# async with bitmex_websocket._connect('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET'), False):
# count = 0
# async with aclosing(bitmex_websocket._websocket_parser()) as agen:
# await bitmex_websocket._ws.send_message(ujson.dumps({'op': 'subscribe', 'args': ['instrument:XBTUSD', 'instrument:ETHUSD']}))
# async for msg in agen:
# assert isinstance(msg, dict)
# count += 1
# if count >= 3:
# print(count)
# await bitmex_websocket._ws.aclose()
# except ConnectionClosed as e:
# assert e.reason.code == 1000
# async def test_context_manager():
# async with open_bitmex_websocket('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET')) as bitmex_ws:
# count = 0
# async with aclosing(bitmex_ws.listen('instrument', 'XBTUSD')) as agen:
# async for msg in agen:
# count += 1
# if count >= 3:
# break
# assert True
async def test_orderbook():
async with open_bitmex_websocket('testnet') as bws:
async with aclosing(bws.listen('orderBookL2', 'XBTUSD')) as agen:
async for msg in agen:
assert len(msg) == 2
break
async def test_network_argument():
async with open_bitmex_websocket('mainnet') as s:
assert getattr(s, 'listen', None) is not None
async with open_bitmex_websocket('testnet') as s:
assert getattr(s, 'listen', None) is not None
with pytest.raises(ValueError): | async with open_bitmex_websocket('testnet') as ws:
async with Pipeline.create(
Group(2, ws.listen('funding'))
) as pipeline, pipeline.tap() as aiter:
async for bundle in aiter:
for funding in bundle:
funding['timestamp'] = pendulum.parse(funding['timestamp'])
funding['fundingInterval'] = pendulum.parse(funding['fundingInterval'])
assert isinstance(bundle, tuple)
assert len(bundle) > 1
return
assert False, 'This should not happen.'
async def test_spam_requests():
with pytest.raises(BitMEXWebsocketApiError):
async with open_bitmex_websocket('testnet') as ws:
async with Pipeline.create(
ws.listen('instrument', 'PAROTCOIN')
) as pipeline, pipeline.tap() as aiter:
async for bundle in aiter:
break | async with open_bitmex_websocket('incorrect') as s:
assert False, 'BitMEXWebsocket.connect accepted erroneous network argument.'
async def test_funding(): |
wave.py | """Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
                         compression type ('not compressed' for linear samples)
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                      -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import builtins
__all__ = ["open", "openfp", "Error", "Wave_read", "Wave_write"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
import audioop
import struct
import sys
from chunk import Chunk
from collections import namedtuple
_wave_params = namedtuple('_wave_params',
'nchannels sampwidth framerate nframes comptype compname')
class Wave_read:
"""Variables used in this class:
    These variables are available to the user through appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != b'RIFF':
raise Error('file does not start with RIFF id')
if self._file.read(4) != b'WAVE':
raise Error('not a WAVE file')
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == b'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == b'data':
if not self._fmt_chunk_read:
raise Error('data chunk before fmt chunk')
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error('fmt chunk and/or data chunk missing')
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return _wave_params(self.getnchannels(), self.getsampwidth(),
self.getframerate(), self.getnframes(),
self.getcomptype(), self.getcompname())
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error('position not in range')
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return b''
data = self._data_chunk.read(nframes * self._framesize)
if self._sampwidth != 1 and sys.byteorder == 'big':
|
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack_from('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack_from('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error('unknown format: %r' % (wFormatTag,))
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if nchannels < 1:
raise Error('bad # of channels')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if sampwidth < 1 or sampwidth > 4:
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error('sample width not set')
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if framerate <= 0:
raise Error('bad frame rate')
self._framerate = int(round(framerate))
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if comptype not in ('NONE',):
raise Error('unsupported compression type')
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error('not all parameters set')
return _wave_params(self._nchannels, self._sampwidth, self._framerate,
self._nframes, self._comptype, self._compname)
def setmark(self, id, pos, name):
raise Error('setmark() not supported')
def getmark(self, id):
raise Error('no marks')
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
if not isinstance(data, (bytes, bytearray)):
data = memoryview(data).cast('B')
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth != 1 and sys.byteorder == 'big':
data = audioop.byteswap(data, self._sampwidth)
self._file.write(data)
self._datawritten += len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
try:
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error('# channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('sampling rate not specified')
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write(b'RIFF')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
try:
self._form_length_pos = self._file.tell()
except (AttributeError, OSError):
self._form_length_pos = None
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, b'data'))
if self._form_length_pos is not None:
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility
| data = audioop.byteswap(data, self._sampwidth) |
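A minimal usage sketch of the reader/writer API implemented above (an editorial illustration, not part of the dataset row): it assumes an uncompressed PCM file named 'in.wav' exists and copies it to 'out.wav' through the stdlib wave module.
import wave
with wave.open('in.wav', 'rb') as r:
    params = r.getparams()                   # namedtuple: nchannels, sampwidth, framerate, nframes, comptype, compname
    frames = r.readframes(r.getnframes())    # raw PCM frame bytes
with wave.open('out.wav', 'wb') as w:
    w.setparams(params)                      # parameters must be set before the first write
    w.writeframes(frames)                    # writeframes() patches the header if the data length changes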
analyticsflow.go | package platformclientv2
import (
"github.com/leekchan/timeutil"
"encoding/json"
"strconv"
"strings"
)
// Analyticsflow
type Analyticsflow struct {
// EndingLanguage - Flow ending language, e.g. en-us
EndingLanguage *string `json:"endingLanguage,omitempty"`
// EntryReason - The particular entry reason for this flow, e.g. an address, userId, or flowId
EntryReason *string `json:"entryReason,omitempty"`
// EntryType - The entry type for this flow, e.g. dnis, dialer, agent, flow, or direct
EntryType *string `json:"entryType,omitempty"`
// ExitReason - The exit reason for this flow, e.g. DISCONNECT
ExitReason *string `json:"exitReason,omitempty"`
// FlowId - The unique identifier of this flow
FlowId *string `json:"flowId,omitempty"`
// FlowName - The name of this flow at the time of flow execution
FlowName *string `json:"flowName,omitempty"`
// FlowType - The type of this flow
FlowType *string `json:"flowType,omitempty"`
// FlowVersion - The version of this flow
FlowVersion *string `json:"flowVersion,omitempty"`
// IssuedCallback - Flag indicating whether the flow issued a callback
IssuedCallback *bool `json:"issuedCallback,omitempty"`
// RecognitionFailureReason - The recognition failure reason causing to exit/disconnect
RecognitionFailureReason *string `json:"recognitionFailureReason,omitempty"`
// StartingLanguage - Flow starting language, e.g. en-us
StartingLanguage *string `json:"startingLanguage,omitempty"`
// TransferTargetAddress - The address of a flow transfer target, e.g. a phone number, an email address, or a queueId
TransferTargetAddress *string `json:"transferTargetAddress,omitempty"`
// TransferTargetName - The name of a flow transfer target
TransferTargetName *string `json:"transferTargetName,omitempty"`
// TransferType - The type of transfer for flows that ended with a transfer
TransferType *string `json:"transferType,omitempty"`
// Outcomes - Flow outcomes
Outcomes *[]Analyticsflowoutcome `json:"outcomes,omitempty"`
}
func (o *Analyticsflow) MarshalJSON() ([]byte, error) {
// Redundant initialization to avoid unused import errors for models with no Time values
_ = timeutil.Timedelta{}
type Alias Analyticsflow
return json.Marshal(&struct {
EndingLanguage *string `json:"endingLanguage,omitempty"`
EntryReason *string `json:"entryReason,omitempty"`
EntryType *string `json:"entryType,omitempty"`
ExitReason *string `json:"exitReason,omitempty"`
FlowId *string `json:"flowId,omitempty"`
FlowName *string `json:"flowName,omitempty"`
FlowType *string `json:"flowType,omitempty"`
FlowVersion *string `json:"flowVersion,omitempty"`
IssuedCallback *bool `json:"issuedCallback,omitempty"`
RecognitionFailureReason *string `json:"recognitionFailureReason,omitempty"`
StartingLanguage *string `json:"startingLanguage,omitempty"`
TransferTargetAddress *string `json:"transferTargetAddress,omitempty"`
TransferTargetName *string `json:"transferTargetName,omitempty"`
TransferType *string `json:"transferType,omitempty"`
Outcomes *[]Analyticsflowoutcome `json:"outcomes,omitempty"`
*Alias
}{
EndingLanguage: o.EndingLanguage,
EntryReason: o.EntryReason,
EntryType: o.EntryType,
ExitReason: o.ExitReason,
FlowId: o.FlowId,
FlowName: o.FlowName,
FlowType: o.FlowType,
FlowVersion: o.FlowVersion,
IssuedCallback: o.IssuedCallback,
RecognitionFailureReason: o.RecognitionFailureReason,
StartingLanguage: o.StartingLanguage,
TransferTargetAddress: o.TransferTargetAddress,
TransferTargetName: o.TransferTargetName,
TransferType: o.TransferType,
Outcomes: o.Outcomes,
Alias: (*Alias)(o),
})
}
func (o *Analyticsflow) UnmarshalJSON(b []byte) error {
var AnalyticsflowMap map[string]interface{}
err := json.Unmarshal(b, &AnalyticsflowMap)
if err != nil {
return err
}
if EndingLanguage, ok := AnalyticsflowMap["endingLanguage"].(string); ok {
o.EndingLanguage = &EndingLanguage
}
if EntryReason, ok := AnalyticsflowMap["entryReason"].(string); ok {
o.EntryReason = &EntryReason
}
if EntryType, ok := AnalyticsflowMap["entryType"].(string); ok |
if ExitReason, ok := AnalyticsflowMap["exitReason"].(string); ok {
o.ExitReason = &ExitReason
}
if FlowId, ok := AnalyticsflowMap["flowId"].(string); ok {
o.FlowId = &FlowId
}
if FlowName, ok := AnalyticsflowMap["flowName"].(string); ok {
o.FlowName = &FlowName
}
if FlowType, ok := AnalyticsflowMap["flowType"].(string); ok {
o.FlowType = &FlowType
}
if FlowVersion, ok := AnalyticsflowMap["flowVersion"].(string); ok {
o.FlowVersion = &FlowVersion
}
if IssuedCallback, ok := AnalyticsflowMap["issuedCallback"].(bool); ok {
o.IssuedCallback = &IssuedCallback
}
if RecognitionFailureReason, ok := AnalyticsflowMap["recognitionFailureReason"].(string); ok {
o.RecognitionFailureReason = &RecognitionFailureReason
}
if StartingLanguage, ok := AnalyticsflowMap["startingLanguage"].(string); ok {
o.StartingLanguage = &StartingLanguage
}
if TransferTargetAddress, ok := AnalyticsflowMap["transferTargetAddress"].(string); ok {
o.TransferTargetAddress = &TransferTargetAddress
}
if TransferTargetName, ok := AnalyticsflowMap["transferTargetName"].(string); ok {
o.TransferTargetName = &TransferTargetName
}
if TransferType, ok := AnalyticsflowMap["transferType"].(string); ok {
o.TransferType = &TransferType
}
if Outcomes, ok := AnalyticsflowMap["outcomes"].([]interface{}); ok {
OutcomesString, _ := json.Marshal(Outcomes)
json.Unmarshal(OutcomesString, &o.Outcomes)
}
return nil
}
// String returns a JSON representation of the model
func (o *Analyticsflow) String() string {
j, _ := json.Marshal(o)
str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\u`, `\u`, -1))
return str
}
| {
o.EntryType = &EntryType
} |
logic.js | // GLOBAL VARIABLES
// ==============================================================
var wordBank = [
'alpha',
'bravo',
'charlie',
'delta',
'echo',
'foxtrot',
'golf',
'hotel',
'india',
'juliett',
'kilo',
'lima',
'mike', | 'papa',
'quebec',
'romeo',
'sierra',
'tango',
'uniform',
'victor',
'whiskey',
'x-ray',
'yankee',
'zulu'
];
var targetWord = "";
var wrongLetters = [];
var lettersInWord = [];
var blanksAndLetters = [];
var frequency = [];
// Game Counters
var numBlanks = 0;
var guessesLeft = 9;
var winCount = 0;
var lossCount = 0;
// FUNCTIONS
// ==============================================================
function startGame()
{
targetWord = randomWord();
lettersInWord = targetWord.split("");
numBlanks = lettersInWord.length;
// Reset
guessesLeft = 9;
blanksAndLetters = [];
wrongLetters = [];
// Populate Blanks and Successes with right number of blanks
for(var i = 0; i<numBlanks; i++)
{
blanksAndLetters.push("_");
}
// Special Case for X
if(targetWord == 'x-ray')
{
blanksAndLetters[1] = '-';
}
// Change HTML to reflect game state
document.getElementById('targetWord').innerHTML = blanksAndLetters.join(" ");
document.getElementById('wrongLetters').innerHTML = wrongLetters.join(" ");
document.getElementById('numGuesses').innerHTML = guessesLeft;
document.getElementById('winCounter').innerHTML = winCount;
document.getElementById('lossCounter').innerHTML = lossCount;
}
function checkLetter(letter)
{
var regEx = /[a-z]/; // Chapter 9 Eloquent Javascript
if(regEx.test(letter))//Tests if letter is a through z
{
var isLetterInWord = false;
// Check if letter exists in word
for(var i = 0; i<targetWord.length; i++)
{
if(letter == targetWord[i])
{
isLetterInWord = true;
}
}
if(isLetterInWord)
{
for(var j = 0; j<numBlanks; j++)
{
if(letter == targetWord[j])
{
blanksAndLetters[j] = letter;
//console.log(blanksAndLetters);
}
}
}
else {
// Checks to see if this letter is already in
// WrongLetters array
// If not found, add it
if(wrongLetters.indexOf(letter) == -1)
{
wrongLetters.push(letter);
guessesLeft--;
}
// If already guessed, do nothing
// If already guessed, do nothing
}
}
}
function completedRound()
{
// Determine win/loss condition
if(blanksAndLetters.toString() == lettersInWord.toString())
{
playAudio();
winCount++;
setTimeout(startGame, 1500);
}
else if(guessesLeft == 0)
{
playAudio();
blanksAndLetters = targetWord.split("");
lossCount++;
setTimeout(startGame, 1500);
}
//Update Board to reflect changes
document.getElementById('numGuesses').innerHTML = guessesLeft;
document.getElementById('targetWord').innerHTML = blanksAndLetters.join(" ").toUpperCase();
document.getElementById('wrongLetters').innerHTML = wrongLetters.join(" ");
}
function playAudio()
{
var filePath = './assets/sounds/' + targetWord + '.mp3';
var audio = new Audio(filePath);
audio.play();
}
function randomWord()
{
var word = wordBank[Math.floor(Math.random() * wordBank.length)];
if(frequency.indexOf(word) != -1){
word = randomWord();
}
else{
frequency.push(word);
}
if(frequency.length > 4){
frequency.shift();
}
return word;
}
// MAIN PROCESS
// ==============================================================
startGame();
// Register Key Clicks
document.onkeyup = function(event)
{
var letterGuessed = String.fromCharCode(event.keyCode).toLowerCase();
checkLetter(letterGuessed);
completedRound();
}
// TESTING/DEBUGGING
// ==============================================================
/*
console.log(targetWord);
console.log(lettersInWord);
console.log(numBlanks);
console.log(blanksAndLetters);
*/ | 'november',
'oscar', |
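An editorial Python sketch (not part of logic.js) of the same no-recent-repeat selection that randomWord() implements with its frequency array: re-draw until the pick is not among the last four words, then remember it.
import random
from collections import deque
recent = deque(maxlen=4)   # keeps only the last four picks, like the shifted frequency array
def random_word(word_bank):
    word = random.choice(word_bank)
    while word in recent:                 # loop instead of the recursive re-roll used in logic.js
        word = random.choice(word_bank)
    recent.append(word)
    return word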
python_code.py | print("Hello Github!") | ||
mutex.rs | use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut, Drop};
use abi;
#[repr(transparent)]
struct MutexInner(u32);
pub struct MutexGuard<'a, T: 'a> {
pfex_item: &'a MutexInner,
data: &'a mut T,
}
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
pub struct Mutex<T> {
pfex_item: MutexInner,
data: UnsafeCell<T>,
}
impl<T> Mutex<T> {
pub const fn new(data: T) -> Mutex<T> {
Mutex {
pfex_item: MutexInner(0),
data: UnsafeCell::new(data),
}
}
pub fn lock(&self) -> MutexGuard<T> {
let addr = &self.pfex_item as *const _ as *const u32;
// println!("lock addr: {:p}", addr);
unsafe {
abi::pfex_acquire(addr);
}
MutexGuard {
pfex_item: &self.pfex_item,
data: unsafe { &mut *self.data.get() },
}
}
}
impl<'a, T: 'a> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T |
}
impl<'a, T: 'a> DerefMut for MutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
impl<'a, T: 'a> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
unsafe {
let addr = self.pfex_item as *const _ as *const u32;
abi::pfex_release(addr);
}
}
}
| {
self.data
} |
mod.rs | pub mod base_types;
pub mod chain;
pub mod monitor;
#[cfg(test)]
pub mod test_helpers {
pub fn iter_to_string<I, T>(it: I) -> String
where
I: Iterator<Item = T>,
T: std::fmt::Display,
{
format!(
"[{}]",
it.map(|x| x.to_string())
.fold(String::new(), |s, x| if s.is_empty() {
x
} else {
format!("{},{}", s, x)
})
)
}
pub fn custom_encoded<T>(original: T, value: &str) -> Result<(), serde_json::Error>
where
T: serde::Serialize,
{
let encoded = serde_json::to_string(&original)?;
assert_eq!(&encoded, value); | }
pub fn custom_decoded<'a, T>(message: &'a str, expected: T) -> Result<(), serde_json::Error>
where
T: serde::Deserialize<'a>,
T: std::fmt::Debug,
T: Eq,
{
let decoded: T = serde_json::from_str(message)?;
assert_eq!(decoded, expected);
Ok(())
}
} | Ok(()) |
customobj.py | """
Tools to allow users to place custom meshes on a building
"""
import bpy
import bmesh
from mathutils import Matrix, Vector
from bpy.props import PointerProperty
from .facemap import (
FaceMap,
add_faces_to_map,
add_facemap_for_groups
)
from ..utils import (
select,
local_xyz,
bm_to_obj,
crash_safe,
bm_from_obj,
popup_message,
calc_faces_median,
calc_verts_median,
get_bounding_verts,
calc_face_dimensions,
bmesh_from_active_object,
subdivide_face_vertically,
subdivide_face_horizontally,
get_selected_face_dimensions,
)
from ..utils import VEC_UP, VEC_FORWARD
from .array import ArrayProperty, ArrayGetSet
from .sizeoffset import SizeOffsetProperty, SizeOffsetGetSet
class CustomObjectProperty(bpy.types.PropertyGroup, SizeOffsetGetSet, ArrayGetSet):
array: PointerProperty(type=ArrayProperty)
size_offset: PointerProperty(type=SizeOffsetProperty)
def init(self, wall_dimensions):
self["wall_dimensions"] = wall_dimensions
self.size_offset.init(
(self["wall_dimensions"][0] / self.count, self["wall_dimensions"][1]),
default_size=(1.0, 1.0),
default_offset=(0.0, 0.0),
)
def draw(self, context, layout):
box = layout.box()
self.size_offset.draw(context, box)
layout.prop(self.array, "count")
@crash_safe
def add_custom_execute(self, context):
custom_obj = context.scene.btools_custom_object
if not custom_obj:
# Custom object has not been assigned
self.report({'INFO'}, "No Object Selected!")
return {"CANCELLED"}
if custom_obj.users == 0 or custom_obj.name not in context.view_layer.objects:
# Object was already deleted
self.report({'INFO'}, "Object has been deleted!")
return {"CANCELLED"}
self.props.init(get_selected_face_dimensions(context))
apply_transforms(context, custom_obj)
place_custom_object(context, self.props, custom_obj)
transfer_materials(custom_obj, context.object)
return {'FINISHED'}
class BTOOLS_OT_add_custom(bpy.types.Operator):
"""Place custom meshes on the selected faces"""
bl_idname = "btools.add_custom"
bl_label = "Add Custom Geometry"
bl_options = {"REGISTER", "UNDO", "PRESET"}
props: PointerProperty(type=CustomObjectProperty)
@classmethod
def poll(cls, context):
return context.object is not None and context.mode == "EDIT_MESH"
def execute(self, context):
add_facemap_for_groups([FaceMap.CUSTOM])
return add_custom_execute(self, context)
def draw(self, context):
self.props.draw(context, self.layout)
def apply_transforms(context, obj):
# -- store the current active object
mode_previous = context.mode
active_previous = context.active_object
# -- switch to object mode, if we are not already there
if context.mode != "OBJECT":
bpy.ops.object.mode_set(mode='OBJECT')
# -- make obj the active object and select it
bpy.context.view_layer.objects.active = obj
select(bpy.context.view_layer.objects, False)
obj.select_set(True)
# -- apply transform
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
# -- resume the previous state
bpy.context.view_layer.objects.active = active_previous
select(bpy.context.view_layer.objects, False)
active_previous.select_set(True)
bpy.ops.object.mode_set(mode=mode_previous.replace('_MESH', ""))
def place_custom_object(context, prop, custom_obj):
with bmesh_from_active_object(context) as bm:
faces = [face for face in bm.faces if face.select]
for face in faces:
face.select = False
# No support for upward/downward facing
if face.normal.z:
popup_message("Faces with Z+/Z- normals not supported!", title="Invalid Face Selection")
continue
array_faces = subdivide_face_horizontally(bm, face, widths=[prop.size_offset.size.x] * prop.count)
for aface in array_faces:
# -- Create split and place obj
split_face = create_split(bm, aface, prop.size_offset.size, prop.size_offset.offset)
place_object_on_face(bm, split_face, custom_obj, prop)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
def transfer_materials(from_object, to_obj):
"""Transfer materials from 'from_object' to 'to_object'"""
materials = from_object.data.materials
if not materials:
return
# -- copy materials
to_mats = to_obj.data.materials
if not to_mats:
# -- to_obj has no materials
list(map(to_mats.append, materials))
else:
# -- to_obj has some materials, ensure we are not duplicating
for mat in materials:
if mat.name not in to_mats:
to_mats.append(mat)
def mat_name_from_idx(idx):
for i, m in enumerate(materials):
if i == idx:
return m.name.encode()
return "".encode()
# -- store material names on the face layer
bm = bm_from_obj(from_object)
bm.faces.layers.string.verify()
mat_name = bm.faces.layers.string.active
for face in bm.faces:
face[mat_name] = mat_name_from_idx(face.material_index)
bm_to_obj(bm, from_object)
def | (bm, obj):
"""Copy all the mesh data in obj to the bm
Return the newly inserted faces
"""
max_index = len(bm.faces)
bm.from_mesh(obj.data.copy())
return [f for f in bm.faces if f.index >= max_index]
# TODO(ranjian0) refactor function (duplicated from create_window_split)
def create_split(bm, face, size, offset):
"""Use properties from SplitOffset to subdivide face into regular quads"""
wall_w, wall_h = calc_face_dimensions(face)
# horizontal split
h_widths = [wall_w / 2 + offset.x - size.x / 2, size.x, wall_w / 2 - offset.x - size.x / 2]
h_faces = subdivide_face_horizontally(bm, face, h_widths)
# vertical split
v_width = [wall_h / 2 + offset.y - size.y / 2, size.y, wall_h / 2 - offset.y - size.y / 2]
v_faces = subdivide_face_vertically(bm, h_faces[1], v_width)
return v_faces[1]
def place_object_on_face(bm, face, custom_obj, prop):
"""Place the custom_object mesh flush on the face"""
# XXX get mesh from custom_obj into bm
face_idx = face.index
custom_faces = duplicate_into_bm(bm, custom_obj)
face = [f for f in bm.faces if f.index == face_idx].pop() # restore reference
add_faces_to_map(bm, custom_faces, FaceMap.CUSTOM)
custom_verts = list({v for f in custom_faces for v in f.verts})
# (preprocess)calculate bounds of the object
# NOTE: bounds are calculated before any transform is made
dims = custom_obj.dimensions
current_size = [max(dims.x, dims.y), dims.z]
# -- move the custom faces into proper position on this face
transform_parallel_to_face(bm, custom_faces, face)
scale_to_size(bm, custom_verts, current_size, prop.size_offset.size, local_xyz(face))
# cleanup
bmesh.ops.delete(bm, geom=[face], context="FACES_ONLY")
def get_coplanar_faces(face_verts):
""" Determine extent faces that should be coplanar to walls"""
bounds = get_bounding_verts(face_verts)
coplanar_faces = (
list(bounds.topleft.link_faces) +
list(bounds.topright.link_faces) +
list(bounds.botleft.link_faces) +
list(bounds.botright.link_faces)
)
return set(coplanar_faces)
def calc_coplanar_median(face_verts):
""" Determine the median point for coplanar faces"""
return calc_faces_median(get_coplanar_faces(face_verts))
def calc_coplanar_normal(faces):
face_verts = list({v for f in faces for v in f.verts})
coplanar_faces = get_coplanar_faces(face_verts)
normals = {f.normal.copy().to_tuple(3) for f in coplanar_faces}
return Vector(normals.pop())
def transform_parallel_to_face(bm, custom_faces, target_face):
"""Move and rotate verts(mesh) so that it lies with it's
forward-extreme faces parallel to `face`
"""
target_normal = target_face.normal.copy()
target_median = target_face.calc_center_median()
verts = list({v for f in custom_faces for v in f.verts})
verts_median = calc_verts_median(verts)
custom_normal = calc_coplanar_normal(custom_faces)
try:
angle = target_normal.xy.angle_signed(custom_normal.xy)
except ValueError:
# TODO(ranjian0) Support all mesh shapes when placing along face
angle = 0
bmesh.ops.rotate(
bm, verts=verts,
cent=verts_median,
matrix=Matrix.Rotation(angle, 4, VEC_UP)
)
# -- determine the median of the faces that should be coplanar to the walls
coplanar_median = calc_coplanar_median(verts)
coplanar_median.z = verts_median.z # Compensate on Z axis for any coplanar faces not considered in calculations
# -- move the custom faces to the target face based on coplanar median
transform_diff = target_median - coplanar_median
bmesh.ops.translate(bm, verts=verts, vec=transform_diff)
def scale_to_size(bm, verts, current_size, target_size, local_dir):
"""Scale verts to target size along local direction (x and y)"""
x_dir, y_dir, z_dir = local_dir
target_width, target_height = target_size
current_width, current_height = current_size
# --scale
scale_x = x_dir * (target_width / current_width)
scale_y = y_dir * (target_height / current_height)
scale_z = Vector(map(abs, z_dir))
bmesh.ops.scale(
bm, verts=verts, vec=scale_x + scale_y + scale_z,
space=Matrix.Translation(-calc_verts_median(verts))
)
def set_face_materials(bm, faces):
mat_name = bm.faces.layers.string.active
if not mat_name:
return
obj_mats = bpy.context.object.data.materials
for f in faces:
mat = obj_mats.get(f[mat_name].decode())
f.material_index = list(obj_mats).index(mat)
classes = (CustomObjectProperty, BTOOLS_OT_add_custom)
def register_custom():
bpy.types.Scene.btools_custom_object = PointerProperty(
type=bpy.types.Object, description="Object to use for custom placement"
)
for cls in classes:
bpy.utils.register_class(cls)
def unregister_custom():
del bpy.types.Scene.btools_custom_object
for cls in classes:
bpy.utils.unregister_class(cls)
| duplicate_into_bm |
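As a hedged illustration of what scale_to_size() above does with bmesh.ops.scale's space matrix (an editorial sketch with made-up vertex data, not part of the add-on): scaling in a space translated by minus the median is the same as scaling each vertex about the median, v' = m + S * (v - m).
import numpy as np
verts = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
median = verts.mean(axis=0)
scale = np.array([0.5, 2.0, 1.0])            # per-axis factors, e.g. target_size / current_size
scaled = median + (verts - median) * scale   # equivalent to translating by -median, scaling, translating back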
cli.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Functionality related to the command line interface of the Move prover.
use abigen::AbigenOptions;
use anyhow::anyhow;
use clap::{App, Arg};
use docgen::DocgenOptions;
use errmapgen::ErrmapOptions;
use log::LevelFilter;
use serde::{Deserialize, Serialize};
use simplelog::{
CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode,
};
use spec_lang::env::VerificationScope;
use std::sync::atomic::{AtomicBool, Ordering};
/// Represents the virtual path to the boogie prelude which is inlined into the binary.
pub const INLINE_PRELUDE: &str = "<inline-prelude>";
/// Default flags passed to boogie. Additional flags will be added to this via the -B option.
const DEFAULT_BOOGIE_FLAGS: &[&str] = &[
"-doModSetAnalysis",
"-printVerifiedProceduresCount:0",
"-printModel:4",
];
/// Atomic used to prevent re-initialization of logging.
static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false);
/// Atomic used to detect whether we are running in test mode.
static TEST_MODE: AtomicBool = AtomicBool::new(false);
/// Represents options provided to the tool. Most of those options are configured via a toml
/// source; some over the command line flags.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct Options {
/// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to
/// a prelude build into this binary.
pub prelude_path: String,
/// The path to the boogie output which represents the verification problem.
pub output_path: String,
/// Verbosity level for logging.
pub verbosity_level: LevelFilter,
/// Whether to run the documentation generator instead of the prover.
pub run_docgen: bool,
/// Whether to run the ABI generator instead of the prover.
pub run_abigen: bool,
/// Whether to run the error map generator instead of the prover.
pub run_errmapgen: bool,
/// Whether to run a static analysis that computes the set of types that may be packed by the
/// Move code under analysis instead of the prover.
pub run_packed_types_gen: bool,
/// An account address to use if none is specified in the source.
pub account_address: String,
/// The paths to the Move sources.
pub move_sources: Vec<String>,
/// The paths to any dependencies for the Move sources. Those will not be verified but
/// can be used by `move_sources`.
pub move_deps: Vec<String>,
/// Options for the prover.
pub prover: ProverOptions,
/// Options for the prover backend.
pub backend: BackendOptions,
/// Options for the documentation generator.
pub docgen: DocgenOptions,
/// Options for the ABI generator.
pub abigen: AbigenOptions,
/// Options for the error map generator.
/// TODO: this currently creates errors during deserialization, so skip serialization for this field.
#[serde(skip_serializing)]
pub errmapgen: ErrmapOptions,
}
impl Default for Options {
fn default() -> Self {
Self {
prelude_path: INLINE_PRELUDE.to_string(),
output_path: "output.bpl".to_string(),
run_docgen: false,
run_abigen: false,
run_errmapgen: false,
run_packed_types_gen: false,
account_address: "0x234567".to_string(),
verbosity_level: LevelFilter::Info,
move_sources: vec![],
move_deps: vec![],
prover: ProverOptions::default(),
backend: BackendOptions::default(),
docgen: DocgenOptions::default(),
abigen: AbigenOptions::default(),
errmapgen: ErrmapOptions::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProverOptions {
/// Whether to only generate backend code.
pub generate_only: bool,
/// Whether to generate stubs for native functions.
pub native_stubs: bool,
/// Whether to minimize execution traces in errors.
pub minimize_execution_trace: bool,
/// Whether to omit debug information in generated model.
pub omit_model_debug: bool,
/// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test
/// output.
pub stable_test_output: bool,
/// Scope of what functions to verify.
pub verify_scope: VerificationScope,
/// [deprecated] Whether to emit global axiom that resources are well-formed.
pub resource_wellformed_axiom: bool,
/// Whether to assume wellformedness when elements are read from memory, instead of on
/// function entry.
pub assume_wellformed_on_access: bool,
/// Whether to assume a global invariant when the related memory
/// is accessed, instead of on function entry. This is currently known to be slower
/// when on than off, so it is off by default.
pub assume_invariant_on_access: bool,
/// Whether pack/unpack should recurse over the structure.
pub deep_pack_unpack: bool,
/// Whether to automatically debug trace values of specification expression leafs.
pub debug_trace: bool,
/// Report warnings. This is not on by default. We may turn it on if the warnings
/// are better filtered, e.g. do not contain unused schemas intended for other modules.
pub report_warnings: bool,
/// Whether to dump the transformed stackless bytecode to a file
pub dump_bytecode: bool,
/// Number of Boogie instances to be run concurrently.
pub num_instances: usize,
/// Whether to run Boogie instances sequentially.
pub sequential_task: bool,
/// Run negative verification checks.
pub negative_checks: bool,
}
impl Default for ProverOptions {
fn default() -> Self {
Self {
generate_only: false,
native_stubs: false,
minimize_execution_trace: true,
omit_model_debug: false,
stable_test_output: false,
verify_scope: VerificationScope::All,
resource_wellformed_axiom: false,
assume_wellformed_on_access: false,
deep_pack_unpack: false,
debug_trace: false,
report_warnings: false,
assume_invariant_on_access: false,
dump_bytecode: false,
num_instances: 1,
sequential_task: false,
negative_checks: false,
}
}
}
/// Backend options.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct BackendOptions {
/// Path to the boogie executable.
pub boogie_exe: String,
/// Path to the z3 executable.
pub z3_exe: String,
/// Whether to use cvc4.
pub use_cvc4: bool,
/// Path to the cvc4 executable.
pub cvc4_exe: String,
/// List of flags to pass on to boogie.
pub boogie_flags: Vec<String>,
/// Whether to use native array theory.
pub use_array_theory: bool,
/// Whether to produce an SMT file for each verification problem.
pub generate_smt: bool,
/// Whether native instead of stratified equality should be used.
pub native_equality: bool,
/// A string determining the type of requires used for parameter type checks. Can be
/// `"requires"` or `"free requires"`.
pub type_requires: String,
/// The depth until which stratified functions are expanded.
pub stratification_depth: usize,
/// A string to be used to inline a function of medium size. Can be empty or `{:inline}`.
pub aggressive_func_inline: String,
/// A string to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
/// Whether to use the sequence theory as the internal representation for $Vector type.
pub vector_using_sequences: bool,
/// A seed for the prover.
pub random_seed: usize,
/// The number of cores to use for parallel processing of verification conditions.
pub proc_cores: usize,
/// A (soft) timeout for the solver, per verification condition, in seconds.
pub vc_timeout: usize,
/// Whether Boogie output and log should be saved.
pub keep_artifacts: bool,
/// Eager threshold for quantifier instantiation.
pub eager_threshold: usize,
/// Lazy threshold for quantifier instantiation.
pub lazy_threshold: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
vector_using_sequences: false,
random_seed: 1,
proc_cores: 1,
vc_timeout: 40,
keep_artifacts: false,
eager_threshold: 100,
lazy_threshold: 100,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
// Creates options from command line arguments. This parses the arguments and terminates
// the program on errors, printing usage information. The first argument is expected to be
// the program name.
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.long("generate-only")
.short("g")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("warn")
.long("warn")
.short("w")
.help("produces warnings")
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("keep")
.long("keep")
.short("k")
.help("keep intermediate artifacts of the backend around")
)
.arg(
Arg::with_name("negative")
.long("negative")
.help("run negative verification checks")
).arg(
Arg::with_name("seed")
.long("seed")
.short("s")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets a random seed for the prover (default 0)")
)
.arg(
Arg::with_name("cores")
.long("cores")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the number of cores to use. \
NOTE: multiple cores may currently lead to scrambled model \
output from boogie (default 1)")
)
.arg(
Arg::with_name("timeout")
.long("timeout")
.short("T")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets a timeout (in seconds) for each \
individual verification condition (default 40)")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory `./doc` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("docgen-template")
.long("docgen-template")
.takes_value(true)
.value_name("FILE")
.help("a template for documentation generation."),
)
.arg(
Arg::with_name("abigen")
.long("abigen")
.help("run the ABI generator instead of the prover. \
Generated ABIs will be written into the directory `./abi` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("errmapgen")
.long("errmapgen")
.help("run the error map generator instead of the prover. \
The generated error map will be written to `errmap` unless configured otherwise"),
)
.arg(
Arg::with_name("packedtypesgen")
.long("packedtypesgen")
.help("run the packed types generator instead of the prover.")
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
.value_name("PATH_TO_SOURCE_FILE")
.min_values(1)
.help("the source files to verify"),
)
.arg(
Arg::with_name("eager-threshold")
.long("eager-threshold")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the eager threshold for quantifier instantiation (default 100)")
)
.arg(
Arg::with_name("lazy-threshold")
.long("lazy-threshold")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the lazy threshold for quantifier instantiation (default 100)")
)
.arg(
Arg::with_name("dump-bytecode")
.long("dump-bytecode")
.help("whether to dump the transformed bytecode to a file")
)
.arg(
Arg::with_name("num-instances")
.long("num-instances")
.takes_value(true)
.value_name("NUMBER")
.validator(is_number)
.help("sets the number of Boogie instances to run concurrently (default 1)")
)
.arg(
Arg::with_name("sequential")
.long("sequential")
.help("whether to run the Boogie instances sequentially")
)
.arg(
Arg::with_name("use-cvc4")
.long("use-cvc4")
.help("use cvc4 solver instead of z3")
)
.after_help("More options available via `--config file` or `--config-str str`. \
Use `--print-config` to see format and current values. \
See `move-prover/src/cli.rs::Option` for documentation.");
// Parse the arguments. This will abort the program on parsing errors and print help.
// It will also accept options like --help.
let matches = cli.get_matches_from(args);
// Initialize options.
let get_vec = |s: &str| -> Vec<String> {
match matches.values_of(s) {
Some(vs) => vs.map(|v| v.to_string()).collect(),
_ => vec![],
}
};
let mut options = if matches.is_present("config") {
if matches.is_present("config-str") {
return Err(anyhow!(
"currently, if `--config` (including via $MOVE_PROVER_CONFIG) is given \
`--config-str` cannot be used. Consider editing your \
configuration file instead."
));
}
Self::create_from_toml_file(matches.value_of("config").unwrap())?
} else if matches.is_present("config-str") {
Self::create_from_toml(matches.value_of("config-str").unwrap())?
} else {
Options::default()
};
// Analyze arguments.
if matches.is_present("output") {
options.output_path = matches.value_of("output").unwrap().to_string();
}
if matches.is_present("verbosity") {
options.verbosity_level = match matches.value_of("verbosity").unwrap() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("generate-only") {
options.prover.generate_only = true;
}
if matches.occurrences_of("sources") > 0 {
options.move_sources = get_vec("sources");
}
if matches.occurrences_of("dependencies") > 0 {
options.move_deps = get_vec("dependencies");
}
if matches.is_present("verify") {
options.prover.verify_scope = match matches.value_of("verify").unwrap() {
"public" => VerificationScope::Public,
"all" => VerificationScope::All,
"none" => VerificationScope::None,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("bench-repeat") {
options.backend.bench_repeat =
matches.value_of("bench-repeat").unwrap().parse::<usize>()?;
}
if matches.is_present("docgen") {
options.run_docgen = true;
}
if matches.is_present("docgen-template") {
options.run_docgen = true;
options.docgen.root_doc_template =
matches.value_of("docgen-template").map(|s| s.to_string());
}
if matches.is_present("abigen") {
options.run_abigen = true;
}
if matches.is_present("errmapgen") {
options.run_errmapgen = true;
}
if matches.is_present("packedtypesgen") {
options.run_packed_types_gen = true;
}
if matches.is_present("warn") {
options.prover.report_warnings = true;
}
if matches.is_present("trace") {
options.prover.debug_trace = true;
}
if matches.is_present("dump-bytecode") {
options.prover.dump_bytecode = true;
}
if matches.is_present("num-instances") {
let num_instances = matches
.value_of("num-instances")
.unwrap()
.parse::<usize>()?;
options.prover.num_instances = std::cmp::max(num_instances, 1); // at least one instance
}
if matches.is_present("sequential") {
options.prover.sequential_task = true;
}
if matches.is_present("keep") {
options.backend.keep_artifacts = true;
}
if matches.is_present("negative") {
options.prover.negative_checks = true;
}
if matches.is_present("seed") {
options.backend.random_seed = matches.value_of("seed").unwrap().parse::<usize>()?;
}
if matches.is_present("timeout") {
options.backend.vc_timeout = matches.value_of("timeout").unwrap().parse::<usize>()?;
}
if matches.is_present("cores") {
options.backend.proc_cores = matches.value_of("cores").unwrap().parse::<usize>()?;
}
if matches.is_present("eager-threshold") {
options.backend.eager_threshold = matches
.value_of("eager-threshold")
.unwrap()
.parse::<usize>()?;
}
if matches.is_present("lazy-threshold") {
options.backend.lazy_threshold = matches
.value_of("lazy-threshold")
.unwrap()
.parse::<usize>()?;
}
if matches.is_present("use-cvc4") {
options.backend.use_cvc4 = true;
}
if matches.is_present("print-config") {
println!("{}", toml::to_string(&options).unwrap());
Err(anyhow!("exiting"))
} else {
Ok(options)
}
}
/// Sets up logging based on provided options. This should be called as early as possible
/// and before any use of info!, warn! etc.
pub fn setup_logging(&self) {
CombinedLogger::init(vec![TermLogger::new(
self.verbosity_level,
ConfigBuilder::new()
.set_time_level(LevelFilter::Debug)
.set_level_padding(LevelPadding::Off)
.build(),
TerminalMode::Mixed,
)])
.expect("Unexpected CombinedLogger init failure");
}
pub fn setup_logging_for_test(&self) {
// Loggers are global static, so we have to protect against reinitializing.
if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) {
return;
}
TEST_MODE.store(true, Ordering::Relaxed);
SimpleLogger::init(self.verbosity_level, Config::default())
.expect("Unexpected SimpleLogger failure");
}
/// Returns command line to call boogie.
pub fn get_boogie_command(&self, boogie_file: &str) -> Vec<String> {
let mut result = vec![self.backend.boogie_exe.clone()];
let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string()));
add(DEFAULT_BOOGIE_FLAGS);
if !self.prover.negative_checks {
// Right now, we let boogie produce only one error per procedure. The boogie wrapper isn't
// capable of sorting out multiple errors and associating them with models otherwise.
add(&["-errorLimit:1"]);
}
if self.backend.use_cvc4 {
add(&[
"-proverOpt:SOLVER=cvc4",
&format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe),
]);
} else {
add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]);
}
if self.backend.use_array_theory | else {
add(&[&format!(
"-proverOpt:O:smt.QI.EAGER_THRESHOLD={}",
self.backend.eager_threshold
)]);
add(&[&format!(
"-proverOpt:O:smt.QI.LAZY_THRESHOLD={}",
self.backend.lazy_threshold
)]);
}
add(&[&format!(
"-vcsCores:{}",
if self.prover.stable_test_output {
// Do not use multiple cores if stable test output is requested.
// Error messages may appear in non-deterministic order otherwise.
1
} else {
self.backend.proc_cores
}
)]);
// TODO: see what we can make out of these flags.
//add(&["-proverOpt:O:smt.QI.PROFILE=true"]);
//add(&["-proverOpt:O:trace=true"]);
//add(&["-proverOpt:VERBOSITY=3"]);
//add(&["-proverOpt:C:-st"]);
if self.backend.generate_smt {
add(&["-proverLog:@[email protected]"]);
}
for f in &self.backend.boogie_flags {
add(&[f.as_str()]);
}
add(&[boogie_file]);
result
}
/// Returns name of file where to log boogie output.
pub fn get_boogie_log_file(&self, boogie_file: &str) -> String {
format!("{}.log", boogie_file)
}
/// Adjust a timeout value, given in seconds, for the runtime environment.
pub fn adjust_timeout(&self, time: usize) -> usize {
// If running on a Linux flavor, as in CI, add 100% to the timeout for added
// robustness against flakiness.
match std::env::consts::OS {
"linux" | "freebsd" | "openbsd" => time + time,
_ => time,
}
}
/// Convenience function to enable debugging (like high verbosity) on this instance.
pub fn enable_debug(&mut self) {
self.verbosity_level = LevelFilter::Debug;
}
}
| {
add(&[
"-useArrayTheory",
"/proverOpt:O:smt.array.extensional=false",
]);
} |
user_test.go | package message
import (
"reflect"
"testing"
)
func TestMakeUser(t *testing.T) | {
var actual, expected []byte
s := &MockScreen{}
u := NewUserScreen(SimpleID("foo"), s)
m := NewAnnounceMsg("hello")
defer u.Close()
u.Send(m)
u.HandleMsg(u.ConsumeOne())
s.Read(&actual)
expected = []byte(m.String() + Newline)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("Got: `%s`; Expected: `%s`", actual, expected)
}
} |
|
codec.go | package v1beta1
import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/codec/legacy"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
authzcodec "github.com/cosmos/cosmos-sdk/x/authz/codec"
)
// RegisterLegacyAminoCodec registers all the necessary types and interfaces for the
// governance module.
func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
cdc.RegisterInterface((*Content)(nil), nil)
legacy.RegisterAminoMsg(cdc, &MsgSubmitProposal{}, "cosmos-sdk/MsgSubmitProposal")
legacy.RegisterAminoMsg(cdc, &MsgDeposit{}, "cosmos-sdk/MsgDeposit")
legacy.RegisterAminoMsg(cdc, &MsgVote{}, "cosmos-sdk/MsgVote")
legacy.RegisterAminoMsg(cdc, &MsgVoteWeighted{}, "cosmos-sdk/MsgVoteWeighted")
cdc.RegisterConcrete(&TextProposal{}, "cosmos-sdk/TextProposal", nil)
}
func | (registry codectypes.InterfaceRegistry) {
registry.RegisterImplementations((*sdk.Msg)(nil),
&MsgSubmitProposal{},
&MsgVote{},
&MsgVoteWeighted{},
&MsgDeposit{},
)
registry.RegisterInterface(
"cosmos.gov.v1beta1.Content",
(*Content)(nil),
&TextProposal{},
)
msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
}
var (
amino = codec.NewLegacyAmino()
ModuleCdc = codec.NewAminoCodec(amino)
)
func init() {
RegisterLegacyAminoCodec(amino)
cryptocodec.RegisterCrypto(amino)
sdk.RegisterLegacyAminoCodec(amino)
// Register all Amino interfaces and concrete types on the authz Amino codec so that this can later be
// used to properly serialize MsgGrant and MsgExec instances
RegisterLegacyAminoCodec(authzcodec.Amino)
}
| RegisterInterfaces |
build.rs | extern crate cc;
use std::env;
use std::process::{Command, Stdio};
fn build_win_msvc() {
cc::Build::new()
.cpp(true)
.flag("-Zi")
.flag("-Gm")
.flag("-INCREMENTAL")
.warnings(false)
.define("WIN32", None)
.define("_WINDOWS", None)
.include("./src")
.include("./src/osg")
.file("./src/tileset.cpp")
.file("./src/shp23dtile.cpp")
.file("./src/osgb23dtile.cpp")
.file("./src/dxt_img.cpp")
.compile("_3dtile");
// -------------
println!("cargo:rustc-link-search=native=./lib");
// -------------
println!("cargo:rustc-link-lib=gdal_i");
println!("cargo:rustc-link-lib=OpenThreads");
println!("cargo:rustc-link-lib=osg");
println!("cargo:rustc-link-lib=osgDB");
println!("cargo:rustc-link-lib=osgUtil");
println!("cargo:rustc-link-lib=osgViewer");
Command::new("cmd")
.args(&[
"/C",
"xcopy",
r#".\bin"#,
&format!(r#".\target\{}"#, env::var("PROFILE").unwrap()),
"/y",
"/e",
])
.stdout(Stdio::inherit())
.output()
.unwrap();
}
fn build_win_gun() {
cc::Build::new()
.cpp(true)
.flag("-std=c++11")
.warnings(false)
.define("WIN32", None)
.include("./src")
.include("./src/osg")
.file("./src/tileset.cpp")
.file("./src/shp23dtile.cpp")
.file("./src/osgb23dtile.cpp")
.file("./src/dxt_img.cpp")
.compile("_3dtile");
// -------------
println!("cargo:rustc-link-search=native=./lib");
// -------------
println!("cargo:rustc-link-lib=gdal_i");
println!("cargo:rustc-link-lib=osg");
println!("cargo:rustc-link-lib=osgDB");
println!("cargo:rustc-link-lib=osgUtil");
println!("cargo:rustc-link-lib=OpenThreads");
Command::new("cmd")
.args(&[
"/C",
"xcopy",
r#".\bin"#,
&format!(r#".\target\{}"#, env::var("PROFILE").unwrap()),
"/y",
"/e",
])
.stdout(Stdio::inherit())
.output()
.unwrap();
}
fn build_linux_unkonw() |
fn main() {
match env::var("TARGET") {
Ok(val) => match val.as_str() {
"x86_64-pc-windows-gnu" => build_win_gun(),
"x86_64-unknown-linux-gnu" => build_linux_unkonw(),
"x86_64-pc-windows-msvc" => build_win_msvc(),
&_ => {}
},
_ => {}
}
}
| {
cc::Build::new()
.cpp(true)
.flag("-std=c++11")
.warnings(false)
.include("./src")
.include("./src/osg")
.file("./src/tileset.cpp")
.file("./src/shp23dtile.cpp")
.file("./src/osgb23dtile.cpp")
.file("./src/dxt_img.cpp")
.compile("_3dtile");
// -------------
println!("cargo:rustc-link-search=native=./lib");
// -------------
println!("cargo:rustc-link-lib=OpenThreads");
println!("cargo:rustc-link-lib=osg");
println!("cargo:rustc-link-lib=osgDB");
println!("cargo:rustc-link-lib=osgUtil");
} |
anonymizer.min.js | "use strict";!function(r,i){r(function(){r(".anonymize").each(function(t,n){var n=r(n),a=String(n.attr("data-anonymize")),e=0;n.empty().text(i(function(){var t=a.substr(4*e++,4);return parseInt(t,16)/65536}))})})}(jQuery,sillyName); | ||
train_NFM.py | import argparse
import datetime
import os
import traceback
import kornia
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm
import models
from datasets import LowLightDataset, LowLightFDataset
from models import PSNR, SSIM, CosineLR
from tools import SingleSummaryWriter
from tools import saver, mutils
def get_args():
parser = argparse.ArgumentParser('Breaking Down the Darkness')
parser.add_argument('--num_gpus', type=int, default=1, help='number of gpus being used')
parser.add_argument('--num_workers', type=int, default=12, help='num_workers of dataloader')
parser.add_argument('--batch_size', type=int, default=1, help='The number of images per batch among all devices')
parser.add_argument('-m1', '--model1', type=str, default='INet',
help='Model1 Name')
parser.add_argument('-m2', '--model2', type=str, default='NSNet',
help='Model2 Name')
parser.add_argument('-m3', '--model3', type=str, default='NSNet',
help='Model3 Name')
parser.add_argument('-m1w', '--model1_weight', type=str, default=None,
help='Model Name')
parser.add_argument('-m2w', '--model2_weight', type=str, default=None,
help='Model Name')
parser.add_argument('--comment', type=str, default='default',
help='Project comment')
parser.add_argument('--graph', action='store_true')
parser.add_argument('--no_sche', action='store_true')
parser.add_argument('--sampling', action='store_true')
parser.add_argument('--slope', type=float, default=2.)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--optim', type=str, default='adam', help='select optimizer for training, '
'suggest using \'adamw\' until the'
' very final stage then switch to \'sgd\'')
parser.add_argument('--num_epochs', type=int, default=500)
parser.add_argument('--val_interval', type=int, default=1, help='Number of epoches between valing phases')
parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')
parser.add_argument('--data_path', type=str, default='./data/LOL',
help='the root folder of dataset')
parser.add_argument('--log_path', type=str, default='logs/')
parser.add_argument('--saved_path', type=str, default='logs/')
args = parser.parse_args()
return args
class ModelNSNet(nn.Module):
def | (self, model1, model2, model3):
super().__init__()
self.texture_loss = models.SSIML1Loss(channels=1)
self.model_ianet = model1(in_channels=1, out_channels=1)
self.model_nsnet = model2(in_channels=2, out_channels=1)
self.model_fusenet = model3(in_channels=3, out_channels=1)
assert opt.model1_weight is not None
self.load_weight(self.model_ianet, opt.model1_weight)
self.load_weight(self.model_nsnet, opt.model2_weight)
self.model_ianet.eval()
self.model_nsnet.eval()
self.eps = 1e-2
def load_weight(self, model, weight_pth):
state_dict = torch.load(weight_pth)
ret = model.load_state_dict(state_dict, strict=True)
print(ret)
def noise_syn(self, illumi, strength):
return torch.exp(-illumi) * strength
def forward(self, image, image_gt, training=True):
texture_nss = []
with torch.no_grad():
if training:
image = image.squeeze(0)
image_gt = image_gt.repeat(8, 1, 1, 1)
texture_in, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image), 1, dim=1)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image_gt), 1, dim=1)
texture_in_down = F.interpolate(texture_in, scale_factor=0.5, mode='bicubic', align_corners=True)
illumi = self.model_ianet(texture_in_down)
illumi = F.interpolate(illumi, scale_factor=2, mode='bicubic', align_corners=True)
noisy_gt = texture_in / torch.clamp_min(illumi, self.eps)
for strength in [0, 0.05, 0.1]:
illumi = torch.clamp(illumi, 0., 1.)
attention = self.noise_syn(illumi, strength=strength)
texture_res = self.model_nsnet(torch.cat([noisy_gt, attention], dim=1))
texture_ns = noisy_gt + texture_res
texture_nss.append(texture_ns)
texture_nss = torch.cat(texture_nss, dim=1).detach()
texture_fuse = self.model_fusenet(texture_nss)
restor_loss = self.texture_loss(texture_fuse, texture_gt)
psnr = PSNR(texture_fuse, texture_gt)
ssim = SSIM(texture_fuse, texture_gt).item()
return noisy_gt, texture_nss, texture_fuse, texture_res, illumi, restor_loss, psnr, ssim
def train(opt):
if torch.cuda.is_available():
torch.cuda.manual_seed(42)
else:
torch.manual_seed(42)
timestamp = mutils.get_formatted_time()
opt.saved_path = opt.saved_path + f'/{opt.comment}/{timestamp}'
opt.log_path = opt.log_path + f'/{opt.comment}/{timestamp}/tensorboard/'
os.makedirs(opt.log_path, exist_ok=True)
os.makedirs(opt.saved_path, exist_ok=True)
training_params = {'batch_size': opt.batch_size,
'shuffle': True,
'drop_last': True,
'num_workers': opt.num_workers}
val_params = {'batch_size': 1,
'shuffle': False,
'drop_last': True,
'num_workers': opt.num_workers}
training_set = LowLightFDataset(os.path.join(opt.data_path, 'train'), image_split='images_aug')
training_generator = DataLoader(training_set, **training_params)
val_set = LowLightDataset(os.path.join(opt.data_path, 'eval'))
val_generator = DataLoader(val_set, **val_params)
model1 = getattr(models, opt.model1)
model2 = getattr(models, opt.model2)
model3 = getattr(models, opt.model3)
writer = SingleSummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}/')
model = ModelNSNet(model1, model2, model3)
print(model)
if opt.num_gpus > 0:
model = model.cuda()
if opt.num_gpus > 1:
model = nn.DataParallel(model)
if opt.optim == 'adam':
optimizer = torch.optim.Adam(model.model_fusenet.parameters(), opt.lr)
else:
optimizer = torch.optim.SGD(model.model_fusenet.parameters(), opt.lr, momentum=0.9, nesterov=True)
scheduler = CosineLR(optimizer, opt.lr, opt.num_epochs)
epoch = 0
step = 0
model.model_fusenet.train()
num_iter_per_epoch = len(training_generator)
try:
for epoch in range(opt.num_epochs):
last_epoch = step // num_iter_per_epoch
if epoch < last_epoch:
continue
epoch_loss = []
progress_bar = tqdm(training_generator)
saver.base_url = os.path.join(opt.saved_path, 'results', '%03d' % epoch)
if not opt.sampling:
for iter, (data, target, name) in enumerate(progress_bar):
if iter < step - last_epoch * num_iter_per_epoch:
progress_bar.update()
continue
try:
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
optimizer.zero_grad()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=True)
loss = restor_loss
loss.backward()
optimizer.step()
epoch_loss.append(float(loss))
progress_bar.set_description(
'Step: {}. Epoch: {}/{}. Iteration: {}/{}. restor_loss: {:.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, restor_loss.item(), psnr,
ssim))
writer.add_scalar('Loss/train', loss, step)
writer.add_scalar('PSNR/train', psnr, step)
writer.add_scalar('SSIM/train', ssim, step)
# log learning_rate
current_lr = optimizer.param_groups[0]['lr']
writer.add_scalar('learning_rate', current_lr, step)
step += 1
except Exception as e:
print('[Error]', traceback.format_exc())
print(e)
continue
if not opt.no_sche:
scheduler.step()
if epoch % opt.val_interval == 0:
model.model_fusenet.eval()
loss_ls = []
psnrs = []
ssims = []
for iter, (data, target, name) in enumerate(val_generator):
with torch.no_grad():
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=False)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(target), 1, dim=1)
saver.save_image(noisy_gt, name=os.path.splitext(name[0])[0] + '_in')
saver.save_image(texture_nss.transpose(0, 1), name=os.path.splitext(name[0])[0] + '_ns')
saver.save_image(texture_fuse, name=os.path.splitext(name[0])[0] + '_fuse')
saver.save_image(texture_res, name=os.path.splitext(name[0])[0] + '_res')
saver.save_image(illumi, name=os.path.splitext(name[0])[0] + '_ill')
saver.save_image(target, name=os.path.splitext(name[0])[0] + '_gt')
loss = restor_loss
loss_ls.append(loss.item())
psnrs.append(psnr)
ssims.append(ssim)
loss = np.mean(np.array(loss_ls))
psnr = np.mean(np.array(psnrs))
ssim = np.mean(np.array(ssims))
print(
'Val. Epoch: {}/{}. Loss: {:1.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
epoch, opt.num_epochs, loss, psnr, ssim))
writer.add_scalar('Loss/val', loss, step)
writer.add_scalar('PSNR/val', psnr, step)
writer.add_scalar('SSIM/val', ssim, step)
save_checkpoint(model, f'{opt.model3}_{"%03d" % epoch}_{psnr}_{ssim}_{step}.pth')
model.model_fusenet.train()
if opt.sampling:
exit(0)
except KeyboardInterrupt:
save_checkpoint(model, f'{opt.model3}_{epoch}_{step}_keyboardInterrupt.pth')
writer.close()
writer.close()
def save_checkpoint(model, name):
if isinstance(model, nn.DataParallel):
torch.save(model.module.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
else:
torch.save(model.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
if __name__ == '__main__':
opt = get_args()
train(opt)
| __init__ |
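For reference, a hypothetical command line for the script above (the file name train_fuse.py is an assumption; the flags are the ones defined in get_args(), and both weight paths are needed because ModelNSNet loads the pretrained IANet and NSNet checkpoints at construction time): python train_fuse.py -m1w ./ckpt/INet.pth -m2w ./ckpt/NSNet.pth --data_path ./data/LOL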
SettingsPage.tsx | import { connect } from 'react-redux';
import Settings from '../components/Settings/Settings';
export interface SettingsState {
dataStore: any;
}
const mapStateToProps = (state: SettingsState, ownProps) => { | return {
dataStore: ownProps.dataStore
};
};
export default connect(mapStateToProps)(Settings); | |
properties.rs | use crate::{BlockId, BlockKind};
impl BlockId {
#[doc = "Determines whether or not a block has the `age_0_15` property."]
pub fn has_age_0_15(self) -> bool {
match self.kind() {
BlockKind::Fire | BlockKind::Cactus | BlockKind::SugarCane => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `age_0_2` property."]
pub fn has_age_0_2(self) -> bool {
match self.kind() {
BlockKind::Cocoa => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `age_0_25` property."]
pub fn has_age_0_25(self) -> bool {
match self.kind() {
BlockKind::Kelp => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `age_0_3` property."]
pub fn has_age_0_3(self) -> bool {
match self.kind() {
BlockKind::NetherWart | BlockKind::Beetroots | BlockKind::FrostedIce => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `age_0_5` property."]
pub fn has_age_0_5(self) -> bool {
match self.kind() {
BlockKind::ChorusFlower => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `age_0_7` property."]
pub fn has_age_0_7(self) -> bool {
match self.kind() {
BlockKind::Wheat
| BlockKind::PumpkinStem
| BlockKind::MelonStem
| BlockKind::Carrots
| BlockKind::Potatoes => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `attached` property."]
pub fn has_attached(self) -> bool {
match self.kind() {
BlockKind::TripwireHook | BlockKind::Tripwire => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `axis_xyz` property."]
pub fn has_axis_xyz(self) -> bool {
match self.kind() {
BlockKind::OakLog
| BlockKind::SpruceLog
| BlockKind::BirchLog
| BlockKind::JungleLog
| BlockKind::AcaciaLog
| BlockKind::DarkOakLog
| BlockKind::StrippedSpruceLog
| BlockKind::StrippedBirchLog
| BlockKind::StrippedJungleLog
| BlockKind::StrippedAcaciaLog
| BlockKind::StrippedDarkOakLog
| BlockKind::StrippedOakLog
| BlockKind::OakWood
| BlockKind::SpruceWood
| BlockKind::BirchWood
| BlockKind::JungleWood
| BlockKind::AcaciaWood
| BlockKind::DarkOakWood
| BlockKind::StrippedOakWood
| BlockKind::StrippedSpruceWood
| BlockKind::StrippedBirchWood
| BlockKind::StrippedJungleWood
| BlockKind::StrippedAcaciaWood
| BlockKind::StrippedDarkOakWood
| BlockKind::QuartzPillar
| BlockKind::HayBlock
| BlockKind::PurpurPillar
| BlockKind::BoneBlock => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `axis_xz` property."]
pub fn has_axis_xz(self) -> bool {
match self.kind() {
BlockKind::NetherPortal => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `bites` property."]
pub fn has_bites(self) -> bool {
match self.kind() {
BlockKind::Cake => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `cauldron_level` property."]
pub fn has_cauldron_level(self) -> bool {
match self.kind() {
BlockKind::Cauldron => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `chest_kind` property."]
pub fn has_chest_kind(self) -> bool {
match self.kind() {
BlockKind::Chest | BlockKind::TrappedChest => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `comparator_mode` property."] | match self.kind() {
BlockKind::Comparator => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `conditional` property."]
pub fn has_conditional(self) -> bool {
match self.kind() {
BlockKind::CommandBlock
| BlockKind::RepeatingCommandBlock
| BlockKind::ChainCommandBlock => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `delay` property."]
pub fn has_delay(self) -> bool {
match self.kind() {
BlockKind::Repeater => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `disarmed` property."]
pub fn has_disarmed(self) -> bool {
match self.kind() {
BlockKind::Tripwire => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `distance` property."]
pub fn has_distance(self) -> bool {
match self.kind() {
BlockKind::OakLeaves
| BlockKind::SpruceLeaves
| BlockKind::BirchLeaves
| BlockKind::JungleLeaves
| BlockKind::AcaciaLeaves
| BlockKind::DarkOakLeaves => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `down` property."]
pub fn has_down(self) -> bool {
match self.kind() {
BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `drag` property."]
pub fn has_drag(self) -> bool {
match self.kind() {
BlockKind::BubbleColumn => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `east_connected` property."]
pub fn has_east_connected(self) -> bool {
match self.kind() {
BlockKind::Fire
| BlockKind::OakFence
| BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::IronBars
| BlockKind::GlassPane
| BlockKind::Vine
| BlockKind::NetherBrickFence
| BlockKind::Tripwire
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::WhiteStainedGlassPane
| BlockKind::OrangeStainedGlassPane
| BlockKind::MagentaStainedGlassPane
| BlockKind::LightBlueStainedGlassPane
| BlockKind::YellowStainedGlassPane
| BlockKind::LimeStainedGlassPane
| BlockKind::PinkStainedGlassPane
| BlockKind::GrayStainedGlassPane
| BlockKind::LightGrayStainedGlassPane
| BlockKind::CyanStainedGlassPane
| BlockKind::PurpleStainedGlassPane
| BlockKind::BlueStainedGlassPane
| BlockKind::BrownStainedGlassPane
| BlockKind::GreenStainedGlassPane
| BlockKind::RedStainedGlassPane
| BlockKind::BlackStainedGlassPane
| BlockKind::SpruceFence
| BlockKind::BirchFence
| BlockKind::JungleFence
| BlockKind::AcaciaFence
| BlockKind::DarkOakFence
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `east_wire` property."]
pub fn has_east_wire(self) -> bool {
match self.kind() {
BlockKind::RedstoneWire => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `eggs` property."]
pub fn has_eggs(self) -> bool {
match self.kind() {
BlockKind::TurtleEgg => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `enabled` property."]
pub fn has_enabled(self) -> bool {
match self.kind() {
BlockKind::Hopper => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `extended` property."]
pub fn has_extended(self) -> bool {
match self.kind() {
BlockKind::StickyPiston | BlockKind::Piston => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `eye` property."]
pub fn has_eye(self) -> bool {
match self.kind() {
BlockKind::EndPortalFrame => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `face` property."]
pub fn has_face(self) -> bool {
match self.kind() {
BlockKind::Lever
| BlockKind::StoneButton
| BlockKind::OakButton
| BlockKind::SpruceButton
| BlockKind::BirchButton
| BlockKind::JungleButton
| BlockKind::AcaciaButton
| BlockKind::DarkOakButton => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `facing_cardinal` property."]
pub fn has_facing_cardinal(self) -> bool {
match self.kind() {
BlockKind::WhiteBed
| BlockKind::OrangeBed
| BlockKind::MagentaBed
| BlockKind::LightBlueBed
| BlockKind::YellowBed
| BlockKind::LimeBed
| BlockKind::PinkBed
| BlockKind::GrayBed
| BlockKind::LightGrayBed
| BlockKind::CyanBed
| BlockKind::PurpleBed
| BlockKind::BlueBed
| BlockKind::BrownBed
| BlockKind::GreenBed
| BlockKind::RedBed
| BlockKind::BlackBed
| BlockKind::WallTorch
| BlockKind::OakStairs
| BlockKind::Chest
| BlockKind::Furnace
| BlockKind::OakDoor
| BlockKind::Ladder
| BlockKind::CobblestoneStairs
| BlockKind::WallSign
| BlockKind::Lever
| BlockKind::IronDoor
| BlockKind::RedstoneWallTorch
| BlockKind::StoneButton
| BlockKind::CarvedPumpkin
| BlockKind::JackOLantern
| BlockKind::Repeater
| BlockKind::OakTrapdoor
| BlockKind::SpruceTrapdoor
| BlockKind::BirchTrapdoor
| BlockKind::JungleTrapdoor
| BlockKind::AcaciaTrapdoor
| BlockKind::DarkOakTrapdoor
| BlockKind::AttachedPumpkinStem
| BlockKind::AttachedMelonStem
| BlockKind::OakFenceGate
| BlockKind::BrickStairs
| BlockKind::StoneBrickStairs
| BlockKind::NetherBrickStairs
| BlockKind::EndPortalFrame
| BlockKind::Cocoa
| BlockKind::SandstoneStairs
| BlockKind::EnderChest
| BlockKind::TripwireHook
| BlockKind::SpruceStairs
| BlockKind::BirchStairs
| BlockKind::JungleStairs
| BlockKind::OakButton
| BlockKind::SpruceButton
| BlockKind::BirchButton
| BlockKind::JungleButton
| BlockKind::AcaciaButton
| BlockKind::DarkOakButton
| BlockKind::SkeletonWallSkull
| BlockKind::WitherSkeletonWallSkull
| BlockKind::ZombieWallHead
| BlockKind::PlayerWallHead
| BlockKind::CreeperWallHead
| BlockKind::DragonWallHead
| BlockKind::Anvil
| BlockKind::ChippedAnvil
| BlockKind::DamagedAnvil
| BlockKind::TrappedChest
| BlockKind::Comparator
| BlockKind::QuartzStairs
| BlockKind::AcaciaStairs
| BlockKind::DarkOakStairs
| BlockKind::IronTrapdoor
| BlockKind::PrismarineStairs
| BlockKind::PrismarineBrickStairs
| BlockKind::DarkPrismarineStairs
| BlockKind::WhiteWallBanner
| BlockKind::OrangeWallBanner
| BlockKind::MagentaWallBanner
| BlockKind::LightBlueWallBanner
| BlockKind::YellowWallBanner
| BlockKind::LimeWallBanner
| BlockKind::PinkWallBanner
| BlockKind::GrayWallBanner
| BlockKind::LightGrayWallBanner
| BlockKind::CyanWallBanner
| BlockKind::PurpleWallBanner
| BlockKind::BlueWallBanner
| BlockKind::BrownWallBanner
| BlockKind::GreenWallBanner
| BlockKind::RedWallBanner
| BlockKind::BlackWallBanner
| BlockKind::RedSandstoneStairs
| BlockKind::SpruceFenceGate
| BlockKind::BirchFenceGate
| BlockKind::JungleFenceGate
| BlockKind::AcaciaFenceGate
| BlockKind::DarkOakFenceGate
| BlockKind::SpruceDoor
| BlockKind::BirchDoor
| BlockKind::JungleDoor
| BlockKind::AcaciaDoor
| BlockKind::DarkOakDoor
| BlockKind::PurpurStairs
| BlockKind::WhiteGlazedTerracotta
| BlockKind::OrangeGlazedTerracotta
| BlockKind::MagentaGlazedTerracotta
| BlockKind::LightBlueGlazedTerracotta
| BlockKind::YellowGlazedTerracotta
| BlockKind::LimeGlazedTerracotta
| BlockKind::PinkGlazedTerracotta
| BlockKind::GrayGlazedTerracotta
| BlockKind::LightGrayGlazedTerracotta
| BlockKind::CyanGlazedTerracotta
| BlockKind::PurpleGlazedTerracotta
| BlockKind::BlueGlazedTerracotta
| BlockKind::BrownGlazedTerracotta
| BlockKind::GreenGlazedTerracotta
| BlockKind::RedGlazedTerracotta
| BlockKind::BlackGlazedTerracotta
| BlockKind::DeadTubeCoralWallFan
| BlockKind::DeadBrainCoralWallFan
| BlockKind::DeadBubbleCoralWallFan
| BlockKind::DeadFireCoralWallFan
| BlockKind::DeadHornCoralWallFan
| BlockKind::TubeCoralWallFan
| BlockKind::BrainCoralWallFan
| BlockKind::BubbleCoralWallFan
| BlockKind::FireCoralWallFan
| BlockKind::HornCoralWallFan => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `facing_cardinal_and_down` property."]
pub fn has_facing_cardinal_and_down(self) -> bool {
match self.kind() {
BlockKind::Hopper => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `facing_cubic` property."]
pub fn has_facing_cubic(self) -> bool {
match self.kind() {
BlockKind::Dispenser
| BlockKind::StickyPiston
| BlockKind::Piston
| BlockKind::PistonHead
| BlockKind::MovingPiston
| BlockKind::CommandBlock
| BlockKind::Dropper
| BlockKind::EndRod
| BlockKind::RepeatingCommandBlock
| BlockKind::ChainCommandBlock
| BlockKind::Observer
| BlockKind::ShulkerBox
| BlockKind::WhiteShulkerBox
| BlockKind::OrangeShulkerBox
| BlockKind::MagentaShulkerBox
| BlockKind::LightBlueShulkerBox
| BlockKind::YellowShulkerBox
| BlockKind::LimeShulkerBox
| BlockKind::PinkShulkerBox
| BlockKind::GrayShulkerBox
| BlockKind::LightGrayShulkerBox
| BlockKind::CyanShulkerBox
| BlockKind::PurpleShulkerBox
| BlockKind::BlueShulkerBox
| BlockKind::BrownShulkerBox
| BlockKind::GreenShulkerBox
| BlockKind::RedShulkerBox
| BlockKind::BlackShulkerBox => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `half_top_bottom` property."]
pub fn has_half_top_bottom(self) -> bool {
match self.kind() {
BlockKind::OakStairs
| BlockKind::CobblestoneStairs
| BlockKind::OakTrapdoor
| BlockKind::SpruceTrapdoor
| BlockKind::BirchTrapdoor
| BlockKind::JungleTrapdoor
| BlockKind::AcaciaTrapdoor
| BlockKind::DarkOakTrapdoor
| BlockKind::BrickStairs
| BlockKind::StoneBrickStairs
| BlockKind::NetherBrickStairs
| BlockKind::SandstoneStairs
| BlockKind::SpruceStairs
| BlockKind::BirchStairs
| BlockKind::JungleStairs
| BlockKind::QuartzStairs
| BlockKind::AcaciaStairs
| BlockKind::DarkOakStairs
| BlockKind::IronTrapdoor
| BlockKind::PrismarineStairs
| BlockKind::PrismarineBrickStairs
| BlockKind::DarkPrismarineStairs
| BlockKind::RedSandstoneStairs
| BlockKind::PurpurStairs => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `half_upper_lower` property."]
pub fn has_half_upper_lower(self) -> bool {
match self.kind() {
BlockKind::TallSeagrass
| BlockKind::OakDoor
| BlockKind::IronDoor
| BlockKind::Sunflower
| BlockKind::Lilac
| BlockKind::RoseBush
| BlockKind::Peony
| BlockKind::TallGrass
| BlockKind::LargeFern
| BlockKind::SpruceDoor
| BlockKind::BirchDoor
| BlockKind::JungleDoor
| BlockKind::AcaciaDoor
| BlockKind::DarkOakDoor => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `has_bottle_0` property."]
pub fn has_has_bottle_0(self) -> bool {
match self.kind() {
BlockKind::BrewingStand => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `has_bottle_1` property."]
pub fn has_has_bottle_1(self) -> bool {
match self.kind() {
BlockKind::BrewingStand => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `has_bottle_2` property."]
pub fn has_has_bottle_2(self) -> bool {
match self.kind() {
BlockKind::BrewingStand => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `has_record` property."]
pub fn has_has_record(self) -> bool {
match self.kind() {
BlockKind::Jukebox => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `hatch` property."]
pub fn has_hatch(self) -> bool {
match self.kind() {
BlockKind::TurtleEgg => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `hinge` property."]
pub fn has_hinge(self) -> bool {
match self.kind() {
BlockKind::OakDoor
| BlockKind::IronDoor
| BlockKind::SpruceDoor
| BlockKind::BirchDoor
| BlockKind::JungleDoor
| BlockKind::AcaciaDoor
| BlockKind::DarkOakDoor => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `in_wall` property."]
pub fn has_in_wall(self) -> bool {
match self.kind() {
BlockKind::OakFenceGate
| BlockKind::SpruceFenceGate
| BlockKind::BirchFenceGate
| BlockKind::JungleFenceGate
| BlockKind::AcaciaFenceGate
| BlockKind::DarkOakFenceGate => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `instrument` property."]
pub fn has_instrument(self) -> bool {
match self.kind() {
BlockKind::NoteBlock => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `inverted` property."]
pub fn has_inverted(self) -> bool {
match self.kind() {
BlockKind::DaylightDetector => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `layers` property."]
pub fn has_layers(self) -> bool {
match self.kind() {
BlockKind::Snow => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `lit` property."]
pub fn has_lit(self) -> bool {
match self.kind() {
BlockKind::Furnace
| BlockKind::RedstoneOre
| BlockKind::RedstoneTorch
| BlockKind::RedstoneWallTorch
| BlockKind::RedstoneLamp => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `locked` property."]
pub fn has_locked(self) -> bool {
match self.kind() {
BlockKind::Repeater => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `moisture` property."]
pub fn has_moisture(self) -> bool {
match self.kind() {
BlockKind::Farmland => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `north_connected` property."]
pub fn has_north_connected(self) -> bool {
match self.kind() {
BlockKind::Fire
| BlockKind::OakFence
| BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::IronBars
| BlockKind::GlassPane
| BlockKind::Vine
| BlockKind::NetherBrickFence
| BlockKind::Tripwire
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::WhiteStainedGlassPane
| BlockKind::OrangeStainedGlassPane
| BlockKind::MagentaStainedGlassPane
| BlockKind::LightBlueStainedGlassPane
| BlockKind::YellowStainedGlassPane
| BlockKind::LimeStainedGlassPane
| BlockKind::PinkStainedGlassPane
| BlockKind::GrayStainedGlassPane
| BlockKind::LightGrayStainedGlassPane
| BlockKind::CyanStainedGlassPane
| BlockKind::PurpleStainedGlassPane
| BlockKind::BlueStainedGlassPane
| BlockKind::BrownStainedGlassPane
| BlockKind::GreenStainedGlassPane
| BlockKind::RedStainedGlassPane
| BlockKind::BlackStainedGlassPane
| BlockKind::SpruceFence
| BlockKind::BirchFence
| BlockKind::JungleFence
| BlockKind::AcaciaFence
| BlockKind::DarkOakFence
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `north_wire` property."]
pub fn has_north_wire(self) -> bool {
match self.kind() {
BlockKind::RedstoneWire => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `note` property."]
pub fn has_note(self) -> bool {
match self.kind() {
BlockKind::NoteBlock => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `occupied` property."]
pub fn has_occupied(self) -> bool {
match self.kind() {
BlockKind::WhiteBed
| BlockKind::OrangeBed
| BlockKind::MagentaBed
| BlockKind::LightBlueBed
| BlockKind::YellowBed
| BlockKind::LimeBed
| BlockKind::PinkBed
| BlockKind::GrayBed
| BlockKind::LightGrayBed
| BlockKind::CyanBed
| BlockKind::PurpleBed
| BlockKind::BlueBed
| BlockKind::BrownBed
| BlockKind::GreenBed
| BlockKind::RedBed
| BlockKind::BlackBed => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `open` property."]
pub fn has_open(self) -> bool {
match self.kind() {
BlockKind::OakDoor
| BlockKind::IronDoor
| BlockKind::OakTrapdoor
| BlockKind::SpruceTrapdoor
| BlockKind::BirchTrapdoor
| BlockKind::JungleTrapdoor
| BlockKind::AcaciaTrapdoor
| BlockKind::DarkOakTrapdoor
| BlockKind::OakFenceGate
| BlockKind::IronTrapdoor
| BlockKind::SpruceFenceGate
| BlockKind::BirchFenceGate
| BlockKind::JungleFenceGate
| BlockKind::AcaciaFenceGate
| BlockKind::DarkOakFenceGate
| BlockKind::SpruceDoor
| BlockKind::BirchDoor
| BlockKind::JungleDoor
| BlockKind::AcaciaDoor
| BlockKind::DarkOakDoor => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `part` property."]
pub fn has_part(self) -> bool {
match self.kind() {
BlockKind::WhiteBed
| BlockKind::OrangeBed
| BlockKind::MagentaBed
| BlockKind::LightBlueBed
| BlockKind::YellowBed
| BlockKind::LimeBed
| BlockKind::PinkBed
| BlockKind::GrayBed
| BlockKind::LightGrayBed
| BlockKind::CyanBed
| BlockKind::PurpleBed
| BlockKind::BlueBed
| BlockKind::BrownBed
| BlockKind::GreenBed
| BlockKind::RedBed
| BlockKind::BlackBed => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `persistent` property."]
pub fn has_persistent(self) -> bool {
match self.kind() {
BlockKind::OakLeaves
| BlockKind::SpruceLeaves
| BlockKind::BirchLeaves
| BlockKind::JungleLeaves
| BlockKind::AcaciaLeaves
| BlockKind::DarkOakLeaves => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `pickles` property."]
pub fn has_pickles(self) -> bool {
match self.kind() {
BlockKind::SeaPickle => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `piston_kind` property."]
pub fn has_piston_kind(self) -> bool {
match self.kind() {
BlockKind::PistonHead | BlockKind::MovingPiston => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `power` property."]
pub fn has_power(self) -> bool {
match self.kind() {
BlockKind::RedstoneWire
| BlockKind::LightWeightedPressurePlate
| BlockKind::HeavyWeightedPressurePlate
| BlockKind::DaylightDetector => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `powered` property."]
pub fn has_powered(self) -> bool {
match self.kind() {
BlockKind::NoteBlock
| BlockKind::PoweredRail
| BlockKind::DetectorRail
| BlockKind::OakDoor
| BlockKind::Lever
| BlockKind::StonePressurePlate
| BlockKind::IronDoor
| BlockKind::OakPressurePlate
| BlockKind::SprucePressurePlate
| BlockKind::BirchPressurePlate
| BlockKind::JunglePressurePlate
| BlockKind::AcaciaPressurePlate
| BlockKind::DarkOakPressurePlate
| BlockKind::StoneButton
| BlockKind::Repeater
| BlockKind::OakTrapdoor
| BlockKind::SpruceTrapdoor
| BlockKind::BirchTrapdoor
| BlockKind::JungleTrapdoor
| BlockKind::AcaciaTrapdoor
| BlockKind::DarkOakTrapdoor
| BlockKind::OakFenceGate
| BlockKind::TripwireHook
| BlockKind::Tripwire
| BlockKind::OakButton
| BlockKind::SpruceButton
| BlockKind::BirchButton
| BlockKind::JungleButton
| BlockKind::AcaciaButton
| BlockKind::DarkOakButton
| BlockKind::Comparator
| BlockKind::ActivatorRail
| BlockKind::IronTrapdoor
| BlockKind::SpruceFenceGate
| BlockKind::BirchFenceGate
| BlockKind::JungleFenceGate
| BlockKind::AcaciaFenceGate
| BlockKind::DarkOakFenceGate
| BlockKind::SpruceDoor
| BlockKind::BirchDoor
| BlockKind::JungleDoor
| BlockKind::AcaciaDoor
| BlockKind::DarkOakDoor
| BlockKind::Observer => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `powered_rail_shape` property."]
pub fn has_powered_rail_shape(self) -> bool {
match self.kind() {
BlockKind::PoweredRail | BlockKind::DetectorRail | BlockKind::ActivatorRail => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `rail_shape` property."]
pub fn has_rail_shape(self) -> bool {
match self.kind() {
BlockKind::Rail => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `rotation` property."]
pub fn has_rotation(self) -> bool {
match self.kind() {
BlockKind::Sign
| BlockKind::SkeletonSkull
| BlockKind::WitherSkeletonSkull
| BlockKind::ZombieHead
| BlockKind::PlayerHead
| BlockKind::CreeperHead
| BlockKind::DragonHead
| BlockKind::WhiteBanner
| BlockKind::OrangeBanner
| BlockKind::MagentaBanner
| BlockKind::LightBlueBanner
| BlockKind::YellowBanner
| BlockKind::LimeBanner
| BlockKind::PinkBanner
| BlockKind::GrayBanner
| BlockKind::LightGrayBanner
| BlockKind::CyanBanner
| BlockKind::PurpleBanner
| BlockKind::BlueBanner
| BlockKind::BrownBanner
| BlockKind::GreenBanner
| BlockKind::RedBanner
| BlockKind::BlackBanner => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `short` property."]
pub fn has_short(self) -> bool {
match self.kind() {
BlockKind::PistonHead => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `slab_kind` property."]
pub fn has_slab_kind(self) -> bool {
match self.kind() {
BlockKind::PrismarineSlab
| BlockKind::PrismarineBrickSlab
| BlockKind::DarkPrismarineSlab
| BlockKind::OakSlab
| BlockKind::SpruceSlab
| BlockKind::BirchSlab
| BlockKind::JungleSlab
| BlockKind::AcaciaSlab
| BlockKind::DarkOakSlab
| BlockKind::StoneSlab
| BlockKind::SandstoneSlab
| BlockKind::PetrifiedOakSlab
| BlockKind::CobblestoneSlab
| BlockKind::BrickSlab
| BlockKind::StoneBrickSlab
| BlockKind::NetherBrickSlab
| BlockKind::QuartzSlab
| BlockKind::RedSandstoneSlab
| BlockKind::PurpurSlab => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `snowy` property."]
pub fn has_snowy(self) -> bool {
match self.kind() {
BlockKind::GrassBlock | BlockKind::Podzol | BlockKind::Mycelium => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `south_connected` property."]
pub fn has_south_connected(self) -> bool {
match self.kind() {
BlockKind::Fire
| BlockKind::OakFence
| BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::IronBars
| BlockKind::GlassPane
| BlockKind::Vine
| BlockKind::NetherBrickFence
| BlockKind::Tripwire
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::WhiteStainedGlassPane
| BlockKind::OrangeStainedGlassPane
| BlockKind::MagentaStainedGlassPane
| BlockKind::LightBlueStainedGlassPane
| BlockKind::YellowStainedGlassPane
| BlockKind::LimeStainedGlassPane
| BlockKind::PinkStainedGlassPane
| BlockKind::GrayStainedGlassPane
| BlockKind::LightGrayStainedGlassPane
| BlockKind::CyanStainedGlassPane
| BlockKind::PurpleStainedGlassPane
| BlockKind::BlueStainedGlassPane
| BlockKind::BrownStainedGlassPane
| BlockKind::GreenStainedGlassPane
| BlockKind::RedStainedGlassPane
| BlockKind::BlackStainedGlassPane
| BlockKind::SpruceFence
| BlockKind::BirchFence
| BlockKind::JungleFence
| BlockKind::AcaciaFence
| BlockKind::DarkOakFence
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `south_wire` property."]
pub fn has_south_wire(self) -> bool {
match self.kind() {
BlockKind::RedstoneWire => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `stage` property."]
pub fn has_stage(self) -> bool {
match self.kind() {
BlockKind::OakSapling
| BlockKind::SpruceSapling
| BlockKind::BirchSapling
| BlockKind::JungleSapling
| BlockKind::AcaciaSapling
| BlockKind::DarkOakSapling => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `stairs_shape` property."]
pub fn has_stairs_shape(self) -> bool {
match self.kind() {
BlockKind::OakStairs
| BlockKind::CobblestoneStairs
| BlockKind::BrickStairs
| BlockKind::StoneBrickStairs
| BlockKind::NetherBrickStairs
| BlockKind::SandstoneStairs
| BlockKind::SpruceStairs
| BlockKind::BirchStairs
| BlockKind::JungleStairs
| BlockKind::QuartzStairs
| BlockKind::AcaciaStairs
| BlockKind::DarkOakStairs
| BlockKind::PrismarineStairs
| BlockKind::PrismarineBrickStairs
| BlockKind::DarkPrismarineStairs
| BlockKind::RedSandstoneStairs
| BlockKind::PurpurStairs => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `structure_block_mode` property."]
pub fn has_structure_block_mode(self) -> bool {
match self.kind() {
BlockKind::StructureBlock => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `triggered` property."]
pub fn has_triggered(self) -> bool {
match self.kind() {
BlockKind::Dispenser | BlockKind::Dropper => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `unstable` property."]
pub fn has_unstable(self) -> bool {
match self.kind() {
BlockKind::Tnt => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `up` property."]
pub fn has_up(self) -> bool {
match self.kind() {
BlockKind::Fire
| BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::Vine
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `water_level` property."]
pub fn has_water_level(self) -> bool {
match self.kind() {
BlockKind::Water | BlockKind::Lava => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `waterlogged` property."]
pub fn has_waterlogged(self) -> bool {
match self.kind() {
BlockKind::OakStairs
| BlockKind::Chest
| BlockKind::Sign
| BlockKind::Ladder
| BlockKind::CobblestoneStairs
| BlockKind::WallSign
| BlockKind::OakFence
| BlockKind::OakTrapdoor
| BlockKind::SpruceTrapdoor
| BlockKind::BirchTrapdoor
| BlockKind::JungleTrapdoor
| BlockKind::AcaciaTrapdoor
| BlockKind::DarkOakTrapdoor
| BlockKind::IronBars
| BlockKind::GlassPane
| BlockKind::BrickStairs
| BlockKind::StoneBrickStairs
| BlockKind::NetherBrickFence
| BlockKind::NetherBrickStairs
| BlockKind::SandstoneStairs
| BlockKind::EnderChest
| BlockKind::SpruceStairs
| BlockKind::BirchStairs
| BlockKind::JungleStairs
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::TrappedChest
| BlockKind::QuartzStairs
| BlockKind::WhiteStainedGlassPane
| BlockKind::OrangeStainedGlassPane
| BlockKind::MagentaStainedGlassPane
| BlockKind::LightBlueStainedGlassPane
| BlockKind::YellowStainedGlassPane
| BlockKind::LimeStainedGlassPane
| BlockKind::PinkStainedGlassPane
| BlockKind::GrayStainedGlassPane
| BlockKind::LightGrayStainedGlassPane
| BlockKind::CyanStainedGlassPane
| BlockKind::PurpleStainedGlassPane
| BlockKind::BlueStainedGlassPane
| BlockKind::BrownStainedGlassPane
| BlockKind::GreenStainedGlassPane
| BlockKind::RedStainedGlassPane
| BlockKind::BlackStainedGlassPane
| BlockKind::AcaciaStairs
| BlockKind::DarkOakStairs
| BlockKind::IronTrapdoor
| BlockKind::PrismarineStairs
| BlockKind::PrismarineBrickStairs
| BlockKind::DarkPrismarineStairs
| BlockKind::PrismarineSlab
| BlockKind::PrismarineBrickSlab
| BlockKind::DarkPrismarineSlab
| BlockKind::RedSandstoneStairs
| BlockKind::OakSlab
| BlockKind::SpruceSlab
| BlockKind::BirchSlab
| BlockKind::JungleSlab
| BlockKind::AcaciaSlab
| BlockKind::DarkOakSlab
| BlockKind::StoneSlab
| BlockKind::SandstoneSlab
| BlockKind::PetrifiedOakSlab
| BlockKind::CobblestoneSlab
| BlockKind::BrickSlab
| BlockKind::StoneBrickSlab
| BlockKind::NetherBrickSlab
| BlockKind::QuartzSlab
| BlockKind::RedSandstoneSlab
| BlockKind::PurpurSlab
| BlockKind::SpruceFence
| BlockKind::BirchFence
| BlockKind::JungleFence
| BlockKind::AcaciaFence
| BlockKind::DarkOakFence
| BlockKind::PurpurStairs
| BlockKind::DeadTubeCoral
| BlockKind::DeadBrainCoral
| BlockKind::DeadBubbleCoral
| BlockKind::DeadFireCoral
| BlockKind::DeadHornCoral
| BlockKind::TubeCoral
| BlockKind::BrainCoral
| BlockKind::BubbleCoral
| BlockKind::FireCoral
| BlockKind::HornCoral
| BlockKind::DeadTubeCoralWallFan
| BlockKind::DeadBrainCoralWallFan
| BlockKind::DeadBubbleCoralWallFan
| BlockKind::DeadFireCoralWallFan
| BlockKind::DeadHornCoralWallFan
| BlockKind::TubeCoralWallFan
| BlockKind::BrainCoralWallFan
| BlockKind::BubbleCoralWallFan
| BlockKind::FireCoralWallFan
| BlockKind::HornCoralWallFan
| BlockKind::DeadTubeCoralFan
| BlockKind::DeadBrainCoralFan
| BlockKind::DeadBubbleCoralFan
| BlockKind::DeadFireCoralFan
| BlockKind::DeadHornCoralFan
| BlockKind::TubeCoralFan
| BlockKind::BrainCoralFan
| BlockKind::BubbleCoralFan
| BlockKind::FireCoralFan
| BlockKind::HornCoralFan
| BlockKind::SeaPickle
| BlockKind::Conduit => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `west_connected` property."]
pub fn has_west_connected(self) -> bool {
match self.kind() {
BlockKind::Fire
| BlockKind::OakFence
| BlockKind::BrownMushroomBlock
| BlockKind::RedMushroomBlock
| BlockKind::MushroomStem
| BlockKind::IronBars
| BlockKind::GlassPane
| BlockKind::Vine
| BlockKind::NetherBrickFence
| BlockKind::Tripwire
| BlockKind::CobblestoneWall
| BlockKind::MossyCobblestoneWall
| BlockKind::WhiteStainedGlassPane
| BlockKind::OrangeStainedGlassPane
| BlockKind::MagentaStainedGlassPane
| BlockKind::LightBlueStainedGlassPane
| BlockKind::YellowStainedGlassPane
| BlockKind::LimeStainedGlassPane
| BlockKind::PinkStainedGlassPane
| BlockKind::GrayStainedGlassPane
| BlockKind::LightGrayStainedGlassPane
| BlockKind::CyanStainedGlassPane
| BlockKind::PurpleStainedGlassPane
| BlockKind::BlueStainedGlassPane
| BlockKind::BrownStainedGlassPane
| BlockKind::GreenStainedGlassPane
| BlockKind::RedStainedGlassPane
| BlockKind::BlackStainedGlassPane
| BlockKind::SpruceFence
| BlockKind::BirchFence
| BlockKind::JungleFence
| BlockKind::AcaciaFence
| BlockKind::DarkOakFence
| BlockKind::ChorusPlant => true,
_ => false,
}
}
#[doc = "Determines whether or not a block has the `west_wire` property."]
pub fn has_west_wire(self) -> bool {
match self.kind() {
BlockKind::RedstoneWire => true,
_ => false,
}
}
} | pub fn has_comparator_mode(self) -> bool { |
leetcode.252.meeting-rooms.py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def canAttendMeetings(self, intervals):
| """
:type intervals: List[Interval]
:rtype: bool
"""
if len(intervals) < 2:
return True
intervals.sort(key = lambda x: x.start)
for i in xrange(1, len(intervals)):
if intervals[i-1].end > intervals[i].start:
return False
return True |
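A hedged usage sketch (not part of the source); the Interval stub simply mirrors the commented-out definition at the top of the file:
class Interval(object):
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e

print(Solution().canAttendMeetings([Interval(0, 30), Interval(5, 10), Interval(15, 20)]))  # False: (0, 30) overlaps both
print(Solution().canAttendMeetings([Interval(7, 10), Interval(2, 4)]))  # True: no overlap after sorting by start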
|
sa.py | from configparser import ConfigParser
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
import pygmo as pg
from tengp.individual import IndividualBuilder, NPIndividual
from tengp import Parameters, FunctionSet
from tengp_eval.coevolution import TrainersSet, GaPredictors
def fitness_function(individual, x, y):
output = individual.transform(x)
try:
#return adjusted_r2_score(y, output, len(x), len(individual.genes))
return mean_squared_error(output, y)
except ValueError:
return 10e10
class cost_function:
def __init__(self, X, Y, params, bounds):
|
def fitness(self, x):
individual = NPIndividual(list(x), self.bounds, self.params)
fitness = fitness_function(individual, self.X, self.Y)
return [fitness]
def get_bounds(self):
return self.bounds
def define_cgp_system(n_nodes, n_inputs, n_outputs, funset, max_back):
"""
define CCGP system
Return:
IndividualBuilder object
Parameters
bounds (tuple)
"""
params = Parameters(n_inputs, n_outputs, 1, n_nodes, funset, real_valued=True, max_back=max_back)
ib = IndividualBuilder(params)
bounds = ib.create().bounds
return ib, params, bounds
def run_benchmark_coevolution(cp, x_train, y_train, funset):
ib, params, bounds = define_cgp_system(
cp.getint('CGPPARAMS', 'n_nodes'),
x_train.shape[1] if len(x_train.shape) > 1 else 1,
y_train.shape[1] if len(y_train.shape) > 1 else 1,
funset,
cp.getint('CGPPARAMS', 'max_back'))
# setup the coevolution elements
ts = TrainersSet(ib, 16, fitness_function, x_train, y_train)
predictors = GaPredictors(x_train, y_train, 10, 24)
predictors.evaluate_fitness(ts)
x_reduced, y_reduced = predictors.best_predictors_data()
GENS_STEP = 50
cf = cost_function(x_reduced, y_reduced, params, bounds)
prob = pg.problem(cf)
algo = pg.algorithm(pg.pso(
gen=GENS_STEP,
omega=cp.getfloat('OPTIMPARAMS', 'omega'),
eta1=cp.getfloat('OPTIMPARAMS', 'eta1'),
eta2=cp.getfloat('OPTIMPARAMS', 'eta2'),
memory=True))
algo.set_verbosity(1)
pop = pg.population(prob, cp.getint('DEFAULT', 'population_size'))
n_gens = GENS_STEP
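# Coevolution loop: evolve the PSO population for GENS_STEP generations on the reduced data,
# promote the champion into the trainers set, then let the predictors pick a new data subset.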
while n_gens < 500:
pop = algo.evolve(pop)
# calculate exact fitness of champion and
# add it to the trainers set
champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
try:
champion.fitness = fitness_function(champion, x_train, y_train)
ts.add_trainer(champion)
except ValueError:
print('unsuccessful adding of champion')
# update random population
ts.update_random_population()
predictors.predictors_evolution_step(ts)
print('changing the subset, best predictor: ', predictors.best_predictor.fitness)
x_reduced, y_reduced = predictors.best_predictors_data()
pop.problem.extract(object).X = x_reduced
pop.problem.extract(object).Y = y_reduced
n_gens += GENS_STEP
uda = algo.extract(pg.pso)
champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
champion.fitness = fitness_function(champion, x_train, y_train)
fitnesses = [x[2] for x in uda.get_log()]
fitnesses.append(champion.fitness)
return fitnesses
def run_benchmark(cp, x_train, y_train, funset):
ib, params, bounds = define_cgp_system(
cp.getint('CGPPARAMS', 'n_nodes'),
x_train.shape[1] if len(x_train.shape) > 1 else 1,
y_train.shape[1] if len(y_train.shape) > 1 else 1,
funset,
cp.getint('CGPPARAMS', 'max_back'))
cf = cost_function(x_train, y_train, params, bounds)
prob = pg.problem(cf)
algo = pg.algorithm(pg.simulated_annealing(
Ts=cp.getfloat('OPTIMPARAMS', 'Ts'),
Tf=cp.getfloat('OPTIMPARAMS', 'Tf'),
n_T_adj=cp.getint('OPTIMPARAMS', 'n_T_adj'),
n_range_adj=cp.getint('OPTIMPARAMS', 'n_range_adj'),
bin_size=cp.getint('OPTIMPARAMS', 'bin_size'),
start_range=cp.getfloat('OPTIMPARAMS', 'start_range')))
algo.set_verbosity(100)
pop = pg.population(prob, 1)
pop = algo.evolve(pop)
uda = algo.extract(pg.simulated_annealing)
return [x[2] for x in uda.get_log()]
RUNNERS = [run_benchmark]
| self.params = params
self.bounds = bounds
self.X = X
self.Y = Y |
initialized_service.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//! Service to determine if the integritee service is initialized and registered on the node,
//! hosted on a http server.
use crate::error::ServiceResult;
use lazy_static::lazy_static;
use log::*;
use parking_lot::RwLock;
use std::net::SocketAddr;
use warp::Filter;
lazy_static! {
static ref INITIALIZED_HANDLE: RwLock<bool> = RwLock::new(false);
}
pub async fn start_is_initialized_server(port: u16) -> ServiceResult<()> {
let is_initialized_route = warp::path!("is_initialized").and_then(|| async move {
if *INITIALIZED_HANDLE.read() {
Ok("I am initialized.")
} else {
Err(warp::reject::not_found())
}
});
let socket_addr: SocketAddr = ([0, 0, 0, 0], port).into();
info!("Running initialized server on: {:?}", socket_addr);
warp::serve(is_initialized_route).run(socket_addr).await;
info!("Initialized server shut down");
Ok(())
}
/// Set initialized handler value to true.
pub fn | () {
let mut initialized_lock = INITIALIZED_HANDLE.write();
*initialized_lock = true;
}
| set_initialized |
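A hedged client-side sketch (not part of the source) for probing the endpoint above; the host and port are placeholders and the requests dependency is assumed:
import requests

resp = requests.get('http://127.0.0.1:4545/is_initialized')
# warp answers 200 with body "I am initialized." once set_initialized() has been called,
# and a 404 rejection before that.
print(resp.status_code, resp.text)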
mod.rs | use metaplex_token_metadata::ID as TOKEN_METADATA_PROGRAM_ID;
use solana_account_decoder::UiAccountEncoding;
use solana_client::{
rpc_client::RpcClient,
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
};
use solana_sdk::{
account::Account,
commitment_config::{CommitmentConfig, CommitmentLevel},
pubkey::Pubkey,
};
use std::str::FromStr;
pub mod errors;
use crate::constants::*;
use errors::SnapshotError;
pub fn get_metadata_accounts_by_update_authority(
client: &RpcClient,
update_authority: &str,
) -> Result<Vec<(Pubkey, Account)>, SnapshotError> {
let config = RpcProgramAccountsConfig {
filters: Some(vec![RpcFilterType::Memcmp(Memcmp {
offset: 1, // key
bytes: MemcmpEncodedBytes::Base58(update_authority.to_string()),
encoding: None,
})]),
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: None,
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
min_context_slot: None,
},
with_context: None,
};
let accounts = match client.get_program_accounts_with_config(&TOKEN_METADATA_PROGRAM_ID, config)
{
Ok(accounts) => accounts,
Err(err) => return Err(SnapshotError::ClientError(err.kind)),
};
Ok(accounts)
}
pub fn get_metadata_accounts_by_creator(
client: &RpcClient, | creator_id: &str,
creator_position: usize,
) -> Result<Vec<(Pubkey, Account)>, SnapshotError> {
let config = RpcProgramAccountsConfig {
filters: Some(vec![RpcFilterType::Memcmp(Memcmp {
offset: OFFSET_TO_CREATORS + creator_position * PUBKEY_LENGTH,
bytes: MemcmpEncodedBytes::Base58(creator_id.to_string()),
encoding: None,
})]),
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: None,
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
min_context_slot: None,
},
with_context: None,
};
let accounts = match client.get_program_accounts_with_config(&TOKEN_METADATA_PROGRAM_ID, config)
{
Ok(accounts) => accounts,
Err(err) => return Err(SnapshotError::ClientError(err.kind)),
};
Ok(accounts)
}
pub fn get_holder_token_accounts(
client: &RpcClient,
mint_account: String,
) -> Result<Vec<(Pubkey, Account)>, SnapshotError> {
let token_program_id = match Pubkey::from_str(TOKEN_PROGRAM_ID) {
Ok(token_program_id) => token_program_id,
Err(_) => {
return Err(SnapshotError::PubkeyParseFailed(
TOKEN_PROGRAM_ID.to_string(),
))
}
};
let filter1 = RpcFilterType::Memcmp(Memcmp {
offset: 0,
bytes: MemcmpEncodedBytes::Base58(mint_account),
encoding: None,
});
let filter2 = RpcFilterType::DataSize(165);
let account_config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: None,
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
min_context_slot: None,
};
let config = RpcProgramAccountsConfig {
filters: Some(vec![filter1, filter2]),
account_config,
with_context: None,
};
let holders = match client.get_program_accounts_with_config(&token_program_id, config) {
Ok(accounts) => accounts,
Err(err) => return Err(SnapshotError::ClientError(err.kind)),
};
Ok(holders)
}
pub fn get_edition_accounts_by_master(
client: &RpcClient,
parent_pubkey: &str,
) -> Result<Vec<(Pubkey, Account)>, SnapshotError> {
let key_filter = RpcFilterType::Memcmp(Memcmp {
offset: 0,
bytes: MemcmpEncodedBytes::Base58(EDITION_V1_BS58.to_string()),
encoding: None,
});
let parent_filter = RpcFilterType::Memcmp(Memcmp {
offset: 1,
bytes: MemcmpEncodedBytes::Base58(parent_pubkey.to_string()),
encoding: None,
});
let filters = vec![key_filter, parent_filter];
let config = RpcProgramAccountsConfig {
filters: Some(filters),
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: None,
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
min_context_slot: None,
},
with_context: None,
};
let accounts = match client.get_program_accounts_with_config(&TOKEN_METADATA_PROGRAM_ID, config)
{
Ok(accounts) => accounts,
Err(err) => return Err(SnapshotError::ClientError(err.kind)),
};
Ok(accounts)
} | |
test_mars.py | import pytest
import ray
import mars
import mars.dataframe as md
import pyarrow as pa
@pytest.fixture(scope="module")
def ray_start_regular(request): # pragma: no cover
try:
yield ray.init(num_cpus=16)
finally:
ray.shutdown()
def test_mars(ray_start_regular):
|
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| import pandas as pd
cluster = mars.new_cluster_in_ray(worker_num=2, worker_cpu=1)
n = 10000
pdf = pd.DataFrame({"a": list(range(n)), "b": list(range(n, 2 * n))})
df = md.DataFrame(pdf)
# Convert mars dataframe to ray dataset
ds = ray.data.from_mars(df)
pd.testing.assert_frame_equal(ds.to_pandas(), df.to_pandas())
ds2 = ds.filter(lambda row: row["a"] % 2 == 0)
assert ds2.take(5) == [{"a": 2 * i, "b": n + 2 * i} for i in range(5)]
# Convert ray dataset to mars dataframe
df2 = ds2.to_mars()
pd.testing.assert_frame_equal(
df2.head(5).to_pandas(),
pd.DataFrame({"a": list(range(0, 10, 2)), "b": list(range(n, n + 10, 2))}),
)
# Test Arrow Dataset
pdf2 = pd.DataFrame({c: range(5) for c in "abc"})
ds3 = ray.data.from_arrow([pa.Table.from_pandas(pdf2) for _ in range(3)])
df3 = ds3.to_mars()
pd.testing.assert_frame_equal(
df3.head(5).to_pandas(),
pdf2,
)
# Test simple datasets
with pytest.raises(NotImplementedError):
ray.data.range(10).to_mars()
cluster.stop() |
usuario.controller.ts | import {
BadRequestException,
Body,
Controller,
Get,
Param,
Post,
ReflectMetadata,
Req,
Res,
UseGuards
} from "@nestjs/common";
import {JwtService} from "../servicios/jwt.service";
import {UsuarioService} from "../servicios/usuario.service";
import {Connection, getConnection} from "typeorm";
import {UsuarioEntity} from "./usuario.entity";
import {UsuarioGuard} from "../guards/usuario.guard";
export const Roles = (...roles: string[]) => ReflectMetadata('roles', roles);
@Controller('usuario')
@UseGuards(UsuarioGuard)
export class | {
constructor(private _jwtService: JwtService, private _usuarioService:UsuarioService){
}
@Get('verificar/:correoElectronico')
async validarUsuario(@Param() params, @Req() req, @Res() res){
const existe= await getConnection().getRepository(UsuarioEntity).findOne({correoElectronico: params.correoElectronico});
if(existe){
return res.send(existe)
}else
{
return res.send({mensaje: 'No existe'})
}
}
@Get('mostrar')
findAll(): Promise<UsuarioEntity[]> {
return this._usuarioService.llenar();
}
@Get('buscar/:correoElectronico')
async buscar(@Param() param, @Res() res):Promise<UsuarioEntity[]>{
const existe= await getConnection().getRepository(UsuarioEntity).findOne({correoElectronico: param.correoElectronico});
return res.send({existe});
}
@Post('crear')
async agregarUsuario(
@Body('nombre') nombre,
@Body('apellido') apellido,
@Body('fechaNacimiento') fechaNacimiento,
@Body('pais') pais,
@Body('ciudad') ciudad,
@Body('numeroTelefono') numeroTelefono,
@Body('correoElectronico') correoElectronico,
@Res() res, @Req() req
) {
const parametros = (nombre && apellido && fechaNacimiento && pais && ciudad && numeroTelefono && correoElectronico);
if (parametros) {
const userRepository = getConnection().getRepository(UsuarioEntity);
const user = userRepository.create(req.body);
return userRepository.save(user);
}
else {
throw new BadRequestException({
mensaje: 'No envia parametros'
})
}
}
} | UsuarioController |
epoch.test.ts | import { describe, expect, it, suite } from 'vitest'
import { datetime, longitude } from '.'
import {
getCurrentYearEnd,
getCurrentYearStart,
getGreenwhichSiderealTime,
getJulianDate,
getJulianYearInSeconds,
getLocalSiderealTime,
getModifiedJulianDate,
getNumberOfJulianCenturiesSinceEpoch,
getNumberOfJulianCenturiesSinceEpoch1900,
getNumberOfJulianCenturiesSinceEpoch2000
} from '../src'
import { J1900, J2000 } from '../src/epoch/constants'
suite('@observerly/polaris Epoch', () => {
describe('Current Year', () => {
it('getCurrentYearStart should be defined', () => {
expect(getCurrentYearStart).toBeDefined()
})
it('getCurrentYearStart should be 1st January', () => {
const yearStart = getCurrentYearStart(datetime)
expect(yearStart.getFullYear()).toBe(2021)
expect(yearStart.getMonth()).toBe(0)
expect(yearStart.getDate()).toBe(1)
expect(yearStart.getHours()).toBe(0)
expect(yearStart.getMinutes()).toBe(0)
})
it('getCurrentYearEnd should be defined', () => {
expect(getCurrentYearEnd).toBeDefined()
})
it('getCurrentYearEnd should be 31st December', () => {
const yearEnd = getCurrentYearEnd(datetime)
expect(yearEnd.getFullYear()).toBe(2021)
expect(yearEnd.getMonth()).toBe(11)
expect(yearEnd.getDate()).toBe(31)
expect(yearEnd.getHours()).toBe(0)
expect(yearEnd.getMinutes()).toBe(0)
})
})
describe('Julian Year', () => {
it('getJulianYearInSeconds should be defined', () => {
expect(getJulianYearInSeconds).toBeDefined()
})
it('getJulianYearInSeconds should be', () => {
const julianYearInSeconds = getJulianYearInSeconds()
expect(julianYearInSeconds).toBe(31557600)
})
})
describe('Julian Date', () => {
it('getJulianDate should be defined', () => {
expect(getJulianDate).toBeDefined()
})
it('getJulianDate should be', () => {
const julianDate = getJulianDate(datetime)
expect(julianDate).toBe(2459348.5)
})
})
describe('Modified Julian Date', () => {
it('getModifiedJulianDate should be defined', () => {
expect(getModifiedJulianDate).toBeDefined()
})
it('getModifiedJulianDate should be', () => { | const modifiedJulianDate = getModifiedJulianDate(datetime)
expect(modifiedJulianDate).toBe(59348)
})
})
describe('Number Of Centuries Since Epoch', () => {
it('getNumberOfJulianCenturiesSinceEpoch should be defined', () => {
expect(getNumberOfJulianCenturiesSinceEpoch).toBeDefined()
})
it('getNumberOfJulianCenturiesSinceEpoch should be', () => {
const T = getNumberOfJulianCenturiesSinceEpoch(datetime, J1900)
expect(T).toBe(1.2136481861738535)
})
it('getNumberOfJulianCenturiesSinceEpoch should be', () => {
const T = getNumberOfJulianCenturiesSinceEpoch(datetime, J2000)
expect(T).toBe(0.21364818617385353)
})
it('getNumberOfJulianCenturiesSinceEpoch1900 should be defined', () => {
expect(getNumberOfJulianCenturiesSinceEpoch1900).toBeDefined()
})
it('getNumberOfJulianCenturiesSinceEpoch1900 should be', () => {
const T = getNumberOfJulianCenturiesSinceEpoch1900(datetime)
expect(T).toBe(1.2136481861738535)
})
it('getNumberOfJulianCenturiesSinceEpoch2000 should be defined', () => {
expect(getNumberOfJulianCenturiesSinceEpoch2000).toBeDefined()
})
it('getNumberOfJulianCenturiesSinceEpoch2000 should be', () => {
const T = getNumberOfJulianCenturiesSinceEpoch2000(datetime)
expect(T).toBe(0.21364818617385353)
})
})
describe('Greenwhich Sidereal Time', () => {
it('getGreenwhichSiderealTime should be defined', () => {
expect(getGreenwhichSiderealTime).toBeDefined()
})
it('getGreenwhichSiderealTime should be', () => {
const GST = getGreenwhichSiderealTime(datetime)
expect(GST).toBe(15.463990399019053)
})
})
describe('Local Sidereal Time', () => {
it('getLocalSiderealTime should be defined', () => {
expect(getLocalSiderealTime).toBeDefined()
})
it('getLocalSiderealTime should be', () => {
const LST = getLocalSiderealTime(datetime, longitude)
expect(LST).toBe(5.099450799019053)
})
})
}) | |
SecondTableInA.py | # automatically generated, do not modify
# namespace: NamespaceA
import flatbuffers
class SecondTableInA(object):
__slots__ = ['_tab']
# SecondTableInA
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SecondTableInA
def ReferToC(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
|
return None
def SecondTableInAStart(builder): builder.StartObject(1)
def SecondTableInAAddReferToC(builder, referToC): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(referToC), 0)
def SecondTableInAEnd(builder): return builder.EndObject()
| x = self._tab.Indirect(o + self._tab.Pos)
from .TableInC import TableInC
obj = TableInC()
obj.Init(self._tab.Bytes, x)
return obj |
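A hedged sketch (not part of the source) of building a buffer with the helper functions above; the import path is an assumed package layout:
import flatbuffers
from NamespaceA import SecondTableInA as sta  # assumed module layout

builder = flatbuffers.Builder(0)
sta.SecondTableInAStart(builder)
# ReferToC is left at its default (0) in this minimal example.
root = sta.SecondTableInAEnd(builder)
builder.Finish(root)
buf = builder.Output()  # bytes that can later be read back via SecondTableInA().Init(buf, offset)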