| column | type | range |
| --- | --- | --- |
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 140 to 1.03M |
| ext | stringclasses | 94 values |
| lang | stringclasses | 21 values |
| max_stars_repo_path | stringlengths | 3 to 663 |
| max_stars_repo_name | stringlengths | 4 to 120 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | sequencelengths | 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 to 663 |
| max_issues_repo_name | stringlengths | 4 to 120 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | sequencelengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 to 663 |
| max_forks_repo_name | stringlengths | 4 to 135 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | sequencelengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 140 to 1.03M |
| avg_line_length | float64 | 2.32 to 23.1k |
| max_line_length | int64 | 11 to 938k |
| alphanum_fraction | float64 | 0.01 to 1 |
| score | float32 | 3 to 4.25 |
75553f12364fcac7b0d50118d7540b7a870c3770 | 4,482 | go | Go | pkg/operator/client/client.go | haotaogeng/fabedge | 029ef6f41254ac131f468f5968f167c1e123e3ae | ["Apache-2.0"] | 380 | 2021-07-16T04:53:57.000Z | 2022-03-31T12:54:34.000Z | pkg/operator/client/client.go | haotaogeng/fabedge | 029ef6f41254ac131f468f5968f167c1e123e3ae | ["Apache-2.0"] | 12 | 2021-08-04T02:04:44.000Z | 2022-02-18T10:28:26.000Z | pkg/operator/client/client.go | haotaogeng/fabedge | 029ef6f41254ac131f468f5968f167c1e123e3ae | ["Apache-2.0"] | 48 | 2021-07-16T06:23:27.000Z | 2022-03-22T12:04:53.000Z |
package client
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
apis "github.com/fabedge/fabedge/pkg/apis/v1alpha1"
"github.com/fabedge/fabedge/pkg/operator/apiserver"
certutil "github.com/fabedge/fabedge/pkg/util/cert"
)
const defaultTimeout = 5 * time.Second
type Interface interface {
GetEndpointsAndCommunities() (apiserver.EndpointsAndCommunity, error)
UpdateEndpoints(endpoints []apis.Endpoint) error
SignCert(csr []byte) (Certificate, error)
}
type client struct {
clusterName string
baseURL *url.URL
client *http.Client
}
type Certificate struct {
Raw *x509.Certificate
DER []byte
PEM []byte
}
func NewClient(apiServerAddr string, clusterName string, transport http.RoundTripper) (Interface, error) {
baseURL, err := url.Parse(apiServerAddr)
if err != nil {
return nil, err
}
return &client{
baseURL: baseURL,
clusterName: clusterName,
client: &http.Client{
Timeout: defaultTimeout,
Transport: transport,
},
}, nil
}
func (c *client) SignCert(csr []byte) (cert Certificate, err error) {
req, err := http.NewRequest(http.MethodPost, join(c.baseURL, apiserver.URLSignCERT), csrBody(csr))
if err != nil {
return cert, err
}
req.Header.Set("Content-Type", "text/plain")
req.Header.Set(apiserver.HeaderClusterName, c.clusterName)
resp, err := c.client.Do(req)
if err != nil {
return cert, err
}
return readCertFromResponse(resp)
}
func (c *client) UpdateEndpoints(endpoints []apis.Endpoint) error {
data, err := json.Marshal(endpoints)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, join(c.baseURL, apiserver.URLUpdateEndpoints), bytes.NewReader(data))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set(apiserver.HeaderClusterName, c.clusterName)
resp, err := c.client.Do(req)
if err != nil {
return err
}
_, err = handleResponse(resp)
return err
}
func (c *client) GetEndpointsAndCommunities() (ea apiserver.EndpointsAndCommunity, err error) {
req, err := http.NewRequest(http.MethodGet, join(c.baseURL, apiserver.URLGetEndpointsAndCommunities), nil)
if err != nil {
return ea, err
}
req.Header.Set(apiserver.HeaderClusterName, c.clusterName)
resp, err := c.client.Do(req)
if err != nil {
return ea, err
}
	data, err := handleResponse(resp)
	if err != nil {
		return ea, err
	}
	err = json.Unmarshal(data, &ea)
return ea, err
}
func GetCertificate(apiServerAddr string) (cert Certificate, err error) {
baseURL, err := url.Parse(apiServerAddr)
if err != nil {
return cert, err
}
cli := &http.Client{
Timeout: defaultTimeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
}
resp, err := cli.Get(join(baseURL, apiserver.URLGetCA))
if err != nil {
return cert, err
}
return readCertFromResponse(resp)
}
func SignCertByToken(apiServerAddr string, token string, csr []byte, certPool *x509.CertPool) (cert Certificate, err error) {
baseURL, err := url.Parse(apiServerAddr)
if err != nil {
return cert, err
}
cli := &http.Client{
Timeout: defaultTimeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: certPool,
},
},
}
req, err := http.NewRequest(http.MethodPost, join(baseURL, apiserver.URLSignCERT), csrBody(csr))
if err != nil {
return cert, err
}
req.Header.Set(apiserver.HeaderAuthorization, "bearer "+token)
req.Header.Set("Content-Type", "text/html")
resp, err := cli.Do(req)
if err != nil {
return cert, err
}
return readCertFromResponse(resp)
}
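// join resolves ref against baseURL and returns the resulting URL as a string (parse errors are ignored).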
func join(baseURL *url.URL, ref string) string {
u, _ := baseURL.Parse(ref)
return u.String()
}
func readCertFromResponse(resp *http.Response) (cert Certificate, err error) {
cert.PEM, err = handleResponse(resp)
if err != nil {
return
}
cert.DER, err = certutil.DecodePEM(cert.PEM)
if err != nil {
return
}
cert.Raw, err = x509.ParseCertificate(cert.DER)
return cert, err
}
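// handleResponse reads the response body; status codes >= 400 are converted into an HttpError,
// and 204 No Content yields nil content.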
func handleResponse(resp *http.Response) (content []byte, err error) {
defer resp.Body.Close()
if resp.StatusCode >= 400 {
content, err = ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return nil, &HttpError{
Response: resp,
Message: string(content),
}
}
if resp.StatusCode == http.StatusNoContent {
return nil, nil
}
return ioutil.ReadAll(resp.Body)
}
func csrBody(csr []byte) io.Reader {
return bytes.NewReader(certutil.EncodeCertRequestPEM(csr))
}
| 21.342857 | 125 | 0.700357 | 3.0625 |
e23ac0c99162a0ea92f7f76c6664dfe96d46bb75 | 3,603 | py | Python | sila_library/sila2lib/fdl_parser/tests/test_fdl_parser.py | lemmi25/sila2lib | ac4db8ee7fe6c99bde498151a539b25be2021d2f | ["MIT"] | null | null | null | sila_library/sila2lib/fdl_parser/tests/test_fdl_parser.py | lemmi25/sila2lib | ac4db8ee7fe6c99bde498151a539b25be2021d2f | ["MIT"] | null | null | null | sila_library/sila2lib/fdl_parser/tests/test_fdl_parser.py | lemmi25/sila2lib | ac4db8ee7fe6c99bde498151a539b25be2021d2f | ["MIT"] | null | null | null |
# Set pylint configuration for this file
# pylint: disable=missing-docstring, protected-access
# import general Packages
import unittest
import os
# import package related modules and classes
from ..fdl_parser import FDLParser
from ..command import Command
from ..property import Property
from ..data_type_definition import DataTypeDefinition
from ..standard_errors import DefinedExecutionError
class TestFDLParser(unittest.TestCase):
def setUp(self):
"""
Sets up basic attributes for the unit tests run in this class.
Create the basic path in which the input xml files are stored.
"""
self.base_path = os.path.join(os.path.dirname(__file__), "fdl")
def test_feature(self):
obj = FDLParser(os.path.join(self.base_path, "Simple.sila.xml"))
self.assertEqual(obj.root.tag, '{http://www.sila-standard.org}Feature')
def test_attributes(self):
"""
Test of all attributes are read correctly
For this test it is assumed that no optional attributes are present and only default values are found.
"""
obj = FDLParser(os.path.join(self.base_path, "Simple.sila.xml"))
# start with mandatory attributes
self.assertEqual(obj.feature_version, '1.3')
self.assertEqual(obj.feature_version_major, 1)
self.assertEqual(obj.feature_version_minor, 3)
self.assertEqual(obj.originator, 'org.silastandard')
self.assertEqual(obj.sila2_version, '1.0')
# optional arguments and defaults
self.assertEqual(obj.maturity_level, 'Draft')
self.assertEqual(obj.category, 'example')
self.assertEqual(obj.locale, 'en-us')
def test_attributes_optional(self):
"""
Tests if optional attributes are read correctly if not set.
For this test all optional attributes must be set.
"""
obj = FDLParser(os.path.join(self.base_path, "Simple_AttributesOptional.sila.xml"))
self.assertEqual(obj.locale, 'en-us')
self.assertEqual(obj.maturity_level, 'Draft')
def test_elements_base(self):
"""Tests if the base elements of a feature are read correctly."""
obj = FDLParser(os.path.join(self.base_path, "Simple.sila.xml"))
self.assertEqual(obj.identifier, 'SimpleFeature')
self.assertEqual(obj.name, 'Simple Feature')
self.assertEqual(
obj.description,
'Minimal feature definition, nothing is required. Can be used to check (default) attributes.'
)
def test_elements_complete(self):
"""Tests if all elements (one of each) are read correctly."""
obj = FDLParser(os.path.join(self.base_path, "Complete.sila.xml"))
self.assertEqual(len(obj.commands), 1)
self.assertIn('CommandIdentifier', obj.commands)
self.assertIs(type(obj.commands['CommandIdentifier']), Command)
self.assertEqual(len(obj.properties), 1)
self.assertIn('PropertyIdentifier', obj.properties)
self.assertIs(type(obj.properties['PropertyIdentifier']), Property)
self.assertEqual(len(obj.data_type_definitions), 1)
self.assertIn('DataTypeDefinitionIdentifier', obj.data_type_definitions)
self.assertIs(type(obj.data_type_definitions['DataTypeDefinitionIdentifier']), DataTypeDefinition)
self.assertEqual(len(obj.defined_execution_errors), 1)
self.assertIn('DefinedExecutionErrorIdentifier', obj.defined_execution_errors)
self.assertIs(type(obj.defined_execution_errors['DefinedExecutionErrorIdentifier']), DefinedExecutionError)
| 40.943182 | 115 | 0.696087 | 3.125 |
662749aa4f924566389584889915ed287ffe6301 | 23,848 | py | Python | classification/ngram_based_classifier.py | gbosetti/ca | 3f37edc4b8f69f61d02b881242522f6fa15e2695 | ["MIT"] | null | null | null | classification/ngram_based_classifier.py | gbosetti/ca | 3f37edc4b8f69f61d02b881242522f6fa15e2695 | ["MIT"] | 4 | 2021-06-08T22:30:03.000Z | 2022-03-12T00:48:52.000Z | classification/ngram_based_classifier.py | gbosetti/cati | 3f37edc4b8f69f61d02b881242522f6fa15e2695 | ["MIT"] | null | null | null |
import string
import nltk
import traceback
from nltk.corpus import stopwords
nltk.download('stopwords')
from collections import Counter
from mabed.es_connector import Es_connector
from nltk.tokenize import TweetTokenizer
import re
import json
from elasticsearch_dsl import UpdateByQuery
from nltk.stem.snowball import FrenchStemmer
from nltk.stem.snowball import EnglishStemmer
from nltk.stem.snowball import ArabicStemmer
from nltk.stem.snowball import SpanishStemmer
class NgramBasedClasifier:
def __init__(self, config_relative_path=''):
# self.logs = []
self.current_thread_percentage = 0
self.config_relative_path = config_relative_path
self.tknzr = TweetTokenizer()
self.retrievedLangs = set() # Matching the languages in the dataset
def get_n_grams(self, text, length=2):
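        # Slide a window of the requested length over the token list by zipping
        # shifted copies of it (length=2 yields consecutive token pairs).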
n_grams = zip(*[text[i:] for i in range(length)])
# n_grams = list(nltk.bigrams(text))
return n_grams
def remove_stop_words(self, full_text, langs=["en", "fr", "es"]):
punctuation = list(string.punctuation + "…" + "’" + "'" + '🔴' + '•' + '...' + '.')
multilang_stopwords = self.get_stopwords_for_langs(langs) + ["Ã", "rt", "im"] + punctuation
full_text = full_text.lower().translate(str.maketrans('', '', string.punctuation))
tokenized_text = self.tknzr.tokenize(full_text) # nltk.word_tokenize(full_text)
filtered_words = list(filter(lambda word: len(word)>1 and word not in multilang_stopwords, tokenized_text))
full_text = " ".join(filtered_words)
full_text_no_emojis = self.remove_emojis(full_text)
full_text_no_emojis = " ".join(full_text_no_emojis.split())
return full_text_no_emojis
def remove_urls(self, text):
return re.sub(r'http\S+', '', text).strip()
def lemmatize(self, text, lang):
# spacy.prefer_gpu()
# nlp = spacy.load(lang) # en fr "en_core_web_sm"
if lang == "fr":
stemmer = FrenchStemmer()
elif lang == "es":
stemmer = SpanishStemmer()
else:
stemmer = EnglishStemmer()
stemmed = []
for word in text.split(" "):
stemmed.append(stemmer.stem(word))
# doc = nlp(u""+text)
# lem_terms = []
# for token in doc:
# lem_terms.append(token.lemma_)
return " ".join(stemmed)
def search_bigrams_related_tweets(self, **kwargs):
my_connector = Es_connector(index=kwargs["index"])
if kwargs.get('full_search', False): # All tweets
query = {
"query": {
"bool": {
"must": [
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
}
else: # matching keywords
query = {
"query": {
"bool": {
"must": [
{"match": {"text": kwargs["word"]}},
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
}
print(query)
return my_connector.init_paginatedSearch(query)
def update_tweets_state_by_ngram(self, **kwargs):
tweets_connector = Es_connector(index=kwargs["index"], doc_type="tweet")
if kwargs.get('full_search', False): # All tweets
query = {
"query": {
"bool": {
"must": [
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["query_label"]}}
]
}
}
}
else: # Tweets matching a user-generated query
query = {
"query": {
"bool": {
"must": [
{"match": {"text": kwargs["word"]}},
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["query_label"]}}
]
}
}
}
return tweets_connector.update_query(query, kwargs["session"], kwargs["new_label"])
def search_event_bigrams_related_tweets(self, **kwargs):
my_connector = Es_connector(index=kwargs["index"])
query = {
"query": {
"bool": {
"should": kwargs["target_terms"],
"minimum_should_match": 1,
"must": [
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
}
return my_connector.init_paginatedSearch(query)
def update_tweets_state_by_event_ngram(self, **kwargs):
tweets_connector = Es_connector(index=kwargs["index"], doc_type="tweet")
query = {
"query": {
"bool": {
"should": kwargs["target_terms"],
"minimum_should_match": 1,
"must": [
{"match_phrase": {kwargs["ngramsPropName"]: kwargs["ngram"]}},
{"match": {kwargs["session"]: kwargs["query_label"]}}
]
}
}
}
return tweets_connector.update_query(query, kwargs["session"], kwargs["new_label"])
def get_ngrams(self, **kwargs):
if kwargs.get('full_search', False):
query = {
"bool": {
"must": [
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
else:
query = {
"bool": {
"must": [
{"match": {"text": kwargs["word"]}},
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
return self.get_ngrams_by_query(query=query, **kwargs)
def chunks(self, target_list, target_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(target_list), target_size):
yield target_list[i:i + target_size]
def get_positive_unlabeled_ngrams(self, **kwargs):
res = self.get_ngrams_by_query(query={
"match": {
kwargs["field"]: "confirmed"
}
}, **kwargs)
try:
return res["aggregations"]["ngrams_count"]["buckets"]
except KeyError as e:
return []
def get_negative_unlabeled_ngrams(self, **kwargs):
res = self.get_ngrams_by_query(query={
"match": {
kwargs["field"]: "negative"
}
}, **kwargs)
try:
return res["aggregations"]["ngrams_count"]["buckets"]
except KeyError as e:
return []
def get_ngrams_for_ids(self, **kwargs):
ids_chunks = self.chunks(kwargs["ids"], 50)
total_buckets = []
for chunk in ids_chunks:
ids = ""
for id in chunk:
ids += id + " or "
ids = ids[:-4]
query = {
"match": {
"id_str": ids
}
}
res = self.get_ngrams_by_query(query=query, **kwargs)
buckets = res["aggregations"]["ngrams_count"]["buckets"]
if len(buckets)>0:
total_buckets += buckets
try:
return total_buckets
except KeyError as e:
return []
def get_ngrams_for_event(self, **kwargs):
query = {
"bool": {
"should": kwargs["target_terms"],
"minimum_should_match": 1,
"must": [
{"match": {kwargs["session"]: kwargs["label"]}}
]
}
}
return self.get_ngrams_by_query(query=query, **kwargs)
def get_ngrams_by_query(self, query="", **kwargs):
try:
my_connector = Es_connector(index=kwargs["index"], config_relative_path=self.config_relative_path)
full_query = {
"query": query,
"size": 0,
"aggs": {
"ngrams_count": {
"terms": {
"field": kwargs["n_size"] + "grams.keyword",
"size": kwargs["results_size"]
},
"aggs": {
"status": {
"terms": {
"field": kwargs["session"] + ".keyword"
}
}
}
}
}
}
return my_connector.search(full_query)
except Exception as e:
print('Error: ' + str(e))
traceback.print_exc()
return {}
def get_search_related_classification_data(self, index="test3", word="", session="", label="confirmed OR proposed OR negative", matching_ngrams=[], full_search=False):
if full_search:
query = {
"bool": {
"must": [
{"match": {session: label}}
]
}
}
else:
query = {
"bool": {
"must": [
{"match": {"text": word}},
{"match": {session: label}}
]
}
}
my_connector = Es_connector(index=index)
res = my_connector.search({
"size": 0,
"query": query,
"aggs": {
"query_classification": {
"terms": {
"field": session + ".keyword"
}
}
}
})
return res['aggregations']['query_classification']['buckets']
def get_bigrams_related_classification_data(self, matching_ngrams=[]):
# Counting the matching bigrams results by category
total_ngrams = matching_ngrams["hits"]["total"]
confirmed_ngrams = 0
negative_ngrams = 0
unlabeled_ngrams = 0
accum_total_ngrams = 0
for ngram in matching_ngrams['aggregations']['ngrams_count']['buckets']:
curr_confirmed = self.get_classif_doc_count("confirmed", ngram["status"]["buckets"])
confirmed_ngrams += curr_confirmed
curr_negative = self.get_classif_doc_count("negative", ngram["status"]["buckets"])
negative_ngrams += curr_negative
curr_unlabeled = self.get_classif_doc_count("proposed", ngram["status"]["buckets"])
unlabeled_ngrams += curr_unlabeled
accum_total_ngrams += curr_confirmed + curr_negative + curr_unlabeled
if accum_total_ngrams ==0:
return 0,0,0
else:
return (confirmed_ngrams / accum_total_ngrams) * total_ngrams, \
(negative_ngrams / accum_total_ngrams) * total_ngrams, \
(unlabeled_ngrams / accum_total_ngrams) * total_ngrams # confirmed_ngrams, negative_ngrams, unlabeled_ngrams
def get_classification_data(self, **kwargs):
query_classification = self.get_search_related_classification_data(kwargs["index"], kwargs["word"], kwargs["session"], kwargs["label"], kwargs["matching_ngrams"], kwargs['full_search'])
confirmed_ngrams, negative_ngrams, unlabeled_ngrams = self.get_bigrams_related_classification_data(kwargs["matching_ngrams"])
return [
{
"label": "Query",
"confirmed": self.get_classif_doc_count("confirmed", query_classification),
"negative": self.get_classif_doc_count("negative", query_classification),
"unlabeled": self.get_classif_doc_count("proposed", query_classification)
},
{
"label": "Ngrams",
"confirmed": confirmed_ngrams,
"negative": negative_ngrams,
"unlabeled": unlabeled_ngrams
}
]
def get_classif_doc_count(self, tag, classification):
category = list(filter(lambda item: item["key"] == tag, classification))
if len(category) > 0:
return category[0]["doc_count"]
else:
return 0
def gerenate_ngrams_for_tweets(self, tweets, **kwargs ): # remove_stopwords=True, stemming=True):
length = int(kwargs.get('length', 2))
tweets_to_update = [] # So the URL doesn't get too large
prop = kwargs['from_property']
for tweet in tweets:
try:
if prop in tweet["_source"]:
clean_text = self.remove_stop_words(tweet["_source"][prop]).split()
ngrams = list(self.get_n_grams(clean_text, length))
tweets_to_update.append({
"_ngrams": self.format_single_tweet_ngrams(ngrams),
"_id": tweet["_id"]
})
# for prop in tweet["_source"]:
# if tweet["_source"][prop] == None:
# tweet["_source"][prop] = "None"
full_tweet_ngrams = self.format_single_tweet_ngrams(ngrams)
self.updatePropertyValue(tweet=tweet, property_name=kwargs["prop"], property_value=full_tweet_ngrams, index=kwargs["index"])
else:
print("The tweet has no ", prop, " property.")
except Exception as e:
print('Error: ' + str(e))
# cnn = Es_connector(index=kwargs["index"])
# script_source = "for (int i = 0; i < docs.length; ++i) { if(docs[i]['_id'] == ctx._id){ ctx._source['" + kwargs[
# "prop"] + "'] = docs[i]['_ngrams']; break; }}"
# ubq = UpdateByQuery(using=cnn.es, index=cnn.index).script(source=script_source)
# for i in range(0, len(tweets_to_update), 5):
#
# tweets_chunk = tweets_to_update[i:i + 5]
# str_tweets = str(tweets_chunk).replace("None", "\'None\'").replace("\'", "\"")
# tweet_ids = [ { "match": {"_id": tweet["_id"]}} for tweet in tweets_chunk]
#
# query = {"query": {"bool": {"should": tweet_ids, "minimum_should_match": "1"}}}
# ubq.update_from_dict(query).params(docs=tweets_chunk)
# ubq.execute()
# Es_connector(index=kwargs["index"]).update_by_query(
# {"query": {"bool":{"should": tweet_ids, "minimum_should_match":"1" }}},
# "for (int i = 0; i < docs.length; ++i) { if(docs[i]['_id'] == ctx._id){ ctx._source['" +
# kwargs["prop"] + "'] = docs[i]['_ngrams']; break; }}",
# tweets_chunk
# )
# # "params": { "docs": tweets_chunk }
# q = {
# "script": {
# "inline": "def tweets = " + str_tweets + "; for (int i = 0; i < params.docs.length; ++i) { if(params.docs[i]['_id'] == ctx._id){ ctx._source['" + kwargs["prop"] + "'] = params.docs[i]['_ngrams']; break; }}",
# "lang": "painless"
# },
# "query": {
# "bool":{
# "should":tweet_ids,
# "minimum_should_match":"1"
# }
# }
# }
# print("...")
# cnn.es.update_by_query(body=q, doc_type='tweet', index=kwargs["index"]) #, params={ "docs": tweets_chunk })
# def tweets = " + str_tweets + ";
# ubq = UpdateByQuery(index=cnn.es.index).using(cnn.es).script(
# source="for (int i = 0; i < params.docs.length; ++i) { if(params.docs[i]['_id'] == ctx._id){ ctx._source['" + kwargs["prop"] + "'] = params.docs[i]['_ngrams']; break; }}",
# params={ "docs": str_tweets }
# )
# response = ubq.execute()
# ubq = UpdateByQuery(index=cnn.es.index).using(cnn.es).script(
# source="def tweets = " + str_tweets + "; for (int i = 0; i < params.docs.length; ++i) { if(params.docs[i]['_id'] == ctx._id){ ctx._source['" +
# kwargs["prop"] + "'] = params.docs[i]['_ngrams']; break; }}"
# )
# response = ubq.execute()
def remove_emojis(self, string):
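        """Remove emoji and other pictographic characters from the string using a Unicode-range regex."""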
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
def generate_ngrams_for_index(self, **kwargs):
try:
# Get the data for performinga paginated search
self.current_thread_percentage = 0
print("Starting")
my_connector = Es_connector(index=kwargs["index"])
query = kwargs.get('query', {
"query": {
"match_all": {}
}
})
res = my_connector.init_paginatedSearch(query)
sid = res["sid"]
scroll_size = res["scroll_size"]
total = int(res["total"])
# Analyse and process page by page
i = 0
total_scrolls = int(total/scroll_size)
processed_scrolls = 0
print("from_property:", kwargs['from_property'])
while scroll_size > 0:
tweets = res["results"]
self.gerenate_ngrams_for_tweets(tweets, from_property=kwargs['from_property'], prop=kwargs["prop"], index=kwargs["index"], length=kwargs["length"])
i += 1
res = my_connector.loop_paginatedSearch(sid, scroll_size)
scroll_size = res["scroll_size"]
processed_scrolls += 1
self.current_thread_percentage = round(processed_scrolls * 100 / total_scrolls, 0)
print("Completed: ", self.current_thread_percentage, "%")
# Clean it at the end so the clien knows when to end asking for more logs
self.current_thread_percentage = 100
return True
except Exception as e:
print('Error: ' + str(e))
return False
# def generate_ngrams_for_unlabeled_tweets_on_index(self, **kwargs):
#
# query={
# "query": {
# "bool": {
# "must_not": {
# "exists" : { "field" : kwargs["prop"] }
# }
# }
# }
# }
#
# return self.generate_ngrams_for_index(**dict(kwargs, query=query))
def format_single_tweet_ngrams(self, ngrams):
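        """Join each n-gram tuple into a single dash-separated string, e.g. ('hello', 'world') -> 'hello-world'."""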
full_tweet_ngrams = []
for ngram in ngrams:
single_ngram_text = ""
for term in ngram:
single_ngram_text = single_ngram_text + term + "-"
single_ngram_text = single_ngram_text[:-1] #remove the last - of the single ngram
full_tweet_ngrams.append(single_ngram_text)
return full_tweet_ngrams
#
# for k, v in ngrams:
#
# ngram_text = ""
# for term in k:
# ngram_text = ngram_text + term + "-"
# ngram_text = ngram_text.strip()
#
# return ngram_text
def get_current_backend_logs(self):
return { "percentage": self.current_thread_percentage }
def updatePropertyValue(self, **kwargs):
tweet = kwargs["tweet"]
# cnn = Es_connector(index=kwargs["index"]);
#
# q = {
# "script": {
# "inline": "ctx._source." + kwargs["property_name"] + " = params.value",
# "lang": "painless",
# "params": {
# "value": str(kwargs["property_value"])
# }
# },
# "query": {
# "match": {
# "_id": tweet["_id"]
# }
# }
# }
#
# cnn.es.update_by_query(body=q, doc_type='tweet', index=kwargs["index"])
Es_connector(index=kwargs["index"]).es.update(
index=kwargs["index"],
doc_type="tweet",
id=tweet["_id"],
body={"doc": {
kwargs["property_name"]: kwargs["property_value"]
}},
retry_on_conflict=5
)
def get_stopwords_for_langs(self, langs):
swords = []
if "en" in langs:
swords = swords + stopwords.words('english')
self.retrievedLangs.add("en")
if "fr" in langs:
swords = swords + stopwords.words('french')
self.retrievedLangs.add("fr")
if "ar" in langs:
swords = swords + stopwords.words('arabic')
self.retrievedLangs.add("ar")
if "nl" in langs:
swords = swords + stopwords.words('dutch')
self.retrievedLangs.add("nl")
if "id" in langs:
swords = swords + stopwords.words('indonesian')
self.retrievedLangs.add("id")
if "fi" in langs:
swords = swords + stopwords.words('finnish')
self.retrievedLangs.add("fi")
if "de" in langs:
swords = swords + stopwords.words('german')
self.retrievedLangs.add("de")
if "hu" in langs:
swords = swords + stopwords.words('hungarian')
self.retrievedLangs.add("hu")
if "it" in langs:
swords = swords + stopwords.words('italian')
self.retrievedLangs.add("it")
if "nb" in langs:
swords = swords + stopwords.words('norwegian')
self.retrievedLangs.add("nb")
if "pt" in langs:
swords = swords + stopwords.words('portuguese')
self.retrievedLangs.add("pt")
if "ro" in langs:
swords = swords + stopwords.words('romanian')
self.retrievedLangs.add("ro")
if "ru" in langs:
swords = swords + stopwords.words('russian')
self.retrievedLangs.add("ru")
if "es" in langs:
swords = swords + stopwords.words('spanish')
self.retrievedLangs.add("es")
if "sv" in langs:
swords = swords + stopwords.words('swedish')
self.retrievedLangs.add("sv")
if "tr" in langs:
swords = swords + stopwords.words('turkish')
self.retrievedLangs.add("tr")
        # TODO: complete with the full list of supported langs (there are some langs supported but missing and not documented, e.g. Bulgarian or Ukrainian https://pypi.org/project/stop-words/ )
        # The full list of languages may be found in C:/Users/username/AppData/Roaming/nltk_data/corpora/stopwords
return swords
| 36.024169 | 225 | 0.497358 | 3.171875 |
1a61caa4639109bcd033cdcb3e111c6b139be2a4 | 14,428 | py | Python | roboTraining/unitTest.py | Gabs48/SpringMassNetworks | d917ef242ed99b02f82e9ff8697960d0d1d65178 | ["MIT"] | 3 | 2019-11-27T15:35:20.000Z | 2021-12-09T08:20:34.000Z | roboTraining/unitTest.py | Gabs48/SpringMassNetworks | d917ef242ed99b02f82e9ff8697960d0d1d65178 | ["MIT"] | null | null | null | roboTraining/unitTest.py | Gabs48/SpringMassNetworks | d917ef242ed99b02f82e9ff8697960d0d1d65178 | ["MIT"] | 1 | 2021-03-16T15:42:36.000Z | 2021-03-16T15:42:36.000Z |
from robot import SoftEnvironment, HardEnvironment, Morphology, SineControl, RobotState, Robot, SpringMorphology, SpringMorphology3D
from simulate import Plotter, Simulation, SimulationEnvironment, VerletSimulation
from training import Training, TrainingScheme, TrainingVariable, RandomTraining
from utils import SpaceList, Save
import utils
import numpy as np
import unittest
""" test basic functionalities of the RoboTraining Package """
class Constants(object):
thoroughness = 2;
def emptyRobot( spring = 0, damping = 0,gravity=0,groundFriction=0,groundContact=0,airFriction=0,amplitude = 0, ground = False):
""" create a simple robot with two nodes connected by a spring with restlength 1"""
if ground:
env = HardEnvironment(gravity = gravity, airFriction = airFriction)
else:
env = SoftEnvironment(gravity = gravity, groundFriction = groundFriction, groundContact = groundContact, airFriction = airFriction)
morph = SpringMorphology(noNodes=2, mass = 1, spring = spring, damping = damping, noNeighbours = 1, environment = env)
morph.restLength = np.array([[0,1],[1,0]])
control = SineControl(morph = morph, amplitude = amplitude, phase = np.pi, omega =2* np.pi)
state = RobotState(0, morph)
robot = Robot(morph, control, state)
return robot
def emptyRobot3D( spring = 0, damping = 0,gravity=0,groundFriction=0,groundContact=0,airFriction=0,amplitude = 0, ground = False):
""" create a simple 3D robot with two nodes connected by a spring with restlength 1"""
if ground:
		env = HardEnvironment(gravity = gravity, airFriction = airFriction, threeD = True)
else:
env = SoftEnvironment(gravity = gravity, groundFriction = groundFriction, groundContact = groundContact, airFriction = airFriction, threeD =True)
morph = SpringMorphology3D(noNodes=2, mass = 1, spring = spring, damping = damping, noNeighbours = 1, environment = env)
morph.restLength = np.array([[0,1],[1,0]])
control = SineControl(amplitude = amplitude, phase = np.pi, omega =2* np.pi, morph = morph)
state = RobotState(0, morph)
robot = Robot(morph, control, state)
return robot
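# Helper functions to overwrite a robot's state with explicit positions and speeds (2D and 3D).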
def setState2D (robot, xpos =[0,0], ypos =[0,0] , xspeed =[0,0], yspeed = [0,0]):
robot.state.setState2D(xpos, ypos, xspeed, yspeed, 0)
def setState3D (robot, xpos =[0,0], ypos =[0,0] , zpos = [0,0] , xspeed =[0,0], yspeed = [0,0], zspeed = [0,0]):
robot.state.setState3D(xpos, ypos, zpos, xspeed, yspeed, zspeed, 0)
def simpleSimulation(robot, timeStep = 1e-3, simulationLength = 1000, verlet = True):
""" create a simple simulation without any plotting """
plotenv = Plotter(plot=False);
simulenv = SimulationEnvironment(timeStep = timeStep, simulationLength = simulationLength, plot = plotenv, verlet = verlet)
if verlet:
simulation = VerletSimulation(simulenv, robot)
else:
simulation = Simulation(simulenv, robot)
return simulation
def plotSimulation(robot, timeStep = 1e-3, simulationLength = 1000, verlet = True, movie = False):
""" create a simple simulation without any plotting """
plotenv = Plotter(plot=True, movie = movie);
simulenv = SimulationEnvironment(timeStep = timeStep, simulationLength = simulationLength, plot = plotenv, verlet = verlet)
if verlet:
simulation = VerletSimulation(simulenv, robot)
else:
simulation = Simulation(simulenv, robot)
return simulation
def forceEqual(robot, xforceAim=[0, 0], yforceAim= [0, 0] ):
""" Are forces equal to the prescribed force"""
f = robot.computeForces()
return all((np.allclose(f.x, xforceAim), np.allclose(f.y, yforceAim)))
def forceEqual3D(robot, xforceAim=[0, 0], yforceAim= [0, 0], zforceAim = [0, 0] ):
""" Are forces equal to the prescribed force"""
f = robot.computeForces()
return all(((np.allclose(f.x, xforceAim), np.allclose(f.y, yforceAim)), np.allclose(f.z, zforceAim)))
def stateEqual(robot, xposAim =[0, 0], yposAim = [0, 0], xspeedAim = [0, 0], yspeedAim= [0, 0],tolerance = 1e-3):
"""Is state is equal to prescribed state"""
pos,speed, t = robot.getStateParameters()
return all (( np.allclose(pos.x, xposAim,atol = tolerance),
np.allclose(pos.y, yposAim, atol = tolerance),
np.allclose(speed.x, xspeedAim, atol = tolerance),
np.allclose(speed.y, yspeedAim, atol = tolerance) ))
def stateEqual3D(robot, xposAim =[0, 0], yposAim = [0, 0], zposAim = [0, 0 ], xspeedAim = [0, 0], yspeedAim= [0, 0], zspeedAim= [0,0], tolerance = 1e-3):
"""Is state is equal to prescribed state"""
pos,speed, t = robot.getStateParameters()
return all (( np.allclose(pos.x, xposAim,atol = tolerance),
np.allclose(pos.y, yposAim, atol = tolerance),
np.allclose(pos.z, zposAim, atol = tolerance),
np.allclose(speed.x, xspeedAim, atol = tolerance),
np.allclose(speed.y, yspeedAim, atol = tolerance),
np.allclose(speed.z, zspeedAim, atol = tolerance)))
class TestRobot(unittest.TestCase):
def testStaticSpring2D(self):
"""static spring force 2D"""
robot = emptyRobot(spring = 10)
setState2D(robot, xpos= [0,2])
assert forceEqual(robot, xforceAim = [10, -10])
setState2D(robot, ypos= [0,3])
assert forceEqual(robot, yforceAim = [20, -20])
def testStaticSpring3D(self):
"""static spring force 3D"""
robot = emptyRobot3D(spring = 10)
setState3D(robot, xpos= [0,2])
assert forceEqual3D(robot, xforceAim = [10, -10])
setState3D(robot, ypos= [0,3])
assert forceEqual3D(robot, yforceAim = [20, -20])
setState3D(robot, zpos= [0,2])
assert forceEqual3D(robot, zforceAim = [10, -10])
def testKineticSpring2D(self):
"""dynamic damping spring force 2D"""
robot = emptyRobot(damping = 100)
setState2D(robot, xpos= [0,1], xspeed =[0,2])
assert forceEqual(robot, xforceAim = [200, -200])
setState2D(robot, xpos= [0,1], yspeed =[-1, 1])
assert forceEqual(robot, yforceAim = [200, -200])
def testKineticSpring3D(self):
"""dynamic damping spring force 3D"""
robot = emptyRobot3D(damping = 100)
setState3D(robot, xpos= [0,1], xspeed =[0,2])
assert forceEqual3D(robot, xforceAim = [200, -200])
setState3D(robot, xpos= [0,1], yspeed =[-1, 1])
assert forceEqual3D(robot, yforceAim = [200, -200])
setState3D(robot, xpos= [0,1], zspeed =[0,2])
assert forceEqual3D(robot, zforceAim = [200, -200])
def testGravity(self):
""" Gravity Force"""
robot = emptyRobot(gravity = 10)
assert forceEqual(robot, yforceAim = [-10, -10])
def testAirfriction(self):
""" Air Friction Force"""
robot = emptyRobot(airFriction = 10)
setState2D(robot, xpos= [0, 1], xspeed =[-5, 5], yspeed= [-1, 1])
assert forceEqual(robot, xforceAim = [50, -50], yforceAim = [10, -10])
def testAirfriction3D(self):
""" Air Friction Force"""
robot = emptyRobot3D(airFriction = 10)
setState3D(robot, xpos= [0, 1], xspeed =[-5, 5], yspeed= [-1, 1], zspeed = [10, 10])
assert forceEqual3D(robot, xforceAim = [50, -50], yforceAim = [10, -10], zforceAim = [-100, -100])
def testNormalforce(self):
""" Normal Force """
robot = emptyRobot(groundContact = 1)
robot.state.setState2D([0, 1], [-1, -1] , [0, 0], [-1, 1], 0)
f = robot.computeForces()
assert np.allclose(f.x, [0, -0])
assert np.alltrue(f.y > [0, 0])
def testFriction(self):
""" Friction Force """
robot = emptyRobot(groundFriction = 1)
setState2D(robot, [0, 1], [-1, 1] , [4, 4], [-1, 1])
f = robot.computeForces()
assert f.x[0] < 0
assert f.x[1] == 0
assert np.allclose(f.y , [0, 0])
def testdefault(self):
""" default robot calculates force """
morph = SpringMorphology()
control = SineControl(morph)
robot = Robot(morph, control);
f = robot.computeForces()
assert all(np.isfinite(f.x))
assert all(np.isfinite(f.y))
def testCopyState(self):
"""create a deep copy of the state"""
robot = emptyRobot();
setState2D(robot, xpos = [0, 1])
state = robot.getState()
robot.state.pos.x[1] = 4
assert state.pos.x[1] == 1
def testCopyState3D(self):
"""create a deep copy of the state"""
robot = emptyRobot3D();
setState3D(robot, zpos = [0, 1])
state = robot.getState()
robot.state.pos.z[1] = 4
assert state.pos.z[1] == 1
def testControl(self):
robot = emptyRobot3D( amplitude = 0.5)
assert np.allclose(robot.control.modulationFactor(robot.state), [[1, 1], [1, 1]])
robot.state.currentTime = 0.25
assert np.allclose(robot.control.modulationFactor(robot.state), [[1, 0.5], [0.5, 1]])
robot.state.currentTime = 0.75
assert np.allclose(robot.control.modulationFactor(robot.state), [[1, 1.5],[1.5, 1]])
class TestSimulation(unittest.TestCase):
def testSinusX(self):
"""Robot behaves as harmonic oscillator with period 2*Pi """
if Constants.thoroughness >= 2:
robot = emptyRobot(spring = 0.5)
timestep = 1.0 / 1000;
halfPeriod = int (1* np.pi * 1000)
simulation = simpleSimulation(robot,timestep,halfPeriod)
tolerance = 1e-3
"""
# X direction
setState2D(robot, xpos = [0, 1.5])
# half period
simulation.runSimulation()
assert stateEqual(robot, xposAim = [0.5, 1])
# full period
simulation.runSimulation();
assert stateEqual(robot, xposAim = [0, 1.5])
# Y direction
setState2D(robot, ypos = [0, 1.5])
# half period
simulation.runSimulation();
assert stateEqual(robot, yposAim = [0.5, 1])
# full period
simulation.runSimulation();
assert stateEqual(robot, yposAim = [0, 1.5])
"""
# Z direction
robot = emptyRobot3D(spring = 0.5)
simulation.robot = robot
setState3D(robot, zpos = [0, 1.5])
# half period
simulation.runSimulation();
assert stateEqual3D(robot, zposAim = [0.5, 1])
# full period
simulation.runSimulation();
assert stateEqual3D(robot, zposAim = [0, 1.5])
else: print "testthoroughness is set too low for this test"
class Testutils(unittest.TestCase):
def testArray2Connections(self):
"""conversion from an array to the connections matrix and back"""
robot = emptyRobot()
array = [10]
connections = utils.array2Connections(array, robot.getConnections())
arrayAccent = utils.connections2Array(connections, robot.getConnections())
assert np.allclose(array, arrayAccent)
class TestTraining(unittest.TestCase):
def testNormalize(self):
"""normalization and denormalization procedure of TrainVariable """
trainvar = TrainingVariable("spring",0,1000)
testArray = [500, 300, 3.2 , 0]
testArraynorm = trainvar.normalize(testArray)
testArrayAccent = trainvar.denormalize(testArraynorm)
assert np.allclose(testArray, testArrayAccent)
def testSetterSpring(self):
"""array to robot and back"""
trainScheme = TrainingScheme();
trainScheme.createTrainVariable("spring", 0, 1000)
robot = emptyRobot();
# normal test
array = np.array([[0.4]])
trainScheme.normalizedMatrix2robot(array, robot)
arrayAccent = trainScheme.robot2normalizedMatrix(robot)
assert np.allclose(array, arrayAccent)
# check whether exceptions are thrown in case of invalid input
with self.assertRaises(AssertionError):
array = np.array([[0.4, 0.4]])
trainScheme.normalizedMatrix2robot(array, robot)
with self.assertRaises(AssertionError):
array = np.array([0.4])
trainScheme.normalizedMatrix2robot(array, robot)
with self.assertRaises(AssertionError):
array = np.array([5])
trainScheme.normalizedMatrix2robot(array, robot)
def testCreateTraining(self):
""" no exceptions may be thrown """
if Constants.thoroughness >= 2:
env=SoftEnvironment()
morph=SpringMorphology(noNodes = 10,spring = 1000, noNeighbours = 3,environment = env)
control=SineControl(morph)
state=RobotState(0,morph)
robot=Robot(morph,control,state)
plotter =Plotter(plotCycle=50,plot=False);
simulenv=SimulationEnvironment(timeStep = 0.0005,simulationLength=2000,plot =plotter)
simul = Simulation(simulenv,robot)
simul.runSimulation()
trainscheme = TrainingScheme()
trainscheme.createTrainVariable("spring",0,3000)
trainscheme.createTrainVariable("phase",0,2*np.pi)
training=RandomTraining(trainscheme,robot,simulenv)
trainSave = Save(training, 'temp', 'default')
trainSave.save([[10,10],[20,20]])
else: print "testthoroughness is set too low for this test"
class TestSpaceList(unittest.TestCase):
space2Da = SpaceList(np.array([[1],[2]]))
space2Db = SpaceList(np.array([4]),np.array([10]))
space2Dc = SpaceList(np.array([[1,2],[6,15]]))
space3Da = SpaceList(np.array([[1],[2.0],[3]]))
space3Db = SpaceList(np.array([10.0]),np.array([100]),np.array([1000.0]))
space3Dc = SpaceList(np.array([[1, 2 , 3, 4],[10 , 20 , 30, 40],[100, 200, 300, 400]]))
array = np.array([1,2,3,4])
def testAdd2D(self):
sum = self.space2Da + self.space2Db
assert sum.x == 5
assert sum.y == 12
def testAdd3D(self):
sum = self.space3Da + self.space3Db
assert sum.x == 11
assert sum.y == 102
assert sum.z == 1003
def testAddCopy3D(self):
copy = self.space3Da.copy()
copy += self.space3Da
assert copy.x == 2
assert copy.y == 4
assert copy.z == 6
assert self.space3Da.x == 1
assert self.space3Da.y == 2
assert self.space3Da.z == 3
def testMult3D(self):
product = self.space3Da * self.space3Db
assert product.x == 10
assert product.y == 200
assert product.z == 3000
def testMult2DCopy(self):
copy = self.space2Da.copy()
copy *= self.space2Da
assert copy.x == 1
assert copy.y == 4
assert self.space2Da.x == 1
assert self.space2Da.y == 2
def testMult3Darray(self):
product= self.array * self.space3Dc
aim = np.array([1, 4, 9, 16])
assert np.all(product.x == aim)
assert np.all(product.y == aim * 10)
assert np.all(product.z == aim * 100)
def testMult3Dscalar(self):
product= 4 * self.space3Dc
aim = 4 * np.array([1, 2, 3, 4])
assert np.all(product.x == aim)
assert np.all(product.y == aim * 10)
assert np.all(product.z == aim * 100)
def testdiff2D(self):
xdiff, ydiff = self.space2Dc.getDifference()
assert np.all( xdiff == [[0, -1],[1, 0]])
assert np.all( ydiff == [[0,- 9],[9, 0]])
def run(verbosity = 2, thorogouhness = 1):
Constants.thoroughness = thorogouhness
suite = unittest.TestSuite();
suite.addTests(unittest.makeSuite(TestRobot))
suite.addTests(unittest.makeSuite(TestSimulation))
suite.addTests(unittest.makeSuite(TestTraining))
suite.addTests(unittest.makeSuite(Testutils))
suite.addTests(unittest.makeSuite(TestSpaceList))
unittest.TextTestRunner(verbosity = verbosity).run(suite)
def runSpecial(verbosity = 2):
suite = unittest.TestSuite();
suite.addTest(TestRobot("testControl"))
unittest.TextTestRunner(verbosity = verbosity).run(suite)
if __name__ == '__main__':
	unittest.main()
| 36.342569 | 225 | 0.696562 | 3.15625 |
a00b4757562002db1c2ed29755305c5fd1be9807 | 8,286 | ts | TypeScript | src/ServiceWorker/sw.ts | NYPL-Simplified/web-reader | 601521d411299e2453d9942bd8c43b0aa5fd1a33 | ["MIT"] | 14 | 2016-08-04T16:32:51.000Z | 2022-01-24T14:55:51.000Z | src/ServiceWorker/sw.ts | NYPL-Simplified/web-reader | 601521d411299e2453d9942bd8c43b0aa5fd1a33 | ["MIT"] | 110 | 2017-02-10T15:26:10.000Z | 2022-03-30T13:40:51.000Z | src/ServiceWorker/sw.ts | NYPL-Simplified/web-reader | 601521d411299e2453d9942bd8c43b0aa5fd1a33 | ["MIT"] | 3 | 2021-06-28T19:35:27.000Z | 2022-02-12T07:34:13.000Z |
import { clientsClaim } from 'workbox-core';
import { ExpirationPlugin } from 'workbox-expiration';
import { CacheFirst, StaleWhileRevalidate } from 'workbox-strategies';
import { IS_DEV } from '../constants';
import { WebpubManifest } from '../types';
import { ReadiumLink } from '../WebpubManifestTypes/ReadiumLink';
import {
CACHE_EXPIRATION_SECONDS,
PRECACHE_PUBLICATIONS,
WEBPUB_CACHE_NAME,
} from './constants';
import { registerRoute } from 'workbox-routing';
import { PublicationConfig, WebReaderSWConfig } from './types';
declare let self: ServiceWorkerGlobalScope;
const VERSION = 'v2';
/**
* We claim the clients immediately and skip waiting because we don't care if
* half the page resources come from the SW and half from the network. We use
* content hashes for this to work
*/
clientsClaim();
/**
* Sets up the event listeners to:
* - On Fetch
* - Serve cached responses if they exist and are less than a week old.
*/
export default function initWebReaderSW({
cacheExpirationSeconds = CACHE_EXPIRATION_SECONDS,
}: WebReaderSWConfig | undefined = {}): void {
log('INITIALIZING');
self.addEventListener('install', (event) => {
log('INSTALLING ');
async function installSW() {
// perform any install tasks
// skip the waiting phase and activate immediately
await self.skipWaiting();
log('INSTALLED');
}
event.waitUntil(installSW());
});
/**
* Allow the client to send a message telling us to pre-cache
* webpub manifests and resources within them.
*/
self.addEventListener('message', async (event) => {
if (event.data.type === PRECACHE_PUBLICATIONS) {
log('Precaching publications');
if (typeof event.data.publications !== 'object') {
console.error('Precache event missing publications');
return;
}
await cachePublications(event.data.publications);
}
});
const cacheFirst = new CacheFirst({
cacheName: WEBPUB_CACHE_NAME,
plugins: [
new ExpirationPlugin({
maxAgeSeconds: cacheExpirationSeconds,
}),
],
});
/**
* Register the additional urls we sent with a stale-while-revalidate strategy
* Cache all the manifests in parallel. They're top priority.
* Then cache all their resources.
* Only cache items that don't already exist in the cache.
*/
async function cachePublications(publications: PublicationConfig[]) {
const cache = await caches.open(WEBPUB_CACHE_NAME);
// first route the swr urls
for (const pub of publications) {
for (const url of pub.swrUrls ?? []) {
log(`Routing ${url}`);
registerRoute(
url,
new StaleWhileRevalidate({ cacheName: WEBPUB_CACHE_NAME })
);
}
}
// route, fetch and cache the manifests.
// but don't re-fetch if they already exist in cache.
const pubResults: PromiseSettledResult<PubWithManifest>[] = await Promise.allSettled(
publications.map(async (pub) => {
const finalManifestUrl = getProxiedUrl(pub.manifestUrl, pub.proxyUrl);
// route it so that workbox knows to respond.
registerRoute(finalManifestUrl, cacheFirst);
// bail out if the manifest already exists
const match = await cache.match(finalManifestUrl);
if (match) {
return { ...pub, manifest: await match.json() };
}
// otherwise fetch it
const manifestResponse = await fetch(finalManifestUrl);
handleBadResponse(finalManifestUrl, manifestResponse);
// add the manifest response to the cache
await cache.put(finalManifestUrl, manifestResponse.clone());
const manifest: WebpubManifest = await manifestResponse.json();
return { ...pub, manifest };
})
);
// filter out any errored results
const pubs = pubResults
.map((result) =>
result.status === 'fulfilled' ? result.value : undefined
)
.filter(isPub);
// then route, fetch and cache all resources in each.
const promises = pubs.map(async (pub) => {
// make a list of resources with proxy included
const resourceHrefs = extractHrefs(
pub.manifest.resources ?? [],
pub.manifestUrl,
pub.proxyUrl
);
const readingOrderHrefs = extractHrefs(
pub.manifest.readingOrder ?? [],
pub.manifestUrl,
pub.proxyUrl
);
// make sure array is deduped using set or we may get a cache error
const allResourcesToCache = Array.from(
new Set([...resourceHrefs, ...readingOrderHrefs])
);
// route, fetch and cache each one.
// but don't re-fetch if it is already in the cache.
await Promise.all(
allResourcesToCache.map(async (url) => {
// route it
registerRoute(url, cacheFirst);
// bail out if it already exists
const match = await cache.match(url);
if (match) {
return;
}
const response = await fetch(url);
handleBadResponse(url, response);
return await cache.put(url, response);
})
);
});
return await Promise.allSettled(promises);
}
}
type PubWithManifest = PublicationConfig & { manifest: WebpubManifest };
function isPub(maybe: PubWithManifest | undefined): maybe is PubWithManifest {
return !!maybe;
}
function handleBadResponse(url: string, response: Response) {
if (!response.ok) {
const message = `Bad response status for: ${url}. Status: ${response.status}`;
console.warn(message);
throw new Error(message);
}
}
/**
* Prepends the proxy url if there is one
*/
function getProxiedUrl(url: string, proxyUrl: string | undefined) {
return proxyUrl ? `${proxyUrl}${encodeURIComponent(url)}` : url;
}
/**
* If the passed in url is relative, it will resolve it relative to the
* manifest url. Otherwise it should stay the same. Finally, the proxy is
* conditionally added
*/
function getAbsoluteUrl(
maybeRelative: string,
manifestUrl: string,
proxyUrl?: string
) {
return getProxiedUrl(
new URL(maybeRelative, manifestUrl).toString(),
proxyUrl
);
}
/**
* Gets an array of raw href values from an array of readium links
*/
function extractHrefs(
links: ReadiumLink[],
manifestUrl: string,
proxyUrl: string | undefined
): string[] {
return links.map((res) => getAbsoluteUrl(res.href, manifestUrl, proxyUrl));
}
// each logging line will be prepended with the service worker version
function log(message: string) {
if (IS_DEV) console.log(`SW (${VERSION}) -`, message);
}
/**
* On a fetch event, respond with an item from the cache, if
* it exists. We don't ever add things to the cache here,
* because the fetch event is called for _all_ network requests,
* and we can't tell if any given request is for app resources or
* publication resources. Thus publication resources are added
* to the cache separately, and then just returned if found here.
*
* This event listener MUST be run as the last fetch event listener
* of all in the host app because it always responds to the event
* in order to be able to use async functionality.
*/
// self.addEventListener('fetch', (event) => {
// if (event.request.method !== 'GET') {
// return;
// }
// async function matchOrFetch() {
// const pubCache = await caches.open(WEBPUB_CACHE_NAME);
// const match = await pubCache.match(event.request);
// // check if there is a match
// if (match) {
// return new CacheFirst({
// cacheName: WEBPUB_CACHE_NAME,
// plugins: [
// new ExpirationPlugin({
// // Only cache requests for a week
// maxAgeSeconds: cacheExpirationSeconds,
// }),
// ],
// }).handle(event);
// }
// // otherwise go to network
// return fetch(event.request);
// }
// // we have to make the event wait if we want to use async work. This will
// // make the network tab show "ServiceWorker" in all requests, despite the
// // fact that not every request actually goes through the service worker:
// // https://stackoverflow.com/questions/33590378/status-code200-ok-from-serviceworker-in-chrome-network-devtools/33655173
// event.respondWith(matchOrFetch());
// });
| 31.505703 | 125 | 0.657012 | 3.03125 |
d27eae2bf6995f06d39f58fcb2b49558cf90a952 | 2,218 | sh | Shell | by-tag/os/verify_copy_process.sh | milosz/shell-octo-adventure | d5db9c42043f0952aaae6fc5ca788084497e7444 | ["MIT"] | null | null | null | by-tag/os/verify_copy_process.sh | milosz/shell-octo-adventure | d5db9c42043f0952aaae6fc5ca788084497e7444 | ["MIT"] | null | null | null | by-tag/os/verify_copy_process.sh | milosz/shell-octo-adventure | d5db9c42043f0952aaae6fc5ca788084497e7444 | ["MIT"] | null | null | null |
#!/bin/sh
# Simple "copy [directories] and verify [files]" shell script
# Sample usage: copy.sh /from_directory /to_directory
# https://blog.sleeplessbeastie.eu/2014/11/21/how-to-verify-copy-process/
# used commands
find_command=$(which find)
shasum_command=$(which sha256sum)
cat_command=$(which cat)
unlink_command=$(which unlink)
# copy command with additional arguments
copy_command=$(which cp)
copy_arguments="-rp" # recursive mode
# preserve mode, ownership, timestamps
# mail command and with used email address
mail_command=$(which mail)
mail_subject_argument="-s"
mail_address="milosz"
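# proceed only when the source directory exists and the destination directory does not exist yet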
if [ -d "$1" -a ! -d "$2" ]; then
# first directory exists
# second directory does not exists
# compute 256-bit checksums
shasum_log=$(mktemp)
(cd $1 && $find_command . -type f -exec $shasum_command '{}' \; > $shasum_log)
# copy data
copy_log=$(mktemp)
$copy_command $copy_arguments "$1" "$2" > $copy_log
# verify computed checksums
verify_log=$(mktemp)
(cd $2 && $cat_command $shasum_log | $shasum_command -c > $verify_log)
shasum_exit_code="$?"
# prepare message and send mail message
mail_file=$(mktemp)
if [ "$shasum_exit_code" -eq "0" ]; then
mail_subject="Subject: ${0}: Success"
else
mail_subject="Subject: ${0}: Error"
fi
echo > $mail_file
echo "Command-line: ${0} ${1} ${2}" >> $mail_file
if [ -s "$copy_log" ]; then
echo >> $mail_file
echo "Copy process" >> $mail_file
$cat_command $copy_log >> $mail_file
fi
if [ "$shasum_exit_code" -ne "0" ]; then
echo >> $mail_file
echo "Verify process" >> $mail_file
$cat_command $verify_log | grep -v OK$ >> $mail_file
fi
$mail_command $mail_subject_argument "${mail_subject}" $mail_address < $mail_file
# cleanup temporary files
$unlink_command $mail_file
$unlink_command $verify_log
$unlink_command $copy_log
$unlink_command $shasum_log
else
echo "Problem with parameters\nCommand-line: ${0} ${1} ${2}" | $mail_command $mail_subject_argument "${0}" $mail_address
exit 5
fi
| 30.805556 | 122 | 0.632552 | 3.28125 |
49162563ee7a5c06c5af0b306fed87f317ec328d | 12,136 | py | Python | Fiji/imgproc.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | ["CC-BY-4.0"] | null | null | null | Fiji/imgproc.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | ["CC-BY-4.0"] | null | null | null | Fiji/imgproc.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | ["CC-BY-4.0"] | null | null | null |
#@ UIService uiService
#@ LogService log
#@ File(label="Select the main directory", style="directory", value="/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging/", persist=false) path
#@ String(label="Name of Analysis Folder", value = "analysis_20190419", persist=false) dir_output
#@ File(label="Folder for input images", style="directory", value="/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging/resource/testdata", persist=false) ippath
#@ Boolean(label="Batchmode", value=false, persist=true) batchmodeop
print('Script Starts')
print('Importing modules ...')
# Import ImageJ/Fiji package
import sys
import os
import re
import csv
import gc
import time
from ij import IJ
from ij import plugin
from ij import gui
from ij.io import FileSaver
from ij import WindowManager as wm
from ij.process import ImageStatistics as IS
from ij.macro import Interpreter
# Import JAVA modules
import java.awt.Color as Color
import java.lang.System.gc as javagc
# Import Bio-Formats
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
# Functions Section Begins ----------------------------------------------------- #
print('Loading functions ...')
# log.info('Loading functions ...')
def filesavercheck(fs, outputdir, filename):
"""
	filesavercheck first checks if the output folder exists,
	then checks if the target file already exists in that folder.
	If the folder does not exist, it prints the error message:
	"Folder does not exist or it's not a folder!"
	If the folder exists but the file already exists, it prints:
	"File exists! Not saving the image, would overwrite a file!"
	Otherwise, it saves the file and prints:
	"File saved successfully at " followed by the file path.
	Arguments:
	fs: FileSaver object wrapping the image to save
	outputdir: output directory
	filename: output file name (joined with outputdir)
"""
if os.path.exists(outputdir) and os.path.isdir(outputdir):
print "folder exists:", outputdir
filepath = os.path.join(outputdir, filename) # Operating System-specific
if os.path.exists(filepath):
print "File exists! Not saving the image, would overwrite a file!"
elif fs.saveAsTiff(filepath):
print "File saved successfully at ", filepath
else:
print "Folder does not exist or it's not a folder!"
def dircheck(targetpaths):
"""
dircheck checks the target folder and create the folder if it does not exist.
targetdirlist: list of folderpath
"""
# print(type(targetpaths))
if type(targetpaths) is unicode:
print(os.path.exists(targetpaths))
if not os.path.exists(targetpaths):
os.makedirs(targetpaths)
elif type(targetpaths) is list:
for path in targetpaths:
if not os.path.exists(path):
os.makedirs(path)
def getprocessedimg(op_dir, pattern = r'(.+?).'):
"""
NOT USING
	getprocessedimg checks the output folder and creates a list of already processed files.
	pattern: regular expression passed to re.search to extract the base file name
"""
processed_img = []
for (directory, dir_names, file_names) in os.walk(op_dir):
for file_name in file_names:
# print(file_name)
# search the processed files by using re.search
m = re.search(pattern, file_name)
if m:
# print(m)
file_name_temp = m.group(1)
processed_img.append(file_name_temp)
# replace the duplicated filenames
processed_img = list(set(processed_img))
return (processed_img)
def listfiles(path, extension = None):
"""
"""
filelist = []
fileabslist = []
for directory, dir_names, file_names in os.walk(path):
# print(file_names)
for file_name in file_names:
if (not file_name.startswith('.')) & (file_name.endswith(extension)):
file_name_base = file_name.replace(extension, '')
filepath_tmp = os.path.join(directory, file_name)
fileabslist.append(filepath_tmp)
return fileabslist
# def getpendinglist(src_dir, op_dir, src_ext = '.nd2', op_ext = '.csv', pattern = r'(.+?).'):
def getpendinglist(src_dir, op_dir, src_ext = '.nd2', op_ext = '.csv'):
"""
	getpendinglist compares the files in src_dir with the already processed files in op_dir,
	then creates a pending list of unprocessed images.
"""
srclist = listfiles(src_dir, src_ext)
oplist = listfiles(op_dir, op_ext)
oplist_basename = []
for i in oplist:
name = os.path.basename(i)
basename = os.path.splitext(name)[0]
oplist_basename.append(basename)
pendingfllist = []
pendingpathlist_input = []
pendingpathlist_output = []
for i in range(len(srclist)):
srcflname = os.path.basename(srclist[i])
srcflbasename = os.path.splitext(srcflname)[0]
if not srcflbasename in oplist_basename:
pendingfllist.append(srcflbasename)
pendingpathlist_input.append(srclist[i])
pendingpathlist_output.append(os.path.join(op_dir, srcflbasename + op_ext))
return (pendingfllist, pendingpathlist_input, pendingpathlist_output)
def getStatistics(imp):
""" Return statistics for the given ImagePlus """
options = IS.MEAN | IS.MEDIAN | IS.MIN_MAX
ip = imp.getProcessor()
stats = IS.getStatistics(ip, options, imp.getCalibration())
return stats.mean, stats.median, stats.min, stats.max
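# Hypothetical usage: mean_v, median_v, min_v, max_v = getStatistics(imp_channel)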
def garbagecollect(iteration = 3):
for i in range(iteration):
gc.collect()
# Functions Section Ends ----------------------------------------------------- #
# STORM Image Analysis ------------------------------------------------------- #
def run_script(path=path):
import gc
path = str(path)
# Prepare workspace ------------------------------------------------------ #
print('Preparing ...')
# log.info('Preparing ...')
if batchmodeop:
Interpreter.batchMode = True
# define workspace
# path = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging'
# dir_output = 'analysis_20190305'
pathlist = []
# define temp folder
dir_temp = 'temp'
file_par = 'par'
# create temp folder
path_temp = os.path.join(path, dir_output, dir_temp)
pathlist.append(path_temp)
# define input folder
path_srcimg = str(ippath)
print(path_srcimg)
# define output folder
outputdir = 'preprocessing'
dir_preproimg = 'preproimg'
dir_imginfo = 'imginfo'
dir_imgintensity = 'imgintensity'
dir_imgmetadata = 'imgmetadata'
file_imgstat = 'imgstat.csv'
# create output path
path_preproimg = os.path.join(path, dir_output, outputdir, dir_preproimg)
path_imginfo = os.path.join(path, dir_output, outputdir, dir_imginfo)
path_imgintensity = os.path.join(path, dir_output, outputdir, dir_imgintensity)
path_imgmetadata = os.path.join(path_imginfo, dir_imgmetadata)
pathlist.append(path_preproimg)
pathlist.append(path_imginfo)
pathlist.append(path_imgintensity)
pathlist.append(path_imgmetadata)
# create output file path
path_imgstat = os.path.join(path_imginfo, file_imgstat)
# check the existence of output folders and create folders if necessary
dircheck(pathlist)
# Create img list for processing ============ #
# create a file list for all images
# return the pending files
src_ext = '.nd2'
pendingfllist, pendingpathlist_input, pendingpathlist_output = getpendinglist(path_srcimg, path_imgintensity, src_ext = src_ext)
print(pendingfllist)
# log.info(pendingfllist)
# Processing start ========================== #
# load and crop the image
for i in range(len(pendingfllist)):
# for i in range(2):
print(pendingfllist[i])
# check if the stat .csv file exists
pendingfllist_pre = []
processed_filename = []
list_xsize = []
list_ysize = []
list_nSlices = []
list_nFrames = []
list_nChannels = []
list_sizebytes = []
if os.path.exists(path_imgstat):
with open(path_imgstat, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
csv_header = next(csvreader)
for row in csvreader:
pendingfllist_pre.append(row[0])
processed_filename.append(row[1])
list_xsize.append(row[2])
list_ysize.append(row[3])
list_nSlices.append(row[4])
list_nFrames.append(row[5])
list_nChannels.append(row[6])
list_sizebytes.append(row[7])
# load image
imps = BF.openImagePlus(pendingpathlist_input[i])
ipflbasename = pendingfllist[i]
pendingfllist_pre.append(pendingfllist[i])
for imp in imps:
imp.show()
imp_main = IJ.getImage()
# Save average intensity ===================== #
processed_filename.append(imp_main.title)
# Save img metadata ========================== #
print('Save image metadata...')
# log.info('Save image metadata...')
# print info for each image
# No need to have image object in the arguments
IJ.run("Show Info...")
# create window name
img_info_title = 'Info for ' + imp_main.title
# select the info window
img_info = wm.getWindow(img_info_title)
# wm.addWindow(img_info)
wm.setWindow(img_info)
# save the info information
IJ.saveAs("Text", os.path.join(path_imgmetadata, (ipflbasename + ".txt")))
# close the info window
img_info.close()
# Save img information ======================== #
# return basic information of the image
img_x = imp_main.getWidth()
img_y = imp_main.getHeight()
img_nslices = imp_main.getNSlices()
img_nFrames = imp_main.getNFrames()
img_nChannels = imp_main.getNChannels()
img_sizebytes = imp_main.getSizeInBytes()
list_xsize.append(img_x)
list_ysize.append(img_y)
list_nSlices.append(img_nslices)
list_nFrames.append(img_nFrames)
list_nChannels.append(img_nChannels)
list_sizebytes.append(img_sizebytes/(1024*1024))
with open(path_imgstat, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(['image_name', 'ip_file_name','xSize', 'ySize', 'nSlices', 'nFrames', 'nChannels', 'size_MB'])
for j in range(len(list_xsize)):
csvwriter.writerow([pendingfllist_pre[j], processed_filename[j], list_xsize[j], list_ysize[j], \
list_nSlices[j], list_nFrames[j], list_nChannels[j], list_sizebytes[j]])
# Print information =========================== #
# return the title of the window (optional)
print('Current Image File: {}'.format(img_info.title))
# return in log windows
# log.info('Current Image File: {}'.format(img_info.title))
# log.info('SizeZ: {}'.format(img_nslices))
# log.info('SizeT: {}'.format(img_nFrames))
# log.info('SizeC: {}'.format(img_nChannels))
# log.info('Size in Bytes: {}'.format(img_sizebytes))
# define the size of center ROI
roi = [(img_x/2) - 64, (img_y/2) - 64, 128, 128]
crop_roi = gui.Roi(roi[0], roi[1], roi[2], roi[3])
stats_allframe = []
# Export individual channel ================== #
for j in range(img_nChannels):
IJ.run(imp_main, "Duplicate...", "duplicate channels=" + str(j+1))
imp_channel = IJ.getImage()
fs = FileSaver(imp_channel)
path_preproim_c = os.path.join(path_preproimg, str(j+1))
dircheck(path_preproim_c)
filesavercheck(fs, path_preproim_c, ipflbasename + '.tif')
imp_channel.setRoi(crop_roi)
for k in range(img_nFrames):
imp_channel.setT(k+1)
stats = list(getStatistics(imp_channel))
head = [j+1, k+1]
stats_allframe.append(head + stats)
imp_channel.close()
del imp_channel
del fs
# save into a csv ============================= #
with open(os.path.join(path_imgintensity, pendingfllist[i] + '.csv'), 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(['channel', 'frame', 'mean', 'median','min', 'max'])
for j in range(len(stats_allframe)):
csvwriter.writerow(stats_allframe[j])
# Ending the loop ============================= #
# Close image windows and delete variables
imp_main.close()
# not sure if this is necessary
del stats_allframe
del imp_main
del imps
# Close Exception window
'''
img_ex = wm.getWindow('')
wm.setWindow(img_ex)
img_ex.close()
'''
time.sleep(3)
# garbage collection
gc.collect()
time.sleep(3)
gc.collect()
# break
print('Saving image stats ...')
# log.info('Saving image stats ...')
print("Script Ends ...")
# log.info("Script Ends ...")
time.sleep(3)
if batchmodeop:
Interpreter.batchMode = False
import gc
gc.collect()
return
if __name__ in ['__builtin__','__main__']:
run_script()
# quit script after running the script
from java.lang import System
System.exit(0)
| 29.672372 | 176 | 0.689436 | 3.296875 |
7489b4c4ba9e53aed3a3a7e068aa7f1e27348224 | 15,659 | dart | Dart | lib/solitaire/game_screen.dart | lepak-xyz/Flutter-Games | 84185c988f96683b0a13816b602afcab75859ce5 | ["MIT"] | null | null | null | lib/solitaire/game_screen.dart | lepak-xyz/Flutter-Games | 84185c988f96683b0a13816b602afcab75859ce5 | ["MIT"] | null | null | null | lib/solitaire/game_screen.dart | lepak-xyz/Flutter-Games | 84185c988f96683b0a13816b602afcab75859ce5 | ["MIT"] | null | null | null |
import 'dart:math';
import 'package:flutter/material.dart';
import 'package:flutter_games/solitaire/card_column.dart';
import 'package:flutter_games/solitaire/empty_card.dart';
import 'package:flutter_games/solitaire/playing_card.dart';
import 'package:flutter_games/solitaire/transformed_card.dart';
class GameScreen extends StatefulWidget {
@override
_GameScreenState createState() => _GameScreenState();
}
class _GameScreenState extends State<GameScreen> {
// Stores the cards on the seven columns
List<PlayingCard> cardColumn1 = [];
List<PlayingCard> cardColumn2 = [];
List<PlayingCard> cardColumn3 = [];
List<PlayingCard> cardColumn4 = [];
List<PlayingCard> cardColumn5 = [];
List<PlayingCard> cardColumn6 = [];
List<PlayingCard> cardColumn7 = [];
// Stores the card deck
List<PlayingCard> cardDeckClosed = [];
List<PlayingCard> cardDeckOpened = [];
// Stores the card in the upper boxes
List<PlayingCard> finalHeartsDeck = [];
List<PlayingCard> finalDiamondsDeck = [];
List<PlayingCard> finalSpadesDeck = [];
List<PlayingCard> finalClubsDeck = [];
@override
void initState() {
super.initState();
_initialiseGame();
}
@override
Widget build(BuildContext context) {
return Scaffold(
backgroundColor: Colors.green,
appBar: AppBar(
title: Text("Flutter Solitaire"),
elevation: 0.0,
backgroundColor: Colors.green,
actions: <Widget>[
InkWell(
child: Padding(
padding: const EdgeInsets.all(8.0),
child: Icon(
Icons.refresh,
color: Colors.white,
),
),
splashColor: Colors.white,
onTap: () {
_initialiseGame();
},
)
],
),
body: Column(
children: <Widget>[
Row(
mainAxisAlignment: MainAxisAlignment.spaceEvenly,
children: <Widget>[
_buildCardDeck(),
_buildFinalDecks(),
],
),
SizedBox(
height: 16.0,
),
Row(
mainAxisAlignment: MainAxisAlignment.spaceEvenly,
children: <Widget>[
Expanded(
child: CardColumn(
cards: cardColumn1,
onCardsAdded: (cards, index) {
setState(() {
cardColumn1.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 1,
),
),
Expanded(
child: CardColumn(
cards: cardColumn2,
onCardsAdded: (cards, index) {
setState(() {
cardColumn2.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 2,
),
),
Expanded(
child: CardColumn(
cards: cardColumn3,
onCardsAdded: (cards, index) {
setState(() {
cardColumn3.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 3,
),
),
Expanded(
child: CardColumn(
cards: cardColumn4,
onCardsAdded: (cards, index) {
setState(() {
cardColumn4.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 4,
),
),
Expanded(
child: CardColumn(
cards: cardColumn5,
onCardsAdded: (cards, index) {
setState(() {
cardColumn5.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 5,
),
),
Expanded(
child: CardColumn(
cards: cardColumn6,
onCardsAdded: (cards, index) {
setState(() {
cardColumn6.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 6,
),
),
Expanded(
child: CardColumn(
cards: cardColumn7,
onCardsAdded: (cards, index) {
setState(() {
cardColumn7.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
});
},
columnIndex: 7,
),
),
],
),
],
),
);
}
// Build the deck of cards left after building card columns
Widget _buildCardDeck() {
return Container(
child: Row(
children: <Widget>[
InkWell(
child: cardDeckClosed.isNotEmpty
? Padding(
padding: const EdgeInsets.all(4.0),
child: TransformedCard(
playingCard: cardDeckClosed.last,
),
)
: Opacity(
opacity: 0.4,
child: Padding(
padding: const EdgeInsets.all(4.0),
child: TransformedCard(
playingCard: PlayingCard(
cardSuit: CardSuit.diamonds,
cardType: CardType.five,
),
),
),
),
onTap: () {
setState(() {
if (cardDeckClosed.isEmpty) {
cardDeckClosed.addAll(cardDeckOpened.map((card) {
return card
..opened = false
..faceUp = false;
}));
cardDeckOpened.clear();
} else {
cardDeckOpened.add(
cardDeckClosed.removeLast()
..faceUp = true
..opened = true,
);
}
});
},
),
cardDeckOpened.isNotEmpty
? Padding(
padding: const EdgeInsets.all(4.0),
child: TransformedCard(
playingCard: cardDeckOpened.last,
attachedCards: [
cardDeckOpened.last,
],
columnIndex: 0,
),
)
: Container(
width: 40.0,
),
],
),
);
}
// Build the final decks of cards
Widget _buildFinalDecks() {
return Container(
child: Row(
children: <Widget>[
Padding(
padding: const EdgeInsets.all(4.0),
child: EmptyCardDeck(
cardSuit: CardSuit.hearts,
cardsAdded: finalHeartsDeck,
onCardAdded: (cards, index) {
finalHeartsDeck.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
},
columnIndex: 8,
),
),
Padding(
padding: const EdgeInsets.all(4.0),
child: EmptyCardDeck(
cardSuit: CardSuit.diamonds,
cardsAdded: finalDiamondsDeck,
onCardAdded: (cards, index) {
finalDiamondsDeck.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
},
columnIndex: 9,
),
),
Padding(
padding: const EdgeInsets.all(4.0),
child: EmptyCardDeck(
cardSuit: CardSuit.spades,
cardsAdded: finalSpadesDeck,
onCardAdded: (cards, index) {
finalSpadesDeck.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
},
columnIndex: 10,
),
),
Padding(
padding: const EdgeInsets.all(4.0),
child: EmptyCardDeck(
cardSuit: CardSuit.clubs,
cardsAdded: finalClubsDeck,
onCardAdded: (cards, index) {
finalClubsDeck.addAll(cards);
int length = _getListFromIndex(index).length;
_getListFromIndex(index)
.removeRange(length - cards.length, length);
_refreshList(index);
},
columnIndex: 11,
),
),
],
),
);
}
// Initialise a new game
void _initialiseGame() {
cardColumn1 = [];
cardColumn2 = [];
cardColumn3 = [];
cardColumn4 = [];
cardColumn5 = [];
cardColumn6 = [];
cardColumn7 = [];
// Stores the card deck
cardDeckClosed = [];
cardDeckOpened = [];
// Stores the card in the upper boxes
finalHeartsDeck = [];
finalDiamondsDeck = [];
finalSpadesDeck = [];
finalClubsDeck = [];
List<PlayingCard> allCards = [];
// Add all cards to deck
CardSuit.values.forEach((suit) {
CardType.values.forEach((type) {
allCards.add(PlayingCard(
cardType: type,
cardSuit: suit,
faceUp: false,
));
});
});
Random random = Random();
// Add cards to columns and remaining to deck
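// Deal: column N receives N cards (28 cards in total); only the last card
// dealt to each column is turned face up, the rest stay face down.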
for (int i = 0; i < 28; i++) {
int randomNumber = random.nextInt(allCards.length);
if (i == 0) {
PlayingCard card = allCards[randomNumber];
cardColumn1.add(
card
..opened = true
..faceUp = true,
);
allCards.removeAt(randomNumber);
} else if (i > 0 && i < 3) {
if (i == 2) {
PlayingCard card = allCards[randomNumber];
cardColumn2.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn2.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
} else if (i > 2 && i < 6) {
if (i == 5) {
PlayingCard card = allCards[randomNumber];
cardColumn3.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn3.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
} else if (i > 5 && i < 10) {
if (i == 9) {
PlayingCard card = allCards[randomNumber];
cardColumn4.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn4.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
} else if (i > 9 && i < 15) {
if (i == 14) {
PlayingCard card = allCards[randomNumber];
cardColumn5.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn5.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
} else if (i > 14 && i < 21) {
if (i == 20) {
PlayingCard card = allCards[randomNumber];
cardColumn6.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn6.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
} else {
if (i == 27) {
PlayingCard card = allCards[randomNumber];
cardColumn7.add(
card
..opened = true
..faceUp = true,
);
} else {
cardColumn7.add(allCards[randomNumber]);
}
allCards.removeAt(randomNumber);
}
}
cardDeckClosed = allCards;
cardDeckOpened.add(
cardDeckClosed.removeLast()
..opened = true
..faceUp = true,
);
setState(() {});
}
void _refreshList(int index) {
if (finalDiamondsDeck.length +
finalHeartsDeck.length +
finalClubsDeck.length +
finalSpadesDeck.length ==
52) {
_handleWin();
}
setState(() {
if (_getListFromIndex(index).length != 0) {
_getListFromIndex(index)[_getListFromIndex(index).length - 1]
..opened = true
..faceUp = true;
}
});
}
// Handle a win condition
void _handleWin() {
showDialog(
context: context,
builder: (context) {
return AlertDialog(
title: Text("Congratulations!"),
content: Text("You Win!"),
actions: <Widget>[
FlatButton(
onPressed: () {
_initialiseGame();
Navigator.pop(context);
},
child: Text("Play again"),
),
],
);
},
);
}
List<PlayingCard> _getListFromIndex(int index) {
switch (index) {
case 0:
return cardDeckOpened;
case 1:
return cardColumn1;
case 2:
return cardColumn2;
case 3:
return cardColumn3;
case 4:
return cardColumn4;
case 5:
return cardColumn5;
case 6:
return cardColumn6;
case 7:
return cardColumn7;
case 8:
return finalHeartsDeck;
case 9:
return finalDiamondsDeck;
case 10:
return finalSpadesDeck;
case 11:
return finalClubsDeck;
default:
return null;
}
}
}
| 29.769962 | 70 | 0.456351 | 3.09375 |
e1bd0d9ce6d8461ee82dc741593ffee23a0dea80 | 4,612 | ps1 | PowerShell | DomainManagement/functions/passwordpolicies/Register-DMPasswordPolicy.ps1 | WillyMoselhy/DomainManagement | e9b2fff30e7c7b66d9057389909447180a0fb634 | ["MIT"] | 6 | 2020-02-24T12:34:47.000Z | 2020-08-25T08:48:16.000Z | DomainManagement/functions/passwordpolicies/Register-DMPasswordPolicy.ps1 | WillyMoselhy/DomainManagement | e9b2fff30e7c7b66d9057389909447180a0fb634 | ["MIT"] | 22 | 2020-02-03T15:37:16.000Z | 2021-03-15T07:33:48.000Z | DomainManagement/functions/passwordpolicies/Register-DMPasswordPolicy.ps1 | WillyMoselhy/DomainManagement | e9b2fff30e7c7b66d9057389909447180a0fb634 | ["MIT"] | 2 | 2020-09-28T15:19:35.000Z | 2021-12-30T02:10:06.000Z |
function Register-DMPasswordPolicy
{
<#
.SYNOPSIS
Register a new Fine-Grained Password Policy as the desired state.
.DESCRIPTION
Register a new Fine-Grained Password Policy as the desired state.
These policies are then compared to the current state in a domain.
.PARAMETER Name
The name of the PSO.
.PARAMETER DisplayName
The display name of the PSO.
.PARAMETER Description
The description for the PSO.
.PARAMETER Precedence
The precedence rating of the PSO.
The lower the precedence number, the higher the priority.
.PARAMETER MinPasswordLength
The minimum number of characters a password must have.
.PARAMETER SubjectGroup
The group that the PSO should be assigned to.
.PARAMETER LockoutThreshold
How many bad password entries will lead to account lockout?
.PARAMETER MaxPasswordAge
The maximum age a password may have before it must be changed.
.PARAMETER ComplexityEnabled
Whether complexity rules are applied to users affected by this policy.
By default, complexity rules require 3 out of: "Lowercase letter", "Uppercase letter", "number", "special character".
However, custom password filters may lead to very different validation rules.
.PARAMETER LockoutDuration
If the account is being locked out, how long will the lockout last.
.PARAMETER LockoutObservationWindow
What is the time window before the bad password count is being reset.
.PARAMETER MinPasswordAge
How soon may a password be changed again after updating the password.
.PARAMETER PasswordHistoryCount
How many passwords are kept in memory to prevent going back to a previous password.
.PARAMETER ReversibleEncryptionEnabled
Whether the password should be stored in a manner that allows it to be decrypted into cleartext.
By default, only un-reversible hashes are being stored.
.PARAMETER SubjectDomain
The domain the group is part of.
Defaults to the target domain.
.PARAMETER Present
Whether the PSO should exist.
Defaults to $true.
If this is set to $false, no PSO will be created, instead the PSO will be removed if it exists.
.EXAMPLE
PS C:\> Get-Content $configPath | ConvertFrom-Json | Write-Output | Register-DMPasswordPolicy
Imports all the configured policies from the defined config json file.
#>
[CmdletBinding()]
param (
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[string]
$Name,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[string]
$DisplayName,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[string]
$Description,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[int]
$Precedence,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[int]
$MinPasswordLength,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[string[]]
$SubjectGroup,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[int]
$LockoutThreshold,
[Parameter(Mandatory = $true, ValueFromPipelineByPropertyName = $true)]
[PSFTimespan]
$MaxPasswordAge,
[Parameter(ValueFromPipelineByPropertyName = $true)]
[bool]
$ComplexityEnabled = $true,
[Parameter(ValueFromPipelineByPropertyName = $true)]
[PSFTimespan]
$LockoutDuration = '1h',
[Parameter(ValueFromPipelineByPropertyName = $true)]
[PSFTimespan]
$LockoutObservationWindow = '1h',
[Parameter(ValueFromPipelineByPropertyName = $true)]
[PSFTimespan]
$MinPasswordAge = '30m',
[Parameter(ValueFromPipelineByPropertyName = $true)]
[int]
$PasswordHistoryCount = 24,
[Parameter(ValueFromPipelineByPropertyName = $true)]
[bool]
$ReversibleEncryptionEnabled = $false,
[Parameter(ValueFromPipelineByPropertyName = $true)]
[string]
$SubjectDomain = '%DomainFqdn%',
[bool]
$Present = $true
)
process
{
$script:passwordPolicies[$Name] = [PSCustomObject]@{
PSTypeName = 'DomainManagement.PasswordPolicy'
Name = $Name
Precedence = $Precedence
ComplexityEnabled = $ComplexityEnabled
LockoutDuration = $LockoutDuration.Value
LockoutObservationWindow = $LockoutObservationWindow.Value
LockoutThreshold = $LockoutThreshold
MaxPasswordAge = $MaxPasswordAge.Value
MinPasswordAge = $MinPasswordAge.Value
MinPasswordLength = $MinPasswordLength
DisplayName = $DisplayName
Description = $Description
PasswordHistoryCount = $PasswordHistoryCount
ReversibleEncryptionEnabled = $ReversibleEncryptionEnabled
SubjectDomain = $SubjectDomain
SubjectGroup = $SubjectGroup
Present = $Present
}
}
}
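# A second, hypothetical usage example (all values are illustrative only):
# Register-DMPasswordPolicy -Name 'T0-Admins' -DisplayName 'Tier 0 Admins' -Description 'PSO for tier 0 admin accounts' `
#     -Precedence 10 -MinPasswordLength 20 -SubjectGroup 'Tier0Admins' -LockoutThreshold 5 -MaxPasswordAge '70d'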
| 28.645963 | 120 | 0.754553 | 3.078125 |
70666cc9346f32437dccb5e903a66c9843e1095a | 1,325 | lua | Lua | StarfallEx/Libraries/PNGWriter/Example.lua | Vurv78/lua | 715c654a6a3130c8de3142a700d5463a73f3ebe0 | ["Apache-2.0"] | 2 | 2021-08-08T22:06:37.000Z | 2022-02-11T12:29:50.000Z | StarfallEx/Libraries/PNGWriter/Example.lua | Vurv78/Lua | 7b07d4a13f16ea5222229c5cda24090bcf6e98be | ["Apache-2.0"] | 2 | 2020-06-23T08:25:57.000Z | 2020-12-09T04:17:11.000Z | StarfallEx/Libraries/PNGWriter/Example.lua | Vurv78/starfallex-creations | 7b07d4a13f16ea5222229c5cda24090bcf6e98be | ["Apache-2.0"] | null | null | null |
--@name PNG Library Example 2
--@author Vurv
--@client
--@include pnglib.txt
if player() ~= owner() then return end
local createPNG = require("pnglib.txt")
local function canRun()
return quotaTotalAverage() < quotaMax()*0.4
end
local main_routine = coroutine.create(function()
render.createRenderTarget("rt")
render.selectRenderTarget("rt")
local png = createPNG(512, 512, "rgb") -- Create the png
local to_col = 255/512
-- Make sure you don't write RGB that goes over 255. At worst it might break the image, best it'll just write black pixels.
for Y = 0,511 do
for X = 0,511 do
render.setRGBA(X * to_col,Y * to_col,0,255)
png:writeRGB(X * to_col,Y * to_col,0)
render.drawRectFast(X,Y,1,1)
if not canRun() then
coroutine.yield()
render.selectRenderTarget("rt") -- Re-select the RT when we continue
end
end
end
print("Finished drawing.")
png:export("bruh.png")
end)
hook.add("renderoffscreen","",function()
if canRun() then
if coroutine.status(main_routine) ~= "dead" then
coroutine.resume(main_routine)
end
end
end)
hook.add("render","",function()
render.setRenderTargetTexture("rt")
render.drawTexturedRect(0,0,1024,1024)
end)
| 28.804348 | 127 | 0.630189 | 3.359375 |
2fa49b0ebec4aa7284ee66766356611800b89a25 | 4,072 | py | Python | test/test_mittens.py | akanshajainn/mittens | da8986385f785ef33954f59f3f907565e7b3da2f | ["Apache-2.0"] | 234 | 2018-03-28T11:55:53.000Z | 2022-01-05T13:09:35.000Z | test/test_mittens.py | akanshajainn/mittens | da8986385f785ef33954f59f3f907565e7b3da2f | ["Apache-2.0"] | 17 | 2018-04-16T03:50:38.000Z | 2022-03-16T16:36:38.000Z | test/test_mittens.py | akanshajainn/mittens | da8986385f785ef33954f59f3f907565e7b3da2f | ["Apache-2.0"] | 32 | 2018-03-29T08:17:57.000Z | 2021-09-03T13:08:35.000Z |
"""test_mittens.py
Test Mittens and GloVe using both NumPy and TensorFlow (if available).
If TensorFlow is not installed, those tests are skipped. If it is,
all tests are run twice: first with NumPy and then with TensorFlow,
according to the `framework` fixture.
Tests use pytest: from the command line, run:
$ pytest PATH/TO/MITTENS/test/
Add a `-v` flag to get detailed output.
Author: Nick Dingwall
"""
import numpy as np
import pytest
import mittens.np_mittens as np_mittens
try:
TENSORFLOW_INSTALLED = True
import mittens.tf_mittens as tf_mittens
except ImportError:
TENSORFLOW_INSTALLED = False
tf_mittens = None
FRAMEWORK_TO_MODULE = {'np': np_mittens, 'tf': tf_mittens}
@pytest.fixture(scope="module", params=['np', 'tf'])
def framework(request):
return request.param
def test_glove(framework):
if not TENSORFLOW_INSTALLED and framework == 'tf':
pytest.skip("Tensorflow not installed.")
np.random.seed(42)
corr = _run_glove(FRAMEWORK_TO_MODULE[framework].GloVe, max_iter=1000)
assert corr > 0.4
def test_glove_initialization(framework):
if not TENSORFLOW_INSTALLED and framework == 'tf':
pytest.skip("Tensorflow not installed.")
np.random.seed(42)
corr = _run_glove(FRAMEWORK_TO_MODULE[framework].GloVe, max_iter=0)
assert abs(corr) < 0.2
def test_mittens(framework):
"""Test that Mittens moves initial representations in the correct
direction.
"""
if not TENSORFLOW_INSTALLED and framework == 'tf':
pytest.skip("Tensorflow not installed.")
np.random.seed(42)
embedding_dim = 10
vocab = ['a', 'b', 'c', 'd', 'e']
initial_embeddings = {v: np.random.normal(0, 1, size=embedding_dim)
for v in vocab}
X = _make_word_word_matrix(len(vocab))
true = X.ravel()
mittens = FRAMEWORK_TO_MODULE[framework].Mittens(n=embedding_dim,
max_iter=50)
post_G = mittens.fit(X, vocab=vocab,
initial_embedding_dict=initial_embeddings)
pre_G = mittens.G_start
pre_pred = pre_G.dot(pre_G.T).ravel()
post_pred = post_G.dot(post_G.T).ravel()
pre_corr = _get_correlation(true, pre_pred)
post_corr = _get_correlation(true, post_pred)
assert post_corr > pre_corr
def test_mittens_parameter(framework):
"""Test that a large Mittens parameter keeps learned representations
closer to the original than a small Mittens parameter.
"""
if not TENSORFLOW_INSTALLED and framework == 'tf':
pytest.skip("Tensorflow not installed.")
np.random.seed(42)
embedding_dim = 50
vocab = ['a', 'b', 'c', 'd', 'e']
initial_embeddings = {v: np.random.normal(0, 1, size=embedding_dim)
for v in vocab}
X = _make_word_word_matrix(len(vocab))
diffs = dict()
small = 0.001
mid = 1
big = 1000
for m in [small, mid, big]:
mittens = FRAMEWORK_TO_MODULE[framework].Mittens(n=embedding_dim,
max_iter=50,
mittens=m)
G = mittens.fit(X, vocab=vocab,
initial_embedding_dict=initial_embeddings)
original = mittens.G_start
diffs[m] = np.linalg.norm(G - original)
assert diffs[small] > diffs[mid]
assert diffs[mid] > diffs[big]
def _make_word_word_matrix(n=50):
"""Returns a symmetric matrix where the entries are drawn from a
Poisson distribution"""
base = np.random.zipf(2, size=(n, n)) - 1
return base + base.T
def _get_correlation(true, pred):
"""Check correlation for nonzero elements of 'true'"""
nonzero = true > 0
return np.corrcoef(np.log(true[nonzero]), pred[nonzero])[0][1]
def _run_glove(glove_implementation, w=50, n=200, max_iter=100):
X = _make_word_word_matrix(w)
glove = glove_implementation(n=n, max_iter=max_iter)
G = glove.fit(X)
pred = G.dot(G.T).ravel()
true = X.ravel()
return _get_correlation(true, pred)
| 30.616541 | 74 | 0.649312 | 3.015625 |
3901b90912c92ecbec531d317c18d9b08a3fe90d | 2,106 | py | Python | molior/molior/utils.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | ["Apache-2.0"] | null | null | null | molior/molior/utils.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | ["Apache-2.0"] | null | null | null | molior/molior/utils.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | ["Apache-2.0"] | null | null | null |
"""
Provides utilities for molior core.
"""
import re
import os
import shlex
from launchy import Launchy
from molior.aptly import AptlyApi
from .configuration import Configuration
from .logger import get_logger
logger = get_logger()
def get_aptly_connection():
"""
Connects to aptly server and returns aptly
object.
Returns:
AptlyApi: The connected aptly api instance.
"""
cfg = Configuration()
api_url = cfg.aptly.get("api_url")
gpg_key = cfg.aptly.get("gpg_key")
aptly_user = cfg.aptly.get("user")
aptly_passwd = cfg.aptly.get("pass")
aptly = AptlyApi(api_url, gpg_key, username=aptly_user, password=aptly_passwd)
return aptly
def parse_repository_name(url):
"""
Returns the repository name
of a git clone url.
Args:
url (str): Git clone url to parse
Returns:
name (str): The name of the repository
Examples:
>>> url = 'ssh://[email protected]:1337/~jon/foobar.git'
>>> parse_repository_name(repo_name)
>>> 'foobar'
or:
>>> url = 'ssh://[email protected]:1337/~jon/foobar'
>>> parse_repository_name(repo_name)
>>> 'foobar'
"""
if url.endswith(".git"):
search = re.search(r"([0-9a-zA-Z_\-.]+).git$", url)
if search:
return search.group(1)
return os.path.basename(url)
async def get_changelog_attr(name, path):
"""
Gets given changelog attribute from given
repository path.
Args:
name (str): The attr's name.
path (pathlib.Path): The repo's path.
"""
attr = ""
err = ""
async def outh(line):
nonlocal attr
attr += line
async def errh(line):
nonlocal err
err += line
process = Launchy(shlex.split("dpkg-parsechangelog -S {}".format(name)), outh, errh, cwd=str(path))
await process.launch()
ret = await process.wait()
if ret != 0:
logger.error("error occured while getting changelog attribute: %s", str(err, "utf-8"))
raise Exception("error running dpkg-parsechangelog")
return attr.strip()
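# Hypothetical usage from an async caller (field name and path are illustrative only):
#   version = await get_changelog_attr("Version", repo_path)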
| 23.931818 | 103 | 0.617759 | 3.25 |
25c0f376d9ae58354eed6e30a086ccc6edd4d77f | 821 | js | JavaScript | src/main.js | 1808vue/vue | 9969d3ca1f28a8525207dd2e2c126c988eef533a | ["MIT"] | null | null | null | src/main.js | 1808vue/vue | 9969d3ca1f28a8525207dd2e2c126c988eef533a | ["MIT"] | null | null | null | src/main.js | 1808vue/vue | 9969d3ca1f28a8525207dd2e2c126c988eef533a | ["MIT"] | null | null | null |
import "./styls/reset.less"
import Vue from 'vue'
import App from './App'
import Axios from "axios"
// Components are subclasses of the Vue instance, so every property on Vue's prototype chain is inherited by them
Vue.prototype.$axios=Axios
Vue.config.productionTip = false
import router from './router'
// Set up mint-ui
import MintUI from 'mint-ui'
import 'mint-ui/lib/style.css'
Vue.use(MintUI)
import { Upload} from 'element-ui';
Vue.component(Upload.name, Upload);
import 'element-ui/lib/theme-chalk/index.css';
import Vuex from 'vuex';
Vue.use(Vuex);
const store = new Vuex.Store({
state: {
count: 0,
name:'韩梅梅',
shuzu:[]
},
mutations: {
shuju(state,val){
state.shuzu=val;
}
}
})
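// Hypothetical usage elsewhere in the app: commit the mutation to replace the stored array,
// e.g. this.$store.commit('shuju', someArray)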
// Components are subclasses of the Vue instance, so they inherit the properties on Vue's prototype chain
/* eslint-disable no-new */
new Vue({
el: '#app',
router,
store,
components: { App},// register the component
template: '<App/>' // replace the root element with the App component
})
| 19.093023 | 46 | 0.666261 | 3.0625 |
5ae11a799c495bdb135f12cf80ee513c6bbd408d | 1,399 | lua | Lua | ld48/app/window.lua | aglitchman/defold-ld48-game | 04844f623adcad675307566b0256d0a0a6145942 | ["Apache-2.0"] | 7 | 2021-04-27T12:52:16.000Z | 2021-08-08T19:30:10.000Z | ld48/app/window.lua | aglitchman/defold-ld48-game | 04844f623adcad675307566b0256d0a0a6145942 | ["Apache-2.0"] | null | null | null | ld48/app/window.lua | aglitchman/defold-ld48-game | 04844f623adcad675307566b0256d0a0a6145942 | ["Apache-2.0"] | null | null | null |
local M = {}
function M.resize(game_width, game_height)
if sys.get_sys_info().system_name == "Windows" then
local displays = defos.get_displays()
local current_display_id = defos.get_current_display_id()
local screen_width = displays[current_display_id].bounds.width
local screen_height = displays[current_display_id].bounds.height
game_width = game_width or 2500
game_height = game_height or 1400
local factor = 0.5
if tonumber(sys.get_config("display.high_dpi", 0)) == 1 then
factor = 1
end
local x, y, w, h = defos.get_view_size()
w = game_width * factor
h = game_height * factor
while screen_height * 0.9 <= h do
w = w / 1.25
h = h / 1.25
end
defos.set_view_size(x, y, w, h)
end
end
function M.center()
if sys.get_sys_info().system_name == "Windows" then
local displays = defos.get_displays()
local current_display_id = defos.get_current_display_id()
local screen_width = displays[current_display_id].bounds.width
local screen_height = displays[current_display_id].bounds.height
local x, y, w, h = defos.get_window_size()
x = math.floor((screen_width - w) / 2)
y = math.floor((screen_height - h) / 2)
defos.set_window_size(x, y, w, h)
end
end
return M
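-- Hypothetical usage from another Defold script (require path is illustrative only):
--   local window = require "ld48.app.window"
--   window.resize(2500, 1400)
--   window.center()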
| 31.795455 | 72 | 0.626876 | 3.484375 |
99e532df9beb7587e655ef6e1140fc111900aeb4 | 2,786 | rs | Rust | src/lib.rs | mith-mmk/rust-rect | 48544c09c6320e4413d740f31fc3df2ef7452425 | ["Apache-2.0", "MIT"] | null | null | null | src/lib.rs | mith-mmk/rust-rect | 48544c09c6320e4413d740f31fc3df2ef7452425 | ["Apache-2.0", "MIT"] | null | null | null | src/lib.rs | mith-mmk/rust-rect | 48544c09c6320e4413d740f31fc3df2ef7452425 | ["Apache-2.0", "MIT"] | null | null | null |
mod utils;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern {
fn alert(s: &str);
#[wasm_bindgen(js_namespace = Math)]
fn random() -> f64;
#[wasm_bindgen(js_namespace = console)]
fn log(s: &str);
}
fn rand_u32(range: u32) -> u32 {
return ( random() * (range as f64)) as u32;
}
#[wasm_bindgen]
pub struct Universe {
width: u32,
height: u32,
buffer: Vec<u8>,
}
#[wasm_bindgen]
impl Universe {
pub fn new (width: u32, height: u32) -> Universe {
let buffersize = width * height * 4;
let buffer = (0..buffersize)
.map(|_| {0})
.collect();
Universe {
width,
height,
buffer,
}
}
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
pub fn buffer(&self) -> *const u8 {
self.buffer.as_ptr()
}
pub fn fillbox(&mut self,color: u32){
let height = self.height;
let width = self.width;
let buf = &mut self.buffer;
// Unpack the 0x00RRGGBB color value into per-channel bytes; the buffer layout is RGBA
let red: u8 = ((color >> 16) & 0xff) as u8;
let green: u8 = ((color >> 8) & 0xff) as u8;
let blue: u8 = (color & 0xff) as u8;
let alpha: u8 = 0xff;
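// For example, color = 0x00FF8040 unpacks to red = 0xFF, green = 0x80, blue = 0x40.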
log(&format!("{} {} {}",blue,green,red));
for y in 0..height {
let offset = y * width * 4;
for x in 0..width {
let pos :usize = (offset + x * 4) as usize;
buf[pos] = red;
buf[pos + 1] = green;
buf[pos + 2] = blue;
buf[pos + 3] = alpha;
}
}
}
pub fn fillrandomrect(&mut self){
let height = self.height;
let width = self.width;
let buf = &mut self.buffer;
let startx:u32 = rand_u32(width);
let starty:u32 = rand_u32(height);
let endx:u32 = rand_u32(width-startx);
let endy:u32 = rand_u32(height-starty);
let red:u8 = rand_u32(255) as u8;
let green:u8 = rand_u32(255) as u8;
let blue:u8 = rand_u32(255) as u8;
let alpha:u8 = rand_u32(255) as u8;
for y in starty..endy {
let offset = y * width * 4;
for x in startx..endx {
let pos :usize= (offset + (x * 4)) as usize;
buf[pos] = red;
buf[pos + 1] = green;
buf[pos + 2] = blue;
buf[pos + 3] = alpha;
}
}
}
}
| 25.559633 | 73 | 0.488155 | 3.0625 |
dae32a68d1054438c2c880ab4935911ec9323be4 | 12,469 | dart | Dart | lib/ui/bordered_container.dart | ashishbeck/slide_puzzle | e07be4cd82df851b5ce2099322495426a98cb6f5 | ["MIT"] | 1 | 2022-03-28T16:25:09.000Z | 2022-03-28T16:25:09.000Z | lib/ui/bordered_container.dart | ashishbeck/slide_puzzle | e07be4cd82df851b5ce2099322495426a98cb6f5 | ["MIT"] | null | null | null | lib/ui/bordered_container.dart | ashishbeck/slide_puzzle | e07be4cd82df851b5ce2099322495426a98cb6f5 | ["MIT"] | null | null | null |
import 'package:flutter/material.dart';
import 'package:flutter/rendering.dart';
import 'package:flutter/scheduler.dart';
import 'package:provider/provider.dart';
import 'package:slide_puzzle/code/audio.dart';
import 'package:slide_puzzle/code/constants.dart';
import 'package:slide_puzzle/code/providers.dart';
class BorderedContainer extends StatefulWidget {
final String label;
final Widget child;
final double spacing;
final Color? color;
final bool isBottom;
final bool isRight;
final bool shouldAnimateEntry;
// final AnimationController? animationController;
final Function(AnimationController controller)? buttonController;
const BorderedContainer({
Key? key,
// this.animationController,
required this.label,
required this.child,
this.spacing = 10,
this.color,
this.isBottom = true,
this.isRight = true,
this.shouldAnimateEntry = true,
this.buttonController,
}) : super(key: key);
@override
State<BorderedContainer> createState() => _BorderedContainerState();
}
class _BorderedContainerState extends State<BorderedContainer>
with TickerProviderStateMixin {
late AnimationController controller;
late AnimationController buttonController;
bool isAnimatingBase = true;
Duration buttonDuration = Duration(milliseconds: 100);
_animateEntry() async {
ConfigProvider configProvider = context.read<ConfigProvider>();
if (configProvider.entryAnimationDone[widget.label] != null &&
configProvider.entryAnimationDone[widget.label]!) {
isAnimatingBase = false;
return;
}
buttonController.value = 1;
if (widget.label == "3x3" || widget.label == "4x4") {
configProvider.seenEntryAnimation("3x3");
configProvider.seenEntryAnimation("4x4");
}
if (widget.label == "Shuffle" || widget.label == "Reset") {
configProvider.seenEntryAnimation("Shuffle");
configProvider.seenEntryAnimation("Reset");
}
configProvider.seenEntryAnimation(widget.label);
AudioService.instance.elementEntry();
controller.forward().then((value) => setState(() {
isAnimatingBase = false;
buttonController.duration = Duration(milliseconds: 500);
buttonController
.reverse()
.then((value) => buttonController.duration = buttonDuration);
}));
}
@override
void initState() {
super.initState();
buttonController =
AnimationController(vsync: this, duration: buttonDuration);
if (widget.buttonController != null) {
widget.buttonController!(buttonController);
}
controller = AnimationController(
vsync: this, duration: Duration(milliseconds: defaultEntryTime));
if (widget.shouldAnimateEntry) {
_animateEntry();
} else {
// setState(() {
isAnimatingBase = false;
// });
}
}
@override
void dispose() {
controller.dispose();
buttonController.dispose();
super.dispose();
}
@override
Widget build(BuildContext context) {
return LayoutBuilder(builder: (context, constraints) {
return Container(
// height: height + 50,
// width: width + 50,
child: isAnimatingBase
? ClipPath(
clipper: CustomClipperShape(
isBottom: widget.isBottom,
isRight: widget.isRight,
spacing: widget.spacing,
),
child: SlideTransition(
position: Tween<Offset>(
begin: const Offset(0, -1),
end: const Offset(0, 0),
).animate(
CurvedAnimation(
parent: controller, curve: Curves.easeInOutSine),
),
child: Stack(
clipBehavior: Clip.none,
children: [
Positioned(
bottom: widget.isBottom ? -widget.spacing : null,
top: widget.isBottom ? null : -widget.spacing,
right: widget.isRight ? -widget.spacing : null,
left: widget.isRight ? null : -widget.spacing,
child: Container(
height: constraints.maxHeight,
width: constraints.maxWidth,
color: widget.buttonController != null
? primaryColor
: secondaryColor),
),
],
),
),
)
: Stack(
clipBehavior: Clip.none,
alignment: Alignment.bottomRight,
children: [
Positioned(
bottom: widget.isBottom ? -widget.spacing : null,
top: widget.isBottom ? null : -widget.spacing,
right: widget.isRight ? -widget.spacing : null,
left: widget.isRight ? null : -widget.spacing,
child: CustomPaint(
painter: MyFrameShape(
spacing: widget.spacing,
color: widget.color ?? primaryColor,
isBottom: widget.isBottom,
isRight: widget.isRight,
animationController: buttonController,
),
child: Container(
height: constraints.maxHeight,
width: constraints.maxWidth,
),
),
),
(widget.buttonController != null || widget.shouldAnimateEntry)
? SlideTransition(
position: Tween<Offset>(
begin: const Offset(0, 0),
end: Offset(
(widget.isRight
? widget.spacing
: -widget.spacing) /
constraints.maxWidth,
(widget.isBottom
? widget.spacing
: -widget.spacing) /
constraints.maxHeight))
.animate(buttonController),
child: widget.child)
: widget.child,
],
),
);
});
}
}
class MyFrameShape extends CustomPainter {
final double spacing;
final Color? color;
final bool isBottom;
final bool isRight;
final AnimationController? animationController;
MyFrameShape({
this.spacing = 10,
this.color,
this.isBottom = true,
this.isRight = true,
this.animationController,
});
@override
void paint(Canvas canvas, Size size) {
double height = size.height;
double width = size.width;
Paint paint_0 = Paint()
..color = color ?? primaryColor
..style = PaintingStyle.fill
..strokeWidth = 0.5;
double animationOffset =
animationController != null ? (1 - animationController!.value) : 1;
Path bottomRight = Path()
..moveTo(-spacing * animationOffset, -spacing * animationOffset)
..lineTo(width - spacing * animationOffset, -spacing * animationOffset)
..lineTo(width, 0) // (width, spacing)
..lineTo(width, height)
..lineTo(0, height) // (spacing, height)
..lineTo(-spacing * animationOffset, height - spacing * animationOffset)
..lineTo(-spacing * animationOffset, -spacing * animationOffset)
..close();
if (isBottom && isRight) canvas.drawPath(bottomRight, paint_0);
// Paint bottomLeft = Paint()
// ..color = color
// ..style = PaintingStyle.fill
// ..strokeWidth = 0.5;
Path bottomLeft = Path()
..moveTo(0, 0)
..lineTo(spacing * animationOffset, -spacing * animationOffset)
..lineTo(width + spacing * animationOffset, -spacing * animationOffset)
..lineTo(
width + spacing * animationOffset, height - spacing * animationOffset)
..lineTo(width, height)
..lineTo(0, height)
..lineTo(0, 0);
if (isBottom && !isRight) canvas.drawPath(bottomLeft, paint_0);
Path topRight = Path()
..moveTo(-spacing * animationOffset, spacing * animationOffset)
..lineTo(0, 0)
..lineTo(width, 0)
..lineTo(width, height)
..lineTo(
width - spacing * animationOffset, height + spacing * animationOffset)
..lineTo(-spacing * animationOffset, height + spacing * animationOffset)
..lineTo(-spacing * animationOffset, spacing * animationOffset);
if (!isBottom && isRight) canvas.drawPath(topRight, paint_0);
}
@override
bool shouldRepaint(covariant CustomPainter oldDelegate) {
// TODO: implement shouldRepaint
return true;
}
}
class CustomClipperShape extends CustomClipper<Path> {
final double spacing;
final bool isBottom;
final bool isRight;
CustomClipperShape({
required this.spacing,
required this.isBottom,
required this.isRight,
});
@override
Path getClip(Size size) {
double height = size.height;
double width = size.width;
var bottomRight = Path()
..moveTo(spacing, spacing)
..lineTo(width + spacing, spacing)
..lineTo(width + spacing, height + spacing)
..lineTo(0 + spacing, height + spacing)
..lineTo(0 + spacing, 0 + spacing)
..close();
var bottomLeft = Path()
..moveTo(-spacing, spacing)
..lineTo(width - spacing, spacing)
..lineTo(width - spacing, height + spacing)
..lineTo(0 - spacing, height + spacing)
..lineTo(0 - spacing, 0 + spacing)
..close();
var topRight = Path()
..moveTo(spacing, -spacing)
..lineTo(width + spacing, -spacing)
..lineTo(width + spacing, height - spacing)
..lineTo(0 + spacing, height - spacing)
..lineTo(0 + spacing, 0 - spacing)
..close();
if (isBottom && isRight) {
return bottomRight;
} else if (isBottom && !isRight) {
return bottomLeft;
} else if (!isBottom && isRight) {
return topRight;
}
return bottomRight;
}
@override
bool shouldReclip(covariant CustomClipper<Path> oldClipper) {
return true;
}
}
class ArrowClipperShape extends CustomClipper<Path> {
final double spacing;
final bool isBottom;
final bool isRight;
ArrowClipperShape({
required this.spacing,
required this.isBottom,
required this.isRight,
});
@override
Path getClip(Size size) {
double height = size.height;
double width = size.width;
var bottomRight = Path()
..moveTo(spacing, spacing)
..lineTo(width + spacing, spacing)
..lineTo(width + spacing, height + spacing)
..lineTo(0 + spacing, height + spacing)
..lineTo(0 + spacing, 0 + spacing)
..close();
var bottomLeft = Path()
..moveTo(-spacing, -spacing)
..lineTo(width, -spacing)
..lineTo(width, height + spacing)
..lineTo(0 - spacing, height + spacing)
..lineTo(0 - spacing, 0 - spacing)
..close();
// var bottomLeft = Path()
// ..moveTo(-spacing, spacing)
// ..lineTo(width - spacing, spacing)
// ..lineTo(width - spacing, height + spacing)
// ..lineTo(0 - spacing, height + spacing)
// ..lineTo(0 - spacing, 0 + spacing)
// ..close();
// var topRight = Path()
// ..moveTo(width - spacing, -spacing)
// ..lineTo(width + spacing, -spacing)
// ..lineTo(width + spacing, height + spacing)
// ..lineTo(0 + spacing, height + spacing)
// ..lineTo(0 + spacing, 0 - spacing)
// ..close();
var topRight = Path()
..moveTo(0, 0)
..lineTo(spacing, -spacing)
..lineTo(width + spacing, -spacing)
..lineTo(width + spacing, height - spacing)
..lineTo(width, height)
..lineTo(0, height)
..lineTo(0, 0)
..close();
if (isBottom && isRight) {
return bottomRight;
} else if (isBottom && !isRight) {
return bottomLeft;
} else if (!isBottom && isRight) {
return topRight;
}
return bottomRight;
}
@override
bool shouldReclip(covariant CustomClipper<Path> oldClipper) {
return true;
}
}
| 32.471354 | 80 | 0.565723 | 3.1875 |
14bc716203b428eaa5bae937e64391a16dfa6668 | 4,419 | ts | TypeScript | src/MesaView.ts | 9sako6/MESA | 5a4f170a132d20410cf554890f5d26d107ca0664 | ["MIT"] | 3 | 2019-02-12T10:03:07.000Z | 2020-09-05T07:07:19.000Z | src/MesaView.ts | 9sako6/MESA | 5a4f170a132d20410cf554890f5d26d107ca0664 | ["MIT"] | 9 | 2019-02-11T20:11:31.000Z | 2020-01-03T20:35:40.000Z | src/MesaView.ts | 9sako6/MESA | 5a4f170a132d20410cf554890f5d26d107ca0664 | ["MIT"] | null | null | null |
import $ from "jquery";
import MesaModel, { Attribute, Tag } from "./MesaModel";
export default class MesaView {
writeTextArea(text: string, model: MesaModel): void {
model.editor.session.setValue(text);
}
initUploadButton(): void {
const button: string = `
<form>
<label class="func-btn" for="upload-button">
Open
<input type="file" id="upload-button" style="display:none;">
</label>
<span class="file-info" id="file-name"></span>
</form>`;
$("#upload-button").replaceWith(button);
}
initSaveButton(): void {
const button: string = `
<table>
<tr>
<td>
<div class="func-btn"><a id="text-donwload" download="mesa_file.xml" href="#">Save</a></div>
</td>
<td>
<input type='text' id="download-filename" placeholder="Enter a file name">
<span class="file-info">.xml</span>
</td>
</tr>
</table>`;
$("#save-button").replaceWith(button);
}
initTagUploadButton(): void {
const button: string = `
<form>
<label class="func-btn" id="load-json" for="load-tags-button">
Load Tags
<input type="file" id="load-tags-button" style="display:none;">
</label>
<span class="file-info" id="tag-file-name"></span>
</form>`;
$("#tag-upload-button").replaceWith(button);
}
initTagSaveButton(): void {
const button: string = `
<div class="func-btn"><a id="json-donwload" download="mesa_tags.json" href="#">Save Tags</a></div>
<input type='text' id="download-jsonname" placeholder="Enter a file name">
<span class="file-info">.json</span>`;
$("#tag-save-button").replaceWith(button);
}
initTagSettingTable(): void {
const nameRow: string = `
<td class="table-header">Name</td>
<td><input type='text' id="tag-name-form" placeholder="Enter a tag name"></td>`;
const sepRow: string = `
<td class=" table-header">Separator</td>
<td><input type='text' id="tag-sep-form" placeholder="If you need ..."></td>`;
const isXmlRow: string = `
<td class="table-header">XML Tag?</td>
<td>
<input id="xml-flag" type="checkbox">
<label for="xml-flag"></label>
</td>`;
const attributeRow: string = `
<td><input type='text' class="attribute-name-form" placeholder="Enter a name"></td>
<td><input type='text' class="attribute-value-form" placeholder="Enter a value"></td>`;
const addAttributeButton: string = `<div class="func-btn" id="add-attribute">Add an attribute</div>`;
const table: string = `
<table class="tag-setting-table">
<tr>
${isXmlRow}
</tr>
<tr>
${nameRow}
</tr>
<tr id="tag-separator">
${sepRow}
</tr>
<tr id="attributes-header">
<td class="table-header">Attributes</td><td>${addAttributeButton}</td>
</tr>
</table>
<table class="tag-setting-table" id="attributes-input">
<tr>
${attributeRow}
</tr>
</table>`;
$("#tag-setting-table").html(table);
}
makeTagButton(tagList: Tag[]): void {
let addElem: string = "";
for (let tag of tagList) {
if (tag.xmlFlag) {
// get attributes
let attributes: string = "";
if (tag.attributes !== undefined) {
tag.attributes.forEach(function(attr: Attribute) {
attributes += `${attr.name}__MESA_ATTRIBUTE_SEPARATOR__${attr.value},`; // __MESA_ATTRIBUTE_SEPARATOR__ and comma is neccessary
});
}
// make tag
addElem += `<div class="func-btn xml-tag-btn" val="${tag.name}" attributes="${attributes}">${tag.name}</div>`;
} else {
addElem += `<div class="func-btn tag-btn" val="${tag.sepChar +
tag.name}">${tag.name}</div>`;
}
}
// add buttons
$("#tags").append(addElem);
}
hideAddedMsg(): void {
$("#added-message").hide();
}
showAddedMsg(tagInfoDic: Tag): void {
$("#added-message").append("");
document.getElementById(
"added-message"
)!.innerText = `${tagInfoDic.name} was added.`;
$("#added-message").show();
$("#added-message").fadeOut(1500);
}
addAttributesInput(): void {
const attributeRow: string = `
<td><input type='text' id="attribute-name-form" placeholder="Enter a name"></td>
<td><input type='text' id="attribute-value-form" placeholder="Enter a value"></td>`;
$("#attributes-input").append(`<tr>${attributeRow}</tr>`);
}
}
| 30.6875 | 139 | 0.589274 | 3.015625 |
148fa05e5b14693b5257d98c8e442f1c68fe7737 | 6,180 | ts | TypeScript | src/modules/cau-notice-watcher/index.ts | paywteam/eodiro-api-mysql | 5276aa32340da11a627bf61dc3b37db63c73ead2 | ["MIT"] | 10 | 2020-06-22T11:41:15.000Z | 2021-07-16T02:19:22.000Z | src/modules/cau-notice-watcher/index.ts | paywteam/eodiro-api-mysql | 5276aa32340da11a627bf61dc3b37db63c73ead2 | ["MIT"] | 19 | 2020-06-02T11:40:17.000Z | 2021-04-13T23:14:30.000Z | src/modules/cau-notice-watcher/index.ts | paywteam/eodiro-api2 | 5276aa32340da11a627bf61dc3b37db63c73ead2 | ["MIT"] | 3 | 2020-10-04T13:02:05.000Z | 2021-01-22T16:05:35.000Z |
import prisma from '@/modules/prisma'
import appRoot from 'app-root-path'
import chalk from 'chalk'
import fs from 'fs'
import { JSDOM } from 'jsdom'
import { PendingXHR } from 'pending-xhr-puppeteer'
import puppeteer, { Browser, Page } from 'puppeteer'
import { telegramBot } from '../telegram-bot'
export type TitleBuilder = (
/** A single notice item */ noticeItemElement: HTMLElement | Element
) => string
export type UrlBuilder = (
/** A single notice item */ noticeItemElement: HTMLElement | Element
) => string
export type FeedOptions = {
/**
* Minutes
* @default 10
*/
interval?: number
}
export interface Publisher {
/** Notice name which will be displayed to end users */
name: string
/** Unique key (id) for differentiating each publisher */
key: string
url: string
/** A CSS selector that matches a single notice item */
noticeItemSelector: string
titleBuilder: TitleBuilder
urlBuilder?: UrlBuilder
}
export type PublisherBuilder = (siteInformation: {
name: string
key: string
url: string
}) => Publisher
export type LastNotice = Record<
string,
{
displayName: string
title: string
}
>
const eodiroTempDir = appRoot.resolve('/.eodiro')
const lastNoticeFilePath = appRoot.resolve('/.eodiro/last_notice.json')
export class CauNoticeWatcher {
private feedOptions: FeedOptions
private publishers: Publisher[] = []
private lastNotice: LastNotice
constructor(feedOptions?: FeedOptions) {
if (!feedOptions) {
feedOptions = {
interval: 10,
}
} else if (!feedOptions?.interval) {
feedOptions.interval = 10
}
this.feedOptions = feedOptions
this.lastNotice = CauNoticeWatcher.loadLastNoticeFile()
}
public register(publisher: Publisher): void {
for (const registeredSubscriber of this.publishers) {
if (registeredSubscriber.key === publisher.key) {
throw new Error(
`${chalk.blueBright(
'[Notice Watcher]'
)} Duplicate subscriber key detected: ${publisher.key}`
)
}
}
this.publishers.push(publisher)
if (!this.lastNotice[publisher.key]) {
this.lastNotice[publisher.key] = {
displayName: publisher.name,
title: '',
}
}
}
/**
* Get the `last_notice.json` file inside '.eodiro' directory
*/
public static loadLastNoticeFile(): LastNotice {
let lastNotice: LastNotice
if (!fs.existsSync(eodiroTempDir)) {
fs.mkdirSync(eodiroTempDir)
}
if (!fs.existsSync(lastNoticeFilePath)) {
lastNotice = {}
fs.writeFileSync(lastNoticeFilePath, JSON.stringify(lastNotice, null, 2))
} else {
lastNotice = JSON.parse(fs.readFileSync(lastNoticeFilePath, 'utf8'))
}
return lastNotice
}
private writeLastNoticeFile() {
fs.writeFileSync(
lastNoticeFilePath,
JSON.stringify(this.lastNotice, null, 2)
)
}
private getLastNoticeTitle(publisher: Publisher) {
return this.lastNotice[publisher.key].title
}
private updateLastNotice(publisher: Publisher, title: string) {
this.lastNotice[publisher.key] = {
displayName: publisher.name,
title,
}
}
public async run(): Promise<void> {
const browser = await puppeteer.launch()
const processResults = []
for (const subscriber of this.publishers) {
processResults.push(this.processPublisher(browser, subscriber))
}
await Promise.all(processResults)
// Dispose the browser
await browser.close()
}
private async processPublisher(browser: Browser, publisher: Publisher) {
const page = await browser.newPage()
page.setViewport({ width: 1280, height: 800 })
// page.setMaxListeners(Infinity)
const noticesSet = await CauNoticeWatcher.visit(page, publisher)
const notices = Array.from(noticesSet)
if (notices.length === 0) {
return
}
// Get subscriptions
const subscriptions = await prisma.noticeNotificationsSubscription.findMany(
{
where: {
noticeKey: publisher.key,
},
select: {
user: {
select: {
telegrams: {
select: {
chatId: true,
},
},
},
},
},
}
)
const chatIds = subscriptions
.map((sub) => sub.user.telegrams.map((tel) => tel.chatId))
.flat()
const shouldSendPush = chatIds.length > 0
const lastNoticeIndex = notices.findIndex(
(notice) => notice.title === this.getLastNoticeTitle(publisher)
)
if (lastNoticeIndex > 0 && shouldSendPush) {
for (let i = lastNoticeIndex - 1; i >= 0; i -= 1) {
const notice = notices[i]
// Send Telegram notifications
chatIds.forEach((chatId) => {
return telegramBot.sendMessage(
chatId,
`
새로운 <b>${publisher.name}</b> 공지사항이 올라왔습니다.
<b>${notice.title}</b>
${notice.noticeItemUrl}
`,
{ parse_mode: 'HTML' }
)
})
}
}
await page.close()
this.updateLastNotice(publisher, notices[0].title)
this.writeLastNoticeFile()
}
static async visit(
page: Page,
publisher: Publisher,
pageNumber?: number
): Promise<
{
title: string
noticeItemUrl: string
}[]
> {
const pendingXHR = new PendingXHR(page)
try {
await page.goto(publisher.url)
await pendingXHR.waitForAllXhrFinished()
await page.waitForSelector(publisher.noticeItemSelector)
} catch (err) {
throw new Error(err)
}
const bodyHtml = await page.$eval('body', (body) => body.innerHTML)
const { body } = new JSDOM(bodyHtml).window.document
const notices: {
title: string
noticeItemUrl: string
}[] = []
const noticeElms = body.querySelectorAll(publisher.noticeItemSelector)
for (const noticeElm of Array.from(noticeElms)) {
const title = publisher.titleBuilder(noticeElm)
const noticeItemUrl = publisher.urlBuilder
? publisher.urlBuilder(noticeElm)
: publisher.url
notices.push({
title,
noticeItemUrl,
})
}
return notices
}
}
| 23.769231 | 80 | 0.628964 | 3.109375 |
66471c1bcf910bbfac3a5d1798572d1132da56b2 | 14,811 | py | Python | roverpro/rover_data.py | RoverRobotics/openrover_python | bbbd24596db9f1c4e5a57d92fca048e289b668f0 | ["BSD-3-Clause"] | 1 | 2020-05-20T18:43:28.000Z | 2020-05-20T18:43:28.000Z | roverpro/rover_data.py | RoverRobotics/openrover_python | bbbd24596db9f1c4e5a57d92fca048e289b668f0 | ["BSD-3-Clause"] | 3 | 2019-04-22T21:48:07.000Z | 2020-06-17T19:10:04.000Z | roverpro/rover_data.py | RoverRobotics/openrover_python2 | bbbd24596db9f1c4e5a57d92fca048e289b668f0 | ["BSD-3-Clause"] | 1 | 2020-08-16T21:40:00.000Z | 2020-08-16T21:40:00.000Z |
import abc
import enum
import functools
import re
from typing import NamedTuple, Optional
class ReadDataFormat(abc.ABC):
python_type = None
@abc.abstractmethod
def description(self):
raise NotImplementedError
@abc.abstractmethod
def unpack(self, b: bytes):
raise NotImplementedError
class WriteDataFormat(abc.ABC):
python_type = None
@abc.abstractmethod
def description(self):
raise NotImplementedError
@abc.abstractmethod
def pack(self, value) -> bytes:
raise NotImplementedError
class IntDataFormat(ReadDataFormat, WriteDataFormat):
def __init__(self, nbytes, signed):
self.nbytes = nbytes
self.signed = signed
def description(self):
s = "signed" if self.signed else "unsigned"
n = self.nbytes * 8
return f"{s} integer ({n} bits)"
def pack(self, value):
return int(value).to_bytes(self.nbytes, byteorder="big", signed=self.signed)
def unpack(self, b: bytes):
return int.from_bytes(b, byteorder="big", signed=self.signed)
ROVER_LEGACY_VERSION = 40621
@functools.total_ordering
class RoverFirmwareVersion(NamedTuple):
@classmethod
def parse(cls, a_str):
        ver_re = re.compile(r"(\d+(?:[.]\d+){0,2})(?:-([^+]+))?(?:[+](.+))?", re.VERBOSE)
        match = ver_re.fullmatch(a_str)
        if match is None:
            raise ValueError(f"invalid version string: {a_str!r}")
        parts = [int(p) for p in match.group(1).split(".")]
return RoverFirmwareVersion(*parts)
major: int
minor: int = 0
patch: int = 0
build: str = ""
prerelease: str = ""
@property
def value(self):
        return self.major * 10000 + self.minor * 100 + self.patch
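    # Note: the prerelease operands are swapped so that a version carrying a prerelease
    # tag (e.g. "1.2.3-beta") sorts below the same version without one ("1.2.3").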
def __lt__(self, other):
return (self.major, self.minor, self.patch, other.prerelease) < (
other.major,
other.minor,
other.patch,
self.prerelease,
)
def __str__(self):
return (
f"{self.major}.{self.minor}.{self.patch}"
+ (("-" + self.prerelease) if self.prerelease else "")
+ (("+" + self.build) if self.build else "")
)
class DataFormatFirmwareVersion(ReadDataFormat):
python_type = RoverFirmwareVersion
def unpack(self, b):
v = UINT16.unpack(b)
if v == ROVER_LEGACY_VERSION:
return RoverFirmwareVersion(1, 0, 0)
        return RoverFirmwareVersion(v // 10000, v // 100 % 100, v % 100)
def description(self):
return (
"XYYZZ, where X=major version, Y=minor version, Z = patch version."
"e.g. 10502 = version 1.05.02. The special value 16421 represents pre-1.3 versions"
)
class DataFormatChargerState(ReadDataFormat, WriteDataFormat):
CHARGER_ACTIVE_MAGIC_BYTES = bytes.fromhex("dada")
CHARGER_INACTIVE_MAGIC_BYTES = bytes.fromhex("0000")
python_type = bool
def pack(self, value):
if value:
return self.CHARGER_ACTIVE_MAGIC_BYTES
else:
return self.CHARGER_INACTIVE_MAGIC_BYTES
def unpack(self, b):
return bytes(b) == self.CHARGER_ACTIVE_MAGIC_BYTES
def description(self):
return "0xDADA if charging, else 0x0000"
class BatteryStatus(enum.Flag):
overcharged_alarm = enum.auto()
terminate_charge_alarm = enum.auto()
over_temp_alarm = enum.auto()
terminate_discharge_alarm = enum.auto()
remaining_capacity_alarm = enum.auto()
remaining_time_alarm = enum.auto()
initialized = enum.auto()
discharging = enum.auto()
fully_charged = enum.auto()
fully_discharged = enum.auto()
class DataFormatBatteryStatus(ReadDataFormat):
python_type = BatteryStatus
def unpack(self, b: bytes):
assert len(b) == 2
as_int = int.from_bytes(b, byteorder="big", signed=False)
result = BatteryStatus(0)
for mask, val in (
(0x8000, BatteryStatus.overcharged_alarm),
(0x4000, BatteryStatus.terminate_charge_alarm),
(0x1000, BatteryStatus.over_temp_alarm),
(0x0800, BatteryStatus.terminate_discharge_alarm),
(0x0200, BatteryStatus.remaining_capacity_alarm),
(0x0100, BatteryStatus.remaining_time_alarm),
(0x0080, BatteryStatus.initialized),
(0x0040, BatteryStatus.discharging),
(0x0020, BatteryStatus.fully_charged),
(0x0010, BatteryStatus.fully_discharged),
):
if as_int & mask:
result |= val
return result
def description(self):
return "bit flags"
class DriveMode(enum.IntEnum):
OPEN_LOOP = 0
CLOSED_LOOP = 1
UINT16 = IntDataFormat(2, False)
INT16 = IntDataFormat(2, True)
UINT8 = IntDataFormat(1, signed=False)
class DataFormatFixedPrecision(ReadDataFormat, WriteDataFormat):
"""A fractional number packed as an integer, but representing a fractional number"""
def __init__(self, base_type, step=1.0, zero=0.0):
self.base_type = base_type
# a change of 1 in the python type corresponds to a change of this many in the base type
self.step = step
# the value of 0 in the python type corresponds to this value in the base type
self.zero = zero
def unpack(self, b: bytes):
n = self.base_type.unpack(b)
return (n - self.zero) / self.step
def pack(self, p):
n = round(p * self.step + self.zero)
return self.base_type.pack(n)
def description(self):
return "fractional (resolution=1/{}, zero={}) stored as {}".format(
self.step, self.zero, self.base_type.description()
)
class DataFormatDriveMode(ReadDataFormat):
python_type = DriveMode
def unpack(self, b: bytes):
return DriveMode(UINT16.unpack(b))
def pack(self, p: DriveMode):
return UINT16.pack(p.value)
def description(self):
return DriveMode.__doc__
OLD_CURRENT_FORMAT = DataFormatFixedPrecision(UINT16, 34)
SIGNED_MILLIS_FORMAT = DataFormatFixedPrecision(INT16, 1000)
UNSIGNED_MILLIS_FORMAT = DataFormatFixedPrecision(UINT16, 1000)
OLD_VOLTAGE_FORMAT = DataFormatFixedPrecision(UINT16, 58)
FAN_SPEED_RESPONSE_FORMAT = DataFormatFixedPrecision(UINT16, 240)
DECIKELVIN_FORMAT = DataFormatFixedPrecision(UINT16, 10, zero=2731.5)
PERCENTAGE_FORMAT = DataFormatFixedPrecision(UINT16, 100)
MOTOR_EFFORT_FORMAT = DataFormatFixedPrecision(UINT8, 125, 125)
CHARGER_STATE_FORMAT = DataFormatChargerState()
FIRMWARE_VERSION_FORMAT = DataFormatFirmwareVersion()
DRIVE_MODE_FORMAT = DataFormatDriveMode()
BATTERY_STATUS_FORMAT = DataFormatBatteryStatus()
class MotorStatusFlag(enum.Flag):
NONE = 0
FAULT1 = enum.auto()
FAULT2 = enum.auto()
DECAY_MODE = enum.auto()
REVERSE = enum.auto()
BRAKE = enum.auto()
COAST = enum.auto()
class DataFormatMotorStatus(ReadDataFormat):
def description(self):
return "motor status bit flags"
def unpack(self, b: bytes):
u = UINT16.unpack(b)
bit_meanings = [
MotorStatusFlag.FAULT1,
MotorStatusFlag.FAULT2,
MotorStatusFlag.DECAY_MODE,
MotorStatusFlag.REVERSE,
MotorStatusFlag.BRAKE,
MotorStatusFlag.COAST,
]
        if u.bit_length() > len(bit_meanings):
            raise ValueError("too many bits to unpack")
result = MotorStatusFlag.NONE
for i, flag in enumerate(bit_meanings):
if u & 1 << i:
result |= flag
return result
class DataFormatIgnored(WriteDataFormat):
def description(self):
return f"Ignored data {self.n_bytes} bytes long"
def pack(self, value=None) -> bytes:
assert value is None
return bytes(self.n_bytes)
def __init__(self, n_bytes):
self.n_bytes = n_bytes
class SystemFaultFlag(enum.Flag):
NONE = 0
OVERSPEED = enum.auto()
OVERCURRENT = enum.auto()
class DataFormatSystemFault(ReadDataFormat):
def description(self):
return "System fault bit flags"
def unpack(self, b: bytes):
u = UINT16.unpack(b)
bit_meanings = [SystemFaultFlag.OVERSPEED, SystemFaultFlag.OVERCURRENT]
        if u.bit_length() > len(bit_meanings):
            raise ValueError("too many bits to unpack")
result = SystemFaultFlag.NONE
for i, flag in enumerate(bit_meanings):
if u & 1 << i:
result |= flag
return result
class DataElement:
def __init__(
self,
index: int,
data_format: ReadDataFormat,
name: str,
description: str = None,
not_implemented: bool = False,
since: Optional[str] = None,
until: Optional[str] = None,
):
self.index = index
self.data_format = data_format
self.name = name
self.description = description
self.not_implemented = not_implemented
self.since_version = None if since is None else RoverFirmwareVersion.parse(since)
self.until_version = None if until is None else RoverFirmwareVersion.parse(until)
def supported(self, version):
if isinstance(version, str):
v = RoverFirmwareVersion.parse(version)
elif isinstance(version, RoverFirmwareVersion):
v = version
else:
raise TypeError(
f"Expected string or {type(RoverFirmwareVersion)}, but got {type(version)}"
)
if self.not_implemented:
return False
if self.since_version is not None and v < self.since_version:
return False
if self.until_version is not None:
if self.until_version <= v:
return False
return True
elements = [
DataElement(
0, OLD_CURRENT_FORMAT, "battery (A+B) current (external)", "total current from batteries"
),
DataElement(2, UINT16, "left motor speed", not_implemented=True),
DataElement(4, UINT16, "right motor speed", not_implemented=True),
DataElement(
6,
UINT16,
"flipper position 1",
"flipper position sensor 1. 0=15 degrees; 1024=330 degrees;",
),
DataElement(
8,
UINT16,
"flipper position 2",
"flipper position sensor 2. 0=15 degrees; 1024=330 degrees;",
),
DataElement(10, OLD_CURRENT_FORMAT, "left motor current"),
DataElement(12, OLD_CURRENT_FORMAT, "right motor current"),
DataElement(
14,
UINT16,
"left motor encoder count",
"May overflow or underflow. Increments when motor driven forward, decrements backward",
since="1.4",
),
DataElement(
16,
UINT16,
"right motor encoder count",
"May overflow or underflow. Increments when motor driven forward, decrements backward",
since="1.4",
),
DataElement(18, UINT16, "motors fault flag", not_implemented=True),
DataElement(20, UINT16, "left motor temperature"),
DataElement(22, UINT16, "right motor temperature", not_implemented=True),
DataElement(24, OLD_VOLTAGE_FORMAT, "battery A voltage (external)"),
DataElement(26, OLD_VOLTAGE_FORMAT, "battery B voltage (external)"),
DataElement(
28,
UINT16,
"left motor encoder interval",
"0 when motor stopped. Else proportional to motor period (inverse motor speed)",
),
DataElement(
30,
UINT16,
"right motor encoder interval",
"0 when motor stopped. Else proportional to motor period (inverse motor speed)",
),
DataElement(
32,
UINT16,
"flipper motor encoder interval",
"0 when motor stopped. Else proportional to motor period (inverse motor speed)",
not_implemented=True,
),
DataElement(
34,
PERCENTAGE_FORMAT,
"battery A state of charge",
"Proportional charge, 0.0=empty, 1.0=full",
),
DataElement(
36,
PERCENTAGE_FORMAT,
"battery B state of charge",
"Proportional charge, 0.0=empty, 1.0=full",
),
DataElement(38, CHARGER_STATE_FORMAT, "battery charging state"),
DataElement(40, FIRMWARE_VERSION_FORMAT, "release version"),
DataElement(42, OLD_CURRENT_FORMAT, "battery A current (external)"),
DataElement(44, OLD_CURRENT_FORMAT, "battery B current (external)"),
DataElement(46, UINT16, "motor flipper angle"),
DataElement(48, FAN_SPEED_RESPONSE_FORMAT, "fan speed"),
DataElement(50, DRIVE_MODE_FORMAT, "drive mode", until="1.7"),
DataElement(52, BATTERY_STATUS_FORMAT, "battery A status", since="1.2"),
DataElement(54, BATTERY_STATUS_FORMAT, "battery B status", since="1.2"),
DataElement(56, UINT16, "battery A mode", since="1.2"),
DataElement(58, UINT16, "battery B mode", since="1.2"),
DataElement(60, DECIKELVIN_FORMAT, "battery A temperature (internal)", since="1.2"),
DataElement(62, DECIKELVIN_FORMAT, "battery B temperature (internal)", since="1.2"),
DataElement(64, UNSIGNED_MILLIS_FORMAT, "battery A voltage (internal)", since="1.2"),
DataElement(66, UNSIGNED_MILLIS_FORMAT, "battery B voltage (internal)", since="1.2"),
DataElement(
68,
SIGNED_MILLIS_FORMAT,
"battery A current (internal)",
">0 = charging; <0 = discharging",
since="1.2",
),
DataElement(
70,
SIGNED_MILLIS_FORMAT,
"battery B current (internal)",
">0 = charging; <0 = discharging",
since="1.2",
),
DataElement(72, DataFormatMotorStatus(), "left motor status", since="1.7"),
DataElement(74, DataFormatMotorStatus(), "right motor status", since="1.7"),
DataElement(76, DataFormatMotorStatus(), "flipper motor status", since="1.7"),
DataElement(78, FAN_SPEED_RESPONSE_FORMAT, "fan 1 duty", since="1.9"),
DataElement(80, FAN_SPEED_RESPONSE_FORMAT, "fan 2 duty", since="1.9"),
DataElement(82, DataFormatSystemFault(), "system fault flags", since="1.10"),
]
ROVER_DATA_ELEMENTS = {e.index: e for e in elements}
def strike(s):
return f"~~{s}~~"
def doc():
lines = ["| # | Name | Data Type | Description |", "| - | ---- | --------- | ----------- |"]
for de in elements:
lines.append(
"|"
+ "|".join(
[
                    strike(de.index) if de.not_implemented else str(de.index),
                    de.name,
                    de.data_format.description(),
                    de.description or "",
]
)
+ "|"
)
return "\n".join(lines)
if __name__ == "__main__":
print(doc())
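# Interpret a raw 16-bit encoder count difference as a signed delta, so that counter
# wraparound (e.g. 65535 -> 0) still yields the small signed change it represents.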
def fix_encoder_delta(delta):
MAX_ENCODER = 2 ** 16
delta %= MAX_ENCODER
if delta < MAX_ENCODER / 2:
return delta
else:
return delta - MAX_ENCODER
| 30.85625 | 97 | 0.629735 | 3.109375 |
1a3f153b6496e72131bae8cb8f13d77c521410a0 | 42,940 | py | Python | classification/ecrire2.py | kachaloali/m1Stage | 9fd254637dca0aea7f0e930164079fd18c499284 | [
"MIT"
] | null | null | null | classification/ecrire2.py | kachaloali/m1Stage | 9fd254637dca0aea7f0e930164079fd18c499284 | [
"MIT"
] | null | null | null | classification/ecrire2.py | kachaloali/m1Stage | 9fd254637dca0aea7f0e930164079fd18c499284 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import commands
import operator
from optparse import OptionParser
parser = OptionParser(usage="%prog -f FILE,FILE,... -o FILE -i FILE -a FILE -p PATH")
parser.add_option("-f", "--files", dest="files",help ="The classification files separated by commas")
parser.add_option("-o", "--out", dest="out",help ="The output file name")
parser.add_option("-i", "--fas", dest="fas",help ="The fasta file name")
parser.add_option("-a", "--attr", dest="attr",help ="The attributes file PDB")
parser.add_option("-p", "--path", dest="path",help ="Path to programs")
(args, options) = parser.parse_args()
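# Example invocation (file names are purely illustrative):
#   python ecrire2.py -f seqs_d30_classif-out.lp,seqs_d50_classif-out.lp -o results.csv \
#                     -i seqs.fasta -a seqs_attr.txt -p progs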
def main(files=args.files,output=args.out,fas=args.fas,attr=args.attr, pathToProg=args.path):
#We retrieve the names of the classification files
if ',' in files: files = files.split(',')
else: files = files.split()
diQueriesSeq, diNewFamily, param = {}, {}, []
diAttrib, diNumSeqAndIdSeq = getIdSeqNumSeqAndColrs(fas,attr)
fastaFileName = fas.replace('.fasta', '')
if os.path.exists(fastaFileName +'_rClassif/'): print commands.getoutput('rm -r '+ fastaFileName +'_rClassif/')
print commands.getoutput('mkdir '+ fastaFileName +'_rClassif/')
####################################################################################################
#We retrieve only the test sequences
for idSeq, comment in diAttrib.items():
if comment == 'black' :
diQueriesSeq[idSeq]=[]
for i in range(len(files)):
diQueriesSeq[idSeq].append([[], []])
#For each file we replace each space with a line break and then retrieve the parameters of the file
for ifile in files:
print commands.getoutput("cat "+ ifile +" | tr \' \' \'\n\' > "+ ifile +'bis')
print commands.getoutput("rm "+ ifile)
print commands.getoutput("mv "+ ifile +'bis '+ ifile)
#looking for the parameters
liste, index1 = [], 0
if "_" in ifile: liste = ifile.split("_")
elem = [ elt for elt in liste if "-classif" in elt ]
for elt in liste:
if "-classif" not in elt: index1 += len(elt) + 1
else: index2 = elt.find('-classif')
index2 += index1
param.append(ifile[index1:index2])
###################################################################################################
"""
    Here, if several classification files are submitted, we go through each file and record
    the information it provides. A sequence may be classified according to one classification
    file and not classified according to another; this depends on the parameters used to build
    these files. The parameters are those used since the alignment step (paloma).
"""
diFile_concepts, counter = {}, 0
for ifile in files:
fileName, diBlocks, diTriFile, diClassement = ifile, {}, {}, {}
xfile = open(ifile, 'r')
if "/" in fileName:
chemin = fileName.split('/')[1]
else:
chemin = os.getcwd()
lines = xfile.read().split('Answer:')[-1]
for iSeq in diQueriesSeq: diClassement[iSeq] = []
#=========================================================================================
if 'Optimization' in lines: lines = lines.split('Optimization')[0]; print 'Optimisation...'
elif 'Models' in lines: lines = lines.split('Models')[0]; print 'Models...'
#=========================================================================================
bestclassified = list(filter(lambda line: 'bestclassified' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
classified = list(filter(lambda line: 'classified' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
bestambiguous = list(filter(lambda line: 'bestambiguous' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
ambiguous = list(filter(lambda line: 'ambiguous' in line.strip().split('(') and ',' in line.strip().split('(')[1], lines.split()))
unclassified = list(filter(lambda line: 'unclassified' in line.strip().split('('), lines.split()))
new_family = list(filter(lambda line: 'support_new_family' in line, lines.split()))
#=========================================================================================
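        # diTriFile assigns a sort score to each query sequence: 6=best classified,
        # 5=classified, 4=new family, 3=best ambiguous, 2=ambiguous, 1=unclassified, 0=except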
for line in bestclassified:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('best classified')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 6
for line in classified:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('classified')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 5
for line in bestambiguous:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('best ambiguous')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 3
for line in ambiguous:
idSeq = (line.split(',')[0]).split('(')[1].strip('"')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append(line.split(',')[1])
diQueriesSeq[idSeq][counter][1].append('ambiguous')
diClassement[idSeq].append(line.split(',')[1])
diTriFile[idSeq] = 2
for line in unclassified:
idSeq = (line.split('("')[1]).strip('")')
if idSeq in diQueriesSeq:
diQueriesSeq[idSeq][counter][0].append('unclassified')
diQueriesSeq[idSeq][counter][1].append('')
diClassement[idSeq].append('unclassified')
diTriFile[idSeq] = 1
##################################################################################################
        #Search for concepts, associated blocks & associated sequences
members_new = list(filter(lambda line: 'membernew(' in line, lines.split()))
blocks_new = list(filter(lambda line: 'blocknew(' in line, lines.split()))
test_quality = ['best classified', 'classified', 'best ambiguous', 'ambiguous', 'unclassified']
diConcept = {}
for line in new_family:
numConcept, iBlocks, iSeqs, infosConcept = (line.split('(')[1]).split(',')[0], [], [], []
#The blocks members of the concept per file
blocks_of_concept = list(filter(lambda line: 'blocknew('+numConcept+',' in line,blocks_new))
for iline in blocks_of_concept:
numBlock = iline.split(',')[1].strip(')')
iBlocks.append(numBlock)
infosConcept.append(iBlocks)
#The sequences members of the concept per file
members_new_concept = list(filter(lambda line: ','+ numConcept +')' in line, members_new))
for iline in members_new_concept:
idSeq = iline.split('(')[1].split(',')[0].strip('"')
#If the sequence is among the queries sequences
if idSeq in diQueriesSeq:
iSeqs.append(idSeq)
diQueriesSeq[idSeq][counter][0].append('new('+ numConcept +')')
if len(diQueriesSeq[idSeq][counter][1]) == 0:
diClassement[idSeq].append('new('+ numConcept +')')
diTriFile[idSeq] = 4
infosConcept.append(iSeqs)
diConcept[numConcept] = infosConcept
diFile_concepts['File_'+str(counter+1)] = diConcept
##################################################################################################
        #Here we find the exception sequences ('except') if they exist.
for idSeq in diQueriesSeq:
if len(diQueriesSeq[idSeq][counter][0]) == 0:
diQueriesSeq[idSeq][counter][0].append('except')
diClassement[idSeq].append('except')
diTriFile[idSeq] = 0
#Sorting the dictionary in descending order
diTriFile = sorted(diTriFile.iteritems(), reverse=True, key=operator.itemgetter(1))
if "/" in fileName:
outPutFile=open(fastaFileName+'_rClassif/'+fileName.split('/')[2].replace('classif-out.lp','res')+'.csv','w')
else:
outPutFile=open(fastaFileName+'_rClassif/'+fileName.replace('classif-out.lp','res')+'.csv','w')
outPutFile.write('File: '+fastaFileName+', param: '+ param[counter]+'\n\n\n')
outPutFile.write('sequences , subfamily , quality \n\n'.upper())
#Writing results for each input classification file
for i in range(len(diTriFile)):
idSeq = diTriFile[i][0]
outPutFile.write(idSeq+ ',')
for Class in list(set(diClassement[idSeq])) : outPutFile.write(Class + ' ')
outPutFile.write(','+ str(diTriFile[i][1]))
outPutFile.write('\n')
xfileName = chemin+"/"+fastaFileName+"_"+param[counter]+"_plma.dot"
diBlocks = getBlocks(xfileName)
seqAndBlocks = getSeqAndInvolvedInBlocks(diNumSeqAndIdSeq,diBlocks)
#Writing blocks
        outPutFile.write('\n\n new families \n\n\n'.upper())
if diConcept != {}:
outPutFile.write("Concepts ,Members,Number of sequences,Number of blocks, interesting blocks\n")
for numConcept, conceptInfos in diConcept.iteritems():
if conceptInfos[1] !=[]:
outPutFile.write(numConcept + ', ,'+ str(len(conceptInfos[1]))
+','+ str(len(conceptInfos[0])) +'\n')
for seq in list(set(conceptInfos[1])):
suite_of_block = ''
for numBlock in list(set(conceptInfos[0])):
if numBlock in seqAndBlocks[seq].keys():
suite_of_block += seqAndBlocks[seq][numBlock]+' '
outPutFile.write(","+ seq +',,,'+ suite_of_block+ "\n")
outPutFile.write('\n')
outPutFile.close()
#Part Coloring PLMA by Families
colorClassify(fas, attr, fileName, diQueriesSeq, diClassement, diConcept, param, counter, pathToProg)
counter += 1
xfile.close()
"""
    Writing of the global results to the .csv file: each sequence is written with its status, i.e.
    classified, ambiguous, unclassified, etc. The subfamily field indicates the family (or families) in which it was classified.
"""
outPutFile = open(fastaFileName+'_rClassif/'+output[:len(output)-4]+'Global'+output[len(output)-4:], 'w')
outPutFile.write('File: '+fastaFileName+'\n\n\n')
outPutFile.write(' sequences , parameters , subfamily , quality \n\n'.upper())
for idSeq, infosSeq in diQueriesSeq.iteritems():
outPutFile.write(idSeq)
i = 0
for liste in infosSeq:
outPutFile.write(',' + param[i] + ',')
for Class in list(set(liste[0])) : outPutFile.write(Class + ' ')
if len(liste[1]) > 0:
outPutFile.write(',' + liste[1][0] + '\n')
else: outPutFile.write(', ' + '\n')
i +=1
outPutFile.write('\n')
#For the new family
    outPutFile.write('\n\n new families \n\n\n'.upper())
for File, Concept in diFile_concepts.iteritems():
#=======================================================================================
numFile = File[File.find('_')+1:]
xfileName = chemin+"/"+fastaFileName+'_'+param[int(numFile)-1]+'_plma.dot'
diBlocks = getBlocks(xfileName)
seqAndBlocks = getSeqAndInvolvedInBlocks(diNumSeqAndIdSeq,diBlocks)
#=======================================================================================
if Concept != {}:
numFile = File[File.find('_')+1:]
outPutFile.write(File + ": param : " + param[int(numFile) - 1]
+ ",Concepts ,Members,Number of sequences,Number of blocks, interesting blocks\n")
for numConcept, conceptInfos in Concept.iteritems() :
if conceptInfos[1] !=[]:
outPutFile.write(','+ numConcept + ', ,'+ str(len(conceptInfos[1]))
+','+ str(len(conceptInfos[0])) +'\n')
for seq in conceptInfos[1]:
suite_of_block = ''
for numBlock in list(set(conceptInfos[0])):
if numBlock in seqAndBlocks[seq].keys():
suite_of_block +=seqAndBlocks[seq][numBlock]+' '
outPutFile.write(", ,"+ seq +',,,'+ suite_of_block+ "\n")
outPutFile.write('\n')
outPutFile.close()
#########################################################################################################
def getIdSeqNumSeqAndColrs(fas,attr):
"""
    This function returns two dictionaries. In the first one, the keys are the sequence ids and the values
    are the comments for each sequence. In the other dictionary (diNumSeqAndIdSeq), the keys are the sequence
    numbers in the PLMA file and the values are the identifiers of the corresponding sequences.
"""
with open(fas, 'r') as fFile:
fastaFile=fFile.readlines()
fFile.close()
with open(attr, 'r') as aFile:
attrFile=aFile.readlines()
aFile.close()
diQueriesSeq, diNumSeqAndIdSeq, numSeq = {}, {}, 0
for fLine in fastaFile:
if fLine[0] == '>':
numSeq += 1
if '|' in fLine:
idSeq = fLine.split('|')[1].strip()
else:
idSeq = fLine[1:].strip()
diQueriesSeq[idSeq] = ''
diNumSeqAndIdSeq[str(numSeq)] = idSeq
for aLine in attrFile:
if 'range=' in aLine and 'comments=' in aLine:
borneInf = int(aLine.split('"')[1].split('-')[0])
borneSup = int(aLine.split('"')[1].split('-')[1])
if (borneInf <= numSeq and numSeq <= borneSup):
diQueriesSeq[idSeq] = aLine.split('"')[5]
return diQueriesSeq, diNumSeqAndIdSeq
#################################################################################################
def getBlocks(dotFile):
"""
This function returns a dictionary of all the PLMA blocks contained in a dot file
"""
with open(dotFile, 'r') as fd:
dotfile = fd.readlines()
subClustersDico = {}
concatDotFile = reduce(lambda line1, line2: line1.strip()+line2.strip(), dotfile)
subClusters = concatDotFile.split('subgraph cluster_')
for subCluster in subClusters[3:]:
subClusterTemp = subCluster.split('{')[1].split('"];')[:-1]
tmp = subClusterTemp[0].strip().split(';')[2]
subClusterTemp[0] = tmp
subClustersDico[subCluster.split('{')[0]] = subClusterTemp
lastSubCluster = subClusters[len(subClusters)-1:]
lastSubClusterTemp = lastSubCluster[0].split('{')[1].split('}')[0].split('"];')[:-1]
tmp = lastSubClusterTemp[0].strip().split(';')[2]
lastSubClusterTemp[0] = tmp
subClustersDico[lastSubCluster[0].split('{')[0]] = lastSubClusterTemp
return subClustersDico
#################################################################################################
def getSeqAndInvolvedInBlocks(diNumSeq, diBlocks):
diSeqBlocks = {}
for numSeq, idSeq in diNumSeq.items():
dico = {}
for numblock, valueBlock in diBlocks.items():
for line in valueBlock:
if '"('+numSeq+', ' in line:
dico[numblock] = line.split('label = "')[1]
diSeqBlocks[idSeq] = dico
return diSeqBlocks
##################################################################################################
def getNumSeqAndColrs(attribFile):
"""
    This function retrieves the sequence numbers and the color of their families.
"""
attributs = open(attribFile,'r')
dico = {}
for line in attributs.readlines():
if 'range=' in line:
ranger = line.split('"')[1]
borneInf, borneSup = int(ranger.split('-')[0]), int(ranger.split('-')[1])
color = line.split('"')[3]
if borneInf > borneSup:
                error = "In the range section, the '-' has to be found "
                error += "between two numbers, and the first number "
                error += "has to be smaller than the second one!"
                raise ValueError(error)
elif borneInf == borneSup:
numSeq = borneInf
dico[str(numSeq)] = color
else:
for numSeq in range(borneInf, borneSup+1):
dico[str(numSeq)] = color
attributs.close()
return dico
#################################################################################################
def colorClassify(fas, attr, fileName, diQueriesSeq, diClassement, diConcept, param, counter, pathToProg):
    fastaFileName = fas.replace('.fasta', '')
plma_seq1, plma_seq2 = getIdSeqNumSeqAndColrs(fas, attr)
known_family = [family for family in list(set(plma_seq1.values())) if family != 'black']
plma_seq3 = getNumSeqAndColrs(attr)
colorNewFamily = "burlywood"
colorAmbiguous = "olive"
colorUnclassified = "black"
diColor_of_family ={}
for family in known_family:
colors = []
for numSeq in plma_seq3:
if plma_seq1[plma_seq2[numSeq]] == family.upper():
colors.append(plma_seq3[numSeq])
diColor_of_family[family] = list(set(colors))
colored_seq_by_family = {}
for numSeq in plma_seq3:
if plma_seq1[plma_seq2[numSeq]] != colorUnclassified:
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(plma_seq3[numSeq])
plma_seq2_temp = dict([[v,k] for v,k in plma_seq2.items()])
#Inverting a dictionary
invert_dict = dict([[v,k] for k,v in plma_seq2.items()])
plma_seq2 = invert_dict
for idSeq in plma_seq1:
if idSeq in diClassement:
numSeq = plma_seq2[idSeq]
colored_seq_by_family[numSeq] = []
for family, color_of_family in diColor_of_family.items():
if family.lower() in diClassement[idSeq]:
colored_seq_by_family[numSeq].append(color_of_family[0])
colored_seq_by_family_tmp = dict([[cle,val] for cle,val in colored_seq_by_family.items()])
#Give the color "colorNewFamily" for news families
for idSeq in diClassement:
for elem in diClassement[idSeq]:
if "new" in elem:
numSeq = plma_seq2[idSeq]
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(colorNewFamily)
#Give the color "colorAmbiguous" for ambiguous
for numSeq, list_color in colored_seq_by_family.items():
if len(list_color) > 1:
colored_seq_by_family[numSeq] = []
colored_seq_by_family[numSeq].append(colorAmbiguous)
#pools of family
diFamily_by_colors = {}
list_tmp = [ elem[0] for elem in colored_seq_by_family.values() if elem != [] ]
if colorNewFamily in set(list_tmp):
diColor_of_family["new"] = [colorNewFamily]
#Reverse of the dictionary of families and their colors
invert_dict = dict([[v[0].lower(),k] for k,v in diColor_of_family.items()])
diColor_family = invert_dict
#A dictionary is created that contains the colors of the families and all the
#sequences belonging to families
for color_of_family in diColor_of_family.values():
NumSeqs = []
for numSeq, colorSeq in colored_seq_by_family.items():
if colorSeq != [] and colorSeq[0] == color_of_family[0]:
NumSeqs.append(numSeq)
diFamily_by_colors[color_of_family[0]] = NumSeqs
#Other unclassified sequences
unclassified_seqs, list_tmp2 = [], []
list_tmp1 = [ elem for elem in diFamily_by_colors.values()]
for liste in list_tmp1:
for elem in liste:
list_tmp2.append(elem)
list_tmp2 = list(set(list_tmp2))
for numSeq in plma_seq3:
if numSeq not in list_tmp2:
unclassified_seqs.append(numSeq)
#Looking for ambiguous sequences
ambiguous, reste_seqs, diClass = {}, {}, {}
for numSeq, tColor in colored_seq_by_family.items():
if numSeq in unclassified_seqs and tColor != []:
color = tColor[0]
ambiguous[numSeq] = color
elif numSeq in unclassified_seqs:
reste_seqs[numSeq] = colorUnclassified
for numSeq in unclassified_seqs:
color = colored_seq_by_family_tmp[numSeq]
if color != []: color = colored_seq_by_family_tmp[numSeq][0].lower()
else: color = ""
if color != "":
if numSeq in colored_seq_by_family_tmp:
classes = diColor_family[color]
for color in colored_seq_by_family_tmp[numSeq][1:]:
classes += ", " + diColor_family[color.lower()]
diClass[numSeq] = classes
#==================================================================================================================
#==================================================================================================================
dotInFile = "./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"_plma.dot"
dotOutFile = "./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.dot"
#==================================================================================================================
#==================================================================================================================
dic_blocks = {}
lines = open(fileName, "r").readlines()
#Looking for the characteristic blocks for each family
for Class in diColor_of_family:
blocks_support = list(filter(lambda line: 'characteristic_block' in line and Class.lower() in line, lines))
blocks = []
for line in blocks_support:
block = line.split(",")[2].split(")")[0]
blocks.append(block)
dic_blocks[Class] = list(set(blocks))
diChar_blocks = {}
for Class, blocks in dic_blocks.items():
for block in blocks:
diChar_blocks[block] = Class
####################################################################################################################
#Creating of a dictionary that contains all the clusters of the plmadot
dotFile = open(dotInFile, "r").readlines()
subClustersDico, colorsSeq = {}, {}
concatDotFile = reduce(lambda line1, line2: line1.strip()+line2.strip(), dotFile)
subClusters = concatDotFile.split('subgraph cluster_')
for subCluster in subClusters[1:]:
subClusterTemp = subCluster.split('{')[1].split('"];')[:-1]
tmp = subClusterTemp[0].strip().split(';')[2]
subClusterTemp[0] = tmp
subClustersDico[subCluster.split('{')[0]] = subClusterTemp
lastSubCluster = subClusters[len(subClusters)-1:]
lastSubClusterTemp = lastSubCluster[0].split('{')[1].split('}')[0].split('"];')[:-1]
tmp = lastSubClusterTemp[0].strip().split(';')[2]
lastSubClusterTemp[0] = tmp
subClustersDico[lastSubCluster[0].split('{')[0]] = lastSubClusterTemp
infoSeqs = lastSubCluster[0].split('{')[1].split('}')[1].split('];')[:-1]
#===================================================================================================================
#===================================================================================================================
#The Input plmadot file
inputFile = open(dotInFile, "r")
#The output plmadot file
outputFile = open(dotOutFile, "w")
lines = inputFile.readlines()
for index, elem in enumerate(lines):
if "subgraph" in elem:
if elem.strip() == "subgraph cluster_1":
index1 = index
if elem.strip() == "subgraph cluster_2":
index2 = index
if elem.strip() == "subgraph cluster_3":
index3 = index
head = lines[:index1]
cluster1 = lines[index1:index2]
cluster2 = lines[index2:index3]
#The sequences numbers and their labels
diCluster1_tmp = {}
for line in cluster1:
if 'label' in line:
numSeq = line.split(",")[0].split('(')[1]
label = line.split(')"')[1]
diCluster1_tmp[numSeq] = label
diCluster2_tmp = {}
for line in cluster2:
if 'label' in line:
numSeq = line.split(",")[0].split('(')[1]
diCluster2_tmp[numSeq] = line
#===================================================================================================================
#===================================================================================================================
#The head of the dot is written
for line in head:
outputFile.write(line)
#===================================================================================================================
#===================================================================================================================
#Part for cluster 1
for line in cluster1:
if "cluster" in line:
outputFile.write(line)
outputFile.write("{\n")
elif "node" in line:
colorSeq = line.split('color =')[1].strip().split(',')[0]
line = line.replace(colorSeq.strip(), "black")
outputFile.write(line)
elif "style" in line:
style_of_cluster = line.split("style =")[1].split(";")[0]
line = line.replace(style_of_cluster.strip(), "filled")
outputFile.write(line)
#Writing for the sub-families (cluster 1)
i = 1
allNewBlocks = []
for color, NumSeqs in diFamily_by_colors.items():
if color != colorNewFamily:
outputFile.write("subgraph cluster_" + str(i) +"p1 \n")
outputFile.write("{\n")
outputFile.write("label = \"Family: "+ diColor_family[color.lower()] +"\nNumber: "+ str(i) +"\";\n")
outputFile.write("node [shape = record, color = black, fontcolor = black];\n")
for numSeq in NumSeqs:
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = diCluster1_tmp[numSeq].replace("\"];", " [**]\"];")
outputFile.write('"('+numSeq+', 1, 0)"' + line)
else: outputFile.write('"('+numSeq+', 1, 0)"' + diCluster1_tmp[numSeq])
outputFile.write('}\n')
i += 1
#Case for pools of new families (if there are several)
else:
i = 1
for concept, infosConcept in diConcept.iteritems():
outputFile.write("subgraph cluster_new" + str(i) +" \n")
outputFile.write("{\n")
outputFile.write("label = \"Family: "+ diColor_family[color.lower()] + "\nNumber: "+ str(i)
+"\";\n")
outputFile.write("node [shape = record, color = black, fontcolor = black];\n")
for idSeq in infosConcept[1]:
numSeq = plma_seq2[idSeq]
if idSeq in diQueriesSeq:
line = diCluster1_tmp[numSeq].replace("\"];", " [**]\"];")
outputFile.write('"('+numSeq+', 1, 0)"' + line)
else: outputFile.write('"('+numSeq+', 1, 0)"' + diCluster1_tmp[numSeq])
outputFile.write('}\n')
allNewBlocks += list(set(infosConcept[0]))
i += 1
#We add the characteristic blocks of the new families
for bloc in allNewBlocks:
diChar_blocks[bloc] = "new"
#The rest of the sequences (cluster 1)
for line in cluster1:
if 'label' in line: numSeq = line.split(",")[0].split('(')[1]
if numSeq in unclassified_seqs:
color = colored_seq_by_family_tmp[numSeq]
if color != []:
color = colored_seq_by_family_tmp[numSeq][0].lower()
else: color = ""
if color != "":
if numSeq in colored_seq_by_family_tmp:
classes = diColor_family[color]
for color in colored_seq_by_family_tmp[numSeq][1:]:
classes += ", " + diColor_family[color.lower()]
line = line.replace(numSeq+ ':', "[" + classes.upper() +"] "+ numSeq+":")
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = line.replace("\"];", " [**]\"];")
outputFile.write(line)
else:
if plma_seq2_temp[numSeq] in diQueriesSeq:
line = line.replace("\"];", " [**]\"];")
outputFile.write(line)
outputFile.write("}\n")
#=================================================================================================================
#=================================================================================================================
#Part for cluster2
for line in cluster2:
if "cluster" in line:
outputFile.write(line)
outputFile.write("{\n")
elif "node" in line:
colorSeq = line.split('color =')[1].strip().split(',')[0]
line = line.replace(colorSeq.strip(), "black")
outputFile.write(line)
elif "style" in line:
style_of_cluster = line.split("style =")[1].split(";")[0]
line = line.replace(style_of_cluster.strip(), "filled")
outputFile.write(line)
outputFile.write("fontcolor = gray;\n")
#Writing for the sub-families (cluster 2)
i = 1
for color, NumSeqs in diFamily_by_colors.items():
if color != colorNewFamily:
outputFile.write("subgraph cluster_" + str(i) +"p2 \n")
outputFile.write("{\n")
outputFile.write("node [shape = record,style = filled, color = "+color.lower()
+", fontcolor = black];\n")
outputFile.write("color = "+color.lower()+";\n")
for numSeq in NumSeqs:
outputFile.write(diCluster2_tmp[numSeq])
outputFile.write('}\n')
i += 1
else:
i = 1
for concept, infosConcept in diConcept.iteritems():
outputFile.write("subgraph cluster_new" + str(i) +"\n")
outputFile.write("{\n")
outputFile.write("node [shape = record,style = filled, color = "+color.lower()
+", fontcolor = black];\n")
outputFile.write("color = "+color.lower()+";\n")
for idSeq in infosConcept[1]:
numSeq = plma_seq2[idSeq]
outputFile.write(diCluster2_tmp[numSeq])
outputFile.write('}\n')
i += 1
#The rest of the sequences (cluster 2)
for line in cluster2:
if 'label' in line: numSeq = line.split(",")[0].split('(')[1]
if numSeq in unclassified_seqs: outputFile.write(line)
outputFile.write("}\n")
#=================================================================================================================
#=================================================================================================================
#Part for the rest of the clusters (PLMA blocks)
for numCluster, cluster in subClustersDico.items():
if numCluster in diChar_blocks:
outputFile.write("subgraph cluster_"+numCluster+"\n{\n")
outputFile.write("node [shape = record, style = filled, color = yellow, fontcolor = black];\n")
outputFile.write("color = "+diColor_of_family[diChar_blocks[numCluster]][0].lower()+";\n")
for line in cluster:
numSeq = line.split(",")[0].split("(")[1]
outputFile.write(line + "\"];\n")
outputFile.write("}\n")
elif numCluster not in ["1","2"]:
outputFile.write("subgraph cluster_"+numCluster+"\n{\n")
outputFile.write("node [shape = record, style = filled, color = yellow, fontcolor = black];\n")
outputFile.write("color = black;\n")
for line in cluster:
outputFile.write(line+"\"];\n")
outputFile.write("}\n")
#Part for arrows
for line in infoSeqs:
if '->' in line:
numSeqTemp, numSeq = line.split('label = ')[1], ''
if ':' in line:
numSeq = numSeqTemp.split(':')[0].strip('"')
else:
numSeq = numSeqTemp.split(',')[0]
colorSeq = line.split(', color =')[1].strip().split(',')[0]
if numSeq in ambiguous:
line = line.replace("fontsize = 8","fontsize = 15")
line = line.replace("label = " + numSeq+ ',', "label = "+ numSeq +"("+ diClass[numSeq].upper()+")\"")
line = line.replace(colorSeq.strip(), ambiguous[numSeq].lower())
elif numSeq in reste_seqs:
color = plma_seq3[numSeq].lower()
if color != colorUnclassified:
classe = diColor_family[color]
line = line.replace("label = "+ numSeq+ ',', "label = \""+ numSeq+"("+ classe.upper() +")\"")
line = line.replace("fontsize = 8","fontsize = 15")
line = line.replace(colorSeq.strip(), "black")
elif numSeq in colored_seq_by_family:
if numSeq in colored_seq_by_family_tmp and colored_seq_by_family_tmp[numSeq] != []:
color = plma_seq3[numSeq].lower()
line = line.replace("fontsize = 8","fontsize = 15")
if color != colorUnclassified:
classe = diColor_family[color]
line = line.replace("label = "+numSeq+ ',',"label = \""+ numSeq+" ("+ classe.upper() +")\"")
else:
line = line.replace("label = "+numSeq+ ',',"label = \""+ numSeq+" (?)\"")
elif colored_seq_by_family_tmp[numSeq] == []:
color = colored_seq_by_family[numSeq][0]
line = line.replace("fontsize = 8","fontsize = 15")
classe = diColor_family[color]
line = line.replace("label = "+numSeq+ ',',"label = \"" + numSeq+" (?)\"")
line = line.replace(colorSeq.strip(), colored_seq_by_family[numSeq][0].lower())
outputFile.write(line+"];\n")
outputFile.write("}\n")
inputFile.close()
outputFile.close()
#================================================================================================================
#================================================================================================================
#Converting the product dot file to pdf format
print commands.getoutput("python ./"+ pathToProg +"/plmadot2pdf.py -f ./"+fastaFileName+"_paloma/"+fastaFileName+"_"
+ param[counter] +"-col.dot")
print commands.getoutput("rm "+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.ps")
print commands.getoutput("mv ./"+fastaFileName+"_paloma/"+fastaFileName+"_"+param[counter]+"-col.pdf ./"
+fastaFileName+"_rClassif")
#main
if __name__ == '__main__':
main()
| 57.948718 | 156 | 0.437215 | 3.359375 |
7d2feac86e578379277416be2a214f8e414779f1 | 6,468 | ps1 | PowerShell | CommonScripts/SqlSsisFunctions.ps1 | DrJohnT/devops-your-dwh | 9c06912b7fd82f2a05f3a2416ea73c509deb0ad9 | [
"MIT"
] | 2 | 2019-08-19T19:27:34.000Z | 2019-12-06T15:44:19.000Z | CommonScripts/SqlSsisFunctions.ps1 | DrJohnT/devops-your-dwh | 9c06912b7fd82f2a05f3a2416ea73c509deb0ad9 | [
"MIT"
] | null | null | null | CommonScripts/SqlSsisFunctions.ps1 | DrJohnT/devops-your-dwh | 9c06912b7fd82f2a05f3a2416ea73c509deb0ad9 | [
"MIT"
] | 4 | 2018-12-31T06:51:16.000Z | 2019-08-19T19:29:18.000Z | #####################################################################################################
# Script written by © Dr. John Tunnicliffe, 2015-2018 https://github.com/DrJohnT/devops-your-dwh
# This PowerShell script is released under the MIT license http://www.opensource.org/licenses/MIT
#
# Functions to build and deploy SSIS projects
#####################################################################################################
function Deploy-SsisSolution ([string] $SolutionName = $(throw "Solution name required.") ) {
<#
.SYNOPSIS
Deploys the SSIS packages to the target environment using project deployment mode
#>
try
{
$SolutionFolderPath = Get-SolutionPath($SolutionName);
$SolutionFolderPath = split-path $SolutionFolderPath
$solutionNode = $deployConfig.DeploymentConfig.Solutions.Solution | where Name -EQ $SolutionName;
foreach ($project in $solutionNode.SSIS_Project) {
$projectPath = Join-Path $SolutionFolderPath $project.Project;
Deploy-SsisProject -ProjectPath $projectPath -Project $project.Project -Folder $project.Folder;
}
} catch {
logError -Message "Deploy-SsisSolution Failed to deploy solution $SolutionName Error: $_";
}
}
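# Example call (solution name is illustrative; assumes $deployConfig and the helper functions are already loaded):
#   Deploy-SsisSolution -SolutionName "DataWarehouse"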
function Deploy-SsisProject ([string] $projectPath = $(throw "Project path required!"), [string] $project = $(throw "project name required!"), [string] $folder = $(throw "folder name required!") ) {
<#
.SYNOPSIS
Deploys the SSIS project to the target environment using project deployment mode
    Must use isdeploymentwizard.exe to deploy SSIS projects
For isdeploymentwizard.exe command-line options see https://docs.microsoft.com/en-us/sql/integration-services/packages/deploy-integration-services-ssis-projects-and-packages
SSISDB Folder setup with thanks to https://www.hansmichiels.com/2016/11/04/how-to-automate-your-ssis-package-deployment-and-configuration-ssis-series/
#>
try {
$ServerName = Get-SsisServerFromConfig;
$SQLCmdVaribles = Get-SqlCmdVariablesFromConfig -UseServerRoles $false;
$sqlFilePath = Join-Path $SsisDeploySQLScriptPath "CreateSsisDbFolder.sql";
        assert(Test-Path($sqlFilePath)) "SQL script CreateSsisDbFolder.sql does not exist!"
Run-SqlScriptAgainstServer -ServerName $ServerName -DatabaseName "SSISDB" -SqlFilePath $sqlFilePath -SQLCmdVaribles $SQLCmdVaribles;
$ispacPath = Join-Path $projectPath "bin\$configuration\$project.ispac";
assert(Test-Path($ispacPath)) "SSIS ISPAC does not exist in $ispacPath";
Write-Host "Deploying $project to $folder folder from ispac path $ispacPath" -ForegroundColor Yellow;
# As this is a windows EXE we need to wait for it to end before applying the scripts, so we pipe to Out-Null
exec { &"$SsisDeploymentWizard" /Silent /SourcePath:"$ispacPath" /DestinationServer:"$ServerName" /DestinationPath:"/SSISDB/$folder/$project" | Out-Null }
} catch {
logError -Message "Deploy-SsisProject Failed to deploy SSIS $project Error: $_";
}
}
function Deploy-SsisEnvironments ([string] $SolutionName = $(throw "Solution name required.") ) {
<#
.SYNOPSIS
Create an environment in SSISDB for the solution
#>
try {
$SolutionFolderPath = Get-SolutionPath($SolutionName);
$SolutionFolderPath = split-path $SolutionFolderPath
$solutionNode = $deployConfig.DeploymentConfig.Solutions.Solution | where Name -EQ $SolutionName;
foreach ($project in $solutionNode.SSIS_Project) {
Deploy-SsisEnvironment $project.Project $project.Folder;
}
} catch {
logError -Message "Deploy-SsisEnvironments failed. Error: $_";
}
}
function Deploy-SsisEnvironment ([string] $project = $(throw "project name required!"), [string] $folder = $(throw "folder name required!") ) {
<#
.SYNOPSIS
Create an environment in SSISDB for the project
SSISDB Environment setup with thanks to https://www.hansmichiels.com/2016/11/04/how-to-automate-your-ssis-package-deployment-and-configuration-ssis-series/
#>
try {
$ServerName = Get-SsisServerFromConfig;
$SQLCmdVaribles = Get-SqlCmdVariablesFromConfig -UseServerRoles $false;
$sqlFilePath = Join-Path $SsisDeploySQLScriptPath "CreateSsisDbEnvironment.sql";
        assert(Test-Path($sqlFilePath)) "SQL script CreateSsisDbEnvironment.sql does not exist!"
Run-SqlScriptAgainstServer -ServerName $ServerName -DatabaseName "SSISDB" -SqlFilePath $sqlFilePath -SQLCmdVaribles $SQLCmdVaribles;
$sqlFilePath = Join-Path $SsisDeploySQLScriptPath "LinkSsisDbEnvToProject.sql";
        assert(Test-Path($sqlFilePath)) "SQL script LinkSsisDbEnvToProject.sql does not exist!"
Run-SqlScriptAgainstServer -ServerName $ServerName -DatabaseName "SSISDB" -SqlFilePath $sqlFilePath -SQLCmdVaribles $SQLCmdVaribles;
} catch {
logError -Message "Deploy-SsisEnvironment failed. Error: $_";
}
}
function Drop-SsisFolder {
<#
.SYNOPSIS
Drops the SSIS folder
#>
try {
$ServerName = Get-SsisServerFromConfig;
$SQLCmdVaribles = Get-SqlCmdVariablesFromConfig -UseServerRoles $false;
$sqlFilePath = Join-Path $SsisDeploySQLScriptPath "Drop_SsisDb_Folder.sql";
assert(Test-Path($sqlFilePath)) "SQL script $sqlFilePath does not exist!"
Write-Host "Dropping SSIS folder";
Run-SqlScriptAgainstServer -ServerName $ServerName -DatabaseName "SSISDB" -SqlFilePath $sqlFilePath -SQLCmdVaribles $SQLCmdVaribles;
} catch {
        logError -Message "Drop-SsisFolder failed to drop the SSIS folder. Error: $_";
}
}
function Invoke-SsisPackage ([string] $SsisPackageName = $(throw "SSIS Package name required!")) {
<#
.SYNOPSIS
Executes an SSIS package in SSISDB
#>
try {
$ServerName = Get-SsisServerFromConfig;
$SQLCmdVaribles = Get-SqlCmdVariablesFromConfig -UseServerRoles $false;
$SQLCmdVaribles += "SsisPackageName=$SsisPackageName";
$sqlFilePath = Join-Path $SsisDeploySQLScriptPath "ExecuteSsisPackage.sql";
        assert(Test-Path($sqlFilePath)) "SQL script ExecuteSsisPackage.sql does not exist!"
Write-Host "Running SSIS package $SsisPackageName";
Run-SqlScriptAgainstServer -ServerName $ServerName -DatabaseName "SSISDB" -SqlFilePath $sqlFilePath -SQLCmdVaribles $SQLCmdVaribles;
} catch {
logError -Message "Invoke-SsisPackage failed. Error: $_";
}
}
| 45.230769 | 203 | 0.69697 | 3.078125 |
c9b51d740ca506f46d6f6272eb571f10a2679f47 | 6,909 | rs | Rust | mqttbytes/src/v5/pubrel.rs | IniterWorker/rumqtt | eb1713241584bf602bf3efe02ecceff0ed1c15d7 | [
"Apache-2.0"
] | 342 | 2020-06-25T01:00:36.000Z | 2022-03-28T21:46:35.000Z | mqttbytes/src/v5/pubrel.rs | IniterWorker/rumqtt | eb1713241584bf602bf3efe02ecceff0ed1c15d7 | [
"Apache-2.0"
] | 209 | 2020-06-30T16:49:01.000Z | 2022-03-31T08:11:37.000Z | mqttbytes/src/v5/pubrel.rs | thin-edge/rumqtt | 73e8ea2d91d3136c02c8eec55c9aa26076fa6ab7 | [
"Apache-2.0"
] | 102 | 2020-07-01T06:33:21.000Z | 2022-03-21T07:28:15.000Z | use super::*;
use bytes::{Buf, BufMut, Bytes, BytesMut};
/// Reason code contained in a PubRel packet
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u8)]
pub enum PubRelReason {
Success = 0,
PacketIdentifierNotFound = 146,
}
/// Release of a QoS 2 publish, sent in response to a PubRec
#[derive(Debug, Clone, PartialEq)]
pub struct PubRel {
pub pkid: u16,
pub reason: PubRelReason,
pub properties: Option<PubRelProperties>,
}
impl PubRel {
pub fn new(pkid: u16) -> PubRel {
PubRel {
pkid,
reason: PubRelReason::Success,
properties: None,
}
}
fn len(&self) -> usize {
let mut len = 2 + 1; // pkid + reason
// If there are no properties during success, sending reason code is optional
if self.reason == PubRelReason::Success && self.properties.is_none() {
return 2;
}
if let Some(properties) = &self.properties {
let properties_len = properties.len();
let properties_len_len = len_len(properties_len);
len += properties_len_len + properties_len;
}
len
}
pub fn read(fixed_header: FixedHeader, mut bytes: Bytes) -> Result<Self, Error> {
let variable_header_index = fixed_header.fixed_header_len;
bytes.advance(variable_header_index);
let pkid = read_u16(&mut bytes)?;
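        // An MQTT v5 PUBREL may stop after the packet id (remaining length 2); the
        // reason code then defaults to Success and no properties are present.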
if fixed_header.remaining_len == 2 {
return Ok(PubRel {
pkid,
reason: PubRelReason::Success,
properties: None,
});
}
let ack_reason = read_u8(&mut bytes)?;
if fixed_header.remaining_len < 4 {
return Ok(PubRel {
pkid,
reason: reason(ack_reason)?,
properties: None,
});
}
let puback = PubRel {
pkid,
reason: reason(ack_reason)?,
properties: PubRelProperties::extract(&mut bytes)?,
};
Ok(puback)
}
pub fn write(&self, buffer: &mut BytesMut) -> Result<usize, Error> {
let len = self.len();
buffer.put_u8(0x62);
let count = write_remaining_length(buffer, len)?;
buffer.put_u16(self.pkid);
// If there are no properties during success, sending reason code is optional
if self.reason == PubRelReason::Success && self.properties.is_none() {
return Ok(4);
}
buffer.put_u8(self.reason as u8);
if let Some(properties) = &self.properties {
properties.write(buffer)?;
}
Ok(1 + count + len)
}
}
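/// Optional properties carried by an MQTT v5 PUBREL packet.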
#[derive(Debug, Clone, PartialEq)]
pub struct PubRelProperties {
pub reason_string: Option<String>,
pub user_properties: Vec<(String, String)>,
}
impl PubRelProperties {
pub fn len(&self) -> usize {
let mut len = 0;
if let Some(reason) = &self.reason_string {
len += 1 + 2 + reason.len();
}
for (key, value) in self.user_properties.iter() {
len += 1 + 2 + key.len() + 2 + value.len();
}
len
}
pub fn extract(mut bytes: &mut Bytes) -> Result<Option<PubRelProperties>, Error> {
let mut reason_string = None;
let mut user_properties = Vec::new();
let (properties_len_len, properties_len) = length(bytes.iter())?;
bytes.advance(properties_len_len);
if properties_len == 0 {
return Ok(None);
}
let mut cursor = 0;
// read until cursor reaches property length. properties_len = 0 will skip this loop
while cursor < properties_len {
let prop = read_u8(&mut bytes)?;
cursor += 1;
match property(prop)? {
PropertyType::ReasonString => {
let reason = read_mqtt_string(&mut bytes)?;
cursor += 2 + reason.len();
reason_string = Some(reason);
}
PropertyType::UserProperty => {
let key = read_mqtt_string(&mut bytes)?;
let value = read_mqtt_string(&mut bytes)?;
cursor += 2 + key.len() + 2 + value.len();
user_properties.push((key, value));
}
_ => return Err(Error::InvalidPropertyType(prop)),
}
}
Ok(Some(PubRelProperties {
reason_string,
user_properties,
}))
}
fn write(&self, buffer: &mut BytesMut) -> Result<(), Error> {
let len = self.len();
write_remaining_length(buffer, len)?;
if let Some(reason) = &self.reason_string {
buffer.put_u8(PropertyType::ReasonString as u8);
write_mqtt_string(buffer, reason);
}
for (key, value) in self.user_properties.iter() {
buffer.put_u8(PropertyType::UserProperty as u8);
write_mqtt_string(buffer, key);
write_mqtt_string(buffer, value);
}
Ok(())
}
}
/// Connection return code type
fn reason(num: u8) -> Result<PubRelReason, Error> {
let code = match num {
0 => PubRelReason::Success,
146 => PubRelReason::PacketIdentifierNotFound,
num => return Err(Error::InvalidConnectReturnCode(num)),
};
Ok(code)
}
#[cfg(test)]
mod test {
use super::*;
use alloc::vec;
use bytes::BytesMut;
use pretty_assertions::assert_eq;
fn sample() -> PubRel {
let properties = PubRelProperties {
reason_string: Some("test".to_owned()),
user_properties: vec![("test".to_owned(), "test".to_owned())],
};
PubRel {
pkid: 42,
reason: PubRelReason::PacketIdentifierNotFound,
properties: Some(properties),
}
}
fn sample_bytes() -> Vec<u8> {
vec![
0x62, // payload type
0x18, // remaining length
0x00, 0x2a, // packet id
0x92, // reason
0x14, // properties len
0x1f, 0x00, 0x04, 0x74, 0x65, 0x73, 0x74, // reason_string
0x26, 0x00, 0x04, 0x74, 0x65, 0x73, 0x74, 0x00, 0x04, 0x74, 0x65, 0x73,
0x74, // user properties
]
}
#[test]
fn pubrel_parsing_works() {
let mut stream = bytes::BytesMut::new();
let packetstream = &sample_bytes();
stream.extend_from_slice(&packetstream[..]);
let fixed_header = parse_fixed_header(stream.iter()).unwrap();
let pubrel_bytes = stream.split_to(fixed_header.frame_length()).freeze();
let pubrel = PubRel::read(fixed_header, pubrel_bytes).unwrap();
assert_eq!(pubrel, sample());
}
#[test]
fn pubrel_encoding_works() {
let pubrel = sample();
let mut buf = BytesMut::new();
pubrel.write(&mut buf).unwrap();
assert_eq!(&buf[..], sample_bytes());
}
}
| 29.029412 | 92 | 0.549139 | 3.078125 |
66339ce83f38e414fa9f11177e154a8677ed132f | 8,041 | py | Python | foods3/direct_corn_supply_chain.py | taegon/spatial-scale-lca-us-corn | c76e8477a222e98ff1c28332006447d45b12960f | [
"CNRI-Python"
] | null | null | null | foods3/direct_corn_supply_chain.py | taegon/spatial-scale-lca-us-corn | c76e8477a222e98ff1c28332006447d45b12960f | [
"CNRI-Python"
] | null | null | null | foods3/direct_corn_supply_chain.py | taegon/spatial-scale-lca-us-corn | c76e8477a222e98ff1c28332006447d45b12960f | [
"CNRI-Python"
] | null | null | null | import csv
import os
import numpy as np
from foods3 import util
from gurobipy import *
county_size = 3109
def optimize_gurobi(supply_code, supply_corn, demand_code, demand_corn, dist_mat):
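    # Transportation LP: choose corn flows x[i, j] >= 0 minimising total transport cost
    # sum_ij dist_mat[i][j] * x[i, j], subject to
    #   sum_j x[i, j] <= supply_corn[i]  (a county cannot ship more corn than it produces)
    #   sum_i x[i, j] == demand_corn[j]  (every county's demand must be met exactly)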
env = Env("gurobi_spatial_lca.log")
model = Model("lp_for_spatiallca")
var = []
    # decision variables: one corn flow per (supply county, demand county) pair;
    # every flow value must be greater than or equal to 0
no_of_supply = len(supply_code)
no_of_demand = len(demand_code)
var = []
sol = np.zeros(no_of_supply * no_of_demand)
for i, vs in enumerate(supply_code):
for j, vd in enumerate(demand_code):
var.append(model.addVar(0.0, min(supply_corn[i], demand_corn[j]), 0.0, GRB.CONTINUOUS, "S_s[{:d},{:d}]".format(i, j)))
model.update()
print("corn flow constraint = all number positive")
# Set objective: minimize cost
expr = LinExpr()
for i, vs in enumerate(supply_code):
for j, vd in enumerate(demand_code):
expr.addTerms(dist_mat[i][j], var[i * no_of_demand + j])
model.setObjective(expr, GRB.MINIMIZE)
    # sum of supply (all columns of a given row) must not exceed the county's corn production
# Add constraint
for i, vs in enumerate(supply_code):
expr = LinExpr()
for j, vd in enumerate(demand_code):
expr.addTerms(1.0, var[i * no_of_demand + j])
model.addConstr(expr, GRB.LESS_EQUAL, supply_corn[i], "c{:d}".format(i + 1))
print("sum of corn flow from specific county smaller than total product of that county")
    # sum of supply (all rows of a given column) equals the demand of that county
for j, vd in enumerate(demand_code):
expr = LinExpr()
for i, vs in enumerate(supply_code):
expr.addTerms(1.0, var[i * no_of_demand + j])
model.addConstr(expr, GRB.EQUAL, demand_corn[j], "d{:d}".format(j + 1))
print("all constraints are set.")
# Optimize model
model.optimize()
for i, vs in enumerate(supply_code):
for j, vd in enumerate(demand_code):
sol[i * no_of_demand + j] = var[i * no_of_demand + j].x
return sol
def read_csv_int(filename, col_idx):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
v = row[col_idx]
values.append(int(v))
return values
def read_csv_float(filename, col_idx):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
v = row[col_idx]
v = v.replace(",", "")
# print(v)
if v is None or v == "" or v.strip() == "-":
values.append(0)
else:
values.append(float(v))
return values
def read_csv_float_range(filename, col_idx, col_idx_end):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
sum_value = 0.
for col in range(col_idx, col_idx_end):
v = row[col]
v = v.replace(",", "")
if v is None or v == "" or v.strip() == "-":
v = 0
else:
v = float(v)
sum_value += v
values.append(sum_value)
return values
def read_dist_matrix(filename):
matrix = np.zeros((county_size, county_size))
with open(filename, "r") as f:
csv_reader = csv.reader(f)
for i, row in enumerate(csv_reader):
for c in range(county_size):
matrix[i][c] = float(row[c])
return matrix
def expand_list(corn_demand_file, input_file, output_file):
demand = {}
with open(corn_demand_file, "r") as f:
reader = csv.reader(f)
next(reader)
for row in reader:
demand[row[0]] = [float(row[8]), float(row[9]), float(row[10]),
float(row[11]), float(row[12]), float(row[13]),
float(row[7])]
sub_sector = ["layer", "pullet", "turkey", "milkcow", "wetmill", "export", "others"]
data_list = []
with open(input_file, "r") as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
data_list.append(row)
expanded_list = []
for row in data_list:
if row[0] == "others":
weighted_col_idx = [3,]
target_county = row[1]
total_demand = sum(demand[target_county])
for ss in range(len(sub_sector)):
if total_demand == 0:
weight = 1
else:
weight = demand[target_county][ss] / total_demand
split_row = [row[x] if x not in weighted_col_idx else float(row[x])*weight for x in range(len(row))]
split_row[0] = sub_sector[ss]
if split_row[3] != 0:
expanded_list.append(split_row)
else:
expanded_list.append(row)
with open(output_file, "w") as f:
f.write(",".join(header))
f.write("\n")
for row in expanded_list:
f.write(",".join([str(x) for x in row]))
f.write("\n")
def main(output_filename, demand_filename):
county_code = read_csv_int("../input/county_FIPS.csv", 0)
supply_code = county_code[:]
supply_amount = read_csv_float(demand_filename, 1)
demand_code = []
for i in range(5):
demand_code.extend(county_code)
demand_amount = []
# cattle(0), poultry(1), ethanol(2), hog(3), others(4)
demand_amount.extend(read_csv_float(demand_filename, 3))
demand_amount.extend(read_csv_float(demand_filename, 5))
demand_amount.extend(read_csv_float(demand_filename, 6))
demand_amount.extend(read_csv_float(demand_filename, 4))
demand_amount.extend(read_csv_float_range(demand_filename, 7, 14))
print(sum(supply_amount))
print(sum(demand_amount))
all_imp_filename = "../input/allDist_imp.csv"
dist_imp_all_matrix = read_dist_matrix(all_imp_filename)
dist_mat = np.zeros((len(supply_code), len(demand_code)))
print("making distance matrix")
dist_mat[0:3109, 0 + 0 * 3109:3109 * 1] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 1 * 3109:3109 * 2] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 2 * 3109:3109 * 3] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 3 * 3109:3109 * 4] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 4 * 3109:3109 * 5] = dist_imp_all_matrix
print("run simulation model")
sol = optimize_gurobi(supply_code, supply_amount, demand_code, demand_amount, dist_mat)
no_of_demand = len(demand_code)
sector_name = ("cattle", "broiler", "ethanol", "hog", "others")
with open(output_filename, "w") as f:
headline = [
"sector", "demand_county", "corn_county", "corn_bu",
]
f.write(",".join(headline))
f.write("\n")
for i, v in enumerate(sol):
if v > 0:
sector = (i % no_of_demand) // county_size
src_county_idx = i // no_of_demand
des_county_idx = i % no_of_demand % county_size
supply_corn_bu = v
src_county_fips = county_code[src_county_idx]
des_county_fips = county_code[des_county_idx]
f.write("{},{},{},{}\n".format(sector_name[sector], des_county_fips, src_county_fips, supply_corn_bu))
if __name__ == '__main__':
ROOT_DIR = util.get_project_root()
output_dir = ROOT_DIR / "output"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
corn_flow_filename = "../output/corn_flow_county_scale_major_category.csv"
corn_demand_filename = "../input/corn_demand_2012.csv"
main(corn_flow_filename, corn_demand_filename)
expand_list(corn_demand_filename,
corn_flow_filename,
"../output/impacts_scale_county_all_category.csv")
| 33.644351 | 130 | 0.596443 | 3.171875 |
2fd55a9541f3cdf811e93acd233be903776f8c4b | 2,268 | py | Python | spNQueenTheorem/generator.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | spNQueenTheorem/generator.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | spNQueenTheorem/generator.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | # This script generates a theorem for the Z3 SMT solver. The output of this program
# is designed to be the input for http://rise4fun.com/z3
# The output of z3 is the coordinates of the queens for a solution to the N Queen problem :)
#Prints an assert statement
def zassert(x):
print("( assert ( {} ) )".format(x))
#Prints a declaration
def zdeclare(x, type="Int"):
print("( declare-const {} {} )".format(x,type))
#Generates a Z3 proof.
# N = number of queens.
# G = grid size (8 = chess board)
def generate(N, G) :
zdeclare("N") #Nuber of queens
zdeclare("G") #Board size
zassert("= N {}".format(N)) #Init N
zassert("= G {}".format(G)) #Init G
#Generate queen names
queensX = ["P{}_x".format(n) for n in range(0, N) ]
queensY = ["P{}_y".format(n) for n in range(0, N) ]
#Declare queens
for i in range(N):
zdeclare(queensX[i])
zdeclare(queensY[i])
#For each queen Position
for P in range(N):
#Assert bounds
zassert(">= {} 0".format(queensX[P]))
zassert(">= {} 0".format(queensY[P]))
zassert("< {} G".format(queensX[P]))
zassert("< {} G".format(queensY[P]))
for PP in range(P+1, N):
#Assert Horizontal and Vertical Uniqueness
zassert("not ( or (= {ax} {bx} ) (= {ay} {by} ) )"
.format(ax=queensX[P], bx=queensX[PP], ay=queensY[P], by=queensY[PP]))
#Assert Diagonal uniqueness
# / angle
zassert("not ( exists (( t Int )) ( and ( and ( and ( = (+ {ax} t) {bx} ) ( >= (+ {ax} t) 0 ) ) ( < (+ {ax} t) G ) ) ( and ( and ( = (+ {ay} t) {by} ) ( >= (+ {ay} t) 0 ) ) ( < (+ {ay} t) G ) ) ) )"
.format(ax=queensX[P], bx=queensX[PP], ay=queensY[P], by=queensY[PP]))
# \ angle
zassert("not ( exists (( t Int )) ( and ( and ( and ( = (+ {ax} t) {bx} ) ( >= (+ {ax} t) 0 ) ) ( < (+ {ax} t) G ) ) ( and ( and ( = (- {ay} t) {by} ) ( >= (- {ay} t) 0 ) ) ( < (- {ay} t) G ) ) ) )"
.format(ax=queensX[P], bx=queensX[PP], ay=queensY[P], by=queensY[PP]))
print("(check-sat)")
print("(get-model)")
#Generate proof for 8 queens on an 8x8 grid
generate(8,8) | 36 | 211 | 0.507055 | 3.53125 |
448226b0b0bb3e7dfc88ffd8561110cbae44b23e | 941 | py | Python | gpu_watchdog.py | AlphaGoMK/GPU_Watchdog | 01aa3370d4521d800c23a8afe396e4424a94b77e | [
"MIT"
] | null | null | null | gpu_watchdog.py | AlphaGoMK/GPU_Watchdog | 01aa3370d4521d800c23a8afe396e4424a94b77e | [
"MIT"
] | null | null | null | gpu_watchdog.py | AlphaGoMK/GPU_Watchdog | 01aa3370d4521d800c23a8afe396e4424a94b77e | [
"MIT"
] | null | null | null | import GPUtil
import time
import datetime
import argparse
import sys
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--threshold', type=int, default=1024, help='GPU memory threshold (MB)')
parser.add_argument('--index', type=int, default=0, help='Index of the GPU to be monitored')
opt = parser.parse_args()
sckey = '' # your sckey
assert sckey != '' # No sckey is given
li = []
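# li holds a short sliding window of "GPU memory below threshold" checks (one per second);
# once more than 5 entries in the window are True, the ServerChan alert below fires and the
# script exits.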
while True:
gpu = GPUtil.getGPUs()[opt.index]
li.append(gpu.memoryUsed < opt.threshold)
if sum(li) > 5:
now = datetime.datetime.now()
err_info = 'GPU mem drop @ %s'%(now.strftime('%b %d %H:%M'))
try:
requests.get('https://sc.ftqq.com/%s.send?text=%s'%(sckey, err_info.replace(' ', '_').replace(':', '_')))
except:
print('Send error')
print('\033[31m%s\033[0m'%err_info)
sys.exit(0)
elif len(li) > 5:
del li[0]
time.sleep(1)
| 29.40625 | 117 | 0.609989 | 3.234375 |
c6f175b70fd25d8633c97d3d0d59465e21251689 | 978 | py | Python | example/apps/buttons/methods/Buttons.py | NinjaDero/Directly | bb241b49c54c8a1510438f955b39d1785594cc64 | [
"MIT"
] | 4 | 2015-03-09T10:50:05.000Z | 2020-07-02T17:22:37.000Z | example/apps/buttons/methods/Buttons.py | NinjaDero/Directly | bb241b49c54c8a1510438f955b39d1785594cc64 | [
"MIT"
] | null | null | null | example/apps/buttons/methods/Buttons.py | NinjaDero/Directly | bb241b49c54c8a1510438f955b39d1785594cc64 | [
"MIT"
] | null | null | null | from Directly import Ext
@Ext.cls
class Buttons():
@staticmethod
@Ext.method
def ping(request):
return "Pong!"
@staticmethod
@Ext.method
def reverse(request, text):
return text[::-1]
@staticmethod
@Ext.method
def full_caps(request, text):
all_caps = Buttons.make_caps(text)
return all_caps
@staticmethod
@Ext.method
def full_lows(request, text):
all_lows = Buttons.make_lows(text)
return all_lows
# Not included, remains hidden to Ext.direct.Manager
# You don't have to separate your exposed and hidden methods, if you don't want to.
# They can also not be called if the Manager is edited manually
@staticmethod
def make_caps(_text):
if 'upper' in dir(_text):
_text = _text.upper()
return _text
@staticmethod
def make_lows(_text):
if 'lower' in dir(_text):
_text = _text.lower()
return _text | 23.853659 | 87 | 0.621677 | 3.015625 |
489240a5d6ab7946d2288fbd32e8a8541cb5a64d | 534 | sql | SQL | db/Visitante/sp_visitante_add.sql | WilmoMorandeDeodato/Hotel-Restaurante-Golai | ce646176c55a729c83a35a13b33431aed2d872bf | [
"CC-BY-3.0"
] | null | null | null | db/Visitante/sp_visitante_add.sql | WilmoMorandeDeodato/Hotel-Restaurante-Golai | ce646176c55a729c83a35a13b33431aed2d872bf | [
"CC-BY-3.0"
] | null | null | null | db/Visitante/sp_visitante_add.sql | WilmoMorandeDeodato/Hotel-Restaurante-Golai | ce646176c55a729c83a35a13b33431aed2d872bf | [
"CC-BY-3.0"
] | null | null | null | DROP PROCEDURE IF EXISTS sp_visitante_add;
DELIMITER //
CREATE PROCEDURE sp_visitante_add(
v_id_hotel INT,
v_ip VARCHAR(15),
v_navegador VARCHAR(30),
v_sistema VARCHAR(30)
)
BEGIN
DECLARE v_auto_id INT;
SELECT COUNT(id_visitante) + 1 FROM tb_visitante INTO v_auto_id;
INSERT
INTO tb_visitante(
id_visitante, id_hotel, ip, navegador, sistema, data_ace
)
VALUES(
v_auto_id, v_id_hotel, v_ip, v_navegador, v_sistema, NOW()
);
END //
DELIMITER ;
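-- Example verification query (not part of the original script): after the CALL below runs,
-- the newly inserted row can be inspected with
--   SELECT * FROM tb_visitante ORDER BY id_visitante DESC LIMIT 1;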
CALL sp_visitante_add(1,'','',''); | 25.428571 | 65 | 0.679775 | 3.078125 |
e2276f9a3a6d51e31f6fa4d77cfffd8a30ff5c23 | 1,959 | py | Python | remorse/wavegen.py | jhobbs/morsegen | 771b8bb38fd3ad6f94d775edc00323dd0b42f566 | [
"Apache-2.0"
] | 2 | 2016-05-27T04:08:50.000Z | 2016-05-27T12:48:59.000Z | remorse/wavegen.py | jhobbs/remorse | 771b8bb38fd3ad6f94d775edc00323dd0b42f566 | [
"Apache-2.0"
] | null | null | null | remorse/wavegen.py | jhobbs/remorse | 771b8bb38fd3ad6f94d775edc00323dd0b42f566 | [
"Apache-2.0"
] | null | null | null | import itertools
import math
from pyaudio import PyAudio
BITRATE = 16000
FADE_LENGTH = 0.003
FADE_FRAMES = int(BITRATE * FADE_LENGTH)
MAX_AMPLITUDE = 127
def sine(frequency, length):
"""Generate a sine wave in 8-bit unsigned PCM format.
Uses linear fading at the beginning and end to avoid click noise.
Good reference on how simple digital sound generation works:
http://www.cs.nmsu.edu/~rth/cs/computermusic/Simple%20sound%20generation.html
We use s_n = (a * sin(2*pi*f*n/sr)) + 128 where:
- n is the sample number.
- s_n is sample n.
- f is frequency in hertz.
- sr is the sample rate in samples per second.
- a is the amplitude in the range of 0 to 127.
Adding 128 serves to center the samples at 128, which is silence in 8-bit
unsigned PCM format.
"""
wave_data = ''
number_of_frames = int(BITRATE * length)
factor = (float(frequency) * (math.pi * 2)) / BITRATE
for n in xrange(number_of_frames):
if n < FADE_FRAMES:
amplitude_factor = float(n) / FADE_FRAMES
elif number_of_frames - n < FADE_FRAMES:
amplitude_factor = float(number_of_frames - n) / FADE_FRAMES
else:
amplitude_factor = 1
amplitude = MAX_AMPLITUDE * amplitude_factor
zero_centered = int(math.sin(n * factor) * amplitude)
wave_data += chr(zero_centered + 128)
return wave_data
def silence(length):
wave_data = ''
number_of_frames = int(BITRATE * length)
for x in xrange(number_of_frames):
wave_data += chr(128)
return wave_data
def play(wave_data):
chunk_size = BITRATE/10
p = PyAudio()
stream = p.open(format = p.get_format_from_width(1),
channels = 1,
rate = BITRATE,
output = True)
    # write the audio out in chunk_size-sample slices rather than one byte at a time
    for start in xrange(0, len(wave_data), chunk_size):
        stream.write(wave_data[start:start + chunk_size])
stream.stop_stream()
stream.close()
p.terminate()
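# Example usage sketch (not part of the original module): play a 440 Hz tone for half a
# second followed by a quarter second of silence; the frequency and durations are arbitrary.
if __name__ == '__main__':
    play(sine(440, 0.5) + silence(0.25))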
| 28.808824 | 81 | 0.64829 | 3.390625 |
af91089c95e4ed7d63c9f6848966cba3c13704d1 | 1,259 | py | Python | tests/task/test_can_task.py | jha929/pyMODI | 6175e6579b7fab4d26a8add852bbc8357eb8bf30 | [
"MIT"
] | 2 | 2020-11-25T03:20:12.000Z | 2020-11-25T03:20:14.000Z | tests/task/test_can_task.py | jha929/pyMODI | 6175e6579b7fab4d26a8add852bbc8357eb8bf30 | [
"MIT"
] | null | null | null | tests/task/test_can_task.py | jha929/pyMODI | 6175e6579b7fab4d26a8add852bbc8357eb8bf30 | [
"MIT"
] | null | null | null | import json
import unittest
from queue import Queue
from modi.task.can_task import CanTask
from modi.util.msgutil import parse_message
class MockCan:
def __init__(self):
self.recv_buffer = Queue()
def recv(self, timeout):
json_pkt = parse_message(0x03, 0, 1)
return CanTask.compose_can_msg(json.loads(json_pkt))
def send(self, item):
self.recv_buffer.put(item)
class TestCanTask(unittest.TestCase):
"""Tests for 'CanTask' class"""
def setUp(self):
"""Set up test fixtures, if any."""
self.can_task = CanTask()
self.can_task._bus = MockCan()
def tearDown(self):
"""Tear down test fixtures, if any."""
del self.can_task
CanTask._instances.clear()
def test_recv(self):
"""Test _recv_data method"""
self.assertEqual(self.can_task.recv(), parse_message(0x03, 0, 1))
def test_send(self):
"""Test _send_data method"""
json_pkt = parse_message(0x03, 0, 1)
self.can_task.send(json_pkt)
self.assertEqual(self.can_task.bus.recv_buffer.get().data,
CanTask.compose_can_msg(json.loads(json_pkt)).data
)
if __name__ == "__main__":
unittest.main()
| 25.693878 | 75 | 0.625894 | 3.140625 |
be8465a0f3ff16adcd08cbb851646be9bdf7fa57 | 5,905 | ts | TypeScript | packages/plugin-node-unhandled-rejection/test/unhandled-rejection.test.ts | orinamio/bugsnag-js-7.14.2-patch | bc15fa8cf6c5442710aaeeb7749c489c5558f85c | [
"MIT"
] | 682 | 2015-01-02T06:35:11.000Z | 2022-03-31T07:49:05.000Z | packages/plugin-node-unhandled-rejection/test/unhandled-rejection.test.ts | orinamio/bugsnag-js-7.14.2-patch | bc15fa8cf6c5442710aaeeb7749c489c5558f85c | [
"MIT"
] | 1,456 | 2015-01-10T01:56:02.000Z | 2022-03-31T07:26:24.000Z | packages/plugin-node-unhandled-rejection/test/unhandled-rejection.test.ts | orinamio/bugsnag-js-7.14.2-patch | bc15fa8cf6c5442710aaeeb7749c489c5558f85c | [
"MIT"
] | 266 | 2015-01-23T02:25:34.000Z | 2022-03-19T04:37:48.000Z | import Client from '@bugsnag/core/client'
import { schema } from '@bugsnag/core/config'
import plugin from '../'
import EventWithInternals from '@bugsnag/core/event'
describe('plugin: node unhandled rejection handler', () => {
it('should listen to the process#unhandledRejection event', () => {
const before = process.listeners('unhandledRejection').length
const c = new Client({ apiKey: 'api_key', plugins: [plugin] })
const after = process.listeners('unhandledRejection').length
expect(before < after).toBe(true)
expect(c).toBe(c)
plugin.destroy()
})
it('does not add a process#unhandledRejection listener if autoDetectErrors=false', () => {
const before = process.listeners('unhandledRejection').length
const c = new Client({ apiKey: 'api_key', autoDetectErrors: false, plugins: [plugin] })
const after = process.listeners('unhandledRejection').length
expect(c).toBe(c)
expect(after).toBe(before)
})
it('does not add a process#unhandledRejection listener if enabledErrorTypes.unhandledRejections=false', () => {
const before = process.listeners('unhandledRejection').length
const c = new Client({
apiKey: 'api_key',
enabledErrorTypes: { unhandledExceptions: false, unhandledRejections: false },
plugins: [plugin]
})
const after = process.listeners('unhandledRejection').length
expect(c).toBe(c)
expect(after).toBe(before)
})
it('should call the configured onUnhandledRejection callback', done => {
const c = new Client({
apiKey: 'api_key',
onUnhandledRejection: (err: Error, event: EventWithInternals) => {
expect(err.message).toBe('never gonna catch me')
expect(event.errors[0].errorMessage).toBe('never gonna catch me')
expect(event._handledState.unhandled).toBe(true)
expect(event._handledState.severity).toBe('error')
expect(event._handledState.severityReason).toEqual({ type: 'unhandledPromiseRejection' })
plugin.destroy()
done()
},
plugins: [plugin]
}, {
...schema,
onUnhandledRejection: {
validate: (val: unknown) => typeof val === 'function',
message: 'should be a function',
defaultValue: () => {}
}
})
c._setDelivery(client => ({
sendEvent: (payload, cb) => cb(),
sendSession: (payload, cb) => cb()
}))
process.listeners('unhandledRejection')[0](new Error('never gonna catch me'), Promise.resolve())
})
it('should tolerate delivery errors', done => {
const c = new Client({
apiKey: 'api_key',
onUnhandledRejection: (err: Error, event: EventWithInternals) => {
expect(err.message).toBe('never gonna catch me')
expect(event.errors[0].errorMessage).toBe('never gonna catch me')
expect(event._handledState.unhandled).toBe(true)
expect(event._handledState.severity).toBe('error')
expect(event._handledState.severityReason).toEqual({ type: 'unhandledPromiseRejection' })
plugin.destroy()
done()
},
plugins: [plugin]
}, {
...schema,
onUnhandledRejection: {
validate: (val: unknown) => typeof val === 'function',
message: 'should be a function',
defaultValue: () => {}
}
})
c._setDelivery(client => ({
sendEvent: (payload, cb) => cb(new Error('floop')),
sendSession: (payload, cb) => cb()
}))
process.listeners('unhandledRejection')[0](new Error('never gonna catch me'), Promise.resolve())
})
it('should return a promise that resolves after the onUnhandledRejection callback is called', async () => {
try {
const options = {
apiKey: 'api_key',
onUnhandledRejection: jest.fn(),
plugins: [plugin]
}
const pluginSchema = {
...schema,
onUnhandledRejection: {
validate: (val: unknown) => typeof val === 'function',
message: 'should be a function',
defaultValue: () => {}
}
}
const client = new Client(options, pluginSchema)
client._setDelivery(client => ({
sendEvent: (payload, cb) => cb(),
sendSession: (payload, cb) => cb()
}))
const listener = process.listeners('unhandledRejection')[0]
expect(options.onUnhandledRejection).not.toHaveBeenCalled()
await listener(new Error('never gonna catch me'), Promise.resolve())
expect(options.onUnhandledRejection).toHaveBeenCalledTimes(1)
} finally {
plugin.destroy()
}
})
it('should prepend its listener (Node 6+)', async () => {
// Skip this test on Node 4/5 as prependListener doesn't exist
if (process.version.startsWith('v4.') || process.version.startsWith('v5.')) {
return
}
const listener = () => {}
try {
process.on('unhandledRejection', listener)
const listenersBefore = process.listeners('unhandledRejection')
expect(listenersBefore).toHaveLength(1)
expect(listenersBefore[0]).toBe(listener)
const options = {
apiKey: 'api_key',
onUnhandledRejection: jest.fn(),
plugins: [plugin]
}
const pluginSchema = {
...schema,
onUnhandledRejection: {
validate: (val: unknown) => typeof val === 'function',
message: 'should be a function',
defaultValue: () => {}
}
}
const client = new Client(options, pluginSchema)
client._setDelivery(client => ({
sendEvent: (payload, cb) => cb(),
sendSession: (payload, cb) => cb()
}))
const listenersAfter = process.listeners('unhandledRejection')
expect(listenersAfter).toHaveLength(2)
expect(listenersAfter[0]).not.toBe(listener)
expect(listenersAfter[1]).toBe(listener)
} finally {
process.removeListener('unhandledRejection', listener)
plugin.destroy()
}
})
})
| 33.361582 | 113 | 0.62591 | 3.03125 |
9376b61a21ae5b5a375e0f74ef6c2f453a4c152c | 3,917 | cs | C# | Assets/Scripts/Handlers/MultiTouchCurvedNoteDetector.cs | nopponaim603/BMP-U | 52072a1134d8a4304a41013c3b8d141da8a348a9 | [
"Artistic-2.0"
] | 74 | 2016-05-29T21:06:53.000Z | 2022-02-11T11:42:44.000Z | Assets/Scripts/Handlers/MultiTouchCurvedNoteDetector.cs | nopponaim603/BMP-U | 52072a1134d8a4304a41013c3b8d141da8a348a9 | [
"Artistic-2.0"
] | 1 | 2020-08-02T13:00:14.000Z | 2020-08-02T13:00:14.000Z | Assets/Scripts/Handlers/MultiTouchCurvedNoteDetector.cs | nopponaim603/BMP-U | 52072a1134d8a4304a41013c3b8d141da8a348a9 | [
"Artistic-2.0"
] | 17 | 2017-06-30T13:20:39.000Z | 2021-12-28T02:58:50.000Z | using UnityEngine;
using System.Collections.Generic;
using BMS;
using BMS.Visualization;
[RequireComponent(typeof(NoteSpawnerSP))]
public class MultiTouchCurvedNoteDetector : MonoBehaviour {
public Camera hitTestCamera;
public NoteDetector noteDetector;
public NoteSpawnerSP noteSpawner;
public float startLength, endLength;
readonly Dictionary<int, int> touchMapping = new Dictionary<int, int>();
int mouseMapping;
bool previousMouseState;
Vector3 previousMousePosition;
void Start() {
Input.multiTouchEnabled = true;
if(Input.touchSupported) Input.simulateMouseWithTouches = false;
}
void Update() {
IList<int> mapping = noteSpawner.MappedChannels;
int idx;
bool touchHandled = false;
foreach(var touch in Touches.Instance) {
switch(touch.phase) {
case TouchPhase.Began:
idx = DetectIndex(touch.position, mapping);
touchMapping[touch.fingerId] = idx;
HandleTouch(idx, mapping, true);
break;
case TouchPhase.Moved:
idx = DetectIndex(touch.position, mapping);
if(idx != touchMapping[touch.fingerId]) {
HandleTouch(touchMapping[touch.fingerId], mapping, false);
HandleTouch(idx, mapping, true);
touchMapping[touch.fingerId] = idx;
}
break;
case TouchPhase.Ended:
case TouchPhase.Canceled:
idx = DetectIndex(touch.position, mapping);
HandleTouch(idx, mapping, false);
touchMapping[touch.fingerId] = -1;
break;
}
touchHandled = true;
}
if(!touchHandled) {
bool currentMouseState = Input.GetMouseButton(0);
Vector3 currentMousePosition = Input.mousePosition;
if(currentMouseState != previousMouseState) {
idx = DetectIndex(currentMousePosition, mapping);
HandleTouch(idx, mapping, currentMouseState);
mouseMapping = currentMouseState ? idx : -1;
} else if(currentMousePosition != previousMousePosition && currentMouseState) {
idx = DetectIndex(currentMousePosition, mapping);
if(idx != mouseMapping) {
HandleTouch(mouseMapping, mapping, false);
HandleTouch(idx, mapping, true);
mouseMapping = idx;
}
}
previousMouseState = currentMouseState;
previousMousePosition = currentMousePosition;
}
}
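    // DetectIndex maps a screen-space touch/mouse position to a lane index: the point is
    // projected into world space, its distance from the spawner centroid must fall inside
    // [startLength, endLength], and its angle around the centroid is binned into
    // MappedChannels.Count equal slots across the spawner's clamp range. Returns -1 when
    // the point does not hit any lane.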
int DetectIndex(Vector3 position, IList<int> mapping) {
int mappingCount = mapping.Count;
if(mappingCount <= 0) return -1;
Vector3 localPosition = hitTestCamera.ScreenToWorldPoint(position, Vector3.Distance(hitTestCamera.transform.position, noteSpawner.centroid));
float distance = Vector2.Distance(localPosition, noteSpawner.centroid);
if(distance < startLength || distance > endLength) return -1;
float anglePerSlot = (noteSpawner.clampRangeEnd - noteSpawner.clampRangeStart) / mappingCount;
float angle = Mathf.Repeat(Mathf.Atan2(localPosition.y - noteSpawner.centroid.y, localPosition.x - noteSpawner.centroid.x) * Mathf.Rad2Deg, 360F) + anglePerSlot / 2;
if(angle < noteSpawner.clampRangeStart || angle > noteSpawner.clampRangeEnd + anglePerSlot) return -1;
return Mathf.FloorToInt(Mathf.Clamp(Mathf.InverseLerp(noteSpawner.clampRangeStart, noteSpawner.clampRangeEnd + anglePerSlot, angle) * mappingCount, 0, mappingCount - 1));
}
void HandleTouch(int i, IList<int> mapping, bool isDown) {
if(i >= 0) noteDetector.OnClick(mapping[i], isDown);
}
}
| 44.011236 | 178 | 0.615777 | 3.0625 |
e2f603c5ae6e42e16ccac0c3158c3b5c4e70a4e6 | 1,514 | py | Python | coarseactin/utils/HexGrid.py | cabb99/CoarseGrainedActin | 037dfddec2b985e529620e1b83d1cc48bd930b93 | [
"MIT"
] | 1 | 2021-03-02T22:45:04.000Z | 2021-03-02T22:45:04.000Z | coarseactin/utils/HexGrid.py | cabb99/CoarseGrainedActin | 037dfddec2b985e529620e1b83d1cc48bd930b93 | [
"MIT"
] | 1 | 2021-09-17T18:21:39.000Z | 2021-09-17T18:21:39.000Z | coarseactin/utils/HexGrid.py | cabb99/CoarseGrainedActin | 037dfddec2b985e529620e1b83d1cc48bd930b93 | [
"MIT"
] | null | null | null | import numpy as np
class HexGrid():
deltas = [[1, 0, -1], [0, 1, -1], [-1, 1, 0], [-1, 0, 1], [0, -1, 1], [1, -1, 0]]
a0 = 0
a1 = np.pi / 3
a2 = -np.pi / 3
vecs = np.array([[np.sqrt(3) * np.cos(a0), np.sin(a0) / np.sqrt(3)],
[np.sqrt(3) * np.cos(a1), np.sin(a1) / np.sqrt(3)],
[np.sqrt(3) * np.cos(a2), np.sin(a2) / np.sqrt(3)]])
def __init__(self, radius):
self.radius = radius
self.tiles = {(0, 0, 0): "X"}
for r in range(radius):
a = 0
b = -r
c = +r
for j in range(6):
num_of_hexas_in_edge = r
for i in range(num_of_hexas_in_edge):
a = a + self.deltas[j][0]
b = b + self.deltas[j][1]
c = c + self.deltas[j][2]
self.tiles[a, b, c] = "X"
def coords(self):
tiles = np.array([a for a in self.tiles.keys()])
coords = np.dot(tiles, self.vecs)
return coords
def show(self):
l = []
for y in range(20):
l.append([])
for x in range(60):
l[y].append(".")
for (a, b, c), tile in self.tiles.items():
l[self.radius - 1 - b][a - c + (2 * (self.radius - 1))] = self.tiles[a, b, c]
mapString = ""
for y in range(len(l)):
for x in range(len(l[y])):
mapString += l[y][x]
mapString += "\n"
print(mapString) | 34.409091 | 89 | 0.416116 | 3.109375 |
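# Example usage sketch (not part of the original module): build a small grid, inspect the
# 2-D coordinates of its tiles, and draw the ASCII map.
if __name__ == "__main__":
    grid = HexGrid(3)
    print(grid.coords())
    grid.show()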
0d36f1d9b9a915debcac4d12e07eee5d7d8cab16 | 4,512 | cs | C# | CSharp Advanced/CSharp Advanced Exams/CSharp Advanced Exam - 19 June 2016/01. Cubic Artillery/Program.cs | stoyanovmiroslav/CSharp-Fundamentals | f941c73adc341e39a696ad5a5563c845ca296141 | [
"MIT"
] | null | null | null | CSharp Advanced/CSharp Advanced Exams/CSharp Advanced Exam - 19 June 2016/01. Cubic Artillery/Program.cs | stoyanovmiroslav/CSharp-Fundamentals | f941c73adc341e39a696ad5a5563c845ca296141 | [
"MIT"
] | null | null | null | CSharp Advanced/CSharp Advanced Exams/CSharp Advanced Exam - 19 June 2016/01. Cubic Artillery/Program.cs | stoyanovmiroslav/CSharp-Fundamentals | f941c73adc341e39a696ad5a5563c845ca296141 | [
"MIT"
] | null | null | null | using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
namespace _01._Cubic_Artillery
{
class Program
{
static void Main(string[] args)
{
int bunkersCapacity = int.Parse(Console.ReadLine());
string input = "";
var dict = new Dictionary<string, Queue<int>>();
List<string> bunkers = new List<string>();
while ((input = Console.ReadLine()) != "Bunker Revision")
{
string[] splitInput = input.Split(new string[] { " " }, StringSplitOptions.RemoveEmptyEntries);
for (int i = 0; i < splitInput.Length; i++)
{
if (IsNumerics(splitInput[i]))
{
int weapon = int.Parse(splitInput[i]);
StoreWeapons(bunkers, dict, weapon, bunkersCapacity);
}
else
{
string bunker = splitInput[i];
bunkers.Add(bunker);
dict.Add(bunker, new Queue<int>());
}
if (bunkers.Count > 1)
{
bool bunkerIsFull = IsBunkerFull(bunkers, dict, bunkersCapacity);
if (bunkerIsFull)
{
Console.WriteLine($"{bunkers[0]} -> {string.Join(", ", dict[bunkers[0]])}");
dict.Remove(bunkers[0]);
bunkers.RemoveAt(0);
}
}
}
}
}
private static bool IsBunkerFull(List<string> bunkers, Dictionary<string, Queue<int>> dict, int bunkersCapacity)
{
if (dict[bunkers[0]].Sum() == bunkersCapacity)
{
return true;
}
else
{
return false;
}
}
private static void StoreWeapons(List<string> bunkers, Dictionary<string, Queue<int>> dict, int weapon, int bunkersCapacity)
{
if (bunkers.Count > 0 && bunkersCapacity >= weapon)
{
for (int i = 0; i < bunkers.Count; i++)
{
string bunkerName = bunkers[i];
if (IsWeaponsCanFit(dict, weapon, bunkerName, bunkersCapacity))
{
dict[bunkerName].Enqueue(weapon);
break;
}
else if (bunkers.Count > 1)
{
Console.WriteLine($"{bunkers[0]} -> {string.Join(", ", dict[bunkers[0]])}");
dict.Remove(bunkers[0]);
bunkers.RemoveAt(0);
i--;
}
else if (bunkers.Count == 1)
{
OrderLastBunker(bunkers, dict, weapon, bunkersCapacity);
}
}
}
else if (bunkers.Count > 1)
{
if (dict[bunkers[0]].Sum() > 0)
{
Console.WriteLine($"{bunkers[0]} -> {string.Join(", ", dict[bunkers[0]])}");
}
else
{
Console.WriteLine($"{bunkers[0]} -> Empty");
}
dict.Remove(bunkers[0]);
bunkers.RemoveAt(0);
}
}
private static bool IsWeaponsCanFit(Dictionary<string, Queue<int>> dict, int weapon, string bunkerName, int bunkersCapacity)
{
int currentSumWeapons = dict[bunkerName].Sum();
int capacity = weapon + currentSumWeapons;
return capacity <= bunkersCapacity;
}
private static void OrderLastBunker(List<string> bunkers, Dictionary<string, Queue<int>> dict, int weapon, int bunkersCapacity)
{
int n = dict[bunkers[0]].Count;
for (int i = 0; i < n; i++)
{
dict[bunkers[0]].Dequeue();
if (IsWeaponsCanFit(dict, weapon, bunkers[0], bunkersCapacity))
{
dict[bunkers[0]].Enqueue(weapon);
break;
}
}
}
private static bool IsNumerics(string value)
{
return value.All(char.IsNumber);
}
}
}
| 34.442748 | 135 | 0.432624 | 3.234375 |
b0d74d87e094d09c0881fb214c8227631dbb222d | 4,864 | py | Python | speech_enhance/utils/utils.py | hit-thusz-RookieCJ/FullSubNet-plus | a6c89083cd083e729ca3def9a291743e8c3b516b | [
"Apache-2.0"
] | 41 | 2022-01-23T12:45:13.000Z | 2022-03-31T03:04:26.000Z | speech_enhance/utils/utils.py | hit-thusz-RookieCJ/FullSubNet-plus | a6c89083cd083e729ca3def9a291743e8c3b516b | [
"Apache-2.0"
] | 5 | 2022-01-24T07:05:01.000Z | 2022-03-29T12:58:07.000Z | speech_enhance/utils/utils.py | hit-thusz-RookieCJ/FullSubNet-plus | a6c89083cd083e729ca3def9a291743e8c3b516b | [
"Apache-2.0"
] | 10 | 2022-01-23T01:40:18.000Z | 2022-03-29T12:11:11.000Z | import os
import argparse
import shutil
import yaml
import numpy as np
import torch
import torch.nn.functional as F
from .logger import log
def touch_dir(d):
os.makedirs(d, exist_ok=True)
def is_file_exists(f):
return os.path.exists(f)
def check_file_exists(f):
if not os.path.exists(f):
log(f"not found file: {f}")
assert False, f"not found file: {f}"
def read_lines(data_path):
lines = []
with open(data_path, encoding="utf-8") as fr:
for line in fr.readlines():
if len(line.strip().replace(" ", "")):
lines.append(line.strip())
# log("read {} lines from {}".format(len(lines), data_path))
# log("example(last) {}\n".format(lines[-1]))
return lines
def write_lines(data_path, lines):
with open(data_path, "w", encoding="utf-8") as fw:
for line in lines:
fw.write("{}\n".format(line))
# log("write {} lines to {}".format(len(lines), data_path))
# log("example(last line): {}\n".format(lines[-1]))
return
def get_name_from_path(abs_path):
return ".".join(os.path.basename(abs_path).split(".")[:-1])
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
return
def load_hparams(yaml_path):
with open(yaml_path, encoding="utf-8") as yaml_file:
hparams = yaml.safe_load(yaml_file)
return AttrDict(hparams)
def dump_hparams(yaml_path, hparams):
touch_dir(os.path.dirname(yaml_path))
with open(yaml_path, "w") as fw:
yaml.dump(hparams, fw)
log("save hparams to {}".format(yaml_path))
return
def get_all_wav_path(file_dir):
wav_list = []
for path, dir_list, file_list in os.walk(file_dir):
for file_name in file_list:
if file_name.endswith(".wav") or file_name.endswith(".WAV"):
wav_path = os.path.join(path, file_name)
wav_list.append(wav_path)
return sorted(wav_list)
def clean_and_new_dir(data_dir):
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
os.makedirs(data_dir)
return
def generate_dir_tree(synth_dir, dir_name_list, del_old=False):
os.makedirs(synth_dir, exist_ok=True)
dir_path_list = []
if del_old:
shutil.rmtree(synth_dir, ignore_errors=True)
for name in dir_name_list:
dir_path = os.path.join(synth_dir, name)
dir_path_list.append(dir_path)
os.makedirs(dir_path, exist_ok=True)
return dir_path_list
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
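# Example usage sketch (not part of the original module): str2bool is meant to be passed as
# an argparse `type`, e.g.
#   parser.add_argument("--use_cuda", type=str2bool, default=True)
# so that command-line values such as "--use_cuda false" are parsed into real booleans.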
def pad(input_ele, mel_max_length=None):
if mel_max_length:
max_len = mel_max_length
else:
max_len = max([input_ele[i].size(0) for i in range(len(input_ele))])
out_list = list()
for i, batch in enumerate(input_ele):
if len(batch.shape) == 1:
one_batch_padded = F.pad(
batch, (0, max_len - batch.size(0)), "constant", 0.0
)
elif len(batch.shape) == 2:
one_batch_padded = F.pad(
batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
)
out_list.append(one_batch_padded)
out_padded = torch.stack(out_list)
return out_padded
def pad_1D(inputs, PAD=0):
def pad_data(x, length, PAD):
x_padded = np.pad(
x, (0, length - x.shape[0]), mode="constant", constant_values=PAD
)
return x_padded
max_len = max((len(x) for x in inputs))
padded = np.stack([pad_data(x, max_len, PAD) for x in inputs])
return padded
def pad_2D(inputs, maxlen=None):
def pad(x, max_len):
PAD = 0
if np.shape(x)[0] > max_len:
raise ValueError("not max_len")
s = np.shape(x)[1]
x_padded = np.pad(
x, (0, max_len - np.shape(x)[0]), mode="constant", constant_values=PAD
)
return x_padded[:, :s]
if maxlen:
output = np.stack([pad(x, maxlen) for x in inputs])
else:
max_len = max(np.shape(x)[0] for x in inputs)
output = np.stack([pad(x, max_len) for x in inputs])
return output
def get_mask_from_lengths(lengths, max_len=None):
batch_size = lengths.shape[0]
if max_len is None:
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len).unsqueeze(0).expand(batch_size, -1).to(lengths.device)
mask = ids >= lengths.unsqueeze(1).expand(-1, max_len)
return mask
if __name__ == '__main__':
pass
| 27.636364 | 90 | 0.58861 | 3.3125 |
2caf4f07438e65cc804bea91704c942233c37e81 | 4,344 | cpp | C++ | Client/Client.cpp | laboox/Computer-Networks-CA3-S2015 | 1c6d6cd03be06c1978dda355bdeb2401d6d154d5 | [
"MIT"
] | null | null | null | Client/Client.cpp | laboox/Computer-Networks-CA3-S2015 | 1c6d6cd03be06c1978dda355bdeb2401d6d154d5 | [
"MIT"
] | null | null | null | Client/Client.cpp | laboox/Computer-Networks-CA3-S2015 | 1c6d6cd03be06c1978dda355bdeb2401d6d154d5 | [
"MIT"
] | null | null | null | /**
* File "Client.cpp"
* Created by Sina on Sun May 31 13:39:03 2015.
*/
#include "Client.h"
Client::Client(string name, address IP, address serverIp, int routerPort)
: SuperClient(IP, serverIp, routerPort)
{
this->name = name;
}
void Client::run(){
fd_set router_fds, read_fds;
FD_ZERO(&router_fds);
FD_ZERO(&read_fds);
FD_SET(0, &router_fds);
FD_SET(routerFd, &router_fds);
sh(routerFd);
int max_fd = routerFd;
while(true){
read_fds = router_fds;
if(select(max_fd+1, &read_fds, NULL, NULL, NULL) < 0)
throw Exeption("problem in sockets select!");
for(int client_fd=0; client_fd<=max_fd ; client_fd++)
{
try
{
if(FD_ISSET(client_fd , &read_fds))
{
if(client_fd==0)
{
//cerr<<"in recive\n";
string cmd;
getline(cin, cmd);
parseCmd(cmd);
}
else if(client_fd==routerFd)
{
//cerr<<"sock recive\n";
Packet p;
p.recive(routerFd);
parsePacket(p);
}
}
}
catch(Exeption ex)
{
cout<<ex.get_error()<<endl;
}
}
}
}
void Client::updateGroups(string data){
istringstream iss(data);
string name, addr;
while(iss>>name>>addr){
groups[name] = stringToAddr(addr);
}
}
void Client::parsePacket(Packet p){
if(p.getType() == GET_GROUPS_LIST){
cout<<"Groups are:\n";
cout<<p.getDataStr();
updateGroups(p.getDataStr());
}
else if(p.getType() == DATA){
//SuperClient::reciveUnicast(p);
cout<<"Data: "<<p.getDataStr()<<endl;
}
else if(p.getType() == SHOW_MY_GROUPS){
cout<<"i'm in groups:\n"<<p.getDataStr()<<endl;
}
}
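// Console commands understood by parseCmd below (taken from its branches):
//   Get group list, Select <group>, Join <group>, Send message <text>,
//   Show group, SendUniCast <address> <message>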
void Client::parseCmd(string line){
string cmd0, cmd1, cmd2;
istringstream iss(line);
iss>>cmd0;
if(cmd0=="Get"){
if(iss>>cmd1>>cmd2 && cmd1=="group" && cmd2=="list"){
getGroupList();
cout<<"group list request sent.\n";
} else {
throw Exeption("invalid cmd");
}
}
else if(cmd0=="Select"){
if(iss>>cmd1){
selectGroup(cmd1);
} else {
throw Exeption("invalid cmd");
}
}
else if(cmd0=="Join"){
if(iss>>cmd1){
joinGroup(cmd1);
} else {
throw Exeption("invalid cmd");
}
}
else if(cmd0=="Send"){
if(iss>>cmd1 && cmd1=="message"){
string message;
getline(iss, message);
sendMessage(message);
} else {
throw Exeption("invalid cmd");
}
}
else if(cmd0=="Show"){
if(iss>>cmd1 && cmd1=="group"){
showGroup();
} else {
throw Exeption("invalid cmd");
}
}
else if(cmd0=="SendUniCast"){
if(iss>>cmd1>>cmd2){
SuperClient::sendUnicast(stringToAddr(cmd1), cmd2);
} else {
throw Exeption("invalid cmd");
}
}
}
void Client::sendMessage(string message){
if(selGroup == "")
throw Exeption("no Group selected!\n");
Packet p;
p.setType(SEND_MESSAGE);
p.setSource(IP);
p.setDest(groups[selGroup]);
p.setData(message);
p.send(routerFd);
}
void Client::getGroupList(){
Packet p;
p.setType(GET_GROUPS_LIST);
p.setSource(IP);
p.setDest(serverIP);
p.send(routerFd);
}
void Client::showGroup(){
Packet p;
p.setType(SHOW_MY_GROUPS);
p.setSource(IP);
p.setDest(serverIP);
p.send(routerFd);
}
void Client::selectGroup(string g){
if(groups.count(g)<=0)
throw Exeption("Group does not exist");
selGroup = g;
cout<<"group "<< g << " with ip " << addrToString( groups[g] ) <<" selected!\n";
}
void Client::joinGroup(string g){
if(groups.count(g)<=0)
throw Exeption("Group does not exist");
Packet p;
p.setType(REQ_JOIN);
p.setSource(IP);
p.setDest(serverIP);
p.setData(g);
p.send(routerFd);
cout<<"group "<< g << " with ip " << addrToString( groups[g] ) <<" joined!\n";
}
| 24.542373 | 84 | 0.508748 | 3.015625 |
e1f47856e8e72282c7ab646d364bda2cc40a97bc | 1,091 | cpp | C++ | stacks_queues/assignments/stock_span.cpp | ramchandra94/datastructures_in_cpp | 28274ff4f0d9736cfe690ef002b90b9ebbfaf2f7 | [
"MIT"
] | null | null | null | stacks_queues/assignments/stock_span.cpp | ramchandra94/datastructures_in_cpp | 28274ff4f0d9736cfe690ef002b90b9ebbfaf2f7 | [
"MIT"
] | null | null | null | stacks_queues/assignments/stock_span.cpp | ramchandra94/datastructures_in_cpp | 28274ff4f0d9736cfe690ef002b90b9ebbfaf2f7 | [
"MIT"
] | null | null | null | class Node {
public:
int data;
int index;
Node *next;
Node(int data, int index){
this -> data = data;
this -> index = index;
this -> next = NULL;
}
};
class Stack {
Node *stackArr;
int size;
public:
Stack(){
stackArr = NULL;
size = 0;
}
void push(int data, int index){
Node *newNode = new Node(data, index);
newNode -> next = stackArr;
stackArr = newNode;
size++;
}
void pop(){
Node *temp = stackArr -> next;
delete stackArr;
stackArr = temp;
}
Node * top(){
return stackArr;
}
};
int* stockSpan(int *price, int size) {
// Write your code here
Stack s;
int * spanArr = new int[size];
for(int i=0; i < size; i++){
while(s.top() != NULL && s.top() -> data < price[i]){
s.pop();
}
if(s.top() == NULL){
spanArr[i] = i+1;
}else{
spanArr[i] = i - s.top() -> index;
}
s.push(price[i], i);
}
return spanArr;
} | 19.140351 | 61 | 0.454629 | 3.265625 |
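// Example driver sketch (not part of the original solution, which is pasted into a course
// harness that supplies main()); a standalone test could look like:
//
//   #include <iostream>
//   int main() {
//       int price[] = {100, 80, 60, 70, 60, 75, 85};
//       int n = sizeof(price) / sizeof(price[0]);
//       int *span = stockSpan(price, n);
//       for (int i = 0; i < n; i++)
//           std::cout << span[i] << " ";   // expected: 1 1 1 2 1 4 6
//       std::cout << std::endl;
//       delete[] span;
//   }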
dd9e94ae6db649288500bbba7f5726dbdbe7c3aa | 1,873 | dart | Dart | test/widget_test.dart | AndriousSolutions/app_template | 6937c151b924f42a214b00bb6def005e700c7a53 | [
"BSD-2-Clause"
] | 1 | 2021-12-20T10:19:08.000Z | 2021-12-20T10:19:08.000Z | test/widget_test.dart | AndriousSolutions/app_template | 6937c151b924f42a214b00bb6def005e700c7a53 | [
"BSD-2-Clause"
] | null | null | null | test/widget_test.dart | AndriousSolutions/app_template | 6937c151b924f42a214b00bb6def005e700c7a53 | [
"BSD-2-Clause"
] | null | null | null | ///
import 'src/view.dart';
void main() {
/// Define a test. The TestWidgets function also provides a WidgetTester
/// to work with. The WidgetTester allows you to build and interact
/// with widgets in the test environment.
testWidgets('app_template testing', (WidgetTester tester) async {
//
await tester.pumpWidget(TemplateApp());
/// Flutter won’t automatically rebuild your widget in the test environment.
/// Use pump() or pumpAndSettle() to ask Flutter to rebuild the widget.
/// pumpAndSettle() waits for all animations to complete.
await tester.pumpAndSettle();
final con = TemplateController();
// for (var interface = 1; interface <= 2; interface++) {
//
int cnt = 1;
while (cnt <= 3) {
switch (con.application) {
case 'Counter':
/// Counter app testing
await counterTest(tester);
break;
case 'Word Pairs':
/// Random Word Pairs app
await wordsTest(tester);
break;
case 'Contacts':
/// Contacts app
await contactsTest(tester);
break;
}
/// Switch the app programmatically.
// con.changeApp();
/// Switch the app through the popupmenu
await openApplicationMenu(tester);
/// Wait for the transition in the Interface
await tester.pumpAndSettle();
cnt++;
}
/// Open the Locale window
await openLocaleMenu(tester);
/// Open About menu
await openAboutMenu(tester);
/// Switch the Interface
await openInterfaceMenu(tester);
// }
/// Unit testing does not involve integration or widget testing.
/// WordPairs App Model Unit Testing
await wordPairsModelTest(tester);
/// Unit testing the App's controller object
await testTemplateController(tester);
reportTestErrors();
});
}
| 24.973333 | 80 | 0.6252 | 3.046875 |
e5902b445cf25928a537a552d8b72f9bef862fae | 5,277 | go | Go | google/SheetData.go | RescueDen/restlib | 91c3050fdf31305bc8576a5da06d505ce8be9eb6 | [
"MIT"
] | 1 | 2019-07-26T23:15:21.000Z | 2019-07-26T23:15:21.000Z | google/SheetData.go | RescueDen/restlib | 91c3050fdf31305bc8576a5da06d505ce8be9eb6 | [
"MIT"
] | 4 | 2020-06-24T23:04:11.000Z | 2020-11-19T20:23:34.000Z | google/SheetData.go | RescueDen/restlib | 91c3050fdf31305bc8576a5da06d505ce8be9eb6 | [
"MIT"
] | 2 | 2019-11-23T21:10:13.000Z | 2020-05-23T21:37:23.000Z | // Copyright 2019 Reaction Engineering International. All rights reserved.
// Use of this source code is governed by the MIT license in the file LICENSE.txt.
package google
import (
"errors"
"fmt"
"strings"
)
/**
Used to store a row of data
*/
type SheetData struct {
//Store the values in the row
Values [][]interface{} //[row][col]
//Store the Headers
Headers []string
//Store a map from the Headers to location
headersLoc map[string]int
//Store a list of original row numbers
RowNumb []int
}
//Create a new sheet data
func NewSheetData(values [][]interface{}) (*SheetData, error) {
//We need at least a header row
if len(values) == 0 {
return nil, errors.New("header row missing from sheet")
}
//Get the size
headerSize := len(values[0])
//Build the needed header info
data := &SheetData{
Headers: make([]string, headerSize),
headersLoc: make(map[string]int, 0),
RowNumb: make([]int, len(values)-1), //No need for the header
Values: values[1:], //Remove the header row from the data
}
//Now store each of the Headers location for easy look up
for loc, name := range values[0] {
//Convert the name to a string
nameString := fmt.Sprint(name)
//Save the info
data.headersLoc[SanitizeHeader(nameString)] = loc
data.Headers[loc] = nameString
}
//Now just set the values for the row location
for i := 0; i < len(data.RowNumb); i++ {
data.RowNumb[i] = i + 2 //C style index plus the missing header row
}
return data, nil
}
// findHeaderContaining returns the index of the first header whose lower-cased name contains the given substring, or -1 if no header matches.
func (sheet *SheetData) findHeaderContaining(header string) int {
for ind, value := range sheet.Headers {
		if strings.Contains(strings.ToLower(value), strings.ToLower(header)) {
return ind
}
}
return -1
}
// FilterSheet returns a new SheetData containing only the rows whose value in the column matching header equals value (case-insensitive, whitespace-trimmed).
func (sheet *SheetData) FilterSheet(header string, value string) *SheetData {
//See if we have the header
headerCol := sheet.findHeaderContaining(header)
	//If the header is not there, return nil
if headerCol < 0 {
return nil
}
//Now create a new data sheet
newSheet := &SheetData{
Headers: sheet.Headers, //Headers are the same
headersLoc: sheet.headersLoc, //Headers are the same
RowNumb: make([]int, 0), //No need for the header
Values: make([][]interface{}, 0),
}
//Clean up the value
value = strings.TrimSpace(value)
//Now check to see if each row has the data
for r, rowData := range sheet.Values {
//Make sure we have enough data to check
if len(rowData) > headerCol {
//If they are equal
if strings.EqualFold(value, strings.TrimSpace(fmt.Sprint(rowData[headerCol]))) {
//Add the data
newSheet.Values = append(newSheet.Values, rowData)
newSheet.RowNumb = append(newSheet.RowNumb, sheet.RowNumb[r])
}
}
}
//Return the new sheet
return newSheet
}
// GetColumn returns the values of a single column, skipping rows that are too short to contain it.
func (sheet *SheetData) GetColumn(col int) []interface{} {
//We need to transpose the data
colData := make([]interface{}, 0)
//March over each row
for r := range sheet.Values {
//If it has the column add it
		if len(sheet.Values[r]) > col {
colData = append(colData, sheet.Values[r][col])
}
}
return colData
}
// GetRow returns the row with the given original row number, or nil if it is not present.
func (sheet *SheetData) GetRow(row int) *SheetDataRow {
//Look up the row number
indexNumber := -1
//Now search over the rows for the row index,
for index, rowTest := range sheet.RowNumb {
if rowTest == row {
indexNumber = index
}
}
	//If the row number was not found, return nil
if indexNumber < 0 {
return nil
}
//Extract the row info
dataRow := &SheetDataRow{
Values: sheet.Values[indexNumber],
Headers: sheet.Headers,
headersLoc: sheet.headersLoc,
RowNumber: row,
}
	//If we have fewer values than the header size
if len(dataRow.Values) < len(dataRow.Headers) {
//Make a new array
array := make([]interface{}, len(dataRow.Headers)-len(dataRow.Values))
dataRow.Values = append(dataRow.Values, array...)
}
//Return the new sheet
return dataRow
}
// GetEmptyDataRow returns a new, empty row that shares this sheet's headers.
func (sheet *SheetData) GetEmptyDataRow() *SheetDataRow {
//Get the size. number of headers
size := len(sheet.Headers)
//Extract the row info
dataRow := &SheetDataRow{
Values: make([]interface{}, size),
Headers: sheet.Headers,
headersLoc: sheet.headersLoc,
RowNumber: -1,
}
//Return the new sheet
return dataRow
}
// PrintToScreen writes the sheet to stdout as comma-separated values.
func (sheet *SheetData) PrintToScreen() {
//March over each header
fmt.Print("row,")
for _, header := range sheet.Headers {
fmt.Print(header)
fmt.Print(",")
}
fmt.Println()
//Now print each row
for r, rowData := range sheet.Values {
//Now print the row number
fmt.Print(sheet.RowNumb[r])
fmt.Print(",")
//Now each data
for _, data := range rowData {
fmt.Print(data)
fmt.Print(",")
}
fmt.Println()
}
}
// NumberRows returns the number of data rows (excluding the header row).
func (sheet *SheetData) NumberRows() int {
return len(sheet.Values)
}
/**
Count the number of entries for this column
*/
func (sheet *SheetData) CountEntries(index int) int {
//Start with a count
count := 0
//March over each row
for _, rowData := range sheet.Values {
		//If the row has data at this index
if index < len(rowData) {
//If there is data
if rowData[index] != nil && len(fmt.Sprint(rowData[index])) > 0 {
count++
}
}
}
return count
}
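// Example usage sketch (illustrative only, not part of the original API): build a small
// sheet, filter it on one column, and print the result. The header and value strings here
// are invented for demonstration.
func exampleSheetDataUsage() {
	values := [][]interface{}{
		{"Name", "Species"}, // header row
		{"Rex", "dog"},
		{"Whiskers", "cat"},
	}
	sheet, err := NewSheetData(values)
	if err != nil {
		fmt.Println(err)
		return
	}
	//Keep only the rows whose Species column equals "dog"
	if dogs := sheet.FilterSheet("species", "dog"); dogs != nil {
		dogs.PrintToScreen()
	}
}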
| 21.627049 | 83 | 0.672162 | 3.421875 |
af2a406838efea757cc4acb358285b7045e3197f | 5,021 | py | Python | code/figures/Chure2019_FigS17-FigS18_IND_pairwise_comparison.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 3 | 2020-11-11T21:33:26.000Z | 2021-07-14T21:22:43.000Z | code/figures/Chure2019_FigS17-FigS18_IND_pairwise_comparison.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | null | null | null | code/figures/Chure2019_FigS17-FigS18_IND_pairwise_comparison.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 1 | 2021-07-14T21:22:45.000Z | 2021-07-14T21:22:45.000Z | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mut.thermo
import mut.stats
import mut.viz
color = mut.viz.color_selector('mut')
pboc = mut.viz.color_selector('pboc')
constants = mut.thermo.load_constants()
mut.viz.plotting_style()
# Load the data
data = pd.read_csv('../../data/Chure2019_summarized_data.csv', comment='#')
data = data[data['class'] == 'IND']
KaKi_only_samples = pd.read_csv('../../data/Chure2019_KaKi_only_samples.csv')
KaKi_epAI_samples = pd.read_csv('../../data/Chure2019_KaKi_epAI_samples.csv')
# Determine the unique operators
ops = np.sort(data['operator'].unique())
c_range = np.logspace(-2, 4, 200)
c_range[0] = 0
# Change this parameter to decide which plot to make
MODEL = 'KaKi_epAI'
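# Valid options, based on the branches below: 'KaKi_only' (only Ka/Ki were fit; ep_AI is
# fixed at 4.5) or 'KaKi_epAI' (Ka, Ki, and ep_AI were all fit).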
# ##############################################################################
# PAIRWISE PREDICTION FIGURE (KaKi-only or KaKi + ep_AI fit, selected by MODEL above)
# ##############################################################################
fig, ax = plt.subplots(len(ops),len(ops), figsize=(7,5), sharex=True, sharey=True)
# Format the axes
for a in ax.ravel():
a.xaxis.set_tick_params(labelsize=8)
a.yaxis.set_tick_params(labelsize=8)
a.set_xscale('symlog', linthreshx=1E-2)
# Add appropriate labels
for i in range(len(ops)):
ax[i, 0].set_ylabel('fold-change', fontsize=8)
ax[-1, i].set_xlabel('IPTG [µM]', fontsize=8)
ax[i, 0].text(-0.5, 0.55, ops[i], fontsize=8, backgroundcolor=pboc['pale_yellow'],
transform=ax[i,0].transAxes, rotation='vertical')
ax[0, i].set_title(ops[i], fontsize=8, backgroundcolor=pboc['pale_yellow'], y=1.08)
ax[i, i].set_facecolor('#e4e7ec')
for i in range(3):
ax[-1, i].set_xticks([0, 1E-2, 1E0, 1E2, 1E4])
# Add predictor titles
fig.text(-0.04, 0.53, 'fit strain', fontsize=8, backgroundcolor='#E3DCD0', rotation='vertical')
fig.text(0.435, 0.98, 'comparison strain', fontsize=8, backgroundcolor='#E3DCD0', rotation='horizontal')
# Plot the data.
for g, d in data.groupby(['mutant']):
g = g.upper()
for i, _ in enumerate(ops):
for j, _ in enumerate(ops):
_d = d[d['operator'] == ops[j]]
if i == j:
face = 'w'
edge = color[g]
else:
face = color[g]
edge = color[g]
if g == 'F164T':
label = 'F161T'
elif g == 'Q294K':
label = 'Q291K'
elif g == 'Q294V':
label = 'Q291V'
elif g == 'Q294R':
label = 'Q291R'
else:
label = g
_ = ax[i, j].errorbar(_d['IPTGuM'], _d['mean'], _d['sem'], markerfacecolor=face,
markeredgecolor=edge, color=edge, lw=0.15, linestyle='none', fmt='o',
ms=2.5, label=label)
# Plot the best-fit lines.
for k, m in enumerate(data['mutant'].unique()):
_d = data[(data['mutant']==m) & (data['operator'] == ops[i])]
# Get the binding energies.
if MODEL == 'KaKi_only':
_samps = KaKi_only_samples[(KaKi_only_samples['mutant']==m) &\
(KaKi_only_samples['operator']==ops[i])]
_samps['ep_AI'] = 4.5
else:
_samps = KaKi_epAI_samples[(KaKi_epAI_samples['mutant']==m) &\
(KaKi_epAI_samples['operator']==ops[i])]
Ka = _samps['Ka']
Ki = _samps['Ki']
epAI = _samps['ep_AI']
cred_region = np.zeros((2, len(c_range)))
for z, c in enumerate(c_range):
# Compute the fold-change
fc = mut.thermo.SimpleRepression(R=constants['RBS1027'],
ep_r=constants[ops[j]], ka=Ka,
ki=Ki, ep_ai=epAI,
effector_conc=c, n_sites=constants['n_sites'],
n_ns=constants['Nns']).fold_change()
cred_region[:, z] = mut.stats.compute_hpd(fc, 0.95)
# Plot the fit.
# _ = ax[i, j].plot(c_range / 1E6, fc[:, 0], color=color[m], lw=0.75)
_ = ax[i, j].fill_between(c_range, cred_region[0, :],
cred_region[1, :], color=color[m], alpha=0.2)
_ = ax[0, 2].legend(fontsize=8, bbox_to_anchor=(1.04, 0.95))
plt.subplots_adjust(wspace=0.05, hspace=0.05)
if MODEL == 'KaKi_only':
plt.savefig('../../figures/Chure2019_FigS18_KaKi_IND_pairwise_predictions.pdf',
bbox_inches='tight', facecolor='white')
elif MODEL == 'KaKi_epAI':
plt.savefig('../../figures/Chure2019_FigS19_KaKi_epAI_IND_pairwise_predictions.pdf',
bbox_inches='tight', facecolor='white')
| 41.495868 | 104 | 0.520215 | 3.09375 |
e27214e5385893d35837af6ee99a8f609104855b | 5,580 | py | Python | mupf/log/_tracks.py | kimbar/mupf_project | 21de9ed94f604220f8b8dcc64d45e30a0b94d2a1 | [
"MIT"
] | null | null | null | mupf/log/_tracks.py | kimbar/mupf_project | 21de9ed94f604220f8b8dcc64d45e30a0b94d2a1 | [
"MIT"
] | 44 | 2019-06-14T03:43:43.000Z | 2020-12-27T19:17:15.000Z | mupf/log/_tracks.py | kimbar/mupf_project | 21de9ed94f604220f8b8dcc64d45e30a0b94d2a1 | [
"MIT"
] | null | null | null | from . import _main as main
from . import settings
_tracks = []
_styles = dict(
_reference = "|,`}+-{><.T^tE",
default = "│┌└├┼─┤><·┐┘─E",
rounded = "│╭╰├┼─┤><·╮╯─E",
simple = "|,`|+-|><*,'-E",
)
glyphs = {}
ligatures = {}
def get_style(name):
if not isinstance(name, str):
name = name.value
return _styles.get(name, "default")
def set_style(characters):
global ligatures, glyphs, _styles
if len(characters) != len(_styles['_reference']):
raise ValueError('Wrong length of character set')
glyphs = {_styles['_reference'][i]:characters[i] for i in range(len(characters))}
ligatures = {lig: "".join(characters[_styles['_reference'].index(ch)] for ch in lig) for lig in (
"<-", "->", "<{", "}>", "E}>"
)}
set_style(_styles['default'])
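# With the default style the ASCII reference characters are mapped onto box-drawing glyphs,
# e.g. '|' -> '│', ',' -> '┌', '`' -> '└' and '-' -> '─' (compare _reference and default above).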
def is_occupied(n):
""" Check if track is already taken """
global _tracks
if n >= len(_tracks):
return False
else:
return _tracks[n]
def reserve(n):
""" Reserve a track w/o checking """
global _tracks
if n >= len(_tracks):
_tracks += [False]*(n-len(_tracks)+1)
_tracks[n] = True
def free(n):
""" Free a track w/o checking """
global _tracks
_tracks[n] = False
while not _tracks[-1]:
_tracks.pop()
if len(_tracks) == 0:
break
def find_free(min_=0):
""" Find a free track, but at least `min_` one """
while is_occupied(min_):
min_ += 1
return min_
def write(branch=None, branch_track=None, inner=True, connect_to=None):
""" Print tracks for a single line
    The line is connected to a track (branched) if a `branch_track` number is given. The track number
`branch_track` should be occupied. `branch` can have three values: `"start"` or `"end"` if the
branch should start or end the track, `"mid"` if the branch should only attach to a track. Any
other value to only mark the track for single line. When the single line is used, the `"<"`
draws left pointing arrow after the track mark; `">"` draws right pointing arrow; and `"."`
draws no arrow (only the track mark).
"""
global _tracks, glyphs
if inner:
result = " "
else:
if branch == 'start':
result = glyphs[">"]
elif branch == 'end':
result = glyphs["<"]
else:
result = glyphs["-"]
for n, track in enumerate(_tracks):
if track:
if branch:
if n < branch_track:
if n == connect_to:
result += glyphs["}"]
elif inner or branch == 'mid':
result += glyphs["|"]
else:
result += glyphs["+"]
elif n == branch_track:
if branch == 'start':
if inner:
result += glyphs[","]
else:
result += glyphs["T"]
elif branch == 'end':
if inner:
result += glyphs["`"]
else:
result += glyphs["^"]
elif branch == 'mid':
if inner:
result += glyphs["}"]
else:
result += glyphs["|"]
else:
result += glyphs["."]
elif n > branch_track:
result += glyphs["+"]
else:
result += glyphs["|"]
else:
if branch:
if n < branch_track:
if inner or branch == 'mid':
result += " "
else:
result += glyphs["-"]
elif n == branch_track:
result += "?"
elif n > branch_track:
result += glyphs["-"]
else:
result += " "
if inner:
if branch:
if branch == 'start' or branch == '<':
result += ligatures['<-']
elif branch == 'end' or branch == '>':
result += ligatures['->']
else:
result += glyphs["-"]+glyphs["-"]
else:
result += " "
else:
if branch == '<':
result += ligatures['<-']
elif branch == '>':
result += ligatures['->']
else:
result += glyphs["-"]+glyphs["-"]
return result+":"
_groups_indent = {}
_last_group_indent = 0
def get_group_indent(group):
global _groups_indent, _last_group_indent
if group not in _groups_indent:
if len(_groups_indent) > 0:
_last_group_indent += settings.GROUP_WIDTH
_groups_indent[group] = _last_group_indent
return _groups_indent[group]
_stack_frames_by_track = {}
_tracks_by_stack_frames = {}
def register_stack_frame(frame, track):
global _stack_frames_by_track, _tracks_by_stack_frames
_stack_frames_by_track[track] = frame
_tracks_by_stack_frames[frame] = track
def deregister_stack_frame(track):
global _stack_frames_by_track, _tracks_by_stack_frames
if track in _stack_frames_by_track:
frame = _stack_frames_by_track[track]
del _tracks_by_stack_frames[frame]
del _stack_frames_by_track[track]
def get_track_by_stack_frame(frame):
global _tracks_by_stack_frames
return _tracks_by_stack_frames.get(frame, None)
| 31.348315 | 101 | 0.505556 | 3.171875 |
20e16cb94dcb9dfa59991a388be7903b9e6de468 | 5,829 | py | Python | chemml/chem/magpie_python/attributes/generators/composition/YangOmegaAttributeGenerator.py | iamchetry/DataChallenge-Fall2021 | fa7748c9ea2f3c0f6bde8d0b094fc75463e28f33 | [
"BSD-3-Clause"
] | 108 | 2018-03-23T20:06:03.000Z | 2022-01-06T19:32:46.000Z | chemml/chem/magpie_python/attributes/generators/composition/YangOmegaAttributeGenerator.py | hachmannlab/ChemML | 42b152579872a57c834884596f700c76b9320280 | [
"BSD-3-Clause"
] | 18 | 2019-08-09T21:16:14.000Z | 2022-02-14T21:52:06.000Z | chemml/chem/magpie_python/attributes/generators/composition/YangOmegaAttributeGenerator.py | hachmannlab/ChemML | 42b152579872a57c834884596f700c76b9320280 | [
"BSD-3-Clause"
] | 28 | 2018-04-28T17:07:33.000Z | 2022-02-28T07:22:56.000Z | # coding=utf-8
import math
import types
import numpy as np
import pandas as pd
from ....data.materials.CompositionEntry import CompositionEntry
from ....data.materials.util.LookUpData import LookUpData
class YangOmegaAttributeGenerator:
"""Class to compute the attributes :math:`\Omega` and :math:`\delta`
developed by Yang and Zhang [1].
These parameters are based on the liquid formation enthalpy and atomic
sizes of elements respectively and were originally developed to predict
whether a metal alloy will form a solid solution of bulk metallic glass.
Notes
-----
:math: `\Omega` is derived from the melting temperature, ideal mixing
entropy, and regular solution solution interaction parameter (
:math: `\Omega_{i,j}`) predicted by the Miedema model for binary liquids.
Specifically, it is computed using the relationship:
.. math:: \Omega = \displaystyle\frac{T_m \Delta S_{mix}} {|\Delta H_{mix}|}
where :math: `T_m` is the composition-weighted average of the melting
temperature, :math: `\Delta S_{mix}` is the ideal solution entropy,
and :math: `\Delta H_{mix}` is the mixing enthalpy. The mixing enthalpy
is computed using the Miedema mixing enthalpies tabulated by Takeuchi and
Inoue [2] where:
.. math:: \Delta H_{mix} = \displaystyle\sum \Omega_{i,j} c_i c_j
and :math: `\Omega_{i,j} = 4 * \Delta H_{mix}`.
:math: `\delta` is related to the polydispersity of atomic sizes, and is
computed using the relationship:
.. math:: \delta = [\displaystyle\sum c_i (1 - \frac{r_i}{r_{
average})^2]^0.5
where :math: `r_i` is the atomic size. Here, we use the atomic radii
compiled by Miracle et al. [3] rather than those compiled by Kittel,
as in the original work.
References
----------
.. [1] X. Yang and Y. Zhang, "Prediction of high-entropy stabilized
solid-solution in multi-component alloys," Materials Chemistry and
Physics, vol. 132, no. 2--3, pp. 233--238, Feb. 2012.
.. [2] A. Takeuchi and A. Inoue, "Classification of Bulk Metallic Glasses
by Atomic Size Difference, Heat of Mixing and Period of Constituent
Elements and Its Application to Characterization of the Main Alloying
Element," MATERIALS TRANSACTIONS, vol. 46, no. 12, pp. 2817--2829, 2005.
.. [3] D. B. Miracle, D. V. Louzguine-Luzgin, L. V. Louzguina-Luzgina,
and A. Inoue, "An assessment of binary metallic glasses: correlations
between structure, glass forming ability and stability," International
Materials Reviews, vol. 55, no. 4, pp. 218--256, Jul. 2010.
"""
    def generate_features(self, entries):
        """Function to generate features as mentioned in the class description.

        Parameters
        ----------
        entries : array-like
            Compositions for which features are to be generated. A list of
            CompositionEntry's.

        Returns
        ----------
        features : DataFrame
            Features for the given entries. Pandas data frame containing the
            names and values of the descriptors.

        Raises
        ------
        ValueError
            If input is not of type list.
            If items in the list are not CompositionEntry instances.

        """

        # Initialize lists of feature values and headers for pandas data frame.
        feat_values = []
        feat_headers = []

        # Raise exception if input argument is not of type list of
        # CompositionEntry's.
        if not isinstance(entries, list):
            raise ValueError("Argument should be of type list of "
                             "CompositionEntry's")
        elif (entries and not isinstance(entries[0], CompositionEntry)):
            raise ValueError("Argument should be of type list of "
                             "CompositionEntry's")

        # Insert header names here.
        feat_headers.append("Yang_Omega")
        feat_headers.append("Yang_delta")

        # Load property values here.
        radii = LookUpData.load_property("MiracleRadius")
        meltingT = LookUpData.load_property("MeltingT")
        miedema = LookUpData.load_pair_property("MiedemaLiquidDeltaHf")

        for entry in entries:
            tmp_list = []
            tmp_radii = []
            tmp_meltingT = []
            elem_fracs = entry.get_element_fractions()
            elem_ids = entry.get_element_ids()
            for elem_id in elem_ids:
                tmp_radii.append(radii[elem_id])
                tmp_meltingT.append(meltingT[elem_id])

            # Compute the average melting point.
            averageTm = np.average(tmp_meltingT, weights=elem_fracs)

            # Compute the ideal entropy.
            entropy = 0.0
            for f in elem_fracs:
                entropy += f*math.log(f) if f > 0 else 0.0
            entropy *= 8.314/1000

            # Compute the enthalpy
            enthalpy = 0.0
            for i in range(len(elem_ids)):
                for j in range(i + 1, len(elem_ids)):
                    enthalpy += miedema[max(elem_ids[i], elem_ids[j])][min(
                        elem_ids[i], elem_ids[j])] * elem_fracs[i] * \
                        elem_fracs[j]
            enthalpy *= 4

            # Compute omega
            tmp_list.append(abs(averageTm * entropy / enthalpy))

            # Compute delta
            delta_squared = 0.0
            average_r = np.average(tmp_radii, weights=elem_fracs)
            for i in range(len(elem_ids)):
                delta_squared += elem_fracs[i] * (1 - tmp_radii[i] /
                                                  average_r)**2
            tmp_list.append(math.sqrt(delta_squared))

            feat_values.append(tmp_list)

        features = pd.DataFrame(feat_values, columns=feat_headers)
        return features
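

# Illustrative usage (a sketch, not part of the original file; it assumes
# CompositionEntry can be built from a composition string, e.g.
# CompositionEntry(composition="CoCrFeNiMn"), and that the Magpie lookup
# tables are available on disk):
#
#   entries = [CompositionEntry(composition="CoCrFeNiMn")]
#   features = YangOmegaAttributeGenerator().generate_features(entries)
#   print(features[["Yang_Omega", "Yang_delta"]])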
| 40.2 | 80 | 0.618974 | 3.078125 |
be3922d18c6120c11563e6101f16589df23b4c90 | 1,483 | ts | TypeScript | src/ts/tool/shapeStroke.ts | wjheesen/vector-art | d73729c1b0ee236fe9d45a879e5e83cb00e245a0 | ["MIT"] | null | null | null | src/ts/tool/shapeStroke.ts | wjheesen/vector-art | d73729c1b0ee236fe9d45a879e5e83cb00e245a0 | ["MIT"] | null | null | null | src/ts/tool/shapeStroke.ts | wjheesen/vector-art | d73729c1b0ee236fe9d45a879e5e83cb00e245a0 | ["MIT"] | null | null | null |
import { MouseOrTouchEvent } from '../event/mouseOrTouch';
import { Surface } from '../rendering/surface';
import { Status } from 'gl2d/event/status';
import { Point } from 'gl2d/struct/point';
import { MouseOrTouchTool } from 'gl2d/tool/mouseOrTouch';
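/**
 * Tool that lays down a freehand stroke of shapes: while the pointer is
 * dragged, it appends a line from the last recorded point to the current
 * pointer position whenever the pointer has moved farther than the line
 * thickness.
 */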
export class ShapeStrokeTool extends MouseOrTouchTool<Surface> {
    private previous: Point;

    onSurfaceEvent(event: MouseOrTouchEvent): void {
        switch (event.status) {
            case Status.Start:
                return this.onStart(event);
            case Status.Drag:
                return this.onDrag(event);
            case Status.End:
                return this.onEnd(event);
        }
    }

    onStart(event: MouseOrTouchEvent) {
        this.previous = this.getPrimaryPointer(event);
    }

    onDrag(event: MouseOrTouchEvent) {
        if (!this.previous) { return; }
        let surface = event.target;
        let stroke = surface.getTempShapeBatch();
        let thickness = surface.lineWidth;
        let current = this.getPrimaryPointer(event);
        let previous = this.previous;
        // Add line from current to previous shape if there is room
        if (current.distance2(previous) > thickness * thickness) {
            this.previous = stroke.addLine(previous, current, thickness);
            surface.requestRender();
        }
    }

    onEnd(event: MouseOrTouchEvent) {
        let surface = event.target;
        surface.addTempShapeBatch();
        surface.requestRender();
    }
}
} | 31.553191 | 73 | 0.626433 | 3.03125 |
944c14e902127faa07a787e5c3c04d319c6b159f | 1,111 | lua | Lua | WIP/Set/Negation.spec.lua | Novaly-Studios/TableUtil | c9ed2ed3b7f4a43947f6d539f0f1cff0774260ee | ["MIT"] | null | null | null | WIP/Set/Negation.spec.lua | Novaly-Studios/TableUtil | c9ed2ed3b7f4a43947f6d539f0f1cff0774260ee | ["MIT"] | null | null | null | WIP/Set/Negation.spec.lua | Novaly-Studios/TableUtil | c9ed2ed3b7f4a43947f6d539f0f1cff0774260ee | ["MIT"] | null | null | null |
return function()
    local Negation = require(script.Parent.Negation)
    local FromValues = require(script.Parent.FromValues)

    describe("Set/Negation", function()
        it("should return a blank set from two blank set inputs", function()
            local Result = Negation(FromValues( {} ), FromValues( {} ))
            expect(next(Result)).never.to.be.ok()
        end)

        it("should remove the latter from the former with one item", function()
            local Result = Negation(FromValues( {1} ), FromValues( {1} ))
            expect(next(Result)).never.to.be.ok()
        end)

        it("should remove the latter from the former with multiple items", function()
            local Result = Negation(FromValues( {1, 4, 8} ), FromValues( {4, 8, 1} ))
            expect(next(Result)).never.to.be.ok()
        end)

        it("should remove the latter from the former with multiple items and leave non-negated present", function()
            local Result = Negation(FromValues( {1, 4, 8, 2} ), FromValues( {4, 8, 1} ))
            expect(Result[2]).to.equal(true)
        end)
    end)
end
 | 42.730769 | 115 | 0.605761 | 3.1875 |
05cb604c87fea6779300d4d37db8a6a66707877e | 2,908 | py | Python | JPS_Chatbot/jps-chatbot/UI/source/Wikidata_Query/fuzzysearch_wiki.py | cambridge-cares/TheWorldAvatar | baf08ddc090414c6d01e48c74b408f2192461e9e | ["MIT"] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | JPS_Chatbot/jps-chatbot/UI/source/Wikidata_Query/fuzzysearch_wiki.py | cambridge-cares/TheWorldAvatar | baf08ddc090414c6d01e48c74b408f2192461e9e | ["MIT"] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | JPS_Chatbot/jps-chatbot/UI/source/Wikidata_Query/fuzzysearch_wiki.py | cambridge-cares/TheWorldAvatar | baf08ddc090414c6d01e48c74b408f2192461e9e | ["MIT"] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z |
from rapidfuzz import process, fuzz
from .load_dicts import FORMULA_URI_DICT, SMILES_URI_DICT, NAME_URI_DICT, \
    FORMULA_KEYS, NAME_KEYS, SMILES_KEYS, ATTRIBUTE_URI_DICT, \
    ATTRIBUTE_KEYS, CLASS_URI_DICT, CLASS_KEYS, process_species, process_species_reversed


def find_nearest_match(entity_value, entity_type):
    # rst = URI, score, candidate
    if entity_type == 'attribute':
        rst = find_nearest_match_in_attributes(entity_value)
    elif entity_type == 'class':
        rst = find_nearest_match_classes(entity_value)
    elif entity_type == 'species':
        rst = find_nearest_match_species(entity_value)
    else:
        # Fail fast instead of hitting an UnboundLocalError on `rst` below.
        raise ValueError('Unknown entity type: %s' % entity_type)
    URI = [u.replace('http://www.wikidata.org/entity/', '') for u in rst[0]]
    print('find_nearest_match - 16', URI)
    score = rst[1]
    candidate = rst[2]
    return URI, candidate


def find_nearest_match_classes(_class):
    _class = _class.upper()
    KEYS = CLASS_KEYS
    DICT = CLASS_URI_DICT
    if _class not in DICT:
        rst = process.extractOne(_class, KEYS, scorer=fuzz.ratio)
        candidate = rst[0]
        score = rst[1]
        URI = DICT[candidate]
    else:
        score = 100
        candidate = _class
        URI = DICT[candidate]
    return URI, score, candidate


def find_nearest_match_species(species):
    species = process_species(species)
    KEYS_LIST = [FORMULA_KEYS, SMILES_KEYS, NAME_KEYS]
    DICT_LIST = [FORMULA_URI_DICT, SMILES_URI_DICT, NAME_URI_DICT]
    LABELS = ['FORMULA', 'SMILE', 'NAME']
    highest_score = 0
    best_uri = []
    best_label = ''
    best_candidate = ''
    for KEYS, DICTS, LABEL in zip(KEYS_LIST, DICT_LIST, LABELS):
        rst = find_nearest_match_in_one_species(species, KEYS, DICTS)
        URIS = rst[0]
        score = rst[1]
        candidate = rst[2]
        if score > highest_score:
            best_uri = URIS
            best_label = LABEL
            highest_score = score
            best_candidate = candidate
    return best_uri, highest_score, process_species_reversed(best_candidate), best_label


def find_nearest_match_in_one_species(species, KEYS, DICT):
    if species not in DICT:
        rst = process.extractOne(species, KEYS, scorer=fuzz.ratio)
        candidate = process_species_reversed(rst[0])
        score = rst[1]
        URI = DICT[candidate]
    else:
        score = 100
        candidate = process_species_reversed(species)
        URI = DICT[species]
    return URI, score, candidate


# it is exactly like the one for species
def find_nearest_match_in_attributes(attribute):
    attribute = attribute.upper()
    KEYS = ATTRIBUTE_KEYS
    DICT = ATTRIBUTE_URI_DICT
    if attribute not in DICT:
        rst = process.extractOne(attribute, KEYS, scorer=fuzz.ratio)
        candidate = rst[0]
        score = rst[1]
        URI = DICT[candidate]
    else:
        score = 100
        candidate = attribute
        URI = DICT[candidate]
    return URI, score, candidate
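

# Illustrative usage (a sketch, not part of the original file; it assumes the
# dictionaries loaded by load_dicts contain an entry close to the query below):
#
#   uris, candidate = find_nearest_match('benzene', 'species')
#   print(uris, candidate)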
| 33.425287 | 89 | 0.666437 | 3.171875 |
8dab17afdff53a31a1b8a809dd2bc03669e409b0 | 1,620 | js | JavaScript | test/specs/Format_Spec.js | isuttell/Amber | 98b88a932b4845ae9550a50da10c9e32a743ad60 | ["MIT"] | 1 | 2018-12-13T08:08:00.000Z | 2018-12-13T08:08:00.000Z | test/specs/Format_Spec.js | isuttell/Amber | 98b88a932b4845ae9550a50da10c9e32a743ad60 | ["MIT"] | null | null | null | test/specs/Format_Spec.js | isuttell/Amber | 98b88a932b4845ae9550a50da10c9e32a743ad60 | ["MIT"] | null | null | null |
describe("Amber.Format", function() {
it("should be defined", function() {
expect(Amber.Format).toBeDefined();
});
describe('numberWithCommas', function(){
it("should have a function to add commas to large numbers", function() {
expect(Amber.Format.numberWithCommas).toBeDefined();
expect(Amber.Format.numberWithCommas(1000)).toBe('1,000');
expect(Amber.Format.numberWithCommas(1000.5)).toBe('1,000.5');
expect(Amber.Format.numberWithCommas(500)).toBe('500');
expect(Amber.Format.numberWithCommas(500.1)).toBe('500.1');
expect(Amber.Format.numberWithCommas(-500)).toBe('-500');
expect(Amber.Format.numberWithCommas(1000000)).toBe('1,000,000');
});
it("should return the num if its not a number", function() {
expect(Amber.Format.numberWithCommas(false)).toBe(false);
});
});
describe('stripTrailingZero', function(){
it("should have a function to strip trailing zeros", function() {
expect(Amber.Format.stripTrailingZero).toBeDefined();
var number = '5.0',
result = Amber.Format.stripTrailingZero(number);
expect(result).toBe('5');
});
it("should return the num if its not a number", function() {
expect(Amber.Format.stripTrailingZero(false)).toBe(false);
});
});
describe('basicPluralize', function(){
it("should have a function to do basic plurization", function() {
expect(Amber.Format.basicPluralize).toBeDefined();
var word = 'number',
result = Amber.Format.basicPluralize('number', 2);
expect(result).toBe('numbers');
result = Amber.Format.basicPluralize('number', 1);
expect(result).toBe(word);
});
});
});
| 27.931034 | 74 | 0.687037 | 3.03125 |
0d45e35e24e6350affa5974bb06d5ece05c0ff4d | 2,732 | cs | C# | Travelling Salesman Problem/Graph.cs | JR-Morgan/Travelling-Salesman-Problem | 99389190e7bb29af05904f1296c236eee4289704 | ["MIT"] | 4 | 2021-02-27T01:10:59.000Z | 2022-03-23T00:29:15.000Z | Travelling Salesman Problem/Graph.cs | JR-Morgan/Travelling-Salesman-Problem | 99389190e7bb29af05904f1296c236eee4289704 | ["MIT"] | null | null | null | Travelling Salesman Problem/Graph.cs | JR-Morgan/Travelling-Salesman-Problem | 99389190e7bb29af05904f1296c236eee4289704 | ["MIT"] | null | null | null |
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Numerics;
using System.Text.RegularExpressions;
namespace TSP
{
/// <summary>
/// This class encapsulates a graph of nodes
/// </summary>
public class Graph
{
public readonly List<Node> nodes;
public Node StartNode => nodes.First();
public int NodesCount => nodes.Count;
public Graph(List<Node> nodes)
{
this.nodes = nodes;
}
/// <summary>
/// Parses a <see cref="Graph"/> from <paramref name="file"/> in a CSV format<br/>
/// where column 1 is the Node id, column 2 is the X position, and column 3 is the Y position
/// </summary>
/// <example>
/// Example of a correct CSV line is
/// <code>1,38.24,20.42</code>
/// </example>
/// <param name="file">The file path of the file</param>
/// <returns>A Graph</returns>
public static Graph ParseGraphFromFile(string file)
{
var nodes = new List<Node>();
StreamReader reader = File.OpenText(file);
string? line;
var pattern = @"^\d+,-?\d+\.\d*,-?\d+\.\d*$";
while ((line = reader.ReadLine()) != null)
{
if (Regex.IsMatch(line, pattern))
{
string[] elements = line.Split(",");
nodes.Add(new Node(
id: int.Parse(elements[0]),
position: new Vector2(float.Parse(elements[1]),
float.Parse(elements[2]))
));
}
}
return new Graph(nodes);
}
/// <summary>
/// Generates and returns a random <see cref="Graph"/>
/// </summary>
/// <param name="numberOfNodes">The number of <see cref="Node"/>s to be generated</param>
/// <param name="bounds">The upper bounds for the <see cref="Node"/>s positions</param>
/// <param name="seed">The random seed</param>
/// <returns>The generated <see cref="Graph"/></returns>
public static Graph RandomGraph(uint numberOfNodes, Vector2 bounds, int seed)
{
Random random = new Random(seed);
var nodes = new List<Node>();
for(int i=0; i < numberOfNodes; i++)
{
nodes.Add(new Node(
id: i,
position: new Vector2((float)(random.NextDouble() * bounds.X), (float)(random.NextDouble() * bounds.Y))
));
}
return new Graph(nodes);
}
}
}
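
// Illustrative usage (a sketch, not part of the original file):
//
//   Graph graph = Graph.RandomGraph(50, new Vector2(100f, 100f), seed: 42);
//   Console.WriteLine(graph.NodesCount);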
| 33.728395 | 127 | 0.499268 | 3.265625 |
46cefff5ed61863f93b98821cfc2c966db8b160e | 1,754 | py | Python | tests/test_radio_command.py | notfoundsam/smart-remote | 0c23fbb9b92ec1c8e7fbbf6d4613ae4b955eada8 | ["Unlicense"] | null | null | null | tests/test_radio_command.py | notfoundsam/smart-remote | 0c23fbb9b92ec1c8e7fbbf6d4613ae4b955eada8 | ["Unlicense"] | 14 | 2018-06-06T14:54:18.000Z | 2018-07-29T02:44:07.000Z | tests/test_radio_command.py | notfoundsam/smart-remote | 0c23fbb9b92ec1c8e7fbbf6d4613ae4b955eada8 | ["Unlicense"] | null | null | null |
import serial
import time
import array
command = "status"
# radio_pipe = 'AABBCCDD33'
radio_pipe = 'AABBCCDD44'
success = 0
fail = 0
error = 0
ser = serial.Serial()
ser.baudrate = 500000
ser.port = '/dev/ttyUSB0'
ser.timeout = 10
ser.open()
# Only needed right after uploading the sketch to the Arduino
# print(repr(ser.readline()))
time.sleep(2)
ser.flushInput()
ser.flushOutput()
signal = '%sc%s\n' % (radio_pipe, command)
print(signal)
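# The command is sent in 32-character chunks; the receiver is expected to
# acknowledge each chunk with "next" (see the loop below).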
n = 32
partial_signal = [signal[i:i+n] for i in range(0, len(signal), n)]
try:
    while True:
        ser.flushInput()
        ser.flushOutput()
        print "-----------------"

        response_in = ""
        for part in partial_signal:
            b_arr = bytearray(part)
            ser.write(b_arr)
            ser.flush()
            response_in = ser.readline()
            # Anything other than the chunk acknowledgement ends the transfer early.
            if response_in.rstrip() != 'next':
                break
            response_in = ""

        # All chunks were acknowledged, so the final status line is still pending.
        if response_in == "":
            response_in = ser.readline()

        response = response_in.rstrip()
        data = response.split(':')
        print(repr(response_in))

        if data[1] == 'FAIL':
            fail += 1
            time.sleep(0.5)
        elif data[1] == 'OK':
            success += 1
        else:
            error += 1
            print(repr(response_in))

        print "Success: %d Fail: %d Error: %d" % (success, fail, error)

        if data[0]:
            print(data[0])
            # sensors_data = dict(s.split(' ') for s in data[0].split(','))
            # if 'bat' in sensors_data:
            #     bat = float(sensors_data['bat'])
            #     print(bat)

        time.sleep(0.4)

except KeyboardInterrupt:
    ser.flushInput()
    ser.flushOutput()
    ser.close()
    print("QUIT")
| 20.880952 | 75 | 0.526226 | 3.390625 |